Commit 5076c15862644edb91d2e3436b2fa3e07b28385d

Authored by Helge Deller
Committed by Kyle McMartin
1 parent 94c3e87a79

[PARISC] I/O-Space must be ioremap_nocache()'d

Addresses in F-space must be accessed uncached on most parisc machines.
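
The recurring change across the touched files is the same: registers living in F-space are now mapped with ioremap_nocache() instead of plain ioremap(), so that MMIO accesses bypass the data cache (see the perf.c hunk below). A minimal sketch of the pattern, with a hypothetical base address and size that are not taken from this commit:

#include <linux/io.h>
#include <linux/errno.h>

/* Illustrative fragment only; `base' and `size' are made-up parameters. */
static int fspace_map_example(unsigned long base, unsigned long size)
{
	void __iomem *regs;

	/* A plain ioremap() may hand back a cacheable mapping, which is
	 * unsafe for F-space device registers on most parisc machines. */
	regs = ioremap_nocache(base, size);
	if (!regs)
		return -ENOMEM;

	__raw_writel(0, regs);	/* raw MMIO write through the uncached mapping */
	iounmap(regs);
	return 0;
}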

Signed-off-by: Helge Deller <deller@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>

Showing 13 changed files with 48 additions and 54 deletions

arch/parisc/kernel/perf.c
/*
 *  Parisc performance counters
 *  Copyright (C) 2001 Randolph Chung <tausq@debian.org>
 *
 *  This code is derived, with permission, from HP/UX sources.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 *  Edited comment from original sources:
 *
 *  This driver programs the PCX-U/PCX-W performance counters
 *  on the PA-RISC 2.0 chips.  The driver keeps all images now
 *  internally to the kernel to hopefully eliminate the possibility
 *  of a bad image halting the CPU.  Also, there are different
 *  images for the PCX-W and later chips vs the PCX-U chips.
 *
 *  Only 1 process is allowed to access the driver at any time,
 *  so the only protection that is needed is at open and close.
 *  A variable "perf_enabled" is used to hold the state of the
 *  driver.  The spinlock "perf_lock" is used to protect the
 *  modification of the state during open/close operations so
 *  multiple processes don't get into the driver simultaneously.
 *
 *  This driver accesses the processor directly vs going through
 *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
 *  in various PDC revisions.  The code is much more maintainable
 *  and reliable this way vs having to debug on every version of PDC
 *  on every box.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/perf.h>
#include <asm/parisc-device.h>
#include <asm/processor.h>
#include <asm/runway.h>
#include <asm/io.h>	/* for __raw_read() */

#include "perf_images.h"

#define MAX_RDR_WORDS	24
#define PERF_VERSION	2	/* derived from hpux's PI v2 interface */

/* definition of RDR regs */
struct rdr_tbl_ent {
	uint16_t	width;
	uint8_t		num_words;
	uint8_t		write_control;
};

static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
static spinlock_t perf_lock;
struct parisc_device *cpu_device __read_mostly;

/* RDRs to write for PCX-W */
static const int perf_rdrs_W[] =
	{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };

/* RDRs to write for PCX-U */
static const int perf_rdrs_U[] =
	{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };

/* RDR register descriptions for PCX-W */
static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
	{ 19,	1,	8 },	/* RDR 0 */
	{ 16,	1,	16 },	/* RDR 1 */
	{ 72,	2,	0 },	/* RDR 2 */
	{ 81,	2,	0 },	/* RDR 3 */
	{ 328,	6,	0 },	/* RDR 4 */
	{ 160,	3,	0 },	/* RDR 5 */
	{ 336,	6,	0 },	/* RDR 6 */
	{ 164,	3,	0 },	/* RDR 7 */
	{ 0,	0,	0 },	/* RDR 8 */
	{ 35,	1,	0 },	/* RDR 9 */
	{ 6,	1,	0 },	/* RDR 10 */
	{ 18,	1,	0 },	/* RDR 11 */
	{ 13,	1,	0 },	/* RDR 12 */
	{ 8,	1,	0 },	/* RDR 13 */
	{ 8,	1,	0 },	/* RDR 14 */
	{ 8,	1,	0 },	/* RDR 15 */
	{ 1530,	24,	0 },	/* RDR 16 */
	{ 16,	1,	0 },	/* RDR 17 */
	{ 4,	1,	0 },	/* RDR 18 */
	{ 0,	0,	0 },	/* RDR 19 */
	{ 152,	3,	24 },	/* RDR 20 */
	{ 152,	3,	24 },	/* RDR 21 */
	{ 233,	4,	48 },	/* RDR 22 */
	{ 233,	4,	48 },	/* RDR 23 */
	{ 71,	2,	0 },	/* RDR 24 */
	{ 71,	2,	0 },	/* RDR 25 */
	{ 11,	1,	0 },	/* RDR 26 */
	{ 18,	1,	0 },	/* RDR 27 */
	{ 128,	2,	0 },	/* RDR 28 */
	{ 0,	0,	0 },	/* RDR 29 */
	{ 16,	1,	0 },	/* RDR 30 */
	{ 16,	1,	0 },	/* RDR 31 */
};

/* RDR register descriptions for PCX-U */
static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
	{ 19,	1,	8 },	/* RDR 0 */
	{ 32,	1,	16 },	/* RDR 1 */
	{ 20,	1,	0 },	/* RDR 2 */
	{ 0,	0,	0 },	/* RDR 3 */
	{ 344,	6,	0 },	/* RDR 4 */
	{ 176,	3,	0 },	/* RDR 5 */
	{ 336,	6,	0 },	/* RDR 6 */
	{ 0,	0,	0 },	/* RDR 7 */
	{ 0,	0,	0 },	/* RDR 8 */
	{ 0,	0,	0 },	/* RDR 9 */
	{ 28,	1,	0 },	/* RDR 10 */
	{ 33,	1,	0 },	/* RDR 11 */
	{ 0,	0,	0 },	/* RDR 12 */
	{ 230,	4,	0 },	/* RDR 13 */
	{ 32,	1,	0 },	/* RDR 14 */
	{ 128,	2,	0 },	/* RDR 15 */
	{ 1494,	24,	0 },	/* RDR 16 */
	{ 18,	1,	0 },	/* RDR 17 */
	{ 4,	1,	0 },	/* RDR 18 */
	{ 0,	0,	0 },	/* RDR 19 */
	{ 158,	3,	24 },	/* RDR 20 */
	{ 158,	3,	24 },	/* RDR 21 */
	{ 194,	4,	48 },	/* RDR 22 */
	{ 194,	4,	48 },	/* RDR 23 */
	{ 71,	2,	0 },	/* RDR 24 */
	{ 71,	2,	0 },	/* RDR 25 */
	{ 28,	1,	0 },	/* RDR 26 */
	{ 33,	1,	0 },	/* RDR 27 */
	{ 88,	2,	0 },	/* RDR 28 */
	{ 32,	1,	0 },	/* RDR 29 */
	{ 24,	1,	0 },	/* RDR 30 */
	{ 16,	1,	0 },	/* RDR 31 */
};

/*
 * A non-zero write_control in the above tables is a byte offset into
 * this array.
 */
static const uint64_t perf_bitmasks[] = {
	0x0000000000000000ul,	/* first dbl word must be zero */
	0xfdffe00000000000ul,	/* RDR0 bitmask */
	0x003f000000000000ul,	/* RDR1 bitmask */
	0x00fffffffffffffful,	/* RDR20-RDR21 bitmask (152 bits) */
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,	/* RDR22-RDR23 bitmask (233 bits) */
	0xfffffffffffffffful,
	0xfffffffffffffffcul,
	0xff00000000000000ul
};

/*
 * Write control bitmasks for Pa-8700 processor given
 * some things have changed slightly.
 */
static const uint64_t perf_bitmasks_piranha[] = {
	0x0000000000000000ul,	/* first dbl word must be zero */
	0xfdffe00000000000ul,	/* RDR0 bitmask */
	0x003f000000000000ul,	/* RDR1 bitmask */
	0x00fffffffffffffful,	/* RDR20-RDR21 bitmask (158 bits) */
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,	/* RDR22-RDR23 bitmask (210 bits) */
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffc000000000000ul
};

static const uint64_t *bitmask_array;	/* array of bitmasks to use */

/******************************************************************************
 * Function Prototypes
 *****************************************************************************/
static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
	loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
static int perf_rdr_clear(uint32_t rdr_num);
static int perf_write_image(uint64_t *memaddr);
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);

/* External Assembly Routines */
extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
extern void perf_intrigue_enable_perf_counters (void);
extern void perf_intrigue_disable_perf_counters (void);

/******************************************************************************
 * Function Definitions
 *****************************************************************************/


/*
 * configure:
 *
 * Configure the cpu with a given data image.  First turn off the counters,
 * then download the image, then turn the counters back on.
 */
static int perf_config(uint32_t *image_ptr)
{
	long error;
	uint32_t raddr[4];

	/* Stop the counters */
	error = perf_stop_counters(raddr);
	if (error != 0) {
		printk("perf_config: perf_stop_counters = %ld\n", error);
		return -EINVAL;
	}

	printk("Preparing to write image\n");
	/* Write the image to the chip */
	error = perf_write_image((uint64_t *)image_ptr);
	if (error != 0) {
		printk("perf_config: DOWNLOAD = %ld\n", error);
		return -EINVAL;
	}

	printk("Preparing to start counters\n");

	/* Start the counters */
	perf_start_counters();

	return sizeof(uint32_t);
}

/*
 * Open the device and initialize all of its memory.  The device is only
 * opened once, but can be "queried" by multiple processes that know its
 * file descriptor.
 */
static int perf_open(struct inode *inode, struct file *file)
{
	spin_lock(&perf_lock);
	if (perf_enabled) {
		spin_unlock(&perf_lock);
		return -EBUSY;
	}
	perf_enabled = 1;
	spin_unlock(&perf_lock);

	return 0;
}

/*
 * Close the device.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	spin_lock(&perf_lock);
	perf_enabled = 0;
	spin_unlock(&perf_lock);

	return 0;
}

/*
 * Read does nothing for this driver
 */
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
	return 0;
}

/*
 * write:
 *
 * This routine downloads the image to the chip.  It must be
 * called on the processor that the download should happen
 * on.
 */
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
	loff_t *ppos)
{
	int err;
	size_t image_size;
	uint32_t image_type;
	uint32_t interface_type;
	uint32_t test;

	if (perf_processor_interface == ONYX_INTF)
		image_size = PCXU_IMAGE_SIZE;
	else if (perf_processor_interface == CUDA_INTF)
		image_size = PCXW_IMAGE_SIZE;
	else
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (count != sizeof(uint32_t))
		return -EIO;

	if ((err = copy_from_user(&image_type, buf, sizeof(uint32_t))) != 0)
		return err;

	/* Get the interface type and test type */
	interface_type = (image_type >> 16) & 0xffff;
	test = (image_type & 0xffff);

	/* Make sure everything makes sense */

	/* First check the machine type is correct for
	   the requested image */
	if (((perf_processor_interface == CUDA_INTF) &&
	     (interface_type != CUDA_INTF)) ||
	    ((perf_processor_interface == ONYX_INTF) &&
	     (interface_type != ONYX_INTF)))
		return -EINVAL;

	/* Next check to make sure the requested image
	   is valid */
	if (((interface_type == CUDA_INTF) &&
	     (test >= MAX_CUDA_IMAGES)) ||
	    ((interface_type == ONYX_INTF) &&
	     (test >= MAX_ONYX_IMAGES)))
		return -EINVAL;

	/* Copy the image into the processor */
	if (interface_type == CUDA_INTF)
		return perf_config(cuda_images[test]);
	else
		return perf_config(onyx_images[test]);

	return count;
}

/*
 * Patch the images that need to know the IVA addresses.
 */
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
/*
 * NOTE: this routine is VERY specific to the current TLB image.
 * If the image is changed, this routine might also need to be changed.
 */
	extern void $i_itlb_miss_2_0();
	extern void $i_dtlb_miss_2_0();
	extern void PA2_0_iva();

	/*
	 * We can only use the lower 32-bits, the upper 32-bits should be 0
	 * anyway given this is in the kernel
	 */
	uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
	uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
	uint32_t IVAaddress = (uint32_t)&PA2_0_iva;

	if (perf_processor_interface == ONYX_INTF) {
		/* clear last 2 bytes */
		onyx_images[TLBMISS][15] &= 0xffffff00;
		/* set 2 bytes */
		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBMISS][17] = itlb_addr;

		/* clear last 2 bytes */
		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
		/* set 2 bytes */
		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBHANDMISS][17] = itlb_addr;

		/* clear last 2 bytes */
		onyx_images[BIG_CPI][15] &= 0xffffff00;
		/* set 2 bytes */
		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[BIG_CPI][17] = itlb_addr;

		onyx_images[PANIC][15] &= 0xffffff00;	/* clear last 2 bytes */
		onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24));	/* set 2 bytes */
		onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;


	} else if (perf_processor_interface == CUDA_INTF) {
		/* Cuda interface */
		cuda_images[TLBMISS][16] =
			(cuda_images[TLBMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[TLBHANDMISS][16] =
			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBHANDMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[BIG_CPI][16] =
			(cuda_images[BIG_CPI][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[BIG_CPI][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
	} else {
		/* Unknown type */
	}
#endif
}

/*
 * ioctl routine
 * All routines affect the processor that they are executed on.  Thus you
 * must be running on the processor that you wish to change.
 */

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long error_start;
	uint32_t raddr[4];
	int error = 0;

	switch (cmd) {

	case PA_PERF_ON:
		/* Start the counters */
		perf_start_counters();
		break;

	case PA_PERF_OFF:
		error_start = perf_stop_counters(raddr);
		if (error_start != 0) {
			printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
			error = -EFAULT;
			break;
		}

		/* copy out the Counters */
		if (copy_to_user((void __user *)arg, raddr,
				sizeof (raddr)) != 0) {
			error = -EFAULT;
			break;
		}
		break;

	case PA_PERF_VERSION:
		/* Return the version # */
		error = put_user(PERF_VERSION, (int *)arg);
		break;

	default:
		error = -ENOTTY;
	}

	return error;
}

static struct file_operations perf_fops = {
	.llseek = no_llseek,
	.read = perf_read,
	.write = perf_write,
	.unlocked_ioctl = perf_ioctl,
	.compat_ioctl = perf_ioctl,
	.open = perf_open,
	.release = perf_release
};

static struct miscdevice perf_dev = {
	MISC_DYNAMIC_MINOR,
	PA_PERF_DEV,
	&perf_fops
};

/*
 * Initialize the module
 */
static int __init perf_init(void)
{
	int ret;

	/* Determine correct processor interface to use */
	bitmask_array = perf_bitmasks;

	if (boot_cpu_data.cpu_type == pcxu ||
	    boot_cpu_data.cpu_type == pcxu_) {
		perf_processor_interface = ONYX_INTF;
	} else if (boot_cpu_data.cpu_type == pcxw ||
		   boot_cpu_data.cpu_type == pcxw_ ||
		   boot_cpu_data.cpu_type == pcxw2 ||
		   boot_cpu_data.cpu_type == mako) {
		perf_processor_interface = CUDA_INTF;
		if (boot_cpu_data.cpu_type == pcxw2 ||
		    boot_cpu_data.cpu_type == mako)
			bitmask_array = perf_bitmasks_piranha;
	} else {
		perf_processor_interface = UNKNOWN_INTF;
		printk("Performance monitoring counters not supported on this processor\n");
		return -ENODEV;
	}

	ret = misc_register(&perf_dev);
	if (ret) {
		printk(KERN_ERR "Performance monitoring counters: "
			"cannot register misc device.\n");
		return ret;
	}

	/* Patch the images to match the system */
	perf_patch_images();

	spin_lock_init(&perf_lock);

	/* TODO: this only lets us access the first cpu.. what to do for SMP? */
	cpu_device = cpu_data[0].dev;
	printk("Performance monitoring counters enabled for %s\n",
		cpu_data[0].dev->name);

	return 0;
}

/*
 * perf_start_counters(void)
 *
 * Start the counters.
 */
static void perf_start_counters(void)
{
	/* Enable performance monitor counters */
	perf_intrigue_enable_perf_counters();
}

/*
 * perf_stop_counters
 *
 * Stop the performance counters and save counts
 * in a per_processor array.
 */
static int perf_stop_counters(uint32_t *raddr)
{
	uint64_t userbuf[MAX_RDR_WORDS];

	/* Disable performance counters */
	perf_intrigue_disable_perf_counters();

	if (perf_processor_interface == ONYX_INTF) {
		uint64_t tmp64;
		/*
		 * Read the counters
		 */
		if (!perf_rdr_read_ubuf(16, userbuf))
			return -13;

		/* Counter0 is bits 1398 thru 1429 */
		tmp64 = (userbuf[21] << 22) & 0x00000000ffc00000;
		tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
		/* OR sticky0 (bit 1430) to counter0 bit 32 */
		tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
		raddr[0] = (uint32_t)tmp64;

		/* Counter1 is bits 1431 thru 1462 */
		tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
		/* OR sticky1 (bit 1463) to counter1 bit 32 */
		tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
		raddr[1] = (uint32_t)tmp64;

		/* Counter2 is bits 1464 thru 1495 */
		tmp64 = (userbuf[22] << 24) & 0x00000000ff000000;
		tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
		/* OR sticky2 (bit 1496) to counter2 bit 32 */
		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
		raddr[2] = (uint32_t)tmp64;

		/* Counter3 is bits 1497 thru 1528 */
		tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
		/* OR sticky3 (bit 1529) to counter3 bit 32 */
		tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
		raddr[3] = (uint32_t)tmp64;

		/*
		 * Zero out the counters
		 */

		/*
		 * The counters and sticky-bits comprise the last 132 bits
		 * (1398 - 1529) of RDR16 on a U chip.  We'll zero these
		 * out the easy way: zero out last 10 bits of dword 21,
		 * all of dword 22 and 58 bits (plus 6 don't care bits) of
		 * dword 23.
		 */
		userbuf[21] &= 0xfffffffffffffc00ul;	/* 0 to last 10 bits */
		userbuf[22] = 0;
		userbuf[23] = 0;

		/*
		 * Write back the zeroed bytes + the image given
		 * the read was destructive.
		 */
		perf_rdr_write(16, userbuf);
	} else {

		/*
		 * Read RDR-15 which contains the counters and sticky bits
		 */
		if (!perf_rdr_read_ubuf(15, userbuf)) {
			return -13;
		}

		/*
		 * Clear out the counters
		 */
		perf_rdr_clear(15);

		/*
		 * Copy the counters
		 */
		raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
		raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
	}

	return 0;
}

/*
 * perf_rdr_get_entry
 *
 * Retrieve a pointer to the description of what this
 * RDR contains.
 */
static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
{
	if (perf_processor_interface == ONYX_INTF) {
		return &perf_rdr_tbl_U[rdr_num];
	} else {
		return &perf_rdr_tbl_W[rdr_num];
	}
}

/*
 * perf_rdr_read_ubuf
 *
 * Read the RDR value into the buffer specified.
 */
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
{
	uint64_t data, data_mask = 0;
	uint32_t width, xbits, i;
	const struct rdr_tbl_ent *tentry;

	tentry = perf_rdr_get_entry(rdr_num);
	if ((width = tentry->width) == 0)
		return 0;

	/* Clear out buffer */
	i = tentry->num_words;
	while (i--) {
		buffer[i] = 0;
	}

	/* Check for bits an even number of 64 */
	if ((xbits = width & 0x03f) != 0) {
		data_mask = 1;
		data_mask <<= (64 - xbits);
		data_mask--;
	}

	/* Grab all of the data */
	i = tentry->num_words;
	while (i--) {

		if (perf_processor_interface == ONYX_INTF) {
			data = perf_rdr_shift_in_U(rdr_num, width);
		} else {
			data = perf_rdr_shift_in_W(rdr_num, width);
		}
		if (xbits) {
			buffer[i] |= (data << (64 - xbits));
			if (i) {
				buffer[i-1] |= ((data >> xbits) & data_mask);
			}
		} else {
			buffer[i] = data;
		}
	}

	return 1;
}

/*
 * perf_rdr_clear
 *
 * Zero out the given RDR register
 */
static int perf_rdr_clear(uint32_t rdr_num)
{
	const struct rdr_tbl_ent *tentry;
	int32_t i;

	tentry = perf_rdr_get_entry(rdr_num);

	if (tentry->width == 0) {
		return -1;
	}

	i = tentry->num_words;
	while (i--) {
		if (perf_processor_interface == ONYX_INTF) {
			perf_rdr_shift_out_U(rdr_num, 0UL);
		} else {
			perf_rdr_shift_out_W(rdr_num, 0UL);
		}
	}

	return 0;
}


/*
 * perf_write_image
 *
 * Write the given image out to the processor
 */
static int perf_write_image(uint64_t *memaddr)
{
	uint64_t buffer[MAX_RDR_WORDS];
	uint64_t *bptr;
	uint32_t dwords;
	const uint32_t *intrigue_rdr;
	const uint64_t *intrigue_bitmask;
	uint64_t tmp64;
	void __iomem *runway;
	const struct rdr_tbl_ent *tentry;
	int i;

	/* Clear out counters */
	if (perf_processor_interface == ONYX_INTF) {

		perf_rdr_clear(16);

		/* Toggle performance monitor */
		perf_intrigue_enable_perf_counters();
		perf_intrigue_disable_perf_counters();

		intrigue_rdr = perf_rdrs_U;
	} else {
		perf_rdr_clear(15);
		intrigue_rdr = perf_rdrs_W;
	}

	/* Write all RDRs */
	while (*intrigue_rdr != -1) {
		tentry = perf_rdr_get_entry(*intrigue_rdr);
		perf_rdr_read_ubuf(*intrigue_rdr, buffer);
		bptr = &buffer[0];
		dwords = tentry->num_words;
		if (tentry->write_control) {
			intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
			while (dwords--) {
				tmp64 = *intrigue_bitmask & *memaddr++;
				tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
				*bptr++ = tmp64;
			}
		} else {
			while (dwords--) {
				*bptr++ = *memaddr++;
			}
		}

		perf_rdr_write(*intrigue_rdr, buffer);
		intrigue_rdr++;
	}

	/*
	 * Now copy out the Runway stuff which is not in RDRs
	 */

	if (cpu_device == NULL)
	{
		printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
		return -1;
	}

-	runway = ioremap(cpu_device->hpa.start, 4096);
+	runway = ioremap_nocache(cpu_device->hpa.start, 4096);

	/* Merge intrigue bits into Runway STATUS 0 */
	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
		     runway + RUNWAY_STATUS);

	/* Write RUNWAY DEBUG registers */
	for (i = 0; i < 8; i++) {
		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
	}

	return 0;
}

/*
 * perf_rdr_write
 *
 * Write the given RDR register with the contents
 * of the given buffer.
 */
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
{
	const struct rdr_tbl_ent *tentry;
	int32_t i;

	printk("perf_rdr_write\n");
	tentry = perf_rdr_get_entry(rdr_num);
	if (tentry->width == 0) { return; }

	i = tentry->num_words;
	while (i--) {
		if (perf_processor_interface == ONYX_INTF) {
			perf_rdr_shift_out_U(rdr_num, buffer[i]);
		} else {
			perf_rdr_shift_out_W(rdr_num, buffer[i]);
		}
	}
	printk("perf_rdr_write done\n");
}

module_init(perf_init);

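Aside from context, the only functional change in perf.c is the ioremap_nocache() hunk in perf_write_image() above. The read-modify-write it feeds on Runway STATUS 0 is easy to misread, so here is the mask arithmetic in isolation; the masks are copied from the code above, and everything else (names, sample values) is hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Masks from perf_write_image().  They are complementary in byte 6:
	 * keep clears bits 48, 49 and 52; merge selects exactly those bits. */
	const uint64_t keep  = 0xffecffffffffffffull;
	const uint64_t merge = 0x0013000000000000ull;

	uint64_t status = 0xffffffffffffffffull;	/* pretend register readback */
	uint64_t image  = 0x0010000000000000ull;	/* pretend image word */

	/* Same shape as the __raw_readq()/__raw_writeq() pair in the driver. */
	uint64_t result = (status & keep) | (image & merge);

	printf("result = %016llx\n", (unsigned long long)result);	/* fffcffffffffffff */
	return 0;
}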
drivers/input/serio/gscps2.c
/*
 * drivers/input/serio/gscps2.c
 *
- * Copyright (c) 2004 Helge Deller <deller@gmx.de>
+ * Copyright (c) 2004-2006 Helge Deller <deller@gmx.de>
 * Copyright (c) 2002 Laurent Canet <canetl@esiee.fr>
 * Copyright (c) 2002 Thibaut Varene <varenet@parisc-linux.org>
 *
 * Pieces of code based on linux-2.4's hp_mouse.c & hp_keyb.c
 * Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca>
 * Copyright (c) 1999-2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (c) 2000 Xavier Debacker <debackex@esiee.fr>
 * Copyright (c) 2000-2001 Thomas Marteau <marteaut@esiee.fr>
 *
 * HP GSC PS/2 port driver, found in PA/RISC Workstations
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * TODO:
 * - Dino testing (did HP ever ship a machine on which this port
 *   was usable/enabled ?)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/parisc-device.h>

MODULE_AUTHOR("Laurent Canet <canetl@esiee.fr>, Thibaut Varene <varenet@parisc-linux.org>, Helge Deller <deller@gmx.de>");
MODULE_DESCRIPTION("HP GSC PS2 port driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(parisc, gscps2_device_tbl);

#define PFX "gscps2.c: "

/*
 * Driver constants
 */

/* various constants */
#define ENABLE			1
#define DISABLE			0

#define GSC_DINO_OFFSET		0x0800	/* offset for DINO controller versus LASI one */

/* PS/2 IO port offsets */
#define GSC_ID			0x00	/* device ID offset (see: GSC_ID_XXX) */
#define GSC_RESET		0x00	/* reset port offset */
#define GSC_RCVDATA		0x04	/* receive port offset */
#define GSC_XMTDATA		0x04	/* transmit port offset */
#define GSC_CONTROL		0x08	/* see: Control register bits */
#define GSC_STATUS		0x0C	/* see: Status register bits */

/* Control register bits */
#define GSC_CTRL_ENBL		0x01	/* enable interface */
#define GSC_CTRL_LPBXR		0x02	/* loopback operation */
#define GSC_CTRL_DIAG		0x20	/* directly control clock/data line */
#define GSC_CTRL_DATDIR		0x40	/* data line direct control */
#define GSC_CTRL_CLKDIR		0x80	/* clock line direct control */

/* Status register bits */
#define GSC_STAT_RBNE		0x01	/* Receive Buffer Not Empty */
#define GSC_STAT_TBNE		0x02	/* Transmit Buffer Not Empty */
#define GSC_STAT_TERR		0x04	/* Timeout Error */
#define GSC_STAT_PERR		0x08	/* Parity Error */
#define GSC_STAT_CMPINTR	0x10	/* Composite Interrupt = irq on any port */
#define GSC_STAT_DATSHD		0x40	/* Data Line Shadow */
#define GSC_STAT_CLKSHD		0x80	/* Clock Line Shadow */

/* IDs returned by GSC_ID port register */
#define GSC_ID_KEYBOARD		0	/* device ID values */
#define GSC_ID_MOUSE		1


static irqreturn_t gscps2_interrupt(int irq, void *dev, struct pt_regs *regs);

#define BUFFER_SIZE 0x0f

/* GSC PS/2 port device struct */
struct gscps2port {
	struct list_head node;
	struct parisc_device *padev;
	struct serio *port;
	spinlock_t lock;
	char *addr;
	u8 act, append; /* position in buffer[] */
	struct {
		u8 data;
		u8 str;
	} buffer[BUFFER_SIZE+1];
	int id;
};

/*
 * Various HW level routines
 */

#define gscps2_readb_input(x)		readb((x)+GSC_RCVDATA)
#define gscps2_readb_control(x)		readb((x)+GSC_CONTROL)
#define gscps2_readb_status(x)		readb((x)+GSC_STATUS)
#define gscps2_writeb_control(x, y)	writeb((x), (y)+GSC_CONTROL)


/*
 * wait_TBE() - wait for Transmit Buffer Empty
 */

static int wait_TBE(char *addr)
{
	int timeout = 25000; /* device is expected to react within 250 msec */
	while (gscps2_readb_status(addr) & GSC_STAT_TBNE) {
		if (!--timeout)
			return 0;	/* This should not happen */
		udelay(10);
	}
	return 1;
}


/*
 * gscps2_flush() - flush the receive buffer
 */

static void gscps2_flush(struct gscps2port *ps2port)
{
	while (gscps2_readb_status(ps2port->addr) & GSC_STAT_RBNE)
		gscps2_readb_input(ps2port->addr);
	ps2port->act = ps2port->append = 0;
}

142 /* 142 /*
143 * gscps2_writeb_output() - write a byte to the port 143 * gscps2_writeb_output() - write a byte to the port
144 * 144 *
145 * returns 1 on success, 0 on error 145 * returns 1 on success, 0 on error
146 */ 146 */
147 147
148 static inline int gscps2_writeb_output(struct gscps2port *ps2port, u8 data) 148 static inline int gscps2_writeb_output(struct gscps2port *ps2port, u8 data)
149 { 149 {
150 unsigned long flags; 150 unsigned long flags;
151 char *addr = ps2port->addr; 151 char *addr = ps2port->addr;
152 152
153 if (!wait_TBE(addr)) { 153 if (!wait_TBE(addr)) {
154 printk(KERN_DEBUG PFX "timeout - could not write byte %#x\n", data); 154 printk(KERN_DEBUG PFX "timeout - could not write byte %#x\n", data);
155 return 0; 155 return 0;
156 } 156 }
157 157
158 while (gscps2_readb_status(ps2port->addr) & GSC_STAT_RBNE) 158 while (gscps2_readb_status(ps2port->addr) & GSC_STAT_RBNE)
159 /* wait */; 159 /* wait */;
160 160
161 spin_lock_irqsave(&ps2port->lock, flags); 161 spin_lock_irqsave(&ps2port->lock, flags);
162 writeb(data, addr+GSC_XMTDATA); 162 writeb(data, addr+GSC_XMTDATA);
163 spin_unlock_irqrestore(&ps2port->lock, flags); 163 spin_unlock_irqrestore(&ps2port->lock, flags);
164 164
165 /* this is ugly, but due to timing of the port it seems to be necessary. */ 165 /* this is ugly, but due to timing of the port it seems to be necessary. */
166 mdelay(6); 166 mdelay(6);
167 167
168 /* make sure any received data is returned as fast as possible */ 168 /* make sure any received data is returned as fast as possible */
169 /* this is important e.g. when we set the LEDs on the keyboard */ 169 /* this is important e.g. when we set the LEDs on the keyboard */
170 gscps2_interrupt(0, NULL, NULL); 170 gscps2_interrupt(0, NULL, NULL);
171 171
172 return 1; 172 return 1;
173 } 173 }
174 174
175 175
176 /* 176 /*
177 * gscps2_enable() - enables or disables the port 177 * gscps2_enable() - enables or disables the port
178 */ 178 */
179 179
180 static void gscps2_enable(struct gscps2port *ps2port, int enable) 180 static void gscps2_enable(struct gscps2port *ps2port, int enable)
181 { 181 {
182 unsigned long flags; 182 unsigned long flags;
183 u8 data; 183 u8 data;
184 184
185 /* now enable/disable the port */ 185 /* now enable/disable the port */
186 spin_lock_irqsave(&ps2port->lock, flags); 186 spin_lock_irqsave(&ps2port->lock, flags);
187 gscps2_flush(ps2port); 187 gscps2_flush(ps2port);
188 data = gscps2_readb_control(ps2port->addr); 188 data = gscps2_readb_control(ps2port->addr);
189 if (enable) 189 if (enable)
190 data |= GSC_CTRL_ENBL; 190 data |= GSC_CTRL_ENBL;
191 else 191 else
192 data &= ~GSC_CTRL_ENBL; 192 data &= ~GSC_CTRL_ENBL;
193 gscps2_writeb_control(data, ps2port->addr); 193 gscps2_writeb_control(data, ps2port->addr);
194 spin_unlock_irqrestore(&ps2port->lock, flags); 194 spin_unlock_irqrestore(&ps2port->lock, flags);
195 wait_TBE(ps2port->addr); 195 wait_TBE(ps2port->addr);
196 gscps2_flush(ps2port); 196 gscps2_flush(ps2port);
197 } 197 }
198 198
199 /* 199 /*
200 * gscps2_reset() - resets the PS/2 port 200 * gscps2_reset() - resets the PS/2 port
201 */ 201 */
202 202
203 static void gscps2_reset(struct gscps2port *ps2port) 203 static void gscps2_reset(struct gscps2port *ps2port)
204 { 204 {
205 char *addr = ps2port->addr; 205 char *addr = ps2port->addr;
206 unsigned long flags; 206 unsigned long flags;
207 207
208 /* reset the interface */ 208 /* reset the interface */
209 spin_lock_irqsave(&ps2port->lock, flags); 209 spin_lock_irqsave(&ps2port->lock, flags);
210 gscps2_flush(ps2port); 210 gscps2_flush(ps2port);
211 writeb(0xff, addr+GSC_RESET); 211 writeb(0xff, addr+GSC_RESET);
212 gscps2_flush(ps2port); 212 gscps2_flush(ps2port);
213 spin_unlock_irqrestore(&ps2port->lock, flags); 213 spin_unlock_irqrestore(&ps2port->lock, flags);
214 } 214 }
215 215
216 static LIST_HEAD(ps2port_list); 216 static LIST_HEAD(ps2port_list);
217 217
218 /** 218 /**
219 * gscps2_interrupt() - Interrupt service routine 219 * gscps2_interrupt() - Interrupt service routine
220 * 220 *
221 * This function reads received PS/2 bytes and processes them on 221 * This function reads received PS/2 bytes and processes them on
222 * all interfaces. 222 * all interfaces.
223 * The problematic part here is that the keyboard and mouse PS/2 ports 223 * The problematic part here is that the keyboard and mouse PS/2 ports
224 * share the same interrupt and it's not possible to send data if any 224 * share the same interrupt and it's not possible to send data if any
225 * one of them holds input data. To solve this problem we try to receive 225 * one of them holds input data. To solve this problem we try to receive
226 * the data as fast as possible and handle the reporting to the upper layer 226 * the data as fast as possible and handle the reporting to the upper layer
227 * later. 227 * later.
228 */ 228 */
229 229
230 static irqreturn_t gscps2_interrupt(int irq, void *dev, struct pt_regs *regs) 230 static irqreturn_t gscps2_interrupt(int irq, void *dev, struct pt_regs *regs)
231 { 231 {
232 struct gscps2port *ps2port; 232 struct gscps2port *ps2port;
233 233
234 list_for_each_entry(ps2port, &ps2port_list, node) { 234 list_for_each_entry(ps2port, &ps2port_list, node) {
235 235
236 unsigned long flags; 236 unsigned long flags;
237 spin_lock_irqsave(&ps2port->lock, flags); 237 spin_lock_irqsave(&ps2port->lock, flags);
238 238
239 while ( (ps2port->buffer[ps2port->append].str = 239 while ( (ps2port->buffer[ps2port->append].str =
240 gscps2_readb_status(ps2port->addr)) & GSC_STAT_RBNE ) { 240 gscps2_readb_status(ps2port->addr)) & GSC_STAT_RBNE ) {
241 ps2port->buffer[ps2port->append].data = 241 ps2port->buffer[ps2port->append].data =
242 gscps2_readb_input(ps2port->addr); 242 gscps2_readb_input(ps2port->addr);
243 ps2port->append = ((ps2port->append+1) & BUFFER_SIZE); 243 ps2port->append = ((ps2port->append+1) & BUFFER_SIZE);
244 } 244 }
245 245
246 spin_unlock_irqrestore(&ps2port->lock, flags); 246 spin_unlock_irqrestore(&ps2port->lock, flags);
247 247
248 } /* list_for_each_entry */ 248 } /* list_for_each_entry */
249 249
250 /* all data was read from the ports - now report the data to upper layer */ 250 /* all data was read from the ports - now report the data to upper layer */
251 251
252 list_for_each_entry(ps2port, &ps2port_list, node) { 252 list_for_each_entry(ps2port, &ps2port_list, node) {
253 253
254 while (ps2port->act != ps2port->append) { 254 while (ps2port->act != ps2port->append) {
255 255
256 unsigned int rxflags; 256 unsigned int rxflags;
257 u8 data, status; 257 u8 data, status;
258 258
259 /* Did new data arrive while we read the existing data? 259 /* Did new data arrive while we read the existing data?
260 If yes, exit now and let the new irq handler start over again */ 260 If yes, exit now and let the new irq handler start over again */
261 if (gscps2_readb_status(ps2port->addr) & GSC_STAT_CMPINTR) 261 if (gscps2_readb_status(ps2port->addr) & GSC_STAT_CMPINTR)
262 return IRQ_HANDLED; 262 return IRQ_HANDLED;
263 263
264 status = ps2port->buffer[ps2port->act].str; 264 status = ps2port->buffer[ps2port->act].str;
265 data = ps2port->buffer[ps2port->act].data; 265 data = ps2port->buffer[ps2port->act].data;
266 266
267 ps2port->act = ((ps2port->act+1) & BUFFER_SIZE); 267 ps2port->act = ((ps2port->act+1) & BUFFER_SIZE);
268 rxflags = ((status & GSC_STAT_TERR) ? SERIO_TIMEOUT : 0 ) | 268 rxflags = ((status & GSC_STAT_TERR) ? SERIO_TIMEOUT : 0 ) |
269 ((status & GSC_STAT_PERR) ? SERIO_PARITY : 0 ); 269 ((status & GSC_STAT_PERR) ? SERIO_PARITY : 0 );
270 270
271 serio_interrupt(ps2port->port, data, rxflags, regs); 271 serio_interrupt(ps2port->port, data, rxflags, regs);
272 272
273 } /* while() */ 273 } /* while() */
274 274
275 } /* list_for_each_entry */ 275 } /* list_for_each_entry */
276 276
277 return IRQ_HANDLED; 277 return IRQ_HANDLED;
278 } 278 }
279 279
280 280
281 /* 281 /*
282 * gscps2_write() - send a byte out through the aux interface. 282 * gscps2_write() - send a byte out through the aux interface.
283 */ 283 */
284 284
285 static int gscps2_write(struct serio *port, unsigned char data) 285 static int gscps2_write(struct serio *port, unsigned char data)
286 { 286 {
287 struct gscps2port *ps2port = port->port_data; 287 struct gscps2port *ps2port = port->port_data;
288 288
289 if (!gscps2_writeb_output(ps2port, data)) { 289 if (!gscps2_writeb_output(ps2port, data)) {
290 printk(KERN_DEBUG PFX "sending byte %#x failed.\n", data); 290 printk(KERN_DEBUG PFX "sending byte %#x failed.\n", data);
291 return -1; 291 return -1;
292 } 292 }
293 return 0; 293 return 0;
294 } 294 }
295 295
296 /* 296 /*
297 * gscps2_open() is called when a port is opened by the higher layer. 297 * gscps2_open() is called when a port is opened by the higher layer.
298 * It resets and enables the port. 298 * It resets and enables the port.
299 */ 299 */
300 300
301 static int gscps2_open(struct serio *port) 301 static int gscps2_open(struct serio *port)
302 { 302 {
303 struct gscps2port *ps2port = port->port_data; 303 struct gscps2port *ps2port = port->port_data;
304 304
305 gscps2_reset(ps2port); 305 gscps2_reset(ps2port);
306 306
307 /* enable it */ 307 /* enable it */
308 gscps2_enable(ps2port, ENABLE); 308 gscps2_enable(ps2port, ENABLE);
309 309
310 gscps2_interrupt(0, NULL, NULL); 310 gscps2_interrupt(0, NULL, NULL);
311 311
312 return 0; 312 return 0;
313 } 313 }
314 314
315 /* 315 /*
316 * gscps2_close() disables the port 316 * gscps2_close() disables the port
317 */ 317 */
318 318
319 static void gscps2_close(struct serio *port) 319 static void gscps2_close(struct serio *port)
320 { 320 {
321 struct gscps2port *ps2port = port->port_data; 321 struct gscps2port *ps2port = port->port_data;
322 gscps2_enable(ps2port, DISABLE); 322 gscps2_enable(ps2port, DISABLE);
323 } 323 }
324 324
325 /** 325 /**
326 * gscps2_probe() - Probes PS2 devices 326 * gscps2_probe() - Probes PS2 devices
327 * @return: success/error report 327 * @return: success/error report
328 */ 328 */
329 329
330 static int __init gscps2_probe(struct parisc_device *dev) 330 static int __init gscps2_probe(struct parisc_device *dev)
331 { 331 {
332 struct gscps2port *ps2port; 332 struct gscps2port *ps2port;
333 struct serio *serio; 333 struct serio *serio;
334 unsigned long hpa = dev->hpa.start; 334 unsigned long hpa = dev->hpa.start;
335 int ret; 335 int ret;
336 336
337 if (!dev->irq) 337 if (!dev->irq)
338 return -ENODEV; 338 return -ENODEV;
339 339
340 /* Offset for DINO PS/2. Works even with LASI */ 340 /* Offset for DINO PS/2. Works even with LASI */
341 if (dev->id.sversion == 0x96) 341 if (dev->id.sversion == 0x96)
342 hpa += GSC_DINO_OFFSET; 342 hpa += GSC_DINO_OFFSET;
343 343
344 ps2port = kmalloc(sizeof(struct gscps2port), GFP_KERNEL); 344 ps2port = kmalloc(sizeof(struct gscps2port), GFP_KERNEL);
345 serio = kmalloc(sizeof(struct serio), GFP_KERNEL); 345 serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
346 if (!ps2port || !serio) { 346 if (!ps2port || !serio) {
347 ret = -ENOMEM; 347 ret = -ENOMEM;
348 goto fail_nomem; 348 goto fail_nomem;
349 } 349 }
350 350
351 dev_set_drvdata(&dev->dev, ps2port); 351 dev_set_drvdata(&dev->dev, ps2port);
352 352
353 memset(ps2port, 0, sizeof(struct gscps2port)); 353 memset(ps2port, 0, sizeof(struct gscps2port));
354 memset(serio, 0, sizeof(struct serio)); 354 memset(serio, 0, sizeof(struct serio));
355 ps2port->port = serio; 355 ps2port->port = serio;
356 ps2port->padev = dev; 356 ps2port->padev = dev;
357 ps2port->addr = ioremap(hpa, GSC_STATUS + 4); 357 ps2port->addr = ioremap_nocache(hpa, GSC_STATUS + 4);
358 spin_lock_init(&ps2port->lock); 358 spin_lock_init(&ps2port->lock);
359 359
360 gscps2_reset(ps2port); 360 gscps2_reset(ps2port);
361 ps2port->id = readb(ps2port->addr + GSC_ID) & 0x0f; 361 ps2port->id = readb(ps2port->addr + GSC_ID) & 0x0f;
362 362
363 snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s", 363 snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s",
364 (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse"); 364 (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
365 strlcpy(serio->phys, dev->dev.bus_id, sizeof(serio->phys)); 365 strlcpy(serio->phys, dev->dev.bus_id, sizeof(serio->phys));
366 serio->id.type = SERIO_8042; 366 serio->id.type = SERIO_8042;
367 serio->write = gscps2_write; 367 serio->write = gscps2_write;
368 serio->open = gscps2_open; 368 serio->open = gscps2_open;
369 serio->close = gscps2_close; 369 serio->close = gscps2_close;
370 serio->port_data = ps2port; 370 serio->port_data = ps2port;
371 serio->dev.parent = &dev->dev; 371 serio->dev.parent = &dev->dev;
372 372
373 ret = -EBUSY; 373 ret = -EBUSY;
374 if (request_irq(dev->irq, gscps2_interrupt, SA_SHIRQ, ps2port->port->name, ps2port)) 374 if (request_irq(dev->irq, gscps2_interrupt, SA_SHIRQ, ps2port->port->name, ps2port))
375 goto fail_miserably; 375 goto fail_miserably;
376 376
377 if (ps2port->id != GSC_ID_KEYBOARD && ps2port->id != GSC_ID_MOUSE) { 377 if (ps2port->id != GSC_ID_KEYBOARD && ps2port->id != GSC_ID_MOUSE) {
378 printk(KERN_WARNING PFX "Unsupported PS/2 port at 0x%08lx (id=%d) ignored\n", 378 printk(KERN_WARNING PFX "Unsupported PS/2 port at 0x%08lx (id=%d) ignored\n",
379 hpa, ps2port->id); 379 hpa, ps2port->id);
380 ret = -ENODEV; 380 ret = -ENODEV;
381 goto fail; 381 goto fail;
382 } 382 }
383 383
384 #if 0 384 #if 0
385 if (!request_mem_region(hpa, GSC_STATUS + 4, ps2port->port.name)) 385 if (!request_mem_region(hpa, GSC_STATUS + 4, ps2port->port.name))
386 goto fail; 386 goto fail;
387 #endif 387 #endif
388 388
389 printk(KERN_INFO "serio: %s port at 0x%p irq %d @ %s\n", 389 printk(KERN_INFO "serio: %s port at 0x%p irq %d @ %s\n",
390 ps2port->port->name, 390 ps2port->port->name,
391 ps2port->addr, 391 ps2port->addr,
392 ps2port->padev->irq, 392 ps2port->padev->irq,
393 ps2port->port->phys); 393 ps2port->port->phys);
394 394
395 serio_register_port(ps2port->port); 395 serio_register_port(ps2port->port);
396 396
397 list_add_tail(&ps2port->node, &ps2port_list); 397 list_add_tail(&ps2port->node, &ps2port_list);
398 398
399 return 0; 399 return 0;
400 400
401 fail: 401 fail:
402 free_irq(dev->irq, ps2port); 402 free_irq(dev->irq, ps2port);
403 403
404 fail_miserably: 404 fail_miserably:
405 iounmap(ps2port->addr); 405 iounmap(ps2port->addr);
406 release_mem_region(dev->hpa.start, GSC_STATUS + 4); 406 release_mem_region(dev->hpa.start, GSC_STATUS + 4);
407 407
408 fail_nomem: 408 fail_nomem:
409 kfree(ps2port); 409 kfree(ps2port);
410 kfree(serio); 410 kfree(serio);
411 return ret; 411 return ret;
412 } 412 }
413 413
414 /** 414 /**
415 * gscps2_remove() - Removes PS2 devices 415 * gscps2_remove() - Removes PS2 devices
416 * @return: success/error report 416 * @return: success/error report
417 */ 417 */
418 418
419 static int __devexit gscps2_remove(struct parisc_device *dev) 419 static int __devexit gscps2_remove(struct parisc_device *dev)
420 { 420 {
421 struct gscps2port *ps2port = dev_get_drvdata(&dev->dev); 421 struct gscps2port *ps2port = dev_get_drvdata(&dev->dev);
422 422
423 serio_unregister_port(ps2port->port); 423 serio_unregister_port(ps2port->port);
424 free_irq(dev->irq, ps2port); 424 free_irq(dev->irq, ps2port);
425 gscps2_flush(ps2port); 425 gscps2_flush(ps2port);
426 list_del(&ps2port->node); 426 list_del(&ps2port->node);
427 iounmap(ps2port->addr); 427 iounmap(ps2port->addr);
428 #if 0 428 #if 0
429 release_mem_region(dev->hpa, GSC_STATUS + 4); 429 release_mem_region(dev->hpa, GSC_STATUS + 4);
430 #endif 430 #endif
431 dev_set_drvdata(&dev->dev, NULL); 431 dev_set_drvdata(&dev->dev, NULL);
432 kfree(ps2port); 432 kfree(ps2port);
433 return 0; 433 return 0;
434 } 434 }
435 435
436 436
437 static struct parisc_device_id gscps2_device_tbl[] = { 437 static struct parisc_device_id gscps2_device_tbl[] = {
438 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00084 }, /* LASI PS/2 */ 438 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00084 }, /* LASI PS/2 */
439 #ifdef DINO_TESTED 439 #ifdef DINO_TESTED
440 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00096 }, /* DINO PS/2 */ 440 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00096 }, /* DINO PS/2 */
441 #endif 441 #endif
442 { 0, } /* 0 terminated list */ 442 { 0, } /* 0 terminated list */
443 }; 443 };
444 444
445 static struct parisc_driver parisc_ps2_driver = { 445 static struct parisc_driver parisc_ps2_driver = {
446 .name = "gsc_ps2", 446 .name = "gsc_ps2",
447 .id_table = gscps2_device_tbl, 447 .id_table = gscps2_device_tbl,
448 .probe = gscps2_probe, 448 .probe = gscps2_probe,
449 .remove = gscps2_remove, 449 .remove = gscps2_remove,
450 }; 450 };
451 451
452 static int __init gscps2_init(void) 452 static int __init gscps2_init(void)
453 { 453 {
454 register_parisc_driver(&parisc_ps2_driver); 454 register_parisc_driver(&parisc_ps2_driver);
455 return 0; 455 return 0;
456 } 456 }
457 457
458 static void __exit gscps2_exit(void) 458 static void __exit gscps2_exit(void)
459 { 459 {
460 unregister_parisc_driver(&parisc_ps2_driver); 460 unregister_parisc_driver(&parisc_ps2_driver);
461 } 461 }
462 462
463 463
464 module_init(gscps2_init); 464 module_init(gscps2_init);
465 module_exit(gscps2_exit); 465 module_exit(gscps2_exit);
466 466
467 467
drivers/parisc/ccio-dma.c
1 /* 1 /*
2 ** ccio-dma.c: 2 ** ccio-dma.c:
3 ** DMA management routines for first generation cache-coherent machines. 3 ** DMA management routines for first generation cache-coherent machines.
4 ** Program U2/Uturn in "Virtual Mode" and use the I/O MMU. 4 ** Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
5 ** 5 **
6 ** (c) Copyright 2000 Grant Grundler 6 ** (c) Copyright 2000 Grant Grundler
7 ** (c) Copyright 2000 Ryan Bradetich 7 ** (c) Copyright 2000 Ryan Bradetich
8 ** (c) Copyright 2000 Hewlett-Packard Company 8 ** (c) Copyright 2000 Hewlett-Packard Company
9 ** 9 **
10 ** This program is free software; you can redistribute it and/or modify 10 ** This program is free software; you can redistribute it and/or modify
11 ** it under the terms of the GNU General Public License as published by 11 ** it under the terms of the GNU General Public License as published by
12 ** the Free Software Foundation; either version 2 of the License, or 12 ** the Free Software Foundation; either version 2 of the License, or
13 ** (at your option) any later version. 13 ** (at your option) any later version.
14 ** 14 **
15 ** 15 **
16 ** "Real Mode" operation refers to U2/Uturn chip operation. 16 ** "Real Mode" operation refers to U2/Uturn chip operation.
17 ** U2/Uturn were designed to perform coherency checks w/o using 17 ** U2/Uturn were designed to perform coherency checks w/o using
18 ** the I/O MMU - basically what x86 does. 18 ** the I/O MMU - basically what x86 does.
19 ** 19 **
20 ** Philipp Rumpf has a "Real Mode" driver for PCX-W machines at: 20 ** Philipp Rumpf has a "Real Mode" driver for PCX-W machines at:
21 ** CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc 21 ** CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
22 ** cvs -z3 co linux/arch/parisc/kernel/dma-rm.c 22 ** cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
23 ** 23 **
24 ** I've rewritten his code to work under TPG's tree. See ccio-rm-dma.c. 24 ** I've rewritten his code to work under TPG's tree. See ccio-rm-dma.c.
25 ** 25 **
26 ** Drawbacks of using Real Mode are: 26 ** Drawbacks of using Real Mode are:
27 ** o outbound DMA is slower - U2 won't prefetch data (GSC+ XQL signal). 27 ** o outbound DMA is slower - U2 won't prefetch data (GSC+ XQL signal).
28 ** o Inbound DMA less efficient - U2 can't use DMA_FAST attribute. 28 ** o Inbound DMA less efficient - U2 can't use DMA_FAST attribute.
29 ** o Ability to do scatter/gather in HW is lost. 29 ** o Ability to do scatter/gather in HW is lost.
30 ** o Doesn't work under PCX-U/U+ machines since they didn't follow 30 ** o Doesn't work under PCX-U/U+ machines since they didn't follow
31 ** the coherency design originally worked out. Only PCX-W does. 31 ** the coherency design originally worked out. Only PCX-W does.
32 */ 32 */
33 33
34 #include <linux/config.h> 34 #include <linux/config.h>
35 #include <linux/types.h> 35 #include <linux/types.h>
36 #include <linux/init.h> 36 #include <linux/init.h>
37 #include <linux/mm.h> 37 #include <linux/mm.h>
38 #include <linux/spinlock.h> 38 #include <linux/spinlock.h>
39 #include <linux/slab.h> 39 #include <linux/slab.h>
40 #include <linux/string.h> 40 #include <linux/string.h>
41 #include <linux/pci.h> 41 #include <linux/pci.h>
42 #include <linux/reboot.h> 42 #include <linux/reboot.h>
43 #include <linux/proc_fs.h> 43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h> 44 #include <linux/seq_file.h>
45 45
46 #include <asm/byteorder.h> 46 #include <asm/byteorder.h>
47 #include <asm/cache.h> /* for L1_CACHE_BYTES */ 47 #include <asm/cache.h> /* for L1_CACHE_BYTES */
48 #include <asm/uaccess.h> 48 #include <asm/uaccess.h>
49 #include <asm/page.h> 49 #include <asm/page.h>
50 #include <asm/dma.h> 50 #include <asm/dma.h>
51 #include <asm/io.h> 51 #include <asm/io.h>
52 #include <asm/hardware.h> /* for register_module() */ 52 #include <asm/hardware.h> /* for register_module() */
53 #include <asm/parisc-device.h> 53 #include <asm/parisc-device.h>
54 54
55 /* 55 /*
56 ** Choose "ccio" since that's what HP-UX calls it. 56 ** Choose "ccio" since that's what HP-UX calls it.
57 ** Make it easier for folks to migrate from one to the other :^) 57 ** Make it easier for folks to migrate from one to the other :^)
58 */ 58 */
59 #define MODULE_NAME "ccio" 59 #define MODULE_NAME "ccio"
60 60
61 #undef DEBUG_CCIO_RES 61 #undef DEBUG_CCIO_RES
62 #undef DEBUG_CCIO_RUN 62 #undef DEBUG_CCIO_RUN
63 #undef DEBUG_CCIO_INIT 63 #undef DEBUG_CCIO_INIT
64 #undef DEBUG_CCIO_RUN_SG 64 #undef DEBUG_CCIO_RUN_SG
65 65
66 #ifdef CONFIG_PROC_FS 66 #ifdef CONFIG_PROC_FS
67 /* 67 /*
68 * CCIO_SEARCH_TIME can help measure how fast the bitmap search is. 68 * CCIO_SEARCH_TIME can help measure how fast the bitmap search is.
69 * It impacts performance though - ditch it if you don't use it. 69 * It impacts performance though - ditch it if you don't use it.
70 */ 70 */
71 #define CCIO_SEARCH_TIME 71 #define CCIO_SEARCH_TIME
72 #undef CCIO_MAP_STATS 72 #undef CCIO_MAP_STATS
73 #else 73 #else
74 #undef CCIO_SEARCH_TIME 74 #undef CCIO_SEARCH_TIME
75 #undef CCIO_MAP_STATS 75 #undef CCIO_MAP_STATS
76 #endif 76 #endif
77 77
78 #include <linux/proc_fs.h> 78 #include <linux/proc_fs.h>
79 #include <asm/runway.h> /* for proc_runway_root */ 79 #include <asm/runway.h> /* for proc_runway_root */
80 80
81 #ifdef DEBUG_CCIO_INIT 81 #ifdef DEBUG_CCIO_INIT
82 #define DBG_INIT(x...) printk(x) 82 #define DBG_INIT(x...) printk(x)
83 #else 83 #else
84 #define DBG_INIT(x...) 84 #define DBG_INIT(x...)
85 #endif 85 #endif
86 86
87 #ifdef DEBUG_CCIO_RUN 87 #ifdef DEBUG_CCIO_RUN
88 #define DBG_RUN(x...) printk(x) 88 #define DBG_RUN(x...) printk(x)
89 #else 89 #else
90 #define DBG_RUN(x...) 90 #define DBG_RUN(x...)
91 #endif 91 #endif
92 92
93 #ifdef DEBUG_CCIO_RES 93 #ifdef DEBUG_CCIO_RES
94 #define DBG_RES(x...) printk(x) 94 #define DBG_RES(x...) printk(x)
95 #else 95 #else
96 #define DBG_RES(x...) 96 #define DBG_RES(x...)
97 #endif 97 #endif
98 98
99 #ifdef DEBUG_CCIO_RUN_SG 99 #ifdef DEBUG_CCIO_RUN_SG
100 #define DBG_RUN_SG(x...) printk(x) 100 #define DBG_RUN_SG(x...) printk(x)
101 #else 101 #else
102 #define DBG_RUN_SG(x...) 102 #define DBG_RUN_SG(x...)
103 #endif 103 #endif
104 104
105 #define CCIO_INLINE inline 105 #define CCIO_INLINE inline
106 #define WRITE_U32(value, addr) __raw_writel(value, addr) 106 #define WRITE_U32(value, addr) __raw_writel(value, addr)
107 #define READ_U32(addr) __raw_readl(addr) 107 #define READ_U32(addr) __raw_readl(addr)
108 108
109 #define U2_IOA_RUNWAY 0x580 109 #define U2_IOA_RUNWAY 0x580
110 #define U2_BC_GSC 0x501 110 #define U2_BC_GSC 0x501
111 #define UTURN_IOA_RUNWAY 0x581 111 #define UTURN_IOA_RUNWAY 0x581
112 #define UTURN_BC_GSC 0x502 112 #define UTURN_BC_GSC 0x502
113 113
114 #define IOA_NORMAL_MODE 0x00020080 /* IO_CONTROL to turn on CCIO */ 114 #define IOA_NORMAL_MODE 0x00020080 /* IO_CONTROL to turn on CCIO */
115 #define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */ 115 #define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */
116 #define CMD_TLB_PURGE 33 /* IO_COMMAND to Purge I/O TLB entry */ 116 #define CMD_TLB_PURGE 33 /* IO_COMMAND to Purge I/O TLB entry */
117 117
118 struct ioa_registers { 118 struct ioa_registers {
119 /* Runway Supervisory Set */ 119 /* Runway Supervisory Set */
120 int32_t unused1[12]; 120 int32_t unused1[12];
121 uint32_t io_command; /* Offset 12 */ 121 uint32_t io_command; /* Offset 12 */
122 uint32_t io_status; /* Offset 13 */ 122 uint32_t io_status; /* Offset 13 */
123 uint32_t io_control; /* Offset 14 */ 123 uint32_t io_control; /* Offset 14 */
124 int32_t unused2[1]; 124 int32_t unused2[1];
125 125
126 /* Runway Auxiliary Register Set */ 126 /* Runway Auxiliary Register Set */
127 uint32_t io_err_resp; /* Offset 0 */ 127 uint32_t io_err_resp; /* Offset 0 */
128 uint32_t io_err_info; /* Offset 1 */ 128 uint32_t io_err_info; /* Offset 1 */
129 uint32_t io_err_req; /* Offset 2 */ 129 uint32_t io_err_req; /* Offset 2 */
130 uint32_t io_err_resp_hi; /* Offset 3 */ 130 uint32_t io_err_resp_hi; /* Offset 3 */
131 uint32_t io_tlb_entry_m; /* Offset 4 */ 131 uint32_t io_tlb_entry_m; /* Offset 4 */
132 uint32_t io_tlb_entry_l; /* Offset 5 */ 132 uint32_t io_tlb_entry_l; /* Offset 5 */
133 uint32_t unused3[1]; 133 uint32_t unused3[1];
134 uint32_t io_pdir_base; /* Offset 7 */ 134 uint32_t io_pdir_base; /* Offset 7 */
135 uint32_t io_io_low_hv; /* Offset 8 */ 135 uint32_t io_io_low_hv; /* Offset 8 */
136 uint32_t io_io_high_hv; /* Offset 9 */ 136 uint32_t io_io_high_hv; /* Offset 9 */
137 uint32_t unused4[1]; 137 uint32_t unused4[1];
138 uint32_t io_chain_id_mask; /* Offset 11 */ 138 uint32_t io_chain_id_mask; /* Offset 11 */
139 uint32_t unused5[2]; 139 uint32_t unused5[2];
140 uint32_t io_io_low; /* Offset 14 */ 140 uint32_t io_io_low; /* Offset 14 */
141 uint32_t io_io_high; /* Offset 15 */ 141 uint32_t io_io_high; /* Offset 15 */
142 }; 142 };
143 143
144 /* 144 /*
145 ** IOA Registers 145 ** IOA Registers
146 ** ------------- 146 ** -------------
147 ** 147 **
148 ** Runway IO_CONTROL Register (+0x38) 148 ** Runway IO_CONTROL Register (+0x38)
149 ** 149 **
150 ** The Runway IO_CONTROL register controls the forwarding of transactions. 150 ** The Runway IO_CONTROL register controls the forwarding of transactions.
151 ** 151 **
152 ** | 0 ... 13 | 14 15 | 16 ... 21 | 22 | 23 24 | 25 ... 31 | 152 ** | 0 ... 13 | 14 15 | 16 ... 21 | 22 | 23 24 | 25 ... 31 |
153 ** | HV | TLB | reserved | HV | mode | reserved | 153 ** | HV | TLB | reserved | HV | mode | reserved |
154 ** 154 **
155 ** o mode field indicates the address translation of transactions 155 ** o mode field indicates the address translation of transactions
156 ** forwarded from Runway to GSC+: 156 ** forwarded from Runway to GSC+:
157 ** Mode Name Value Definition 157 ** Mode Name Value Definition
158 ** Off (default) 0 Opaque to matching addresses. 158 ** Off (default) 0 Opaque to matching addresses.
159 ** Include 1 Transparent for matching addresses. 159 ** Include 1 Transparent for matching addresses.
160 ** Peek 3 Map matching addresses. 160 ** Peek 3 Map matching addresses.
161 ** 161 **
162 ** + "Off" mode: Runway transactions which match the I/O range 162 ** + "Off" mode: Runway transactions which match the I/O range
163 ** specified by the IO_IO_LOW/IO_IO_HIGH registers will be ignored. 163 ** specified by the IO_IO_LOW/IO_IO_HIGH registers will be ignored.
164 ** + "Include" mode: all addresses within the I/O range specified 164 ** + "Include" mode: all addresses within the I/O range specified
165 ** by the IO_IO_LOW and IO_IO_HIGH registers are transparently 165 ** by the IO_IO_LOW and IO_IO_HIGH registers are transparently
166 ** forwarded. This is the I/O Adapter's normal operating mode. 166 ** forwarded. This is the I/O Adapter's normal operating mode.
167 ** + "Peek" mode: used during system configuration to initialize the 167 ** + "Peek" mode: used during system configuration to initialize the
168 ** GSC+ bus. Runway Write_Shorts in the address range specified by 168 ** GSC+ bus. Runway Write_Shorts in the address range specified by
169 ** IO_IO_LOW and IO_IO_HIGH are forwarded through the I/O Adapter 169 ** IO_IO_LOW and IO_IO_HIGH are forwarded through the I/O Adapter
170 ** *AND* the GSC+ address is remapped to the Broadcast Physical 170 ** *AND* the GSC+ address is remapped to the Broadcast Physical
171 ** Address space by setting the 14 high order address bits of the 171 ** Address space by setting the 14 high order address bits of the
172 ** 32 bit GSC+ address to ones. 172 ** 32 bit GSC+ address to ones.
173 ** 173 **
174 ** o TLB field affects transactions which are forwarded from GSC+ to Runway. 174 ** o TLB field affects transactions which are forwarded from GSC+ to Runway.
175 ** "Real" mode is the poweron default. 175 ** "Real" mode is the poweron default.
176 ** 176 **
177 ** TLB Mode Value Description 177 ** TLB Mode Value Description
178 ** Real 0 No TLB translation. Address is directly mapped and the 178 ** Real 0 No TLB translation. Address is directly mapped and the
179 ** virtual address is composed of selected physical bits. 179 ** virtual address is composed of selected physical bits.
180 ** Error 1 Software fills the TLB manually. 180 ** Error 1 Software fills the TLB manually.
181 ** Normal 2 IOA fetches IO TLB misses from IO PDIR (in host memory). 181 ** Normal 2 IOA fetches IO TLB misses from IO PDIR (in host memory).
182 ** 182 **
183 ** 183 **
184 ** IO_IO_LOW_HV +0x60 (HV dependent) 184 ** IO_IO_LOW_HV +0x60 (HV dependent)
185 ** IO_IO_HIGH_HV +0x64 (HV dependent) 185 ** IO_IO_HIGH_HV +0x64 (HV dependent)
186 ** IO_IO_LOW +0x78 (Architected register) 186 ** IO_IO_LOW +0x78 (Architected register)
187 ** IO_IO_HIGH +0x7c (Architected register) 187 ** IO_IO_HIGH +0x7c (Architected register)
188 ** 188 **
189 ** IO_IO_LOW and IO_IO_HIGH set the lower and upper bounds of the 189 ** IO_IO_LOW and IO_IO_HIGH set the lower and upper bounds of the
190 ** I/O Adapter address space, respectively. 190 ** I/O Adapter address space, respectively.
191 ** 191 **
192 ** 0 ... 7 | 8 ... 15 | 16 ... 31 | 192 ** 0 ... 7 | 8 ... 15 | 16 ... 31 |
193 ** 11111111 | 11111111 | address | 193 ** 11111111 | 11111111 | address |
194 ** 194 **
195 ** Each LOW/HIGH pair describes a disjoint address space region. 195 ** Each LOW/HIGH pair describes a disjoint address space region.
196 ** (2 per GSC+ port). Each incoming Runway transaction address is compared 196 ** (2 per GSC+ port). Each incoming Runway transaction address is compared
197 ** with both sets of LOW/HIGH registers. If the address is in the range 197 ** with both sets of LOW/HIGH registers. If the address is in the range
198 ** greater than or equal to IO_IO_LOW and less than IO_IO_HIGH the transaction 198 ** greater than or equal to IO_IO_LOW and less than IO_IO_HIGH the transaction
199 ** is forwarded to the respective GSC+ bus. 199 ** is forwarded to the respective GSC+ bus.
200 ** Specify IO_IO_LOW equal to or greater than IO_IO_HIGH to avoid specifying 200 ** Specify IO_IO_LOW equal to or greater than IO_IO_HIGH to avoid specifying
201 ** an address space region. 201 ** an address space region.
202 ** 202 **
203 ** In order for a Runway address to reside within GSC+ extended address space: 203 ** In order for a Runway address to reside within GSC+ extended address space:
204 ** Runway Address [0:7] must identically compare to 8'b11111111 204 ** Runway Address [0:7] must identically compare to 8'b11111111
205 ** Runway Address [8:11] must be equal to IO_IO_LOW(_HV)[16:19] 205 ** Runway Address [8:11] must be equal to IO_IO_LOW(_HV)[16:19]
206 ** Runway Address [12:23] must be greater than or equal to 206 ** Runway Address [12:23] must be greater than or equal to
207 ** IO_IO_LOW(_HV)[20:31] and less than IO_IO_HIGH(_HV)[20:31]. 207 ** IO_IO_LOW(_HV)[20:31] and less than IO_IO_HIGH(_HV)[20:31].
208 ** Runway Address [24:39] is not used in the comparison. 208 ** Runway Address [24:39] is not used in the comparison.
209 ** 209 **
210 ** When the Runway transaction is forwarded to GSC+, the GSC+ address is 210 ** When the Runway transaction is forwarded to GSC+, the GSC+ address is
211 ** as follows: 211 ** as follows:
212 ** GSC+ Address[0:3] 4'b1111 212 ** GSC+ Address[0:3] 4'b1111
213 ** GSC+ Address[4:29] Runway Address[12:37] 213 ** GSC+ Address[4:29] Runway Address[12:37]
214 ** GSC+ Address[30:31] 2'b00 214 ** GSC+ Address[30:31] 2'b00
215 ** 215 **
216 ** All 4 Low/High registers must be initialized (by PDC) once the lower bus 216 ** All 4 Low/High registers must be initialized (by PDC) once the lower bus
217 ** is interrogated and address space is defined. The operating system will 217 ** is interrogated and address space is defined. The operating system will
218 ** modify the architectural IO_IO_LOW and IO_IO_HIGH registers following 218 ** modify the architectural IO_IO_LOW and IO_IO_HIGH registers following
219 ** the PDC initialization. However, the hardware version dependent IO_IO_LOW 219 ** the PDC initialization. However, the hardware version dependent IO_IO_LOW
220 ** and IO_IO_HIGH registers should not be subsequently altered by the OS. 220 ** and IO_IO_HIGH registers should not be subsequently altered by the OS.
221 ** 221 **
222 ** Writes to both sets of registers will take effect immediately, bypassing 222 ** Writes to both sets of registers will take effect immediately, bypassing
223 ** the queues, which ensures that subsequent Runway transactions are checked 223 ** the queues, which ensures that subsequent Runway transactions are checked
224 ** against the updated bounds values. However reads are queued, introducing 224 ** against the updated bounds values. However reads are queued, introducing
225 ** the possibility of a read being bypassed by a subsequent write to the same 225 ** the possibility of a read being bypassed by a subsequent write to the same
226 ** register. This sequence can be avoided by having software wait for read 226 ** register. This sequence can be avoided by having software wait for read
227 ** returns before issuing subsequent writes. 227 ** returns before issuing subsequent writes.
228 */ 228 */
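As a concrete reading of the diagrams above, here is a sketch that decodes IOA_NORMAL_MODE against the IO_CONTROL layout and spells out the IO_IO_LOW/IO_IO_HIGH comparison. It is illustrative only: pa_field() and runway_addr_in_io_range() are hypothetical helpers, not part of this driver, and the field positions simply follow the MSB-is-bit-0 diagrams in the comment.

#include <stdint.h>

/* Extract reg[msb:lsb] using PA-RISC MSB==bit-0 numbering (32-bit reg). */
static inline uint32_t pa_field(uint32_t reg, int msb, int lsb)
{
	return (reg >> (31 - lsb)) & ((1u << (lsb - msb + 1)) - 1);
}

/*
 * IOA_NORMAL_MODE == 0x00020080 decodes as:
 *   pa_field(0x00020080, 14, 15) == 2  ->  TLB "Normal" (fetch from IO PDIR)
 *   pa_field(0x00020080, 23, 24) == 1  ->  mode "Include" (forward matches)
 */

/* Does a 40-bit Runway address fall in the architected I/O range? */
static int runway_addr_in_io_range(uint64_t rw, uint32_t io_io_low,
				   uint32_t io_io_high)
{
	uint32_t a_8_11  = (uint32_t)(rw >> 28) & 0xf;   /* Runway [8:11]  */
	uint32_t a_12_23 = (uint32_t)(rw >> 16) & 0xfff; /* Runway [12:23] */

	if (((rw >> 32) & 0xff) != 0xff)      /* [0:7] must be all ones */
		return 0;
	if (a_8_11 != ((io_io_low >> 12) & 0xf))     /* == LOW[16:19]   */
		return 0;
	return a_12_23 >= (io_io_low & 0xfff) &&     /* >= LOW[20:31]   */
	       a_12_23 <  (io_io_high & 0xfff);      /* <  HIGH[20:31]  */
}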
229 229
230 struct ioc { 230 struct ioc {
231 struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */ 231 struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
232 u8 *res_map; /* resource map, bit == pdir entry */ 232 u8 *res_map; /* resource map, bit == pdir entry */
233 u64 *pdir_base; /* physical base address */ 233 u64 *pdir_base; /* physical base address */
234 u32 pdir_size; /* bytes, function of IOV Space size */ 234 u32 pdir_size; /* bytes, function of IOV Space size */
235 u32 res_hint; /* next available IOVP - 235 u32 res_hint; /* next available IOVP -
236 circular search */ 236 circular search */
237 u32 res_size; /* size of resource map in bytes */ 237 u32 res_size; /* size of resource map in bytes */
238 spinlock_t res_lock; 238 spinlock_t res_lock;
239 239
240 #ifdef CCIO_SEARCH_TIME 240 #ifdef CCIO_SEARCH_TIME
241 #define CCIO_SEARCH_SAMPLE 0x100 241 #define CCIO_SEARCH_SAMPLE 0x100
242 unsigned long avg_search[CCIO_SEARCH_SAMPLE]; 242 unsigned long avg_search[CCIO_SEARCH_SAMPLE];
243 unsigned long avg_idx; /* current index into avg_search */ 243 unsigned long avg_idx; /* current index into avg_search */
244 #endif 244 #endif
245 #ifdef CCIO_MAP_STATS 245 #ifdef CCIO_MAP_STATS
246 unsigned long used_pages; 246 unsigned long used_pages;
247 unsigned long msingle_calls; 247 unsigned long msingle_calls;
248 unsigned long msingle_pages; 248 unsigned long msingle_pages;
249 unsigned long msg_calls; 249 unsigned long msg_calls;
250 unsigned long msg_pages; 250 unsigned long msg_pages;
251 unsigned long usingle_calls; 251 unsigned long usingle_calls;
252 unsigned long usingle_pages; 252 unsigned long usingle_pages;
253 unsigned long usg_calls; 253 unsigned long usg_calls;
254 unsigned long usg_pages; 254 unsigned long usg_pages;
255 #endif 255 #endif
256 unsigned short cujo20_bug; 256 unsigned short cujo20_bug;
257 257
258 /* STUFF We don't need in performance path */ 258 /* STUFF We don't need in performance path */
259 u32 chainid_shift; /* specify bit location of chain_id */ 259 u32 chainid_shift; /* specify bit location of chain_id */
260 struct ioc *next; /* Linked list of discovered iocs */ 260 struct ioc *next; /* Linked list of discovered iocs */
261 const char *name; /* device name from firmware */ 261 const char *name; /* device name from firmware */
262 unsigned int hw_path; /* the hardware path this ioc is associated with */ 262 unsigned int hw_path; /* the hardware path this ioc is associated with */
263 struct pci_dev *fake_pci_dev; /* the fake pci_dev for non-pci devs */ 263 struct pci_dev *fake_pci_dev; /* the fake pci_dev for non-pci devs */
264 struct resource mmio_region[2]; /* The "routed" MMIO regions */ 264 struct resource mmio_region[2]; /* The "routed" MMIO regions */
265 }; 265 };
266 266
267 static struct ioc *ioc_list; 267 static struct ioc *ioc_list;
268 static int ioc_count; 268 static int ioc_count;
269 269
270 /************************************************************** 270 /**************************************************************
271 * 271 *
272 * I/O Pdir Resource Management 272 * I/O Pdir Resource Management
273 * 273 *
274 * Bits set in the resource map are in use. 274 * Bits set in the resource map are in use.
275 * Each bit can represent a number of pages. 275 * Each bit can represent a number of pages.
276 * LSbs represent lower addresses (IOVA's). 276 * LSbs represent lower addresses (IOVA's).
277 * 277 *
278 * This was copied from sba_iommu.c. Don't try to unify 278 * This was copied from sba_iommu.c. Don't try to unify
279 * the two resource managers unless a way to have different 279 * the two resource managers unless a way to have different
280 * allocation policies is also adjusted. We'd like to avoid 280 * allocation policies is also adjusted. We'd like to avoid
281 * I/O TLB thrashing by having resource allocation policy 281 * I/O TLB thrashing by having resource allocation policy
282 * match the I/O TLB replacement policy. 282 * match the I/O TLB replacement policy.
283 * 283 *
284 ***************************************************************/ 284 ***************************************************************/
285 #define IOVP_SIZE PAGE_SIZE 285 #define IOVP_SIZE PAGE_SIZE
286 #define IOVP_SHIFT PAGE_SHIFT 286 #define IOVP_SHIFT PAGE_SHIFT
287 #define IOVP_MASK PAGE_MASK 287 #define IOVP_MASK PAGE_MASK
288 288
289 /* Convert from IOVP to IOVA and vice versa. */ 289 /* Convert from IOVP to IOVA and vice versa. */
290 #define CCIO_IOVA(iovp,offset) ((iovp) | (offset)) 290 #define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
291 #define CCIO_IOVP(iova) ((iova) & IOVP_MASK) 291 #define CCIO_IOVP(iova) ((iova) & IOVP_MASK)
292 292
293 #define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT) 293 #define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
294 #define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT) 294 #define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
295 #define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset) 295 #define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
296 #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) 296 #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
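A quick worked example of these conversions, assuming 4 kB pages (IOVP_SHIFT == 12):

	/* For iova == 0x00123456:
	 *   CCIO_IOVP(0x00123456)         == 0x00123000   (I/O virtual page)
	 *   offset == iova & ~IOVP_MASK   == 0x456
	 *   PDIR_INDEX(0x00123000)        == 0x123        (I/O Pdir slot)
	 *   CCIO_IOVA(0x00123000, 0x456)  == 0x00123456   (round trip)
	 */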
297 297
298 /* 298 /*
299 ** Don't worry about the 150% average search length on a miss. 299 ** Don't worry about the 150% average search length on a miss.
300 ** If the search wraps around and passes the res_hint, it will 300 ** If the search wraps around and passes the res_hint, it will
301 ** cause the kernel to panic anyhow. 301 ** cause the kernel to panic anyhow.
302 */ 302 */
303 #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ 303 #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \
304 for(; res_ptr < res_end; ++res_ptr) { \ 304 for(; res_ptr < res_end; ++res_ptr) { \
305 if(0 == (*res_ptr & mask)) { \ 305 if(0 == (*res_ptr & mask)) { \
306 *res_ptr |= mask; \ 306 *res_ptr |= mask; \
307 res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ 307 res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
308 ioc->res_hint = res_idx + (size >> 3); \ 308 ioc->res_hint = res_idx + (size >> 3); \
309 goto resource_found; \ 309 goto resource_found; \
310 } \ 310 } \
311 } 311 }
312 312
313 #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ 313 #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \
314 u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ 314 u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
315 u##size *res_end = (u##size *)&(ioc)->res_map[ioa->res_size]; \ 315 u##size *res_end = (u##size *)&(ioc)->res_map[ioa->res_size]; \
316 CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \ 316 CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
317 res_ptr = (u##size *)&(ioc)->res_map[0]; \ 317 res_ptr = (u##size *)&(ioc)->res_map[0]; \
318 CCIO_SEARCH_LOOP(ioa, res_idx, mask, size); 318 CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);
319 319
320 /* 320 /*
321 ** Find available bit in this ioa's resource map. 321 ** Find available bit in this ioa's resource map.
322 ** Use a "circular" search: 322 ** Use a "circular" search:
323 ** o Most IOVA's are "temporary" - avg search time should be small. 323 ** o Most IOVA's are "temporary" - avg search time should be small.
324 ** o keep a history of what happened for debugging 324 ** o keep a history of what happened for debugging
325 ** o KISS. 325 ** o KISS.
326 ** 326 **
327 ** Perf optimizations: 327 ** Perf optimizations:
328 ** o search for log2(size) bits at a time. 328 ** o search for log2(size) bits at a time.
329 ** o search for available resource bits using byte/word/whatever. 329 ** o search for available resource bits using byte/word/whatever.
330 ** o use different search for "large" (eg > 4 pages) or "very large" 330 ** o use different search for "large" (eg > 4 pages) or "very large"
331 ** (eg > 16 pages) mappings. 331 ** (eg > 16 pages) mappings.
332 */ 332 */
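For the common 8-page case, the CCIO_FIND_FREE_MAPPING/CCIO_SEARCH_LOOP pair below behaves roughly like this byte-wise scan (an illustrative expansion, not a drop-in replacement: the real macros claim bits with *res_ptr |= mask and branch to resource_found rather than returning):

static int find_free_byte_idx(struct ioc *ioc)
{
	unsigned int i;

	/* first pass: from the circular hint to the end of the map */
	for (i = ioc->res_hint; i < ioc->res_size; i++) {
		if (ioc->res_map[i] == 0) {	/* 8 free pages here */
			ioc->res_map[i] = 0xff;	/* claim all 8       */
			ioc->res_hint = i + 1;	/* advance the hint  */
			return i;		/* res_idx           */
		}
	}
	/* second pass: wrap around and retry from the start */
	for (i = 0; i < ioc->res_size; i++) {
		if (ioc->res_map[i] == 0) {
			ioc->res_map[i] = 0xff;
			ioc->res_hint = i + 1;
			return i;
		}
	}
	return -1;	/* exhausted: ccio_alloc_range() panics here */
}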
333 333
334 /** 334 /**
335 * ccio_alloc_range - Allocate pages in the ioc's resource map. 335 * ccio_alloc_range - Allocate pages in the ioc's resource map.
336 * @ioc: The I/O Controller. 336 * @ioc: The I/O Controller.
337 * @size: The size (in bytes) of the range to be mapped into the 337 * @size: The size (in bytes) of the range to be mapped into the
338 * I/O Pdir... 338 * I/O Pdir...
339 * 339 *
340 * This function searches the resource map of the ioc to locate a range 340 * This function searches the resource map of the ioc to locate a range
341 * of available pages for the requested size. 341 * of available pages for the requested size.
342 */ 342 */
343 static int 343 static int
344 ccio_alloc_range(struct ioc *ioc, size_t size) 344 ccio_alloc_range(struct ioc *ioc, size_t size)
345 { 345 {
346 unsigned int pages_needed = size >> IOVP_SHIFT; 346 unsigned int pages_needed = size >> IOVP_SHIFT;
347 unsigned int res_idx; 347 unsigned int res_idx;
348 #ifdef CCIO_SEARCH_TIME 348 #ifdef CCIO_SEARCH_TIME
349 unsigned long cr_start = mfctl(16); 349 unsigned long cr_start = mfctl(16);
350 #endif 350 #endif
351 351
352 BUG_ON(pages_needed == 0); 352 BUG_ON(pages_needed == 0);
353 BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE); 353 BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
354 354
355 DBG_RES("%s() size: %d pages_needed %d\n", 355 DBG_RES("%s() size: %d pages_needed %d\n",
356 __FUNCTION__, size, pages_needed); 356 __FUNCTION__, size, pages_needed);
357 357
358 /* 358 /*
359 ** "seek and ye shall find"...praying never hurts either... 359 ** "seek and ye shall find"...praying never hurts either...
360 ** ggg sacrifices another 710 to the computer gods. 360 ** ggg sacrifices another 710 to the computer gods.
361 */ 361 */
362 362
363 if (pages_needed <= 8) { 363 if (pages_needed <= 8) {
364 /* 364 /*
365 * LAN traffic will not thrash the TLB IFF the same NIC 365 * LAN traffic will not thrash the TLB IFF the same NIC
366 * uses 8 adjacent pages to map separate payload data, 366 * uses 8 adjacent pages to map separate payload data,
367 * i.e. the same byte in the resource bit map. 367 * i.e. the same byte in the resource bit map.
368 */ 368 */
369 #if 0 369 #if 0
370 /* FIXME: bit search should shift its way through 370 /* FIXME: bit search should shift its way through
371 * an unsigned long - not byte at a time. As it is now, 371 * an unsigned long - not byte at a time. As it is now,
372 * we effectively allocate this byte to this mapping. 372 * we effectively allocate this byte to this mapping.
373 */ 373 */
374 unsigned long mask = ~(~0UL >> pages_needed); 374 unsigned long mask = ~(~0UL >> pages_needed);
375 CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8); 375 CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
376 #else 376 #else
377 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8); 377 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
378 #endif 378 #endif
379 } else if (pages_needed <= 16) { 379 } else if (pages_needed <= 16) {
380 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16); 380 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
381 } else if (pages_needed <= 32) { 381 } else if (pages_needed <= 32) {
382 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32); 382 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
383 #ifdef __LP64__ 383 #ifdef __LP64__
384 } else if (pages_needed <= 64) { 384 } else if (pages_needed <= 64) {
385 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64); 385 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
386 #endif 386 #endif
387 } else { 387 } else {
388 panic("%s: %s() Too many pages to map. pages_needed: %u\n", 388 panic("%s: %s() Too many pages to map. pages_needed: %u\n",
389 __FILE__, __FUNCTION__, pages_needed); 389 __FILE__, __FUNCTION__, pages_needed);
390 } 390 }
391 391
392 panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__, 392 panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
393 __FUNCTION__); 393 __FUNCTION__);
394 394
395 resource_found: 395 resource_found:
396 396
397 DBG_RES("%s() res_idx %d res_hint: %d\n", 397 DBG_RES("%s() res_idx %d res_hint: %d\n",
398 __FUNCTION__, res_idx, ioc->res_hint); 398 __FUNCTION__, res_idx, ioc->res_hint);
399 399
400 #ifdef CCIO_SEARCH_TIME 400 #ifdef CCIO_SEARCH_TIME
401 { 401 {
402 unsigned long cr_end = mfctl(16); 402 unsigned long cr_end = mfctl(16);
403 unsigned long tmp = cr_end - cr_start; 403 unsigned long tmp = cr_end - cr_start;
404 /* check for roll over */ 404 /* check for roll over */
405 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp); 405 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
406 } 406 }
407 ioc->avg_search[ioc->avg_idx++] = cr_start; 407 ioc->avg_search[ioc->avg_idx++] = cr_start;
408 ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1; 408 ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
409 #endif 409 #endif
410 #ifdef CCIO_MAP_STATS 410 #ifdef CCIO_MAP_STATS
411 ioc->used_pages += pages_needed; 411 ioc->used_pages += pages_needed;
412 #endif 412 #endif
413 /* 413 /*
414 ** return the bit address. 414 ** return the bit address.
415 */ 415 */
416 return res_idx << 3; 416 return res_idx << 3;
417 } 417 }
418 418
419 #define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \ 419 #define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
420 u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \ 420 u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
421 BUG_ON((*res_ptr & mask) != mask); \ 421 BUG_ON((*res_ptr & mask) != mask); \
422 *res_ptr &= ~(mask); 422 *res_ptr &= ~(mask);
423 423
424 /** 424 /**
425 * ccio_free_range - Free pages from the ioc's resource map. 425 * ccio_free_range - Free pages from the ioc's resource map.
426 * @ioc: The I/O Controller. 426 * @ioc: The I/O Controller.
427 * @iova: The I/O Virtual Address. 427 * @iova: The I/O Virtual Address.
428 * @pages_mapped: The requested number of pages to be freed from the 428 * @pages_mapped: The requested number of pages to be freed from the
429 * I/O Pdir. 429 * I/O Pdir.
430 * 430 *
431 * This function frees the resources allocated for the iova. 431 * This function frees the resources allocated for the iova.
432 */ 432 */
433 static void 433 static void
434 ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped) 434 ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
435 { 435 {
436 unsigned long iovp = CCIO_IOVP(iova); 436 unsigned long iovp = CCIO_IOVP(iova);
437 unsigned int res_idx = PDIR_INDEX(iovp) >> 3; 437 unsigned int res_idx = PDIR_INDEX(iovp) >> 3;
438 438
439 BUG_ON(pages_mapped == 0); 439 BUG_ON(pages_mapped == 0);
440 BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE); 440 BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
441 BUG_ON(pages_mapped > BITS_PER_LONG); 441 BUG_ON(pages_mapped > BITS_PER_LONG);
442 442
443 DBG_RES("%s(): res_idx: %d pages_mapped %d\n", 443 DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
444 __FUNCTION__, res_idx, pages_mapped); 444 __FUNCTION__, res_idx, pages_mapped);
445 445
446 #ifdef CCIO_MAP_STATS 446 #ifdef CCIO_MAP_STATS
447 ioc->used_pages -= pages_mapped; 447 ioc->used_pages -= pages_mapped;
448 #endif 448 #endif
449 449
450 if(pages_mapped <= 8) { 450 if(pages_mapped <= 8) {
451 #if 0 451 #if 0
452 /* see matching comments in alloc_range */ 452 /* see matching comments in alloc_range */
453 unsigned long mask = ~(~0UL >> pages_mapped); 453 unsigned long mask = ~(~0UL >> pages_mapped);
454 CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8); 454 CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
455 #else 455 #else
456 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xff, 8); 456 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xff, 8);
457 #endif 457 #endif
458 } else if(pages_mapped <= 16) { 458 } else if(pages_mapped <= 16) {
459 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffff, 16); 459 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffff, 16);
460 } else if(pages_mapped <= 32) { 460 } else if(pages_mapped <= 32) {
461 CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32); 461 CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
462 #ifdef __LP64__ 462 #ifdef __LP64__
463 } else if(pages_mapped <= 64) { 463 } else if(pages_mapped <= 64) {
464 CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64); 464 CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
465 #endif 465 #endif
466 } else { 466 } else {
467 panic("%s:%s() Too many pages to unmap.\n", __FILE__, 467 panic("%s:%s() Too many pages to unmap.\n", __FILE__,
468 __FUNCTION__); 468 __FUNCTION__);
469 } 469 }
470 } 470 }
471 471
472 /**************************************************************** 472 /****************************************************************
473 ** 473 **
474 ** CCIO dma_ops support routines 474 ** CCIO dma_ops support routines
475 ** 475 **
476 *****************************************************************/ 476 *****************************************************************/
477 477
478 typedef unsigned long space_t; 478 typedef unsigned long space_t;
479 #define KERNEL_SPACE 0 479 #define KERNEL_SPACE 0
480 480
481 /* 481 /*
482 ** DMA "Page Type" and Hints 482 ** DMA "Page Type" and Hints
483 ** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be 483 ** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
484 ** set for subcacheline DMA transfers since we don't want to damage the 484 ** set for subcacheline DMA transfers since we don't want to damage the
485 ** other part of a cacheline. 485 ** other part of a cacheline.
486 ** o SAFE_DMA must be set for "memory" allocated via pci_alloc_consistent(). 486 ** o SAFE_DMA must be set for "memory" allocated via pci_alloc_consistent().
487 ** This bit tells U2 to do R/M/W for partial cachelines. "Streaming" 487 ** This bit tells U2 to do R/M/W for partial cachelines. "Streaming"
488 ** data can avoid this if the mapping covers full cache lines. 488 ** data can avoid this if the mapping covers full cache lines.
489 ** o STOP_MOST is needed for atomicity across cachelines. 489 ** o STOP_MOST is needed for atomicity across cachelines.
490 ** Apparently only "some EISA devices" need this. 490 ** Apparently only "some EISA devices" need this.
491 ** Using CONFIG_ISA is a hack. Only the IOA with EISA under it needs 491 ** Using CONFIG_ISA is a hack. Only the IOA with EISA under it needs
492 ** to use this hint iff the EISA device needs this feature. 492 ** to use this hint iff the EISA device needs this feature.
493 ** According to the U2 ERS, STOP_MOST enabled pages hurt performance. 493 ** According to the U2 ERS, STOP_MOST enabled pages hurt performance.
494 ** o PREFETCH should *not* be set for cases like Multiple PCI devices 494 ** o PREFETCH should *not* be set for cases like Multiple PCI devices
495 ** behind GSCtoPCI (dino) bus converter. Only one cacheline per GSC 495 ** behind GSCtoPCI (dino) bus converter. Only one cacheline per GSC
496 ** device can be fetched and multiple DMA streams will thrash the 496 ** device can be fetched and multiple DMA streams will thrash the
497 ** prefetch buffer and burn memory bandwidth. See 6.7.3 "Prefetch Rules 497 ** prefetch buffer and burn memory bandwidth. See 6.7.3 "Prefetch Rules
498 ** and Invalidation of Prefetch Entries". 498 ** and Invalidation of Prefetch Entries".
499 ** 499 **
500 ** FIXME: the default hints need to be per GSC device - not global. 500 ** FIXME: the default hints need to be per GSC device - not global.
501 ** 501 **
502 ** HP-UX dorks: linux device driver programming model is totally different 502 ** HP-UX dorks: linux device driver programming model is totally different
503 ** than HP-UX's. HP-UX always sets HINT_PREFETCH since it's drivers 503 ** than HP-UX's. HP-UX always sets HINT_PREFETCH since it's drivers
504 ** do special things to work on non-coherent platforms...linux has to 504 ** do special things to work on non-coherent platforms...linux has to
505 ** be much more careful with this. 505 ** be much more careful with this.
506 */ 506 */
507 #define IOPDIR_VALID 0x01UL 507 #define IOPDIR_VALID 0x01UL
508 #define HINT_SAFE_DMA 0x02UL /* used for pci_alloc_consistent() pages */ 508 #define HINT_SAFE_DMA 0x02UL /* used for pci_alloc_consistent() pages */
509 #ifdef CONFIG_EISA 509 #ifdef CONFIG_EISA
510 #define HINT_STOP_MOST 0x04UL /* LSL support */ 510 #define HINT_STOP_MOST 0x04UL /* LSL support */
511 #else 511 #else
512 #define HINT_STOP_MOST 0x00UL /* only needed for "some EISA devices" */ 512 #define HINT_STOP_MOST 0x00UL /* only needed for "some EISA devices" */
513 #endif 513 #endif
514 #define HINT_UDPATE_ENB 0x08UL /* not used/supported by U2 */ 514 #define HINT_UDPATE_ENB 0x08UL /* not used/supported by U2 */
515 #define HINT_PREFETCH 0x10UL /* for outbound pages which are not SAFE */ 515 #define HINT_PREFETCH 0x10UL /* for outbound pages which are not SAFE */
516 516
517 517
518 /* 518 /*
519 ** Use direction (ie PCI_DMA_TODEVICE) to pick hint. 519 ** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
520 ** ccio_alloc_consistent() depends on this to get SAFE_DMA 520 ** ccio_alloc_consistent() depends on this to get SAFE_DMA
521 ** when it passes in BIDIRECTIONAL flag. 521 ** when it passes in BIDIRECTIONAL flag.
522 */ 522 */
523 static u32 hint_lookup[] = { 523 static u32 hint_lookup[] = {
524 [PCI_DMA_BIDIRECTIONAL] = HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID, 524 [PCI_DMA_BIDIRECTIONAL] = HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
525 [PCI_DMA_TODEVICE] = HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID, 525 [PCI_DMA_TODEVICE] = HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
526 [PCI_DMA_FROMDEVICE] = HINT_STOP_MOST | IOPDIR_VALID, 526 [PCI_DMA_FROMDEVICE] = HINT_STOP_MOST | IOPDIR_VALID,
527 }; 527 };
528 528
529 /** 529 /**
530 * ccio_io_pdir_entry - Initialize an I/O Pdir. 530 * ccio_io_pdir_entry - Initialize an I/O Pdir.
531 * @pdir_ptr: A pointer into I/O Pdir. 531 * @pdir_ptr: A pointer into I/O Pdir.
532 * @sid: The Space Identifier. 532 * @sid: The Space Identifier.
533 * @vba: The virtual address. 533 * @vba: The virtual address.
534 * @hints: The DMA Hint. 534 * @hints: The DMA Hint.
535 * 535 *
536 * Given a virtual address (vba, arg2) and space id, (sid, arg1), 536 * Given a virtual address (vba, arg2) and space id, (sid, arg1),
537 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir 537 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
538 * entry consists of 8 bytes as shown below (MSB == bit 0): 538 * entry consists of 8 bytes as shown below (MSB == bit 0):
539 * 539 *
540 * 540 *
541 * WORD 0: 541 * WORD 0:
542 * +------+----------------+-----------------------------------------------+ 542 * +------+----------------+-----------------------------------------------+
543 * | Phys | Virtual Index | Phys | 543 * | Phys | Virtual Index | Phys |
544 * | 0:3 | 0:11 | 4:19 | 544 * | 0:3 | 0:11 | 4:19 |
545 * |4 bits| 12 bits | 16 bits | 545 * |4 bits| 12 bits | 16 bits |
546 * +------+----------------+-----------------------------------------------+ 546 * +------+----------------+-----------------------------------------------+
547 * WORD 1: 547 * WORD 1:
548 * +-----------------------+-----------------------------------------------+ 548 * +-----------------------+-----------------------------------------------+
549 * | Phys | Rsvd | Prefetch |Update |Rsvd |Lock |Safe |Valid | 549 * | Phys | Rsvd | Prefetch |Update |Rsvd |Lock |Safe |Valid |
550 * | 20:39 | | Enable |Enable | |Enable|DMA | | 550 * | 20:39 | | Enable |Enable | |Enable|DMA | |
551 * | 20 bits | 5 bits | 1 bit |1 bit |2 bits|1 bit |1 bit |1 bit | 551 * | 20 bits | 5 bits | 1 bit |1 bit |2 bits|1 bit |1 bit |1 bit |
552 * +-----------------------+-----------------------------------------------+ 552 * +-----------------------+-----------------------------------------------+
553 * 553 *
554 * The virtual index field is filled with the results of the LCI 554 * The virtual index field is filled with the results of the LCI
555 * (Load Coherence Index) instruction. The 12 bits deposited into the 555 * (Load Coherence Index) instruction. The 12 bits deposited into the
556 * virtual index field are bits 8:19 of the value returned by LCI. 556 * virtual index field are bits 8:19 of the value returned by LCI.
557 */ 557 */
558 void CCIO_INLINE 558 void CCIO_INLINE
559 ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, 559 ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
560 unsigned long hints) 560 unsigned long hints)
561 { 561 {
562 register unsigned long pa; 562 register unsigned long pa;
563 register unsigned long ci; /* coherent index */ 563 register unsigned long ci; /* coherent index */
564 564
565 /* We currently only support kernel addresses */ 565 /* We currently only support kernel addresses */
566 BUG_ON(sid != KERNEL_SPACE); 566 BUG_ON(sid != KERNEL_SPACE);
567 567
568 mtsp(sid,1); 568 mtsp(sid,1);
569 569
570 /* 570 /*
571 ** WORD 1 - low order word 571 ** WORD 1 - low order word
572 ** "hints" parm includes the VALID bit! 572 ** "hints" parm includes the VALID bit!
573 ** "dep" clobbers the physical address offset bits as well. 573 ** "dep" clobbers the physical address offset bits as well.
574 */ 574 */
575 pa = virt_to_phys(vba); 575 pa = virt_to_phys(vba);
576 asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints)); 576 asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
577 ((u32 *)pdir_ptr)[1] = (u32) pa; 577 ((u32 *)pdir_ptr)[1] = (u32) pa;
578 578
579 /* 579 /*
580 ** WORD 0 - high order word 580 ** WORD 0 - high order word
581 */ 581 */
582 582
583 #ifdef __LP64__ 583 #ifdef __LP64__
584 /* 584 /*
585 ** get bits 12:15 of physical address 585 ** get bits 12:15 of physical address
586 ** shift bits 16:31 of physical address 586 ** shift bits 16:31 of physical address
587 ** and deposit them 587 ** and deposit them
588 */ 588 */
589 asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa)); 589 asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
590 asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa)); 590 asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
591 asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci)); 591 asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
592 #else 592 #else
593 pa = 0; 593 pa = 0;
594 #endif 594 #endif
595 /* 595 /*
596 ** get CPU coherency index bits 596 ** get CPU coherency index bits
597 ** Grab virtual index [0:11] 597 ** Grab virtual index [0:11]
598 ** Deposit virt_idx bits into I/O PDIR word 598 ** Deposit virt_idx bits into I/O PDIR word
599 */ 599 */
600 asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); 600 asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
601 asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci)); 601 asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
602 asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci)); 602 asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
603 603
604 ((u32 *)pdir_ptr)[0] = (u32) pa; 604 ((u32 *)pdir_ptr)[0] = (u32) pa;
605 605
606 606
607 /* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360) 607 /* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
608 ** PCX-U/U+ do. (eg C200/C240) 608 ** PCX-U/U+ do. (eg C200/C240)
609 ** PCX-T'? Don't know. (eg C110 or similar K-class) 609 ** PCX-T'? Don't know. (eg C110 or similar K-class)
610 ** 610 **
611 ** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit". 611 ** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
612 ** Hopefully we can patch (NOP) these out at boot time somehow. 612 ** Hopefully we can patch (NOP) these out at boot time somehow.
613 ** 613 **
614 ** "Since PCX-U employs an offset hash that is incompatible with 614 ** "Since PCX-U employs an offset hash that is incompatible with
615 ** the real mode coherence index generation of U2, the PDIR entry 615 ** the real mode coherence index generation of U2, the PDIR entry
616 ** must be flushed to memory to retain coherence." 616 ** must be flushed to memory to retain coherence."
617 */ 617 */
618 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr)); 618 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
619 asm volatile("sync"); 619 asm volatile("sync");
620 } 620 }
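
For readers who don't speak PA-RISC assembler, here is a minimal plain-C sketch of the same packing (64-bit case, bit 0 = MSB as in the diagram above). lci_of() is a hypothetical wrapper for the lci instruction; the sketch mirrors the dep/extr operations for illustration and is not a drop-in replacement for the asm:

	extern unsigned long lci_of(unsigned long vba); /* hypothetical lci wrapper */

	static void pdir_entry_sketch(u64 *pdir_ptr, unsigned long vba,
	                              unsigned long hints)
	{
		unsigned long pa = virt_to_phys((void *)vba);
		unsigned long ci;
		u32 word0, word1;

		/* WORD 1: physical page frame, low 12 offset bits replaced
		 * by the hints (which already include the VALID bit). */
		word1 = (u32)((pa & ~0xfffUL) | (hints & 0xfffUL));

		/* WORD 0: phys bits 16:31 fill the low half, phys bits
		 * 12:15 land in the top nibble. */
		ci = (pa >> 48) & 0xfUL;                /* extrd,u pa,15,4  */
		word0 = (u32)((pa >> 32) & 0xffffUL);   /* extrd,u pa,31,16 */
		word0 |= (u32)ci << 28;                 /* depd ci,35,4     */

		/* Virtual index: bits 8:19 of the LCI result are deposited
		 * into bits 4:15 (MSB numbering) of word 0. */
		ci = lci_of(vba);
		word0 |= ((u32)(ci >> 12) & 0xfffU) << 16; /* extru + depw  */

		((u32 *)pdir_ptr)[0] = word0;
		((u32 *)pdir_ptr)[1] = word1;
	}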
621 621
622 /** 622 /**
623 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB. 623 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
624 * @ioc: The I/O Controller. 624 * @ioc: The I/O Controller.
625 * @iovp: The I/O Virtual Page. 625 * @iovp: The I/O Virtual Page.
626 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir. 626 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
627 * 627 *
628 * Purge invalid I/O PDIR entries from the I/O TLB. 628 * Purge invalid I/O PDIR entries from the I/O TLB.
629 * 629 *
630 * FIXME: Can we change the byte_cnt to pages_mapped? 630 * FIXME: Can we change the byte_cnt to pages_mapped?
631 */ 631 */
632 static CCIO_INLINE void 632 static CCIO_INLINE void
633 ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt) 633 ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
634 { 634 {
635 u32 chain_size = 1 << ioc->chainid_shift; 635 u32 chain_size = 1 << ioc->chainid_shift;
636 636
637 iovp &= IOVP_MASK; /* clear offset bits, just want pagenum */ 637 iovp &= IOVP_MASK; /* clear offset bits, just want pagenum */
638 byte_cnt += chain_size; 638 byte_cnt += chain_size;
639 639
640 while(byte_cnt > chain_size) { 640 while(byte_cnt > chain_size) {
641 WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command); 641 WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
642 iovp += chain_size; 642 iovp += chain_size;
643 byte_cnt -= chain_size; 643 byte_cnt -= chain_size;
644 } 644 }
645 } 645 }
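
A worked example of the purge arithmetic, assuming the chain size that a 256 MiB IOVA space yields later in ccio_ioc_init() (chainid_shift == 20, i.e. 1 MiB chains). The "+= chain_size" / "> chain_size" pairing guarantees at least one purge, and one purge per chain the range touches:

	/* Illustrative only: count the CMD_TLB_PURGE writes issued. */
	u32 chain_size = 1 << 20;        /* assumed chainid_shift == 20 */
	size_t byte_cnt = 8192;          /* an 8 KiB unmap              */
	unsigned int purges = 0;

	byte_cnt += chain_size;
	while (byte_cnt > chain_size) {  /* same test as the loop above */
		purges++;
		byte_cnt -= chain_size;
	}
	/* purges == 1 here; a 1.5 MiB unmap would yield purges == 2. */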
646 646
647 /** 647 /**
648 * ccio_mark_invalid - Mark the I/O Pdir entries invalid. 648 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
649 * @ioc: The I/O Controller. 649 * @ioc: The I/O Controller.
650 * @iova: The I/O Virtual Address. 650 * @iova: The I/O Virtual Address.
651 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir. 651 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
652 * 652 *
653 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O 653 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
654 * TLB entries. 654 * TLB entries.
655 * 655 *
656 ** FIXME: at some threshold it might be "cheaper" to just blow 656 ** FIXME: at some threshold it might be "cheaper" to just blow
657 * away the entire I/O TLB instead of individual entries. 657 * away the entire I/O TLB instead of individual entries.
658 * 658 *
659 * FIXME: Uturn has 256 TLB entries. We don't need to purge every 659 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
660 * PDIR entry - just once for each possible TLB entry. 660 * PDIR entry - just once for each possible TLB entry.
661 ** (We do need to mark I/O PDIR entries invalid regardless). 661 ** (We do need to mark I/O PDIR entries invalid regardless).
662 * 662 *
663 * FIXME: Can we change byte_cnt to pages_mapped? 663 * FIXME: Can we change byte_cnt to pages_mapped?
664 */ 664 */
665 static CCIO_INLINE void 665 static CCIO_INLINE void
666 ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) 666 ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
667 { 667 {
668 u32 iovp = (u32)CCIO_IOVP(iova); 668 u32 iovp = (u32)CCIO_IOVP(iova);
669 size_t saved_byte_cnt; 669 size_t saved_byte_cnt;
670 670
671 /* round up to nearest page size */ 671 /* round up to nearest page size */
672 saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE); 672 saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);
673 673
674 while(byte_cnt > 0) { 674 while(byte_cnt > 0) {
675 /* invalidate one page at a time */ 675 /* invalidate one page at a time */
676 unsigned int idx = PDIR_INDEX(iovp); 676 unsigned int idx = PDIR_INDEX(iovp);
677 char *pdir_ptr = (char *) &(ioc->pdir_base[idx]); 677 char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);
678 678
679 BUG_ON(idx >= (ioc->pdir_size / sizeof(u64))); 679 BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
680 pdir_ptr[7] = 0; /* clear only VALID bit */ 680 pdir_ptr[7] = 0; /* clear only VALID bit */
681 /* 681 /*
682 ** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360) 682 ** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
683 ** PCX-U/U+ do. (eg C200/C240) 683 ** PCX-U/U+ do. (eg C200/C240)
684 ** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit". 684 ** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
685 ** 685 **
686 ** Hopefully someone figures out how to patch (NOP) the 686 ** Hopefully someone figures out how to patch (NOP) the
687 ** FDC/SYNC out at boot time. 687 ** FDC/SYNC out at boot time.
688 */ 688 */
689 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7])); 689 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7]));
690 690
691 iovp += IOVP_SIZE; 691 iovp += IOVP_SIZE;
692 byte_cnt -= IOVP_SIZE; 692 byte_cnt -= IOVP_SIZE;
693 } 693 }
694 694
695 asm volatile("sync"); 695 asm volatile("sync");
696 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt); 696 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
697 } 697 }
698 698
699 /**************************************************************** 699 /****************************************************************
700 ** 700 **
701 ** CCIO dma_ops 701 ** CCIO dma_ops
702 ** 702 **
703 *****************************************************************/ 703 *****************************************************************/
704 704
705 /** 705 /**
706 * ccio_dma_supported - Verify the IOMMU supports the DMA address range. 706 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
707 * @dev: The PCI device. 707 * @dev: The PCI device.
708 * @mask: A bit mask describing the DMA address range of the device. 708 * @mask: A bit mask describing the DMA address range of the device.
709 * 709 *
710 * This function implements the pci_dma_supported function. 710 * This function implements the pci_dma_supported function.
711 */ 711 */
712 static int 712 static int
713 ccio_dma_supported(struct device *dev, u64 mask) 713 ccio_dma_supported(struct device *dev, u64 mask)
714 { 714 {
715 if(dev == NULL) { 715 if(dev == NULL) {
716 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n"); 716 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
717 BUG(); 717 BUG();
718 return 0; 718 return 0;
719 } 719 }
720 720
721 /* only support 32-bit devices (ie PCI/GSC) */ 721 /* only support 32-bit devices (ie PCI/GSC) */
722 return (int)(mask == 0xffffffffUL); 722 return (int)(mask == 0xffffffffUL);
723 } 723 }
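
A sketch of the caller's side, assuming the 2.6-era PCI DMA API used by this tree; the equality test above means only the full 32-bit mask succeeds, so a driver negotiating any other mask is turned away:

	/* In a hypothetical GSC/PCI driver probe routine: */
	if (pci_set_dma_mask(pdev, 0xffffffffULL)) /* reaches ccio_dma_supported() */
		return -EIO;   /* e.g. a 24-bit mask would be refused */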
724 724
725 /** 725 /**
726 * ccio_map_single - Map an address range into the IOMMU. 726 * ccio_map_single - Map an address range into the IOMMU.
727 * @dev: The PCI device. 727 * @dev: The PCI device.
728 * @addr: The start address of the DMA region. 728 * @addr: The start address of the DMA region.
729 * @size: The length of the DMA region. 729 * @size: The length of the DMA region.
730 * @direction: The direction of the DMA transaction (to/from device). 730 * @direction: The direction of the DMA transaction (to/from device).
731 * 731 *
732 * This function implements the pci_map_single function. 732 * This function implements the pci_map_single function.
733 */ 733 */
734 static dma_addr_t 734 static dma_addr_t
735 ccio_map_single(struct device *dev, void *addr, size_t size, 735 ccio_map_single(struct device *dev, void *addr, size_t size,
736 enum dma_data_direction direction) 736 enum dma_data_direction direction)
737 { 737 {
738 int idx; 738 int idx;
739 struct ioc *ioc; 739 struct ioc *ioc;
740 unsigned long flags; 740 unsigned long flags;
741 dma_addr_t iovp; 741 dma_addr_t iovp;
742 dma_addr_t offset; 742 dma_addr_t offset;
743 u64 *pdir_start; 743 u64 *pdir_start;
744 unsigned long hint = hint_lookup[(int)direction]; 744 unsigned long hint = hint_lookup[(int)direction];
745 745
746 BUG_ON(!dev); 746 BUG_ON(!dev);
747 ioc = GET_IOC(dev); 747 ioc = GET_IOC(dev);
748 748
749 BUG_ON(size <= 0); 749 BUG_ON(size <= 0);
750 750
751 /* save offset bits */ 751 /* save offset bits */
752 offset = ((unsigned long) addr) & ~IOVP_MASK; 752 offset = ((unsigned long) addr) & ~IOVP_MASK;
753 753
754 /* round up to nearest IOVP_SIZE */ 754 /* round up to nearest IOVP_SIZE */
755 size = ROUNDUP(size + offset, IOVP_SIZE); 755 size = ROUNDUP(size + offset, IOVP_SIZE);
756 spin_lock_irqsave(&ioc->res_lock, flags); 756 spin_lock_irqsave(&ioc->res_lock, flags);
757 757
758 #ifdef CCIO_MAP_STATS 758 #ifdef CCIO_MAP_STATS
759 ioc->msingle_calls++; 759 ioc->msingle_calls++;
760 ioc->msingle_pages += size >> IOVP_SHIFT; 760 ioc->msingle_pages += size >> IOVP_SHIFT;
761 #endif 761 #endif
762 762
763 idx = ccio_alloc_range(ioc, size); 763 idx = ccio_alloc_range(ioc, size);
764 iovp = (dma_addr_t)MKIOVP(idx); 764 iovp = (dma_addr_t)MKIOVP(idx);
765 765
766 pdir_start = &(ioc->pdir_base[idx]); 766 pdir_start = &(ioc->pdir_base[idx]);
767 767
768 DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n", 768 DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
769 __FUNCTION__, addr, (long)iovp | offset, size); 769 __FUNCTION__, addr, (long)iovp | offset, size);
770 770
771 /* If not cacheline aligned, force SAFE_DMA on the whole mess */ 771 /* If not cacheline aligned, force SAFE_DMA on the whole mess */
772 if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES)) 772 if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
773 hint |= HINT_SAFE_DMA; 773 hint |= HINT_SAFE_DMA;
774 774
775 while(size > 0) { 775 while(size > 0) {
776 ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint); 776 ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
777 777
778 DBG_RUN(" pdir %p %08x%08x\n", 778 DBG_RUN(" pdir %p %08x%08x\n",
779 pdir_start, 779 pdir_start,
780 (u32) (((u32 *) pdir_start)[0]), 780 (u32) (((u32 *) pdir_start)[0]),
781 (u32) (((u32 *) pdir_start)[1])); 781 (u32) (((u32 *) pdir_start)[1]));
782 ++pdir_start; 782 ++pdir_start;
783 addr += IOVP_SIZE; 783 addr += IOVP_SIZE;
784 size -= IOVP_SIZE; 784 size -= IOVP_SIZE;
785 } 785 }
786 786
787 spin_unlock_irqrestore(&ioc->res_lock, flags); 787 spin_unlock_irqrestore(&ioc->res_lock, flags);
788 788
789 /* form complete address */ 789 /* form complete address */
790 return CCIO_IOVA(iovp, offset); 790 return CCIO_IOVA(iovp, offset);
791 } 791 }
792 792
793 /** 793 /**
794 * ccio_unmap_single - Unmap an address range from the IOMMU. 794 * ccio_unmap_single - Unmap an address range from the IOMMU.
795 * @dev: The PCI device. 795 * @dev: The PCI device.
796 * @addr: The start address of the DMA region. 796 * @addr: The start address of the DMA region.
797 * @size: The length of the DMA region. 797 * @size: The length of the DMA region.
798 * @direction: The direction of the DMA transaction (to/from device). 798 * @direction: The direction of the DMA transaction (to/from device).
799 * 799 *
800 * This function implements the pci_unmap_single function. 800 * This function implements the pci_unmap_single function.
801 */ 801 */
802 static void 802 static void
803 ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, 803 ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
804 enum dma_data_direction direction) 804 enum dma_data_direction direction)
805 { 805 {
806 struct ioc *ioc; 806 struct ioc *ioc;
807 unsigned long flags; 807 unsigned long flags;
808 dma_addr_t offset = iova & ~IOVP_MASK; 808 dma_addr_t offset = iova & ~IOVP_MASK;
809 809
810 BUG_ON(!dev); 810 BUG_ON(!dev);
811 ioc = GET_IOC(dev); 811 ioc = GET_IOC(dev);
812 812
813 DBG_RUN("%s() iovp 0x%lx/%x\n", 813 DBG_RUN("%s() iovp 0x%lx/%x\n",
814 __FUNCTION__, (long)iova, size); 814 __FUNCTION__, (long)iova, size);
815 815
816 iova ^= offset; /* clear offset bits */ 816 iova ^= offset; /* clear offset bits */
817 size += offset; 817 size += offset;
818 size = ROUNDUP(size, IOVP_SIZE); 818 size = ROUNDUP(size, IOVP_SIZE);
819 819
820 spin_lock_irqsave(&ioc->res_lock, flags); 820 spin_lock_irqsave(&ioc->res_lock, flags);
821 821
822 #ifdef CCIO_MAP_STATS 822 #ifdef CCIO_MAP_STATS
823 ioc->usingle_calls++; 823 ioc->usingle_calls++;
824 ioc->usingle_pages += size >> IOVP_SHIFT; 824 ioc->usingle_pages += size >> IOVP_SHIFT;
825 #endif 825 #endif
826 826
827 ccio_mark_invalid(ioc, iova, size); 827 ccio_mark_invalid(ioc, iova, size);
828 ccio_free_range(ioc, iova, (size >> IOVP_SHIFT)); 828 ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
829 spin_unlock_irqrestore(&ioc->res_lock, flags); 829 spin_unlock_irqrestore(&ioc->res_lock, flags);
830 } 830 }
831 831
832 /** 832 /**
833 * ccio_alloc_consistent - Allocate a consistent DMA mapping. 833 * ccio_alloc_consistent - Allocate a consistent DMA mapping.
834 * @dev: The PCI device. 834 * @dev: The PCI device.
835 * @size: The length of the DMA region. 835 * @size: The length of the DMA region.
836 * @dma_handle: The DMA address handed back to the device (not the cpu). 836 * @dma_handle: The DMA address handed back to the device (not the cpu).
837 * 837 *
838 * This function implements the pci_alloc_consistent function. 838 * This function implements the pci_alloc_consistent function.
839 */ 839 */
840 static void * 840 static void *
841 ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) 841 ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
842 { 842 {
843 void *ret; 843 void *ret;
844 #if 0 844 #if 0
845 /* GRANT Need to establish hierarchy for non-PCI devs as well 845 /* GRANT Need to establish hierarchy for non-PCI devs as well
846 ** and then provide matching gsc_map_xxx() functions for them as well. 846 ** and then provide matching gsc_map_xxx() functions for them as well.
847 */ 847 */
848 if(!hwdev) { 848 if(!hwdev) {
849 /* only support PCI */ 849 /* only support PCI */
850 *dma_handle = 0; 850 *dma_handle = 0;
851 return 0; 851 return 0;
852 } 852 }
853 #endif 853 #endif
854 ret = (void *) __get_free_pages(flag, get_order(size)); 854 ret = (void *) __get_free_pages(flag, get_order(size));
855 855
856 if (ret) { 856 if (ret) {
857 memset(ret, 0, size); 857 memset(ret, 0, size);
858 *dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL); 858 *dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
859 } 859 }
860 860
861 return ret; 861 return ret;
862 } 862 }
863 863
864 /** 864 /**
865 * ccio_free_consistent - Free a consistent DMA mapping. 865 * ccio_free_consistent - Free a consistent DMA mapping.
866 * @dev: The PCI device. 866 * @dev: The PCI device.
867 * @size: The length of the DMA region. 867 * @size: The length of the DMA region.
868 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent. 868 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
869 * @dma_handle: The device address returned from the ccio_alloc_consistent. 869 * @dma_handle: The device address returned from the ccio_alloc_consistent.
870 * 870 *
871 * This function implements the pci_free_consistent function. 871 * This function implements the pci_free_consistent function.
872 */ 872 */
873 static void 873 static void
874 ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr, 874 ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
875 dma_addr_t dma_handle) 875 dma_addr_t dma_handle)
876 { 876 {
877 ccio_unmap_single(dev, dma_handle, size, 0); 877 ccio_unmap_single(dev, dma_handle, size, 0);
878 free_pages((unsigned long)cpu_addr, get_order(size)); 878 free_pages((unsigned long)cpu_addr, get_order(size));
879 } 879 }
880 880
881 /* 881 /*
882 ** Since 0 is a valid pdir_base index value, can't use that 882 ** Since 0 is a valid pdir_base index value, can't use that
883 ** to determine if a value is valid or not. Use a flag to indicate 883 ** to determine if a value is valid or not. Use a flag to indicate
884 ** the SG list entry contains a valid pdir index. 884 ** the SG list entry contains a valid pdir index.
885 */ 885 */
886 #define PIDE_FLAG 0x80000000UL 886 #define PIDE_FLAG 0x80000000UL
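
A sketch of the convention, with illustrative names, based on the comment above and on how the helpers consume the flag: the coalescing pass stashes the pdir index in sg_dma_address() with PIDE_FLAG or'd in, so index 0 stays distinguishable from "no index assigned":

	/* marking, in the coalescing pass (names illustrative) */
	sg_dma_address(sg) = PIDE_FLAG | (pide << IOVP_SHIFT);

	/* testing, in the pdir-fill pass */
	if (sg_dma_address(sg) & PIDE_FLAG) {
		unsigned long iovp = sg_dma_address(sg) & ~PIDE_FLAG;
		/* ... program pdir entries starting at PDIR_INDEX(iovp) ... */
	}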
887 887
888 #ifdef CCIO_MAP_STATS 888 #ifdef CCIO_MAP_STATS
889 #define IOMMU_MAP_STATS 889 #define IOMMU_MAP_STATS
890 #endif 890 #endif
891 #include "iommu-helpers.h" 891 #include "iommu-helpers.h"
892 892
893 /** 893 /**
894 * ccio_map_sg - Map the scatter/gather list into the IOMMU. 894 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
895 * @dev: The PCI device. 895 * @dev: The PCI device.
896 * @sglist: The scatter/gather list to be mapped in the IOMMU. 896 * @sglist: The scatter/gather list to be mapped in the IOMMU.
897 * @nents: The number of entries in the scatter/gather list. 897 * @nents: The number of entries in the scatter/gather list.
898 * @direction: The direction of the DMA transaction (to/from device). 898 * @direction: The direction of the DMA transaction (to/from device).
899 * 899 *
900 * This function implements the pci_map_sg function. 900 * This function implements the pci_map_sg function.
901 */ 901 */
902 static int 902 static int
903 ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 903 ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
904 enum dma_data_direction direction) 904 enum dma_data_direction direction)
905 { 905 {
906 struct ioc *ioc; 906 struct ioc *ioc;
907 int coalesced, filled = 0; 907 int coalesced, filled = 0;
908 unsigned long flags; 908 unsigned long flags;
909 unsigned long hint = hint_lookup[(int)direction]; 909 unsigned long hint = hint_lookup[(int)direction];
910 unsigned long prev_len = 0, current_len = 0; 910 unsigned long prev_len = 0, current_len = 0;
911 int i; 911 int i;
912 912
913 BUG_ON(!dev); 913 BUG_ON(!dev);
914 ioc = GET_IOC(dev); 914 ioc = GET_IOC(dev);
915 915
916 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); 916 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
917 917
918 /* Fast path single entry scatterlists. */ 918 /* Fast path single entry scatterlists. */
919 if (nents == 1) { 919 if (nents == 1) {
920 sg_dma_address(sglist) = ccio_map_single(dev, 920 sg_dma_address(sglist) = ccio_map_single(dev,
921 (void *)sg_virt_addr(sglist), sglist->length, 921 (void *)sg_virt_addr(sglist), sglist->length,
922 direction); 922 direction);
923 sg_dma_len(sglist) = sglist->length; 923 sg_dma_len(sglist) = sglist->length;
924 return 1; 924 return 1;
925 } 925 }
926 926
927 for(i = 0; i < nents; i++) 927 for(i = 0; i < nents; i++)
928 prev_len += sglist[i].length; 928 prev_len += sglist[i].length;
929 929
930 spin_lock_irqsave(&ioc->res_lock, flags); 930 spin_lock_irqsave(&ioc->res_lock, flags);
931 931
932 #ifdef CCIO_MAP_STATS 932 #ifdef CCIO_MAP_STATS
933 ioc->msg_calls++; 933 ioc->msg_calls++;
934 #endif 934 #endif
935 935
936 /* 936 /*
937 ** First coalesce the chunks and allocate I/O pdir space 937 ** First coalesce the chunks and allocate I/O pdir space
938 ** 938 **
939 ** If this is one DMA stream, we can properly map using the 939 ** If this is one DMA stream, we can properly map using the
940 ** correct virtual address associated with each DMA page. 940 ** correct virtual address associated with each DMA page.
941 ** w/o this association, we wouldn't have coherent DMA! 941 ** w/o this association, we wouldn't have coherent DMA!
942 ** Access to the virtual address is what forces a two pass algorithm. 942 ** Access to the virtual address is what forces a two pass algorithm.
943 */ 943 */
944 coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range); 944 coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
945 945
946 /* 946 /*
947 ** Program the I/O Pdir 947 ** Program the I/O Pdir
948 ** 948 **
949 ** map the virtual addresses to the I/O Pdir 949 ** map the virtual addresses to the I/O Pdir
950 ** o dma_address will contain the pdir index 950 ** o dma_address will contain the pdir index
951 ** o dma_len will contain the number of bytes to map 951 ** o dma_len will contain the number of bytes to map
952 ** o page/offset contain the virtual address. 952 ** o page/offset contain the virtual address.
953 */ 953 */
954 filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry); 954 filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);
955 955
956 spin_unlock_irqrestore(&ioc->res_lock, flags); 956 spin_unlock_irqrestore(&ioc->res_lock, flags);
957 957
958 BUG_ON(coalesced != filled); 958 BUG_ON(coalesced != filled);
959 959
960 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); 960 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
961 961
962 for (i = 0; i < filled; i++) 962 for (i = 0; i < filled; i++)
963 current_len += sg_dma_len(sglist + i); 963 current_len += sg_dma_len(sglist + i);
964 964
965 BUG_ON(current_len != prev_len); 965 BUG_ON(current_len != prev_len);
966 966
967 return filled; 967 return filled;
968 } 968 }
969 969
970 /** 970 /**
971 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU. 971 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
972 * @dev: The PCI device. 972 * @dev: The PCI device.
973 * @sglist: The scatter/gather list to be unmapped from the IOMMU. 973 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
974 * @nents: The number of entries in the scatter/gather list. 974 * @nents: The number of entries in the scatter/gather list.
975 * @direction: The direction of the DMA transaction (to/from device). 975 * @direction: The direction of the DMA transaction (to/from device).
976 * 976 *
977 * This function implements the pci_unmap_sg function. 977 * This function implements the pci_unmap_sg function.
978 */ 978 */
979 static void 979 static void
980 ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 980 ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
981 enum dma_data_direction direction) 981 enum dma_data_direction direction)
982 { 982 {
983 struct ioc *ioc; 983 struct ioc *ioc;
984 984
985 BUG_ON(!dev); 985 BUG_ON(!dev);
986 ioc = GET_IOC(dev); 986 ioc = GET_IOC(dev);
987 987
988 DBG_RUN_SG("%s() START %d entries, %08lx,%x\n", 988 DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
989 __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length); 989 __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
990 990
991 #ifdef CCIO_MAP_STATS 991 #ifdef CCIO_MAP_STATS
992 ioc->usg_calls++; 992 ioc->usg_calls++;
993 #endif 993 #endif
994 994
995 while(sg_dma_len(sglist) && nents--) { 995 while(sg_dma_len(sglist) && nents--) {
996 996
997 #ifdef CCIO_MAP_STATS 997 #ifdef CCIO_MAP_STATS
998 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; 998 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
999 #endif 999 #endif
1000 ccio_unmap_single(dev, sg_dma_address(sglist), 1000 ccio_unmap_single(dev, sg_dma_address(sglist),
1001 sg_dma_len(sglist), direction); 1001 sg_dma_len(sglist), direction);
1002 ++sglist; 1002 ++sglist;
1003 } 1003 }
1004 1004
1005 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); 1005 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
1006 } 1006 }
1007 1007
1008 static struct hppa_dma_ops ccio_ops = { 1008 static struct hppa_dma_ops ccio_ops = {
1009 .dma_supported = ccio_dma_supported, 1009 .dma_supported = ccio_dma_supported,
1010 .alloc_consistent = ccio_alloc_consistent, 1010 .alloc_consistent = ccio_alloc_consistent,
1011 .alloc_noncoherent = ccio_alloc_consistent, 1011 .alloc_noncoherent = ccio_alloc_consistent,
1012 .free_consistent = ccio_free_consistent, 1012 .free_consistent = ccio_free_consistent,
1013 .map_single = ccio_map_single, 1013 .map_single = ccio_map_single,
1014 .unmap_single = ccio_unmap_single, 1014 .unmap_single = ccio_unmap_single,
1015 .map_sg = ccio_map_sg, 1015 .map_sg = ccio_map_sg,
1016 .unmap_sg = ccio_unmap_sg, 1016 .unmap_sg = ccio_unmap_sg,
1017 .dma_sync_single_for_cpu = NULL, /* NOP for U2/Uturn */ 1017 .dma_sync_single_for_cpu = NULL, /* NOP for U2/Uturn */
1018 .dma_sync_single_for_device = NULL, /* NOP for U2/Uturn */ 1018 .dma_sync_single_for_device = NULL, /* NOP for U2/Uturn */
1019 .dma_sync_sg_for_cpu = NULL, /* ditto */ 1019 .dma_sync_sg_for_cpu = NULL, /* ditto */
1020 .dma_sync_sg_for_device = NULL, /* ditto */ 1020 .dma_sync_sg_for_device = NULL, /* ditto */
1021 }; 1021 };
1022 1022
1023 #ifdef CONFIG_PROC_FS 1023 #ifdef CONFIG_PROC_FS
1024 static int ccio_proc_info(struct seq_file *m, void *p) 1024 static int ccio_proc_info(struct seq_file *m, void *p)
1025 { 1025 {
1026 int len = 0; 1026 int len = 0;
1027 struct ioc *ioc = ioc_list; 1027 struct ioc *ioc = ioc_list;
1028 1028
1029 while (ioc != NULL) { 1029 while (ioc != NULL) {
1030 unsigned int total_pages = ioc->res_size << 3; 1030 unsigned int total_pages = ioc->res_size << 3;
1031 unsigned long avg = 0, min, max; 1031 unsigned long avg = 0, min, max;
1032 int j; 1032 int j;
1033 1033
1034 len += seq_printf(m, "%s\n", ioc->name); 1034 len += seq_printf(m, "%s\n", ioc->name);
1035 1035
1036 len += seq_printf(m, "Cujo 2.0 bug : %s\n", 1036 len += seq_printf(m, "Cujo 2.0 bug : %s\n",
1037 (ioc->cujo20_bug ? "yes" : "no")); 1037 (ioc->cujo20_bug ? "yes" : "no"));
1038 1038
1039 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n", 1039 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1040 total_pages * 8, total_pages); 1040 total_pages * 8, total_pages);
1041 1041
1042 #ifdef CCIO_MAP_STATS 1042 #ifdef CCIO_MAP_STATS
1043 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n", 1043 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1044 total_pages - ioc->used_pages, ioc->used_pages, 1044 total_pages - ioc->used_pages, ioc->used_pages,
1045 (int)(ioc->used_pages * 100 / total_pages)); 1045 (int)(ioc->used_pages * 100 / total_pages));
1046 #endif 1046 #endif
1047 1047
1048 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n", 1048 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1049 ioc->res_size, total_pages); 1049 ioc->res_size, total_pages);
1050 1050
1051 #ifdef CCIO_SEARCH_TIME 1051 #ifdef CCIO_SEARCH_TIME
1052 min = max = ioc->avg_search[0]; 1052 min = max = ioc->avg_search[0];
1053 for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) { 1053 for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
1054 avg += ioc->avg_search[j]; 1054 avg += ioc->avg_search[j];
1055 if(ioc->avg_search[j] > max) 1055 if(ioc->avg_search[j] > max)
1056 max = ioc->avg_search[j]; 1056 max = ioc->avg_search[j];
1057 if(ioc->avg_search[j] < min) 1057 if(ioc->avg_search[j] < min)
1058 min = ioc->avg_search[j]; 1058 min = ioc->avg_search[j];
1059 } 1059 }
1060 avg /= CCIO_SEARCH_SAMPLE; 1060 avg /= CCIO_SEARCH_SAMPLE;
1061 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", 1061 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1062 min, avg, max); 1062 min, avg, max);
1063 #endif 1063 #endif
1064 #ifdef CCIO_MAP_STATS 1064 #ifdef CCIO_MAP_STATS
1065 len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n", 1065 len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
1066 ioc->msingle_calls, ioc->msingle_pages, 1066 ioc->msingle_calls, ioc->msingle_pages,
1067 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); 1067 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1068 1068
1069 /* KLUGE - unmap_sg calls unmap_single for each mapped page */ 1069 /* KLUGE - unmap_sg calls unmap_single for each mapped page */
1070 min = ioc->usingle_calls - ioc->usg_calls; 1070 min = ioc->usingle_calls - ioc->usg_calls;
1071 max = ioc->usingle_pages - ioc->usg_pages; 1071 max = ioc->usingle_pages - ioc->usg_pages;
1072 len += seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n", 1072 len += seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
1073 min, max, (int)((max * 1000)/min)); 1073 min, max, (int)((max * 1000)/min));
1074 1074
1075 len += seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n", 1075 len += seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
1076 ioc->msg_calls, ioc->msg_pages, 1076 ioc->msg_calls, ioc->msg_pages,
1077 (int)((ioc->msg_pages * 1000)/ioc->msg_calls)); 1077 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1078 1078
1079 len += seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n", 1079 len += seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
1080 ioc->usg_calls, ioc->usg_pages, 1080 ioc->usg_calls, ioc->usg_pages,
1081 (int)((ioc->usg_pages * 1000)/ioc->usg_calls)); 1081 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1082 #endif /* CCIO_MAP_STATS */ 1082 #endif /* CCIO_MAP_STATS */
1083 1083
1084 ioc = ioc->next; 1084 ioc = ioc->next;
1085 } 1085 }
1086 1086
1087 return 0; 1087 return 0;
1088 } 1088 }
1089 1089
1090 static int ccio_proc_info_open(struct inode *inode, struct file *file) 1090 static int ccio_proc_info_open(struct inode *inode, struct file *file)
1091 { 1091 {
1092 return single_open(file, &ccio_proc_info, NULL); 1092 return single_open(file, &ccio_proc_info, NULL);
1093 } 1093 }
1094 1094
1095 static struct file_operations ccio_proc_info_fops = { 1095 static struct file_operations ccio_proc_info_fops = {
1096 .owner = THIS_MODULE, 1096 .owner = THIS_MODULE,
1097 .open = ccio_proc_info_open, 1097 .open = ccio_proc_info_open,
1098 .read = seq_read, 1098 .read = seq_read,
1099 .llseek = seq_lseek, 1099 .llseek = seq_lseek,
1100 .release = single_release, 1100 .release = single_release,
1101 }; 1101 };
1102 1102
1103 static int ccio_proc_bitmap_info(struct seq_file *m, void *p) 1103 static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
1104 { 1104 {
1105 int len = 0; 1105 int len = 0;
1106 struct ioc *ioc = ioc_list; 1106 struct ioc *ioc = ioc_list;
1107 1107
1108 while (ioc != NULL) { 1108 while (ioc != NULL) {
1109 u32 *res_ptr = (u32 *)ioc->res_map; 1109 u32 *res_ptr = (u32 *)ioc->res_map;
1110 int j; 1110 int j;
1111 1111
1112 for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) { 1112 for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
1113 if ((j & 7) == 0) 1113 if ((j & 7) == 0)
1114 len += seq_puts(m, "\n "); 1114 len += seq_puts(m, "\n ");
1115 len += seq_printf(m, "%08x", *res_ptr); 1115 len += seq_printf(m, "%08x", *res_ptr);
1116 res_ptr++; 1116 res_ptr++;
1117 } 1117 }
1118 len += seq_puts(m, "\n\n"); 1118 len += seq_puts(m, "\n\n");
1119 ioc = ioc->next; 1119 ioc = ioc->next;
1120 break; /* XXX - remove me */ 1120 break; /* XXX - remove me */
1121 } 1121 }
1122 1122
1123 return 0; 1123 return 0;
1124 } 1124 }
1125 1125
1126 static int ccio_proc_bitmap_open(struct inode *inode, struct file *file) 1126 static int ccio_proc_bitmap_open(struct inode *inode, struct file *file)
1127 { 1127 {
1128 return single_open(file, &ccio_proc_bitmap_info, NULL); 1128 return single_open(file, &ccio_proc_bitmap_info, NULL);
1129 } 1129 }
1130 1130
1131 static struct file_operations ccio_proc_bitmap_fops = { 1131 static struct file_operations ccio_proc_bitmap_fops = {
1132 .owner = THIS_MODULE, 1132 .owner = THIS_MODULE,
1133 .open = ccio_proc_bitmap_open, 1133 .open = ccio_proc_bitmap_open,
1134 .read = seq_read, 1134 .read = seq_read,
1135 .llseek = seq_lseek, 1135 .llseek = seq_lseek,
1136 .release = single_release, 1136 .release = single_release,
1137 }; 1137 };
1138 #endif 1138 #endif
1139 1139
1140 /** 1140 /**
1141 * ccio_find_ioc - Find the ioc in the ioc_list 1141 * ccio_find_ioc - Find the ioc in the ioc_list
1142 * @hw_path: The hardware path of the ioc. 1142 * @hw_path: The hardware path of the ioc.
1143 * 1143 *
1144 * This function searches the ioc_list for an ioc that matches 1144 * This function searches the ioc_list for an ioc that matches
1145 * the provided hardware path. 1145 * the provided hardware path.
1146 */ 1146 */
1147 static struct ioc * ccio_find_ioc(int hw_path) 1147 static struct ioc * ccio_find_ioc(int hw_path)
1148 { 1148 {
1149 int i; 1149 int i;
1150 struct ioc *ioc; 1150 struct ioc *ioc;
1151 1151
1152 ioc = ioc_list; 1152 ioc = ioc_list;
1153 for (i = 0; i < ioc_count; i++) { 1153 for (i = 0; i < ioc_count; i++) {
1154 if (ioc->hw_path == hw_path) 1154 if (ioc->hw_path == hw_path)
1155 return ioc; 1155 return ioc;
1156 1156
1157 ioc = ioc->next; 1157 ioc = ioc->next;
1158 } 1158 }
1159 1159
1160 return NULL; 1160 return NULL;
1161 } 1161 }
1162 1162
1163 /** 1163 /**
1164 * ccio_get_iommu - Find the iommu which controls this device 1164 * ccio_get_iommu - Find the iommu which controls this device
1165 * @dev: The parisc device. 1165 * @dev: The parisc device.
1166 * 1166 *
1167 * This function searches through the registered IOMMUs and returns 1167 * This function searches through the registered IOMMUs and returns
1168 * the appropriate IOMMU for the device based on its hardware path. 1168 * the appropriate IOMMU for the device based on its hardware path.
1169 */ 1169 */
1170 void * ccio_get_iommu(const struct parisc_device *dev) 1170 void * ccio_get_iommu(const struct parisc_device *dev)
1171 { 1171 {
1172 dev = find_pa_parent_type(dev, HPHW_IOA); 1172 dev = find_pa_parent_type(dev, HPHW_IOA);
1173 if (!dev) 1173 if (!dev)
1174 return NULL; 1174 return NULL;
1175 1175
1176 return ccio_find_ioc(dev->hw_path); 1176 return ccio_find_ioc(dev->hw_path);
1177 } 1177 }
1178 1178
1179 #define CUJO_20_STEP 0x10000000 /* inc upper nibble */ 1179 #define CUJO_20_STEP 0x10000000 /* inc upper nibble */
1180 1180
1181 /* Cujo 2.0 has a bug which will silently corrupt data being transferred 1181 /* Cujo 2.0 has a bug which will silently corrupt data being transferred
1182 * to/from certain pages. To avoid this happening, we mark these pages 1182 * to/from certain pages. To avoid this happening, we mark these pages
1183 * as `used', and ensure that nothing will try to allocate from them. 1183 * as `used', and ensure that nothing will try to allocate from them.
1184 */ 1184 */
1185 void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp) 1185 void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
1186 { 1186 {
1187 unsigned int idx; 1187 unsigned int idx;
1188 struct parisc_device *dev = parisc_parent(cujo); 1188 struct parisc_device *dev = parisc_parent(cujo);
1189 struct ioc *ioc = ccio_get_iommu(dev); 1189 struct ioc *ioc = ccio_get_iommu(dev);
1190 u8 *res_ptr; 1190 u8 *res_ptr;
1191 1191
1192 ioc->cujo20_bug = 1; 1192 ioc->cujo20_bug = 1;
1193 res_ptr = ioc->res_map; 1193 res_ptr = ioc->res_map;
1194 idx = PDIR_INDEX(iovp) >> 3; 1194 idx = PDIR_INDEX(iovp) >> 3;
1195 1195
1196 while (idx < ioc->res_size) { 1196 while (idx < ioc->res_size) {
1197 res_ptr[idx] |= 0xff; 1197 res_ptr[idx] |= 0xff;
1198 idx += PDIR_INDEX(CUJO_20_STEP) >> 3; 1198 idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
1199 } 1199 }
1200 } 1200 }
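
To make the stride concrete, a worked example assuming 4 KiB I/O pages (IOVP_SHIFT == 12) and PDIR_INDEX(iovp) == iovp >> IOVP_SHIFT:

	/*
	 * CUJO_20_STEP             = 0x10000000       -> 256 MiB of IOVA
	 * PDIR_INDEX(CUJO_20_STEP) = 0x10000000 >> 12 =  65536 pdir entries
	 * res_map stride           = 65536 >> 3       =   8192 bytes
	 *
	 * Each res_ptr[idx] |= 0xff claims one res_map byte, i.e. 8 pdir
	 * entries = 32 KiB of IOVA, repeated every 256 MiB.
	 */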
1201 1201
1202 #if 0 1202 #if 0
1203 /* GRANT - is this needed for U2 or not? */ 1203 /* GRANT - is this needed for U2 or not? */
1204 1204
1205 /* 1205 /*
1206 ** Get the size of the I/O TLB for this I/O MMU. 1206 ** Get the size of the I/O TLB for this I/O MMU.
1207 ** 1207 **
1208 ** If spa_shift is non-zero (ie probably U2), 1208 ** If spa_shift is non-zero (ie probably U2),
1209 ** then calculate the I/O TLB size using spa_shift. 1209 ** then calculate the I/O TLB size using spa_shift.
1210 ** 1210 **
1211 ** Otherwise we are supposed to get the IODC entry point ENTRY TLB 1211 ** Otherwise we are supposed to get the IODC entry point ENTRY TLB
1212 ** and execute it. However, both U2 and Uturn firmware supply spa_shift. 1212 ** and execute it. However, both U2 and Uturn firmware supply spa_shift.
1213 ** I think only Java (K/D/R-class too?) systems don't do this. 1213 ** I think only Java (K/D/R-class too?) systems don't do this.
1214 */ 1214 */
1215 static int 1215 static int
1216 ccio_get_iotlb_size(struct parisc_device *dev) 1216 ccio_get_iotlb_size(struct parisc_device *dev)
1217 { 1217 {
1218 if (dev->spa_shift == 0) { 1218 if (dev->spa_shift == 0) {
1219 panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__); 1219 panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__);
1220 } 1220 }
1221 return (1 << dev->spa_shift); 1221 return (1 << dev->spa_shift);
1222 } 1222 }
1223 #else 1223 #else
1224 1224
1225 /* Uturn supports 256 TLB entries */ 1225 /* Uturn supports 256 TLB entries */
1226 #define CCIO_CHAINID_SHIFT 8 1226 #define CCIO_CHAINID_SHIFT 8
1227 #define CCIO_CHAINID_MASK 0xff 1227 #define CCIO_CHAINID_MASK 0xff
1228 #endif /* 0 */ 1228 #endif /* 0 */
1229 1229
1230 /* We *can't* support JAVA (T600). Venture there at your own risk. */ 1230 /* We *can't* support JAVA (T600). Venture there at your own risk. */
1231 static struct parisc_device_id ccio_tbl[] = { 1231 static struct parisc_device_id ccio_tbl[] = {
1232 { HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */ 1232 { HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
1233 { HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */ 1233 { HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
1234 { 0, } 1234 { 0, }
1235 }; 1235 };
1236 1236
1237 static int ccio_probe(struct parisc_device *dev); 1237 static int ccio_probe(struct parisc_device *dev);
1238 1238
1239 static struct parisc_driver ccio_driver = { 1239 static struct parisc_driver ccio_driver = {
1240 .name = "ccio", 1240 .name = "ccio",
1241 .id_table = ccio_tbl, 1241 .id_table = ccio_tbl,
1242 .probe = ccio_probe, 1242 .probe = ccio_probe,
1243 }; 1243 };
1244 1244
1245 /** 1245 /**
1246 * ccio_ioc_init - Initialize the I/O Controller 1246 * ccio_ioc_init - Initialize the I/O Controller
1247 * @ioc: The I/O Controller. 1247 * @ioc: The I/O Controller.
1248 * 1248 *
1249 * Initialize the I/O Controller, which includes setting up the 1249 * Initialize the I/O Controller, which includes setting up the
1250 * I/O Page Directory, the resource map, and initializing the 1250 * I/O Page Directory, the resource map, and initializing the
1251 * U2/Uturn chip into virtual mode. 1251 * U2/Uturn chip into virtual mode.
1252 */ 1252 */
1253 static void 1253 static void
1254 ccio_ioc_init(struct ioc *ioc) 1254 ccio_ioc_init(struct ioc *ioc)
1255 { 1255 {
1256 int i; 1256 int i;
1257 unsigned int iov_order; 1257 unsigned int iov_order;
1258 u32 iova_space_size; 1258 u32 iova_space_size;
1259 1259
1260 /* 1260 /*
1261 ** Determine IOVA Space size from memory size. 1261 ** Determine IOVA Space size from memory size.
1262 ** 1262 **
1263 ** Ideally, PCI drivers would register the maximum number 1263 ** Ideally, PCI drivers would register the maximum number
1264 ** of DMA they can have outstanding for each device they 1264 ** of DMA they can have outstanding for each device they
1265 ** own. Next best thing would be to guess how much DMA 1265 ** own. Next best thing would be to guess how much DMA
1266 ** can be outstanding based on PCI Class/sub-class. Both 1266 ** can be outstanding based on PCI Class/sub-class. Both
1267 ** methods still require some "extra" to support PCI 1267 ** methods still require some "extra" to support PCI
1268 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). 1268 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
1269 */ 1269 */
1270 1270
1271 iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver)); 1271 iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver));
1272 1272
1273 /* limit IOVA space size to 1MB-1GB */ 1273 /* limit IOVA space size to 1MB-1GB */
1274 1274
1275 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { 1275 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1276 iova_space_size = 1 << (20 - PAGE_SHIFT); 1276 iova_space_size = 1 << (20 - PAGE_SHIFT);
1277 #ifdef __LP64__ 1277 #ifdef __LP64__
1278 } else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) { 1278 } else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1279 iova_space_size = 1 << (30 - PAGE_SHIFT); 1279 iova_space_size = 1 << (30 - PAGE_SHIFT);
1280 #endif 1280 #endif
1281 } 1281 }
1282 1282
1283 /* 1283 /*
1284 ** iova space must be log2() in size. 1284 ** iova space must be log2() in size.
1285 ** thus, pdir/res_map will also be log2(). 1285 ** thus, pdir/res_map will also be log2().
1286 */ 1286 */
1287 1287
1288 /* We could use larger page sizes in order to *decrease* the number 1288 /* We could use larger page sizes in order to *decrease* the number
1289 ** of mappings needed. (ie 8k pages means 1/2 the mappings). 1289 ** of mappings needed. (ie 8k pages means 1/2 the mappings).
1290 ** 1290 **
1291 ** Note: Grant Grundler says "Using 8k I/O pages isn't trivial either 1291 ** Note: Grant Grundler says "Using 8k I/O pages isn't trivial either
1292 ** since the pages must also be physically contiguous - typically 1292 ** since the pages must also be physically contiguous - typically
1293 ** this is the case under linux." 1293 ** this is the case under linux."
1294 */ 1294 */
1295 1295
1296 iov_order = get_order(iova_space_size << PAGE_SHIFT); 1296 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1297 1297
1298 /* iova_space_size is now bytes, not pages */ 1298 /* iova_space_size is now bytes, not pages */
1299 iova_space_size = 1 << (iov_order + PAGE_SHIFT); 1299 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1300 1300
1301 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64); 1301 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1302 1302
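	/*
	 * Worked example (assumed figures): a 256 MiB IOVA space with
	 * 4 KiB I/O pages gives
	 *   pdir entries = 0x10000000 / 0x1000            = 65536
	 *   pdir_size    = 65536 * sizeof(u64)            = 512 KiB (under the 8 MB cap)
	 *   res_size     = (pdir_size / sizeof(u64)) >> 3 = 8 KiB bitmap
	 */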
1303 BUG_ON(ioc->pdir_size > 8 * 1024 * 1024); /* max pdir size <= 8MB */ 1303 BUG_ON(ioc->pdir_size > 8 * 1024 * 1024); /* max pdir size <= 8MB */
1304 1304
1305 /* Verify it's a power of two */ 1305 /* Verify it's a power of two */
1306 BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT)); 1306 BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));
1307 1307
1308 DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n", 1308 DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
1309 __FUNCTION__, ioc->ioc_regs, 1309 __FUNCTION__, ioc->ioc_regs,
1310 (unsigned long) num_physpages >> (20 - PAGE_SHIFT), 1310 (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
1311 iova_space_size>>20, 1311 iova_space_size>>20,
1312 iov_order + PAGE_SHIFT); 1312 iov_order + PAGE_SHIFT);
1313 1313
1314 ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL, 1314 ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
1315 get_order(ioc->pdir_size)); 1315 get_order(ioc->pdir_size));
1316 if(NULL == ioc->pdir_base) { 1316 if(NULL == ioc->pdir_base) {
1317 panic("%s() could not allocate I/O Page Table\n", __FUNCTION__); 1317 panic("%s() could not allocate I/O Page Table\n", __FUNCTION__);
1318 } 1318 }
1319 memset(ioc->pdir_base, 0, ioc->pdir_size); 1319 memset(ioc->pdir_base, 0, ioc->pdir_size);
1320 1320
1321 BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base); 1321 BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
1322 DBG_INIT(" base %p\n", ioc->pdir_base); 1322 DBG_INIT(" base %p\n", ioc->pdir_base);
1323 1323
1324 /* resource map size dictated by pdir_size */ 1324 /* resource map size dictated by pdir_size */
1325 ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3; 1325 ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
1326 DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size); 1326 DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
1327 1327
1328 ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL, 1328 ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
1329 get_order(ioc->res_size)); 1329 get_order(ioc->res_size));
1330 if(NULL == ioc->res_map) { 1330 if(NULL == ioc->res_map) {
1331 panic("%s() could not allocate resource map\n", __FUNCTION__); 1331 panic("%s() could not allocate resource map\n", __FUNCTION__);
1332 } 1332 }
1333 memset(ioc->res_map, 0, ioc->res_size); 1333 memset(ioc->res_map, 0, ioc->res_size);
1334 1334
1335 /* Initialize the res_hint to 16 */ 1335 /* Initialize the res_hint to 16 */
1336 ioc->res_hint = 16; 1336 ioc->res_hint = 16;
1337 1337
1338 /* Initialize the spinlock */ 1338 /* Initialize the spinlock */
1339 spin_lock_init(&ioc->res_lock); 1339 spin_lock_init(&ioc->res_lock);
1340 1340
1341 /* 1341 /*
1342 ** Chainid is the uppermost bits of an IOVP used to determine 1342 ** Chainid is the uppermost bits of an IOVP used to determine
1343 ** which TLB entry an IOVP will use. 1343 ** which TLB entry an IOVP will use.
1344 */ 1344 */
1345 ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT; 1345 ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
1346 DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift); 1346 DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);
1347 1347
1348 /* 1348 /*
1349 ** Initialize IOA hardware 1349 ** Initialize IOA hardware
1350 */ 1350 */
1351 WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift, 1351 WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
1352 &ioc->ioc_regs->io_chain_id_mask); 1352 &ioc->ioc_regs->io_chain_id_mask);
1353 1353
1354 WRITE_U32(virt_to_phys(ioc->pdir_base), 1354 WRITE_U32(virt_to_phys(ioc->pdir_base),
1355 &ioc->ioc_regs->io_pdir_base); 1355 &ioc->ioc_regs->io_pdir_base);
1356 1356
1357 /* 1357 /*
1358 ** Go to "Virtual Mode" 1358 ** Go to "Virtual Mode"
1359 */ 1359 */
1360 WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control); 1360 WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);
1361 1361
1362 /* 1362 /*
1363 ** Initialize all I/O TLB entries to 0 (Valid bit off). 1363 ** Initialize all I/O TLB entries to 0 (Valid bit off).
1364 */ 1364 */
1365 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m); 1365 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
1366 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l); 1366 WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);
1367 1367
1368 for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) { 1368 for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
1369 WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)), 1369 WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
1370 &ioc->ioc_regs->io_command); 1370 &ioc->ioc_regs->io_command);
1371 } 1371 }
1372 } 1372 }
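
Continuing the assumed 256 MiB example for the chain-id arithmetic above:

	/*
	 * chainid_shift = get_order(0x10000000) + PAGE_SHIFT - CCIO_CHAINID_SHIFT
	 *               = 16 + 12 - 8 = 20
	 * so each of the 256 TLB entries (chain ids 0x00..0xff) covers a
	 * 1 MiB (1 << 20) slice of the 256 MiB IOVA space.
	 */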
1373 1373
1374 static void 1374 static void
1375 ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr) 1375 ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
1376 { 1376 {
1377 int result; 1377 int result;
1378 1378
1379 res->parent = NULL; 1379 res->parent = NULL;
1380 res->flags = IORESOURCE_MEM; 1380 res->flags = IORESOURCE_MEM;
1381 /* 1381 /*
1382 * The ((signed) ...) cast is required on 64-bit kernels because 1382 * The ((signed) ...) cast is required on 64-bit kernels because
1383 * we only want to sign-extend the lower 16 bits of the register. 1383 * we only want to sign-extend the lower 16 bits of the register.
1384 * The upper 16-bits of range registers are hardcoded to 0xffff. 1384 * The upper 16-bits of range registers are hardcoded to 0xffff.
1385 */ 1385 */
1386 res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16); 1386 res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
1387 res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1); 1387 res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
1388 res->name = name; 1388 res->name = name;
1389 /* 1389 /*
1390 * Check if this MMIO range is disabled 1390 * Check if this MMIO range is disabled
1391 */ 1391 */
1392 if (res->end + 1 == res->start) 1392 if (res->end + 1 == res->start)
1393 return; 1393 return;
1394 1394
1395 /* On some platforms (e.g. K-Class), we have already registered 1395 /* On some platforms (e.g. K-Class), we have already registered
1396 * resources for devices reported by firmware. Some are children 1396 * resources for devices reported by firmware. Some are children
1397 * of ccio. 1397 * of ccio.
1398 * "insert" ccio ranges in the mmio hierarchy (/proc/iomem). 1398 * "insert" ccio ranges in the mmio hierarchy (/proc/iomem).
1399 */ 1399 */
1400 result = insert_resource(&iomem_resource, res); 1400 result = insert_resource(&iomem_resource, res);
1401 if (result < 0) { 1401 if (result < 0) {
1402 printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n", 1402 printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
1403 __FUNCTION__, res->start, res->end); 1403 __FUNCTION__, res->start, res->end);
1404 } 1404 }
1405 } 1405 }
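
A worked example of the sign-extension trick in ccio_init_resource(), assuming a 64-bit kernel and a range register that reads back 0xf000:

	/*
	 *   READ_U32(ioaddr)                = 0x0000f000
	 *   (signed) 0xf000 << 16           = (s32) 0xf0000000  (negative)
	 *   (unsigned long)(s32) 0xf0000000 = 0xfffffffff0000000
	 * which reconstitutes the hardwired 0xffff in the upper 16 bits
	 * of the 64-bit MMIO address.
	 */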
1406 1406
1407 static void __init ccio_init_resources(struct ioc *ioc) 1407 static void __init ccio_init_resources(struct ioc *ioc)
1408 { 1408 {
1409 struct resource *res = ioc->mmio_region; 1409 struct resource *res = ioc->mmio_region;
1410 char *name = kmalloc(14, GFP_KERNEL); 1410 char *name = kmalloc(14, GFP_KERNEL);
1411 1411
1412 snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path); 1412 snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
1413 1413
1414 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low); 1414 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
1415 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv); 1415 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
1416 } 1416 }
1417 1417
1418 static int new_ioc_area(struct resource *res, unsigned long size, 1418 static int new_ioc_area(struct resource *res, unsigned long size,
1419 unsigned long min, unsigned long max, unsigned long align) 1419 unsigned long min, unsigned long max, unsigned long align)
1420 { 1420 {
1421 if (max <= min) 1421 if (max <= min)
1422 return -EBUSY; 1422 return -EBUSY;
1423 1423
1424 res->start = (max - size + 1) &~ (align - 1); 1424 res->start = (max - size + 1) &~ (align - 1);
1425 res->end = res->start + size; 1425 res->end = res->start + size;
1426 1426
1427 /* We might be trying to expand the MMIO range to include 1427 /* We might be trying to expand the MMIO range to include
1428 * a child device that has already registered its MMIO space. 1428 * a child device that has already registered its MMIO space.
1429 * Use "insert" instead of request_resource(). 1429 * Use "insert" instead of request_resource().
1430 */ 1430 */
1431 if (!insert_resource(&iomem_resource, res)) 1431 if (!insert_resource(&iomem_resource, res))
1432 return 0; 1432 return 0;
1433 1433
1434 return new_ioc_area(res, size, min, max - size, align); 1434 return new_ioc_area(res, size, min, max - size, align);
1435 } 1435 }
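
new_ioc_area() is a tail-recursive top-down search: it proposes the highest
aligned block that fits below max and, when insert_resource() refuses it,
lowers the ceiling by size and tries again. The same search written
iteratively, as a hedged sketch (try_claim() is a hypothetical stand-in for
insert_resource()):

    static int try_claim(unsigned long start, unsigned long end);

    /* Returns 0 and sets *start on success, -1 (-EBUSY in the driver)
     * once the window [min, max] is exhausted. */
    static int find_area(unsigned long size, unsigned long min,
                         unsigned long max, unsigned long align,
                         unsigned long *start)
    {
            while (max > min) {
                    /* highest aligned candidate still below max */
                    unsigned long cand = (max - size + 1) & ~(align - 1);

                    if (try_claim(cand, cand + size)) {
                            *start = cand;
                            return 0;
                    }
                    max -= size;    /* lower the ceiling, retry */
            }
            return -1;
    }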
1436 1436
1437 static int expand_ioc_area(struct resource *res, unsigned long size, 1437 static int expand_ioc_area(struct resource *res, unsigned long size,
1438 unsigned long min, unsigned long max, unsigned long align) 1438 unsigned long min, unsigned long max, unsigned long align)
1439 { 1439 {
1440 unsigned long start, len; 1440 unsigned long start, len;
1441 1441
1442 if (!res->parent) 1442 if (!res->parent)
1443 return new_ioc_area(res, size, min, max, align); 1443 return new_ioc_area(res, size, min, max, align);
1444 1444
1445 start = (res->start - size) &~ (align - 1); 1445 start = (res->start - size) &~ (align - 1);
1446 len = res->end - start + 1; 1446 len = res->end - start + 1;
1447 if (start >= min) { 1447 if (start >= min) {
1448 if (!adjust_resource(res, start, len)) 1448 if (!adjust_resource(res, start, len))
1449 return 0; 1449 return 0;
1450 } 1450 }
1451 1451
1452 start = res->start; 1452 start = res->start;
1453 len = ((size + res->end + align) &~ (align - 1)) - start; 1453 len = ((size + res->end + align) &~ (align - 1)) - start;
1454 if (start + len <= max) { 1454 if (start + len <= max) {
1455 if (!adjust_resource(res, start, len)) 1455 if (!adjust_resource(res, start, len))
1456 return 0; 1456 return 0;
1457 } 1457 }
1458 1458
1459 return -EBUSY; 1459 return -EBUSY;
1460 } 1460 }
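
expand_ioc_area() tries to grow an already-registered window first downward
(an aligned start `size` bytes lower) and then upward (a length rounded so
the window ends on an alignment boundary past end + size). A standalone
sketch of just the arithmetic, with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long align = 0x800000UL, size = 0x800000UL;
            unsigned long start = 0xf1000000UL, end = 0xf17fffffUL;

            /* grow-down candidate start */
            unsigned long down = (start - size) & ~(align - 1);

            /* grow-up candidate length */
            unsigned long up_len = ((size + end + align) & ~(align - 1)) - start;

            printf("grow down to start 0x%lx\n", down);   /* 0xf0800000 */
            printf("grow up to length 0x%lx\n", up_len);  /* 0x1000000 */
            return 0;
    }

Either way the window gains at least `size` bytes; the adjust_resource()
calls then check that the enlarged range still fits under the parent.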
1461 1461
1462 /* 1462 /*
1463 * Dino calls this function. Beware that we may get called on systems 1463 * Dino calls this function. Beware that we may get called on systems
1464 * which have no IOC (725, B180, C160L, etc) but do have a Dino. 1464 * which have no IOC (725, B180, C160L, etc) but do have a Dino.
1465 * So it's legal to find no parent IOC. 1465 * So it's legal to find no parent IOC.
1466 * 1466 *
1467 * Some other issues: one of the resources in the ioc may be unassigned. 1467 * Some other issues: one of the resources in the ioc may be unassigned.
1468 */ 1468 */
1469 int ccio_allocate_resource(const struct parisc_device *dev, 1469 int ccio_allocate_resource(const struct parisc_device *dev,
1470 struct resource *res, unsigned long size, 1470 struct resource *res, unsigned long size,
1471 unsigned long min, unsigned long max, unsigned long align) 1471 unsigned long min, unsigned long max, unsigned long align)
1472 { 1472 {
1473 struct resource *parent = &iomem_resource; 1473 struct resource *parent = &iomem_resource;
1474 struct ioc *ioc = ccio_get_iommu(dev); 1474 struct ioc *ioc = ccio_get_iommu(dev);
1475 if (!ioc) 1475 if (!ioc)
1476 goto out; 1476 goto out;
1477 1477
1478 parent = ioc->mmio_region; 1478 parent = ioc->mmio_region;
1479 if (parent->parent && 1479 if (parent->parent &&
1480 !allocate_resource(parent, res, size, min, max, align, NULL, NULL)) 1480 !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
1481 return 0; 1481 return 0;
1482 1482
1483 if ((parent + 1)->parent && 1483 if ((parent + 1)->parent &&
1484 !allocate_resource(parent + 1, res, size, min, max, align, 1484 !allocate_resource(parent + 1, res, size, min, max, align,
1485 NULL, NULL)) 1485 NULL, NULL))
1486 return 0; 1486 return 0;
1487 1487
1488 if (!expand_ioc_area(parent, size, min, max, align)) { 1488 if (!expand_ioc_area(parent, size, min, max, align)) {
1489 __raw_writel(((parent->start)>>16) | 0xffff0000, 1489 __raw_writel(((parent->start)>>16) | 0xffff0000,
1490 &ioc->ioc_regs->io_io_low); 1490 &ioc->ioc_regs->io_io_low);
1491 __raw_writel(((parent->end)>>16) | 0xffff0000, 1491 __raw_writel(((parent->end)>>16) | 0xffff0000,
1492 &ioc->ioc_regs->io_io_high); 1492 &ioc->ioc_regs->io_io_high);
1493 } else if (!expand_ioc_area(parent + 1, size, min, max, align)) { 1493 } else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
1494 parent++; 1494 parent++;
1495 __raw_writel(((parent->start)>>16) | 0xffff0000, 1495 __raw_writel(((parent->start)>>16) | 0xffff0000,
1496 &ioc->ioc_regs->io_io_low_hv); 1496 &ioc->ioc_regs->io_io_low_hv);
1497 __raw_writel(((parent->end)>>16) | 0xffff0000, 1497 __raw_writel(((parent->end)>>16) | 0xffff0000,
1498 &ioc->ioc_regs->io_io_high_hv); 1498 &ioc->ioc_regs->io_io_high_hv);
1499 } else { 1499 } else {
1500 return -EBUSY; 1500 return -EBUSY;
1501 } 1501 }
1502 1502
1503 out: 1503 out:
1504 return allocate_resource(parent, res, size, min, max, align, NULL,NULL); 1504 return allocate_resource(parent, res, size, min, max, align, NULL,NULL);
1505 } 1505 }
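
The __raw_writel() values above are the inverse of the decode in
ccio_init_resource(): bits 31..16 of the address go into the low half of
the range register and the high half is forced to 0xffff. A round trip of
that encoding as a standalone sketch (illustrative F-space base, 64KB
aligned since the registers only hold bits 31..16):

    #include <assert.h>

    static unsigned int ccio_range_encode(unsigned long addr)
    {
            return (unsigned int)(addr >> 16) | 0xffff0000;
    }

    static unsigned long ccio_range_decode(unsigned int reg)
    {
            return (unsigned long)((signed)reg << 16);
    }

    int main(void)
    {
            unsigned long start = 0xfffffffff0800000UL;

            assert(ccio_range_decode(ccio_range_encode(start)) == start);
            return 0;
    }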
1506 1506
1507 int ccio_request_resource(const struct parisc_device *dev, 1507 int ccio_request_resource(const struct parisc_device *dev,
1508 struct resource *res) 1508 struct resource *res)
1509 { 1509 {
1510 struct resource *parent; 1510 struct resource *parent;
1511 struct ioc *ioc = ccio_get_iommu(dev); 1511 struct ioc *ioc = ccio_get_iommu(dev);
1512 1512
1513 if (!ioc) { 1513 if (!ioc) {
1514 parent = &iomem_resource; 1514 parent = &iomem_resource;
1515 } else if ((ioc->mmio_region->start <= res->start) && 1515 } else if ((ioc->mmio_region->start <= res->start) &&
1516 (res->end <= ioc->mmio_region->end)) { 1516 (res->end <= ioc->mmio_region->end)) {
1517 parent = ioc->mmio_region; 1517 parent = ioc->mmio_region;
1518 } else if (((ioc->mmio_region + 1)->start <= res->start) && 1518 } else if (((ioc->mmio_region + 1)->start <= res->start) &&
1519 (res->end <= (ioc->mmio_region + 1)->end)) { 1519 (res->end <= (ioc->mmio_region + 1)->end)) {
1520 parent = ioc->mmio_region + 1; 1520 parent = ioc->mmio_region + 1;
1521 } else { 1521 } else {
1522 return -EBUSY; 1522 return -EBUSY;
1523 } 1523 }
1524 1524
1525 /* "transparent" bus bridges need to register MMIO resources 1525 /* "transparent" bus bridges need to register MMIO resources
1526 * firmware assigned them. e.g. children of hppb.c (e.g. K-class) 1526 * firmware assigned them. e.g. children of hppb.c (e.g. K-class)
1527 * registered their resources in the PDC "bus walk" (See 1527 * registered their resources in the PDC "bus walk" (See
1528 * arch/parisc/kernel/inventory.c). 1528 * arch/parisc/kernel/inventory.c).
1529 */ 1529 */
1530 return insert_resource(parent, res); 1530 return insert_resource(parent, res);
1531 } 1531 }
1532 1532
1533 /** 1533 /**
1534 * ccio_probe - Determine if ccio should claim this device. 1534 * ccio_probe - Determine if ccio should claim this device.
1535 * @dev: The device which has been found 1535 * @dev: The device which has been found
1536 * 1536 *
1537 * Determine if ccio should claim this chip (return 0) or not (return 1). 1537 * Determine if ccio should claim this chip (return 0) or not (return 1).
1538 * If so, initialize the chip and tell other partners in crime they 1538 * If so, initialize the chip and tell other partners in crime they
1539 * have work to do. 1539 * have work to do.
1540 */ 1540 */
1541 static int ccio_probe(struct parisc_device *dev) 1541 static int ccio_probe(struct parisc_device *dev)
1542 { 1542 {
1543 int i; 1543 int i;
1544 struct ioc *ioc, **ioc_p = &ioc_list; 1544 struct ioc *ioc, **ioc_p = &ioc_list;
1545 struct proc_dir_entry *info_entry, *bitmap_entry; 1545 struct proc_dir_entry *info_entry, *bitmap_entry;
1546 1546
1547 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); 1547 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
1548 if (ioc == NULL) { 1548 if (ioc == NULL) {
1549 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); 1549 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
1550 return 1; 1550 return 1;
1551 } 1551 }
1552 1552
1553 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn"; 1553 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
1554 1554
1555 printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name, dev->hpa.start); 1555 printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name, dev->hpa.start);
1556 1556
1557 for (i = 0; i < ioc_count; i++) { 1557 for (i = 0; i < ioc_count; i++) {
1558 ioc_p = &(*ioc_p)->next; 1558 ioc_p = &(*ioc_p)->next;
1559 } 1559 }
1560 *ioc_p = ioc; 1560 *ioc_p = ioc;
1561 1561
1562 ioc->hw_path = dev->hw_path; 1562 ioc->hw_path = dev->hw_path;
1563 ioc->ioc_regs = ioremap(dev->hpa.start, 4096); 1563 ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
1564 ccio_ioc_init(ioc); 1564 ccio_ioc_init(ioc);
1565 ccio_init_resources(ioc); 1565 ccio_init_resources(ioc);
1566 hppa_dma_ops = &ccio_ops; 1566 hppa_dma_ops = &ccio_ops;
1567 dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL); 1567 dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);
1568 1568
1569 /* if this fails, no I/O cards will work, so may as well bug */ 1569 /* if this fails, no I/O cards will work, so may as well bug */
1570 BUG_ON(dev->dev.platform_data == NULL); 1570 BUG_ON(dev->dev.platform_data == NULL);
1571 HBA_DATA(dev->dev.platform_data)->iommu = ioc; 1571 HBA_DATA(dev->dev.platform_data)->iommu = ioc;
1572 1572
1573 if (ioc_count == 0) { 1573 if (ioc_count == 0) {
1574 info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root); 1574 info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root);
1575 if (info_entry) 1575 if (info_entry)
1576 info_entry->proc_fops = &ccio_proc_info_fops; 1576 info_entry->proc_fops = &ccio_proc_info_fops;
1577 1577
1578 bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root); 1578 bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root);
1579 if (bitmap_entry) 1579 if (bitmap_entry)
1580 bitmap_entry->proc_fops = &ccio_proc_bitmap_fops; 1580 bitmap_entry->proc_fops = &ccio_proc_bitmap_fops;
1581 } 1581 }
1582 1582
1583 ioc_count++; 1583 ioc_count++;
1584 1584
1585 parisc_vmerge_boundary = IOVP_SIZE; 1585 parisc_vmerge_boundary = IOVP_SIZE;
1586 parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE; 1586 parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE;
1587 parisc_has_iommu(); 1587 parisc_has_iommu();
1588 return 0; 1588 return 0;
1589 } 1589 }
1590 1590
1591 /** 1591 /**
1592 * ccio_init - ccio initialization procedure. 1592 * ccio_init - ccio initialization procedure.
1593 * 1593 *
1594 * Register this driver. 1594 * Register this driver.
1595 */ 1595 */
1596 void __init ccio_init(void) 1596 void __init ccio_init(void)
1597 { 1597 {
1598 register_parisc_driver(&ccio_driver); 1598 register_parisc_driver(&ccio_driver);
1599 } 1599 }
1600 1600
1601 1601
drivers/parisc/dino.c
1 /* 1 /*
2 ** DINO manager 2 ** DINO manager
3 ** 3 **
4 ** (c) Copyright 1999 Red Hat Software 4 ** (c) Copyright 1999 Red Hat Software
5 ** (c) Copyright 1999 SuSE GmbH 5 ** (c) Copyright 1999 SuSE GmbH
6 ** (c) Copyright 1999,2000 Hewlett-Packard Company 6 ** (c) Copyright 1999,2000 Hewlett-Packard Company
7 ** (c) Copyright 2000 Grant Grundler 7 ** (c) Copyright 2000 Grant Grundler
8 ** (c) Copyright 2006 Helge Deller
8 ** 9 **
9 ** This program is free software; you can redistribute it and/or modify 10 ** This program is free software; you can redistribute it and/or modify
10 ** it under the terms of the GNU General Public License as published by 11 ** it under the terms of the GNU General Public License as published by
11 ** the Free Software Foundation; either version 2 of the License, or 12 ** the Free Software Foundation; either version 2 of the License, or
12 ** (at your option) any later version. 13 ** (at your option) any later version.
13 ** 14 **
14 ** This module provides access to Dino PCI bus (config/IOport spaces) 15 ** This module provides access to Dino PCI bus (config/IOport spaces)
15 ** and helps manage Dino IRQ lines. 16 ** and helps manage Dino IRQ lines.
16 ** 17 **
17 ** Dino interrupt handling is a bit complicated. 18 ** Dino interrupt handling is a bit complicated.
18 ** Dino always writes to the broadcast EIR via irr0 for now. 19 ** Dino always writes to the broadcast EIR via irr0 for now.
19 ** (BIG WARNING: using broadcast EIR is a really bad thing for SMP!) 20 ** (BIG WARNING: using broadcast EIR is a really bad thing for SMP!)
20 ** Only one processor interrupt is used for the 11 IRQ line 21 ** Only one processor interrupt is used for the 11 IRQ line
21 ** inputs to dino. 22 ** inputs to dino.
22 ** 23 **
23 ** The difference between Built-in Dino and Card-Mode 24 ** The difference between Built-in Dino and Card-Mode
24 ** dino is in chip initialization and pci device initialization. 25 ** dino is in chip initialization and pci device initialization.
25 ** 26 **
26 ** Linux drivers can only use Card-Mode Dino if the PCI devices' I/O port 27 ** Linux drivers can only use Card-Mode Dino if the PCI devices' I/O port
27 ** BARs are configured and used by the driver. Programming MMIO addresses 28 ** BARs are configured and used by the driver. Programming MMIO addresses
28 ** requires substantial knowledge of the available Host I/O address ranges 29 ** requires substantial knowledge of the available Host I/O address ranges
29 ** and is currently not supported. Port/Config accessor functions are the 30 ** and is currently not supported. Port/Config accessor functions are the
30 ** same. "BIOS" differences are handled within the existing routines. 31 ** same. "BIOS" differences are handled within the existing routines.
31 */ 32 */
32 33
33 /* Changes : 34 /* Changes :
34 ** 2001-06-14 : Clement Moyroud (moyroudc@esiee.fr) 35 ** 2001-06-14 : Clement Moyroud (moyroudc@esiee.fr)
35 ** - added support for the integrated RS232. 36 ** - added support for the integrated RS232.
36 */ 37 */
37 38
38 /* 39 /*
39 ** TODO: create a virtual address for each Dino HPA. 40 ** TODO: create a virtual address for each Dino HPA.
40 ** GSC code might be able to do this since IODC data tells us 41 ** GSC code might be able to do this since IODC data tells us
41 ** how many pages are used. PCI subsystem could (must?) do this 42 ** how many pages are used. PCI subsystem could (must?) do this
42 ** for PCI drivers devices which implement/use MMIO registers. 43 ** for PCI drivers devices which implement/use MMIO registers.
43 */ 44 */
44 45
45 #include <linux/config.h> 46 #include <linux/config.h>
46 #include <linux/delay.h> 47 #include <linux/delay.h>
47 #include <linux/types.h> 48 #include <linux/types.h>
48 #include <linux/kernel.h> 49 #include <linux/kernel.h>
49 #include <linux/pci.h> 50 #include <linux/pci.h>
50 #include <linux/init.h> 51 #include <linux/init.h>
51 #include <linux/ioport.h> 52 #include <linux/ioport.h>
52 #include <linux/slab.h> 53 #include <linux/slab.h>
53 #include <linux/interrupt.h> /* for struct irqaction */ 54 #include <linux/interrupt.h> /* for struct irqaction */
54 #include <linux/spinlock.h> /* for spinlock_t and prototypes */ 55 #include <linux/spinlock.h> /* for spinlock_t and prototypes */
55 56
56 #include <asm/pdc.h> 57 #include <asm/pdc.h>
57 #include <asm/page.h> 58 #include <asm/page.h>
58 #include <asm/system.h> 59 #include <asm/system.h>
59 #include <asm/io.h> 60 #include <asm/io.h>
60 #include <asm/hardware.h> 61 #include <asm/hardware.h>
61 62
62 #include "gsc.h" 63 #include "gsc.h"
63 64
64 #undef DINO_DEBUG 65 #undef DINO_DEBUG
65 66
66 #ifdef DINO_DEBUG 67 #ifdef DINO_DEBUG
67 #define DBG(x...) printk(x) 68 #define DBG(x...) printk(x)
68 #else 69 #else
69 #define DBG(x...) 70 #define DBG(x...)
70 #endif 71 #endif
71 72
72 /* 73 /*
73 ** Config accessor functions only pass in the 8-bit bus number 74 ** Config accessor functions only pass in the 8-bit bus number
74 ** and not the 8-bit "PCI Segment" number. Each Dino will be 75 ** and not the 8-bit "PCI Segment" number. Each Dino will be
75 ** assigned a PCI bus number based on "when" it's discovered. 76 ** assigned a PCI bus number based on "when" it's discovered.
76 ** 77 **
77 ** The "secondary" bus number is set to this before calling 78 ** The "secondary" bus number is set to this before calling
78 ** pci_scan_bus(). If any PPB's are present, the scan will 79 ** pci_scan_bus(). If any PPB's are present, the scan will
79 ** discover them and update the "secondary" and "subordinate" 80 ** discover them and update the "secondary" and "subordinate"
80 ** fields in Dino's pci_bus structure. 81 ** fields in Dino's pci_bus structure.
81 ** 82 **
82 ** Changes in the configuration *will* result in a different 83 ** Changes in the configuration *will* result in a different
83 ** bus number for each dino. 84 ** bus number for each dino.
84 */ 85 */
85 86
86 #define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA) 87 #define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA)
87 #define is_cujo(id) ((id)->hversion == 0x682) 88 #define is_cujo(id) ((id)->hversion == 0x682)
88 89
89 #define DINO_IAR0 0x004 90 #define DINO_IAR0 0x004
90 #define DINO_IODC_ADDR 0x008 91 #define DINO_IODC_ADDR 0x008
91 #define DINO_IODC_DATA_0 0x008 92 #define DINO_IODC_DATA_0 0x008
92 #define DINO_IODC_DATA_1 0x008 93 #define DINO_IODC_DATA_1 0x008
93 #define DINO_IRR0 0x00C 94 #define DINO_IRR0 0x00C
94 #define DINO_IAR1 0x010 95 #define DINO_IAR1 0x010
95 #define DINO_IRR1 0x014 96 #define DINO_IRR1 0x014
96 #define DINO_IMR 0x018 97 #define DINO_IMR 0x018
97 #define DINO_IPR 0x01C 98 #define DINO_IPR 0x01C
98 #define DINO_TOC_ADDR 0x020 99 #define DINO_TOC_ADDR 0x020
99 #define DINO_ICR 0x024 100 #define DINO_ICR 0x024
100 #define DINO_ILR 0x028 101 #define DINO_ILR 0x028
101 #define DINO_IO_COMMAND 0x030 102 #define DINO_IO_COMMAND 0x030
102 #define DINO_IO_STATUS 0x034 103 #define DINO_IO_STATUS 0x034
103 #define DINO_IO_CONTROL 0x038 104 #define DINO_IO_CONTROL 0x038
104 #define DINO_IO_GSC_ERR_RESP 0x040 105 #define DINO_IO_GSC_ERR_RESP 0x040
105 #define DINO_IO_ERR_INFO 0x044 106 #define DINO_IO_ERR_INFO 0x044
106 #define DINO_IO_PCI_ERR_RESP 0x048 107 #define DINO_IO_PCI_ERR_RESP 0x048
107 #define DINO_IO_FBB_EN 0x05c 108 #define DINO_IO_FBB_EN 0x05c
108 #define DINO_IO_ADDR_EN 0x060 109 #define DINO_IO_ADDR_EN 0x060
109 #define DINO_PCI_ADDR 0x064 110 #define DINO_PCI_ADDR 0x064
110 #define DINO_CONFIG_DATA 0x068 111 #define DINO_CONFIG_DATA 0x068
111 #define DINO_IO_DATA 0x06c 112 #define DINO_IO_DATA 0x06c
112 #define DINO_MEM_DATA 0x070 /* Dino 3.x only */ 113 #define DINO_MEM_DATA 0x070 /* Dino 3.x only */
113 #define DINO_GSC2X_CONFIG 0x7b4 114 #define DINO_GSC2X_CONFIG 0x7b4
114 #define DINO_GMASK 0x800 115 #define DINO_GMASK 0x800
115 #define DINO_PAMR 0x804 116 #define DINO_PAMR 0x804
116 #define DINO_PAPR 0x808 117 #define DINO_PAPR 0x808
117 #define DINO_DAMODE 0x80c 118 #define DINO_DAMODE 0x80c
118 #define DINO_PCICMD 0x810 119 #define DINO_PCICMD 0x810
119 #define DINO_PCISTS 0x814 120 #define DINO_PCISTS 0x814
120 #define DINO_MLTIM 0x81c 121 #define DINO_MLTIM 0x81c
121 #define DINO_BRDG_FEAT 0x820 122 #define DINO_BRDG_FEAT 0x820
122 #define DINO_PCIROR 0x824 123 #define DINO_PCIROR 0x824
123 #define DINO_PCIWOR 0x828 124 #define DINO_PCIWOR 0x828
124 #define DINO_TLTIM 0x830 125 #define DINO_TLTIM 0x830
125 126
126 #define DINO_IRQS 11 /* bits 0-10 are architected */ 127 #define DINO_IRQS 11 /* bits 0-10 are architected */
127 #define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ 128 #define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
128 #define DINO_LOCAL_IRQS (DINO_IRQS+1) 129 #define DINO_LOCAL_IRQS (DINO_IRQS+1)
129 130
130 #define DINO_MASK_IRQ(x) (1<<(x)) 131 #define DINO_MASK_IRQ(x) (1<<(x))
131 132
132 #define PCIINTA 0x001 133 #define PCIINTA 0x001
133 #define PCIINTB 0x002 134 #define PCIINTB 0x002
134 #define PCIINTC 0x004 135 #define PCIINTC 0x004
135 #define PCIINTD 0x008 136 #define PCIINTD 0x008
136 #define PCIINTE 0x010 137 #define PCIINTE 0x010
137 #define PCIINTF 0x020 138 #define PCIINTF 0x020
138 #define GSCEXTINT 0x040 139 #define GSCEXTINT 0x040
139 /* #define xxx 0x080 - bit 7 is "default" */ 140 /* #define xxx 0x080 - bit 7 is "default" */
140 /* #define xxx 0x100 - bit 8 not used */ 141 /* #define xxx 0x100 - bit 8 not used */
141 /* #define xxx 0x200 - bit 9 not used */ 142 /* #define xxx 0x200 - bit 9 not used */
142 #define RS232INT 0x400 143 #define RS232INT 0x400
143 144
144 struct dino_device 145 struct dino_device
145 { 146 {
146 struct pci_hba_data hba; /* 'C' inheritance - must be first */ 147 struct pci_hba_data hba; /* 'C' inheritance - must be first */
147 spinlock_t dinosaur_pen; 148 spinlock_t dinosaur_pen;
148 unsigned long txn_addr; /* EIR addr to generate interrupt */ 149 unsigned long txn_addr; /* EIR addr to generate interrupt */
149 u32 txn_data; /* EIR data assign to each dino */ 150 u32 txn_data; /* EIR data assign to each dino */
150 u32 imr; /* IRQ's which are enabled */ 151 u32 imr; /* IRQ's which are enabled */
151 int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ 152 int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */
152 #ifdef DINO_DEBUG 153 #ifdef DINO_DEBUG
153 unsigned int dino_irr0; /* save most recent IRQ line stat */ 154 unsigned int dino_irr0; /* save most recent IRQ line stat */
154 #endif 155 #endif
155 }; 156 };
156 157
157 /* Looks nice and keeps the compiler happy */ 158 /* Looks nice and keeps the compiler happy */
158 #define DINO_DEV(d) ((struct dino_device *) d) 159 #define DINO_DEV(d) ((struct dino_device *) d)
159 160
160 161
161 /* 162 /*
162 * Dino Configuration Space Accessor Functions 163 * Dino Configuration Space Accessor Functions
163 */ 164 */
164 165
165 #define DINO_CFG_TOK(bus,dfn,pos) ((u32) ((bus)<<16 | (dfn)<<8 | (pos))) 166 #define DINO_CFG_TOK(bus,dfn,pos) ((u32) ((bus)<<16 | (dfn)<<8 | (pos)))
166 167
167 /* 168 /*
168 * keep the current highest bus count to assist in allocating busses. This 169 * keep the current highest bus count to assist in allocating busses. This
169 * tries to keep a global bus count total so that when we discover an 170 * tries to keep a global bus count total so that when we discover an
170 * entirely new bus, it can be given a unique bus number. 171 * entirely new bus, it can be given a unique bus number.
171 */ 172 */
172 static int dino_current_bus = 0; 173 static int dino_current_bus = 0;
173 174
174 static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where, 175 static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where,
175 int size, u32 *val) 176 int size, u32 *val)
176 { 177 {
177 struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge)); 178 struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
178 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; 179 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
179 u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3); 180 u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
180 void __iomem *base_addr = d->hba.base_addr; 181 void __iomem *base_addr = d->hba.base_addr;
181 unsigned long flags; 182 unsigned long flags;
182 183
183 DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where, 184 DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where,
184 size); 185 size);
185 spin_lock_irqsave(&d->dinosaur_pen, flags); 186 spin_lock_irqsave(&d->dinosaur_pen, flags);
186 187
187 /* tell HW which CFG address */ 188 /* tell HW which CFG address */
188 __raw_writel(v, base_addr + DINO_PCI_ADDR); 189 __raw_writel(v, base_addr + DINO_PCI_ADDR);
189 190
190 /* generate cfg read cycle */ 191 /* generate cfg read cycle */
191 if (size == 1) { 192 if (size == 1) {
192 *val = readb(base_addr + DINO_CONFIG_DATA + (where & 3)); 193 *val = readb(base_addr + DINO_CONFIG_DATA + (where & 3));
193 } else if (size == 2) { 194 } else if (size == 2) {
194 *val = readw(base_addr + DINO_CONFIG_DATA + (where & 2)); 195 *val = readw(base_addr + DINO_CONFIG_DATA + (where & 2));
195 } else if (size == 4) { 196 } else if (size == 4) {
196 *val = readl(base_addr + DINO_CONFIG_DATA); 197 *val = readl(base_addr + DINO_CONFIG_DATA);
197 } 198 }
198 199
199 spin_unlock_irqrestore(&d->dinosaur_pen, flags); 200 spin_unlock_irqrestore(&d->dinosaur_pen, flags);
200 return 0; 201 return 0;
201 } 202 }
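
The token written to DINO_PCI_ADDR is just the packed triple from
DINO_CFG_TOK(): bus number in bits 23..16, devfn in bits 15..8 and the
dword-aligned register offset in bits 7..0. With illustrative values:

    #include <stdio.h>

    #define DINO_CFG_TOK(bus, dfn, pos) \
            ((unsigned int)((bus) << 16 | (dfn) << 8 | (pos)))

    int main(void)
    {
            /* bus 1, device 4 function 0 (devfn 0x20), PCI_COMMAND (0x04) */
            unsigned int tok = DINO_CFG_TOK(1, 0x20, 0x04 & ~3);

            printf("token = 0x%06x\n", tok);        /* 0x012004 */
            return 0;
    }

The low two bits of `where` are masked out of the token and reapplied as a
byte offset into DINO_CONFIG_DATA, which is how sub-word config reads land
on the right byte lane.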
202 203
203 /* 204 /*
204 * Dino address stepping "feature": 205 * Dino address stepping "feature":
205 * When address stepping, Dino attempts to drive the bus one cycle too soon 206 * When address stepping, Dino attempts to drive the bus one cycle too soon
206 * even though the type of cycle (config vs. MMIO) might be different. 207 * even though the type of cycle (config vs. MMIO) might be different.
207 * The read of Ven/Prod ID is harmless and avoids Dino's address stepping. 208 * The read of Ven/Prod ID is harmless and avoids Dino's address stepping.
208 */ 209 */
209 static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where, 210 static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where,
210 int size, u32 val) 211 int size, u32 val)
211 { 212 {
212 struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge)); 213 struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
213 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; 214 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
214 u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3); 215 u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
215 void __iomem *base_addr = d->hba.base_addr; 216 void __iomem *base_addr = d->hba.base_addr;
216 unsigned long flags; 217 unsigned long flags;
217 218
218 DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where, 219 DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where,
219 size); 220 size);
220 spin_lock_irqsave(&d->dinosaur_pen, flags); 221 spin_lock_irqsave(&d->dinosaur_pen, flags);
221 222
222 /* avoid address stepping feature */ 223 /* avoid address stepping feature */
223 __raw_writel(v & 0xffffff00, base_addr + DINO_PCI_ADDR); 224 __raw_writel(v & 0xffffff00, base_addr + DINO_PCI_ADDR);
224 __raw_readl(base_addr + DINO_CONFIG_DATA); 225 __raw_readl(base_addr + DINO_CONFIG_DATA);
225 226
226 /* tell HW which CFG address */ 227 /* tell HW which CFG address */
227 __raw_writel(v, base_addr + DINO_PCI_ADDR); 228 __raw_writel(v, base_addr + DINO_PCI_ADDR);
228 /* generate cfg write cycle */ 229 /* generate cfg write cycle */
229 if (size == 1) { 230 if (size == 1) {
230 writeb(val, base_addr + DINO_CONFIG_DATA + (where & 3)); 231 writeb(val, base_addr + DINO_CONFIG_DATA + (where & 3));
231 } else if (size == 2) { 232 } else if (size == 2) {
232 writew(val, base_addr + DINO_CONFIG_DATA + (where & 2)); 233 writew(val, base_addr + DINO_CONFIG_DATA + (where & 2));
233 } else if (size == 4) { 234 } else if (size == 4) {
234 writel(val, base_addr + DINO_CONFIG_DATA); 235 writel(val, base_addr + DINO_CONFIG_DATA);
235 } 236 }
236 237
237 spin_unlock_irqrestore(&d->dinosaur_pen, flags); 238 spin_unlock_irqrestore(&d->dinosaur_pen, flags);
238 return 0; 239 return 0;
239 } 240 }
240 241
241 static struct pci_ops dino_cfg_ops = { 242 static struct pci_ops dino_cfg_ops = {
242 .read = dino_cfg_read, 243 .read = dino_cfg_read,
243 .write = dino_cfg_write, 244 .write = dino_cfg_write,
244 }; 245 };
245 246
246 247
247 /* 248 /*
248 * Dino "I/O Port" Space Accessor Functions 249 * Dino "I/O Port" Space Accessor Functions
249 * 250 *
250 * Many PCI devices don't require use of I/O port space (eg Tulip, 251 * Many PCI devices don't require use of I/O port space (eg Tulip,
251 * NCR720) since they export the same registers to both MMIO and 252 * NCR720) since they export the same registers to both MMIO and
252 * I/O port space. Performance is going to stink if drivers use 253 * I/O port space. Performance is going to stink if drivers use
253 * I/O port instead of MMIO. 254 * I/O port instead of MMIO.
254 */ 255 */
255 256
256 #define DINO_PORT_IN(type, size, mask) \ 257 #define DINO_PORT_IN(type, size, mask) \
257 static u##size dino_in##size (struct pci_hba_data *d, u16 addr) \ 258 static u##size dino_in##size (struct pci_hba_data *d, u16 addr) \
258 { \ 259 { \
259 u##size v; \ 260 u##size v; \
260 unsigned long flags; \ 261 unsigned long flags; \
261 spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \ 262 spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
262 /* tell HW which IO Port address */ \ 263 /* tell HW which IO Port address */ \
263 __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \ 264 __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \
264 /* generate I/O PORT read cycle */ \ 265 /* generate I/O PORT read cycle */ \
265 v = read##type(d->base_addr+DINO_IO_DATA+(addr&mask)); \ 266 v = read##type(d->base_addr+DINO_IO_DATA+(addr&mask)); \
266 spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \ 267 spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
267 return v; \ 268 return v; \
268 } 269 }
269 270
270 DINO_PORT_IN(b, 8, 3) 271 DINO_PORT_IN(b, 8, 3)
271 DINO_PORT_IN(w, 16, 2) 272 DINO_PORT_IN(w, 16, 2)
272 DINO_PORT_IN(l, 32, 0) 273 DINO_PORT_IN(l, 32, 0)
273 274
274 #define DINO_PORT_OUT(type, size, mask) \ 275 #define DINO_PORT_OUT(type, size, mask) \
275 static void dino_out##size (struct pci_hba_data *d, u16 addr, u##size val) \ 276 static void dino_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
276 { \ 277 { \
277 unsigned long flags; \ 278 unsigned long flags; \
278 spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \ 279 spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
279 /* tell HW which IO port address */ \ 280 /* tell HW which IO port address */ \
280 __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \ 281 __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \
281 /* generate cfg write cycle */ \ 282 /* generate cfg write cycle */ \
282 write##type(val, d->base_addr+DINO_IO_DATA+(addr&mask)); \ 283 write##type(val, d->base_addr+DINO_IO_DATA+(addr&mask)); \
283 spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \ 284 spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
284 } 285 }
285 286
286 DINO_PORT_OUT(b, 8, 3) 287 DINO_PORT_OUT(b, 8, 3)
287 DINO_PORT_OUT(w, 16, 2) 288 DINO_PORT_OUT(w, 16, 2)
288 DINO_PORT_OUT(l, 32, 0) 289 DINO_PORT_OUT(l, 32, 0)
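
The mask argument of these macros places sub-word cycles on the correct
byte lane within the 32-bit DINO_IO_DATA window: bytes can sit at any of
four offsets (mask 3), halfwords at offset 0 or 2 (mask 2), and words
always at offset 0 (mask 0). A standalone sketch with a hypothetical port
number:

    #include <stdio.h>

    int main(void)
    {
            unsigned short addr = 0x3f6;

            printf("inb reads DINO_IO_DATA + %d\n", addr & 3);  /* + 2 */
            printf("inw reads DINO_IO_DATA + %d\n", addr & 2);  /* + 2 */
            printf("inl reads DINO_IO_DATA + %d\n", addr & 0);  /* + 0 */
            return 0;
    }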
289 290
290 struct pci_port_ops dino_port_ops = { 291 struct pci_port_ops dino_port_ops = {
291 .inb = dino_in8, 292 .inb = dino_in8,
292 .inw = dino_in16, 293 .inw = dino_in16,
293 .inl = dino_in32, 294 .inl = dino_in32,
294 .outb = dino_out8, 295 .outb = dino_out8,
295 .outw = dino_out16, 296 .outw = dino_out16,
296 .outl = dino_out32 297 .outl = dino_out32
297 }; 298 };
298 299
299 static void dino_disable_irq(unsigned int irq) 300 static void dino_disable_irq(unsigned int irq)
300 { 301 {
301 struct dino_device *dino_dev = irq_desc[irq].handler_data; 302 struct dino_device *dino_dev = irq_desc[irq].handler_data;
302 int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 303 int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
303 304
304 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); 305 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
305 306
306 /* Clear the matching bit in the IMR register */ 307 /* Clear the matching bit in the IMR register */
307 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq)); 308 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
308 __raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR); 309 __raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
309 } 310 }
310 311
311 static void dino_enable_irq(unsigned int irq) 312 static void dino_enable_irq(unsigned int irq)
312 { 313 {
313 struct dino_device *dino_dev = irq_desc[irq].handler_data; 314 struct dino_device *dino_dev = irq_desc[irq].handler_data;
314 int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS); 315 int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
315 u32 tmp; 316 u32 tmp;
316 317
317 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq); 318 DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
318 319
319 /* 320 /*
320 ** clear pending IRQ bits 321 ** clear pending IRQ bits
321 ** 322 **
322 ** This does NOT change ILR state! 323 ** This does NOT change ILR state!
323 ** See comment below for ILR usage. 324 ** See comment below for ILR usage.
324 */ 325 */
325 __raw_readl(dino_dev->hba.base_addr+DINO_IPR); 326 __raw_readl(dino_dev->hba.base_addr+DINO_IPR);
326 327
327 /* set the matching bit in the IMR register */ 328 /* set the matching bit in the IMR register */
328 dino_dev->imr |= DINO_MASK_IRQ(local_irq); /* used in dino_isr() */ 329 dino_dev->imr |= DINO_MASK_IRQ(local_irq); /* used in dino_isr() */
329 __raw_writel( dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR); 330 __raw_writel( dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
330 331
331 /* Emulate "Level Triggered" Interrupt 332 /* Emulate "Level Triggered" Interrupt
332 ** Basically, a driver is blowing it if the IRQ line is asserted 333 ** Basically, a driver is blowing it if the IRQ line is asserted
333 ** while the IRQ is disabled. But tulip.c seems to do that.... 334 ** while the IRQ is disabled. But tulip.c seems to do that....
334 ** Give 'em a kluge award and a nice round of applause! 335 ** Give 'em a kluge award and a nice round of applause!
335 ** 336 **
336 ** The gsc_write will generate an interrupt which invokes dino_isr(). 337 ** The gsc_write will generate an interrupt which invokes dino_isr().
337 ** dino_isr() will read IPR and find nothing. But then catch this 338 ** dino_isr() will read IPR and find nothing. But then catch this
338 ** when it also checks ILR. 339 ** when it also checks ILR.
339 */ 340 */
340 tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR); 341 tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR);
341 if (tmp & DINO_MASK_IRQ(local_irq)) { 342 if (tmp & DINO_MASK_IRQ(local_irq)) {
342 DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n", 343 DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
343 __FUNCTION__, tmp); 344 __FUNCTION__, tmp);
344 gsc_writel(dino_dev->txn_data, dino_dev->txn_addr); 345 gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
345 } 346 }
346 } 347 }
347 348
348 static unsigned int dino_startup_irq(unsigned int irq) 349 static unsigned int dino_startup_irq(unsigned int irq)
349 { 350 {
350 dino_enable_irq(irq); 351 dino_enable_irq(irq);
351 return 0; 352 return 0;
352 } 353 }
353 354
354 static struct hw_interrupt_type dino_interrupt_type = { 355 static struct hw_interrupt_type dino_interrupt_type = {
355 .typename = "GSC-PCI", 356 .typename = "GSC-PCI",
356 .startup = dino_startup_irq, 357 .startup = dino_startup_irq,
357 .shutdown = dino_disable_irq, 358 .shutdown = dino_disable_irq,
358 .enable = dino_enable_irq, 359 .enable = dino_enable_irq,
359 .disable = dino_disable_irq, 360 .disable = dino_disable_irq,
360 .ack = no_ack_irq, 361 .ack = no_ack_irq,
361 .end = no_end_irq, 362 .end = no_end_irq,
362 }; 363 };
363 364
364 365
365 /* 366 /*
366 * Handle a Processor interrupt generated by Dino. 367 * Handle a Processor interrupt generated by Dino.
367 * 368 *
368 * ilr_loop counter is a kluge to prevent a "stuck" IRQ line from 369 * ilr_loop counter is a kluge to prevent a "stuck" IRQ line from
369 * wedging the CPU. Could be removed or made optional at some point. 370 * wedging the CPU. Could be removed or made optional at some point.
370 */ 371 */
371 static irqreturn_t 372 static irqreturn_t
372 dino_isr(int irq, void *intr_dev, struct pt_regs *regs) 373 dino_isr(int irq, void *intr_dev, struct pt_regs *regs)
373 { 374 {
374 struct dino_device *dino_dev = intr_dev; 375 struct dino_device *dino_dev = intr_dev;
375 u32 mask; 376 u32 mask;
376 int ilr_loop = 100; 377 int ilr_loop = 100;
377 378
378 /* read and acknowledge pending interrupts */ 379 /* read and acknowledge pending interrupts */
379 #ifdef DINO_DEBUG 380 #ifdef DINO_DEBUG
380 dino_dev->dino_irr0 = 381 dino_dev->dino_irr0 =
381 #endif 382 #endif
382 mask = __raw_readl(dino_dev->hba.base_addr+DINO_IRR0) & DINO_IRR_MASK; 383 mask = __raw_readl(dino_dev->hba.base_addr+DINO_IRR0) & DINO_IRR_MASK;
383 384
384 if (mask == 0) 385 if (mask == 0)
385 return IRQ_NONE; 386 return IRQ_NONE;
386 387
387 ilr_again: 388 ilr_again:
388 do { 389 do {
389 int local_irq = __ffs(mask); 390 int local_irq = __ffs(mask);
390 int irq = dino_dev->global_irq[local_irq]; 391 int irq = dino_dev->global_irq[local_irq];
391 DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n", 392 DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
392 __FUNCTION__, irq, intr_dev, mask); 393 __FUNCTION__, irq, intr_dev, mask);
393 __do_IRQ(irq, regs); 394 __do_IRQ(irq, regs);
394 mask &= ~(1 << local_irq); 395 mask &= ~(1 << local_irq);
395 } while (mask); 396 } while (mask);
396 397
397 /* Support for level triggered IRQ lines. 398 /* Support for level triggered IRQ lines.
398 ** 399 **
399 ** Dropping this support would make this routine *much* faster. 400 ** Dropping this support would make this routine *much* faster.
400 ** But since PCI requires level triggered IRQ lines in order to share lines... 401 ** But since PCI requires level triggered IRQ lines in order to share lines...
401 ** device drivers may assume lines are level triggered (and not 402 ** device drivers may assume lines are level triggered (and not
402 ** edge triggered like EISA/ISA can be). 403 ** edge triggered like EISA/ISA can be).
403 */ 404 */
404 mask = __raw_readl(dino_dev->hba.base_addr+DINO_ILR) & dino_dev->imr; 405 mask = __raw_readl(dino_dev->hba.base_addr+DINO_ILR) & dino_dev->imr;
405 if (mask) { 406 if (mask) {
406 if (--ilr_loop > 0) 407 if (--ilr_loop > 0)
407 goto ilr_again; 408 goto ilr_again;
408 printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n", 409 printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n",
409 dino_dev->hba.base_addr, mask); 410 dino_dev->hba.base_addr, mask);
410 return IRQ_NONE; 411 return IRQ_NONE;
411 } 412 }
412 return IRQ_HANDLED; 413 return IRQ_HANDLED;
413 } 414 }
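
The do/while above is the standard walk over a pending-interrupt mask:
__ffs() yields the lowest set bit, that IRQ is dispatched, the bit is
cleared, and the loop continues until the mask is empty. A minimal
userspace sketch of the same loop (GCC's __builtin_ctz() standing in for
the kernel's __ffs()):

    #include <stdio.h>

    int main(void)
    {
            unsigned int mask = 0x403;  /* PCIINTA | PCIINTB | RS232INT */

            while (mask) {
                    int local_irq = __builtin_ctz(mask);
                    printf("dispatch local IRQ %d\n", local_irq);
                    mask &= ~(1u << local_irq);
            }
            return 0;
    }

This prints local IRQs 0, 1 and 10, matching the bit definitions earlier
in the file.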
414 415
415 static void dino_assign_irq(struct dino_device *dino, int local_irq, int *irqp) 416 static void dino_assign_irq(struct dino_device *dino, int local_irq, int *irqp)
416 { 417 {
417 int irq = gsc_assign_irq(&dino_interrupt_type, dino); 418 int irq = gsc_assign_irq(&dino_interrupt_type, dino);
418 if (irq == NO_IRQ) 419 if (irq == NO_IRQ)
419 return; 420 return;
420 421
421 *irqp = irq; 422 *irqp = irq;
422 dino->global_irq[local_irq] = irq; 423 dino->global_irq[local_irq] = irq;
423 } 424 }
424 425
425 static void dino_choose_irq(struct parisc_device *dev, void *ctrl) 426 static void dino_choose_irq(struct parisc_device *dev, void *ctrl)
426 { 427 {
427 int irq; 428 int irq;
428 struct dino_device *dino = ctrl; 429 struct dino_device *dino = ctrl;
429 430
430 switch (dev->id.sversion) { 431 switch (dev->id.sversion) {
431 case 0x00084: irq = 8; break; /* PS/2 */ 432 case 0x00084: irq = 8; break; /* PS/2 */
432 case 0x0008c: irq = 10; break; /* RS232 */ 433 case 0x0008c: irq = 10; break; /* RS232 */
433 case 0x00096: irq = 8; break; /* PS/2 */ 434 case 0x00096: irq = 8; break; /* PS/2 */
434 default: return; /* Unknown */ 435 default: return; /* Unknown */
435 } 436 }
436 437
437 dino_assign_irq(dino, irq, &dev->irq); 438 dino_assign_irq(dino, irq, &dev->irq);
438 } 439 }
439 440
440 441
441 /* 442 /*
442 * Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de) 443 * Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de)
443 * (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...) 444 * (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...)
444 */ 445 */
445 static void __devinit quirk_cirrus_cardbus(struct pci_dev *dev) 446 static void __devinit quirk_cirrus_cardbus(struct pci_dev *dev)
446 { 447 {
447 u8 new_irq = dev->irq - 1; 448 u8 new_irq = dev->irq - 1;
448 printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n", 449 printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n",
449 pci_name(dev), dev->irq, new_irq); 450 pci_name(dev), dev->irq, new_irq);
450 dev->irq = new_irq; 451 dev->irq = new_irq;
451 } 452 }
452 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); 453 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
453 454
454 455
455 static void __init 456 static void __init
456 dino_bios_init(void) 457 dino_bios_init(void)
457 { 458 {
458 DBG("dino_bios_init\n"); 459 DBG("dino_bios_init\n");
459 } 460 }
460 461
461 /* 462 /*
462 * dino_card_setup - Set up the memory space for a Dino in card mode. 463 * dino_card_setup - Set up the memory space for a Dino in card mode.
463 * @bus: the bus under this dino 464 * @bus: the bus under this dino
464 * 465 *
465 * Claim an 8MB chunk of unused IO space and call the generic PCI routines 466 * Claim an 8MB chunk of unused IO space and call the generic PCI routines
466 * to set up the addresses of the devices on this bus. 467 * to set up the addresses of the devices on this bus.
467 */ 468 */
468 #define _8MB 0x00800000UL 469 #define _8MB 0x00800000UL
469 static void __init 470 static void __init
470 dino_card_setup(struct pci_bus *bus, void __iomem *base_addr) 471 dino_card_setup(struct pci_bus *bus, void __iomem *base_addr)
471 { 472 {
472 int i; 473 int i;
473 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); 474 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
474 struct resource *res; 475 struct resource *res;
475 char name[128]; 476 char name[128];
476 int size; 477 int size;
477 478
478 res = &dino_dev->hba.lmmio_space; 479 res = &dino_dev->hba.lmmio_space;
479 res->flags = IORESOURCE_MEM; 480 res->flags = IORESOURCE_MEM;
480 size = scnprintf(name, sizeof(name), "Dino LMMIO (%s)", 481 size = scnprintf(name, sizeof(name), "Dino LMMIO (%s)",
481 bus->bridge->bus_id); 482 bus->bridge->bus_id);
482 res->name = kmalloc(size+1, GFP_KERNEL); 483 res->name = kmalloc(size+1, GFP_KERNEL);
483 if(res->name) 484 if(res->name)
484 strcpy((char *)res->name, name); 485 strcpy((char *)res->name, name);
485 else 486 else
486 res->name = dino_dev->hba.lmmio_space.name; 487 res->name = dino_dev->hba.lmmio_space.name;
487 488
488 489
489 if (ccio_allocate_resource(dino_dev->hba.dev, res, _8MB, 490 if (ccio_allocate_resource(dino_dev->hba.dev, res, _8MB,
490 F_EXTEND(0xf0000000UL) | _8MB, 491 F_EXTEND(0xf0000000UL) | _8MB,
491 F_EXTEND(0xffffffffUL) &~ _8MB, _8MB) < 0) { 492 F_EXTEND(0xffffffffUL) &~ _8MB, _8MB) < 0) {
492 struct list_head *ln, *tmp_ln; 493 struct list_head *ln, *tmp_ln;
493 494
494 printk(KERN_ERR "Dino: cannot attach bus %s\n", 495 printk(KERN_ERR "Dino: cannot attach bus %s\n",
495 bus->bridge->bus_id); 496 bus->bridge->bus_id);
496 /* kill the bus, we can't do anything with it */ 497 /* kill the bus, we can't do anything with it */
497 list_for_each_safe(ln, tmp_ln, &bus->devices) { 498 list_for_each_safe(ln, tmp_ln, &bus->devices) {
498 struct pci_dev *dev = pci_dev_b(ln); 499 struct pci_dev *dev = pci_dev_b(ln);
499 500
500 list_del(&dev->global_list); 501 list_del(&dev->global_list);
501 list_del(&dev->bus_list); 502 list_del(&dev->bus_list);
502 } 503 }
503 504
504 return; 505 return;
505 } 506 }
506 bus->resource[1] = res; 507 bus->resource[1] = res;
507 bus->resource[0] = &(dino_dev->hba.io_space); 508 bus->resource[0] = &(dino_dev->hba.io_space);
508 509
509 /* Now tell dino what range it has */ 510 /* Now tell dino what range it has */
510 for (i = 1; i < 31; i++) { 511 for (i = 1; i < 31; i++) {
511 if (res->start == F_EXTEND(0xf0000000UL | (i * _8MB))) 512 if (res->start == F_EXTEND(0xf0000000UL | (i * _8MB)))
512 break; 513 break;
513 } 514 }
514 DBG("DINO GSC WRITE i=%d, start=%lx, dino addr = %p\n", 515 DBG("DINO GSC WRITE i=%d, start=%lx, dino addr = %p\n",
515 i, res->start, base_addr + DINO_IO_ADDR_EN); 516 i, res->start, base_addr + DINO_IO_ADDR_EN);
516 __raw_writel(1 << i, base_addr + DINO_IO_ADDR_EN); 517 __raw_writel(1 << i, base_addr + DINO_IO_ADDR_EN);
517 } 518 }
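
The loop that feeds DINO_IO_ADDR_EN only recovers which of the 8MB slots in
F-space the allocated range landed in, so the same bit index can be
computed directly. A hedged sketch (assuming a 64-bit kernel where
F_EXTEND() sign-extends the 32-bit F-space address; dino_addr_en_bit() is a
hypothetical helper, not driver code):

    #define _8MB 0x00800000UL

    /* Index of the 8MB slot holding `start`, counted from 0xf0000000;
     * mirrors the loop above for i in 1..30. */
    static unsigned int dino_addr_en_bit(unsigned long start)
    {
            return (unsigned int)((start & 0x0fffffffUL) / _8MB);
    }

With it, __raw_writel(1 << dino_addr_en_bit(res->start), base_addr +
DINO_IO_ADDR_EN) would enable the same window the loop finds.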
518 519
519 static void __init 520 static void __init
520 dino_card_fixup(struct pci_dev *dev) 521 dino_card_fixup(struct pci_dev *dev)
521 { 522 {
522 u32 irq_pin; 523 u32 irq_pin;
523 524
524 /* 525 /*
525 ** REVISIT: card-mode PCI-PCI expansion chassis do exist. 526 ** REVISIT: card-mode PCI-PCI expansion chassis do exist.
526 ** Not sure they were ever productized. 527 ** Not sure they were ever productized.
527 ** Die here since we'll die later in dino_inb() anyway. 528 ** Die here since we'll die later in dino_inb() anyway.
528 */ 529 */
529 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 530 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
530 panic("Card-Mode Dino: PCI-PCI Bridge not supported\n"); 531 panic("Card-Mode Dino: PCI-PCI Bridge not supported\n");
531 } 532 }
532 533
533 /* 534 /*
534 ** Set Latency Timer to 0xff (not a shared bus) 535 ** Set Latency Timer to 0xff (not a shared bus)
535 ** Set CACHELINE_SIZE. 536 ** Set CACHELINE_SIZE.
536 */ 537 */
537 dino_cfg_write(dev->bus, dev->devfn, 538 dino_cfg_write(dev->bus, dev->devfn,
538 PCI_CACHE_LINE_SIZE, 2, 0xff00 | L1_CACHE_BYTES/4); 539 PCI_CACHE_LINE_SIZE, 2, 0xff00 | L1_CACHE_BYTES/4);
539 540
540 /* 541 /*
541 ** Program INT_LINE for card-mode devices. 542 ** Program INT_LINE for card-mode devices.
542 ** The cards are hardwired according to this algorithm. 543 ** The cards are hardwired according to this algorithm.
543 ** And it doesn't matter if PPB's are present or not since 544 ** And it doesn't matter if PPB's are present or not since
544 ** the IRQ lines bypass the PPB. 545 ** the IRQ lines bypass the PPB.
545 ** 546 **
546 ** "-1" converts INTA-D (1-4) to PCIINTA-D (0-3) range. 547 ** "-1" converts INTA-D (1-4) to PCIINTA-D (0-3) range.
547 ** The additional "-1" adjusts for skewing the IRQ<->slot. 548 ** The additional "-1" adjusts for skewing the IRQ<->slot.
548 */ 549 */
549 dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin); 550 dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin);
550 dev->irq = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ; 551 dev->irq = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
551 552
552 /* Shouldn't really need to do this but it's in case someone tries 553 /* Shouldn't really need to do this but it's in case someone tries
553 ** to bypass PCI services and look at the card themselves. 554 ** to bypass PCI services and look at the card themselves.
554 */ 555 */
555 dino_cfg_write(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 1, dev->irq); 556 dino_cfg_write(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 1, dev->irq);
556 } 557 }
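
The (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 expression is the usual
barber-pole swizzle: INTA of slot 0 lands on PCIINTA, INTA of slot 1 on
PCIINTB, and so on, wrapping every four slots. A quick table generator
(illustrative, userspace):

    #include <stdio.h>

    int main(void)
    {
            int slot, pin;

            for (slot = 0; slot < 4; slot++)
                    for (pin = 1; pin <= 4; pin++)  /* INTA..INTD = 1..4 */
                            printf("slot %d INT%c -> PCIINT%c\n",
                                   slot, 'A' + pin - 1,
                                   'A' + (pin + slot - 1) % 4);
            return 0;
    }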
557 558
558 /* The alignment constraints for PCI bridges under dino */ 559 /* The alignment constraints for PCI bridges under dino */
559 #define DINO_BRIDGE_ALIGN 0x100000 560 #define DINO_BRIDGE_ALIGN 0x100000
560 561
561 562
562 static void __init 563 static void __init
563 dino_fixup_bus(struct pci_bus *bus) 564 dino_fixup_bus(struct pci_bus *bus)
564 { 565 {
565 struct list_head *ln; 566 struct list_head *ln;
566 struct pci_dev *dev; 567 struct pci_dev *dev;
567 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); 568 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
568 int port_base = HBA_PORT_BASE(dino_dev->hba.hba_num); 569 int port_base = HBA_PORT_BASE(dino_dev->hba.hba_num);
569 570
570 DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n", 571 DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
571 __FUNCTION__, bus, bus->secondary, 572 __FUNCTION__, bus, bus->secondary,
572 bus->bridge->platform_data); 573 bus->bridge->platform_data);
573 574
574 /* Firmware doesn't set up card-mode dino, so we have to */ 575 /* Firmware doesn't set up card-mode dino, so we have to */
575 if (is_card_dino(&dino_dev->hba.dev->id)) { 576 if (is_card_dino(&dino_dev->hba.dev->id)) {
576 dino_card_setup(bus, dino_dev->hba.base_addr); 577 dino_card_setup(bus, dino_dev->hba.base_addr);
577 } else if(bus->parent == NULL) { 578 } else if(bus->parent == NULL) {
578 /* must have a dino above it, reparent the resources 579 /* must have a dino above it, reparent the resources
579 * into the dino window */ 580 * into the dino window */
580 int i; 581 int i;
581 struct resource *res = &dino_dev->hba.lmmio_space; 582 struct resource *res = &dino_dev->hba.lmmio_space;
582 583
583 bus->resource[0] = &(dino_dev->hba.io_space); 584 bus->resource[0] = &(dino_dev->hba.io_space);
584 for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) { 585 for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
585 if(res[i].flags == 0) 586 if(res[i].flags == 0)
586 break; 587 break;
587 bus->resource[i+1] = &res[i]; 588 bus->resource[i+1] = &res[i];
588 } 589 }
589 590
590 } else if(bus->self) { 591 } else if(bus->self) {
591 int i; 592 int i;
592 593
593 pci_read_bridge_bases(bus); 594 pci_read_bridge_bases(bus);
594 595
595 596
596 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 597 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
597 if((bus->self->resource[i].flags & 598 if((bus->self->resource[i].flags &
598 (IORESOURCE_IO | IORESOURCE_MEM)) == 0) 599 (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
599 continue; 600 continue;
600 601
601 if(bus->self->resource[i].flags & IORESOURCE_MEM) { 602 if(bus->self->resource[i].flags & IORESOURCE_MEM) {
602 /* There's a quirk to alignment of 603 /* There's a quirk to alignment of
603 * bridge memory resources: the start 604 * bridge memory resources: the start
604 * is the alignment and end - start is 605 * is the alignment and end - start is
605 * the size. However, firmware will 606 * the size. However, firmware will
606 * have assigned start and end, so we 607 * have assigned start and end, so we
607 * need to take this into account */ 608 * need to take this into account */
608 bus->self->resource[i].end = bus->self->resource[i].end - bus->self->resource[i].start + DINO_BRIDGE_ALIGN; 609 bus->self->resource[i].end = bus->self->resource[i].end - bus->self->resource[i].start + DINO_BRIDGE_ALIGN;
609 bus->self->resource[i].start = DINO_BRIDGE_ALIGN; 610 bus->self->resource[i].start = DINO_BRIDGE_ALIGN;
610 611
611 } 612 }
612 613
613 DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n", 614 DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n",
614 bus->self->dev.bus_id, i, 615 bus->self->dev.bus_id, i,
615 bus->self->resource[i].start, 616 bus->self->resource[i].start,
616 bus->self->resource[i].end); 617 bus->self->resource[i].end);
617 pci_assign_resource(bus->self, i); 618 pci_assign_resource(bus->self, i);
618 DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n", 619 DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
619 bus->self->dev.bus_id, i, 620 bus->self->dev.bus_id, i,
620 bus->self->resource[i].start, 621 bus->self->resource[i].start,
621 bus->self->resource[i].end); 622 bus->self->resource[i].end);
622 } 623 }
623 } 624 }
624 625
625 626
626 list_for_each(ln, &bus->devices) { 627 list_for_each(ln, &bus->devices) {
627 int i; 628 int i;
628 629
629 dev = pci_dev_b(ln); 630 dev = pci_dev_b(ln);
630 if (is_card_dino(&dino_dev->hba.dev->id)) 631 if (is_card_dino(&dino_dev->hba.dev->id))
631 dino_card_fixup(dev); 632 dino_card_fixup(dev);
632 633
633 /* 634 /*
634 ** P2PB's only have 2 BARs, no IRQs. 635 ** P2PB's only have 2 BARs, no IRQs.
635 ** I'd like to just ignore them for now. 636 ** I'd like to just ignore them for now.
636 */ 637 */
637 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) 638 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
638 continue; 639 continue;
639 640
640 /* Adjust the I/O Port space addresses */ 641 /* Adjust the I/O Port space addresses */
641 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 642 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
642 struct resource *res = &dev->resource[i]; 643 struct resource *res = &dev->resource[i];
643 if (res->flags & IORESOURCE_IO) { 644 if (res->flags & IORESOURCE_IO) {
644 res->start |= port_base; 645 res->start |= port_base;
645 res->end |= port_base; 646 res->end |= port_base;
646 } 647 }
647 #ifdef __LP64__ 648 #ifdef __LP64__
648 /* Sign Extend MMIO addresses */ 649 /* Sign Extend MMIO addresses */
649 else if (res->flags & IORESOURCE_MEM) { 650 else if (res->flags & IORESOURCE_MEM) {
650 res->start |= F_EXTEND(0UL); 651 res->start |= F_EXTEND(0UL);
651 res->end |= F_EXTEND(0UL); 652 res->end |= F_EXTEND(0UL);
652 } 653 }
653 #endif 654 #endif
654 } 655 }
655 /* null out the ROM resource if there is one (we don't 656 /* null out the ROM resource if there is one (we don't
656 * care about an expansion rom on parisc, since it 657 * care about an expansion rom on parisc, since it
657 * usually contains (x86) bios code) */ 658 * usually contains (x86) bios code) */
658 dev->resource[PCI_ROM_RESOURCE].flags = 0; 659 dev->resource[PCI_ROM_RESOURCE].flags = 0;
659 660
660 if(dev->irq == 255) { 661 if(dev->irq == 255) {
661 662
662 #define DINO_FIX_UNASSIGNED_INTERRUPTS 663 #define DINO_FIX_UNASSIGNED_INTERRUPTS
663 #ifdef DINO_FIX_UNASSIGNED_INTERRUPTS 664 #ifdef DINO_FIX_UNASSIGNED_INTERRUPTS
664 665
665 /* This code tries to assign an unassigned 666 /* This code tries to assign an unassigned
666 * interrupt. Leave it disabled unless you 667 * interrupt. Leave it disabled unless you
667 * *really* know what you're doing since the 668 * *really* know what you're doing since the
668 * pin<->interrupt line mapping varies by bus 669 * pin<->interrupt line mapping varies by bus
669 * and machine */ 670 * and machine */
670 671
671 u32 irq_pin; 672 u32 irq_pin;
672 673
673 dino_cfg_read(dev->bus, dev->devfn, 674 dino_cfg_read(dev->bus, dev->devfn,
674 PCI_INTERRUPT_PIN, 1, &irq_pin); 675 PCI_INTERRUPT_PIN, 1, &irq_pin);
675 irq_pin = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ; 676 irq_pin = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
676 printk(KERN_WARNING "Device %s has undefined IRQ, " 677 printk(KERN_WARNING "Device %s has undefined IRQ, "
677 "setting to %d\n", pci_name(dev), irq_pin); 678 "setting to %d\n", pci_name(dev), irq_pin);
678 dino_cfg_write(dev->bus, dev->devfn, 679 dino_cfg_write(dev->bus, dev->devfn,
679 PCI_INTERRUPT_LINE, 1, irq_pin); 680 PCI_INTERRUPT_LINE, 1, irq_pin);
680 dino_assign_irq(dino_dev, irq_pin, &dev->irq); 681 dino_assign_irq(dino_dev, irq_pin, &dev->irq);
681 #else 682 #else
682 dev->irq = 65535; 683 dev->irq = 65535;
683 printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev)); 684 printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev));
684 #endif 685 #endif
685 } else { 686 } else {
686 /* Adjust INT_LINE for that bus's region */ 687 /* Adjust INT_LINE for that bus's region */
687 dino_assign_irq(dino_dev, dev->irq, &dev->irq); 688 dino_assign_irq(dino_dev, dev->irq, &dev->irq);
688 } 689 }
689 } 690 }
690 } 691 }
691 692
692 693
693 struct pci_bios_ops dino_bios_ops = { 694 struct pci_bios_ops dino_bios_ops = {
694 .init = dino_bios_init, 695 .init = dino_bios_init,
695 .fixup_bus = dino_fixup_bus 696 .fixup_bus = dino_fixup_bus
696 }; 697 };
697 698
698 699
699 /* 700 /*
700 * Initialise a DINO controller chip 701 * Initialise a DINO controller chip
701 */ 702 */
702 static void __init 703 static void __init
703 dino_card_init(struct dino_device *dino_dev) 704 dino_card_init(struct dino_device *dino_dev)
704 { 705 {
705 u32 brdg_feat = 0x00784e05; 706 u32 brdg_feat = 0x00784e05;
706 unsigned long status; 707 unsigned long status;
707 708
708 status = __raw_readl(dino_dev->hba.base_addr+DINO_IO_STATUS); 709 status = __raw_readl(dino_dev->hba.base_addr+DINO_IO_STATUS);
709 if (status & 0x0000ff80) { 710 if (status & 0x0000ff80) {
710 __raw_writel(0x00000005, 711 __raw_writel(0x00000005,
711 dino_dev->hba.base_addr+DINO_IO_COMMAND); 712 dino_dev->hba.base_addr+DINO_IO_COMMAND);
712 udelay(1); 713 udelay(1);
713 } 714 }
714 715
715 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_GMASK); 716 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_GMASK);
716 __raw_writel(0x00000001, dino_dev->hba.base_addr+DINO_IO_FBB_EN); 717 __raw_writel(0x00000001, dino_dev->hba.base_addr+DINO_IO_FBB_EN);
717 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_ICR); 718 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_ICR);
718 719
719 #if 1 720 #if 1
720 /* REVISIT - should be a runtime check (eg if (CPU_IS_PCX_L) ...) */ 721 /* REVISIT - should be a runtime check (eg if (CPU_IS_PCX_L) ...) */
721 /* 722 /*
722 ** PCX-L processors don't support XQL like Dino wants it. 723 ** PCX-L processors don't support XQL like Dino wants it.
723 ** PCX-L2 ignore XQL signal and it doesn't matter. 724 ** PCX-L2 ignore XQL signal and it doesn't matter.
724 */ 725 */
725 brdg_feat &= ~0x4; /* UXQL */ 726 brdg_feat &= ~0x4; /* UXQL */
726 #endif 727 #endif
727 __raw_writel( brdg_feat, dino_dev->hba.base_addr+DINO_BRDG_FEAT); 728 __raw_writel( brdg_feat, dino_dev->hba.base_addr+DINO_BRDG_FEAT);
728 729
729 /* 730 /*
730 ** Don't enable address decoding until we know which I/O range 731 ** Don't enable address decoding until we know which I/O range
731 ** currently is available from the host. Only affects MMIO 732 ** currently is available from the host. Only affects MMIO
732 ** and not I/O port space. 733 ** and not I/O port space.
733 */ 734 */
734 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_IO_ADDR_EN); 735 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_IO_ADDR_EN);
735 736
736 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_DAMODE); 737 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_DAMODE);
737 __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIROR); 738 __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIROR);
738 __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIWOR); 739 __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIWOR);
739 740
740 __raw_writel(0x00000040, dino_dev->hba.base_addr+DINO_MLTIM); 741 __raw_writel(0x00000040, dino_dev->hba.base_addr+DINO_MLTIM);
741 __raw_writel(0x00000080, dino_dev->hba.base_addr+DINO_IO_CONTROL); 742 __raw_writel(0x00000080, dino_dev->hba.base_addr+DINO_IO_CONTROL);
742 __raw_writel(0x0000008c, dino_dev->hba.base_addr+DINO_TLTIM); 743 __raw_writel(0x0000008c, dino_dev->hba.base_addr+DINO_TLTIM);
743 744
744 /* Disable PAMR before writing PAPR */ 745 /* Disable PAMR before writing PAPR */
745 __raw_writel(0x0000007e, dino_dev->hba.base_addr+DINO_PAMR); 746 __raw_writel(0x0000007e, dino_dev->hba.base_addr+DINO_PAMR);
746 __raw_writel(0x0000007f, dino_dev->hba.base_addr+DINO_PAPR); 747 __raw_writel(0x0000007f, dino_dev->hba.base_addr+DINO_PAPR);
747 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_PAMR); 748 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_PAMR);
748 749
749 /* 750 /*
750 ** Dino ERS encourages enabling FBB (0x6f). 751 ** Dino ERS encourages enabling FBB (0x6f).
751 ** We can't until we know *all* devices below us can support it. 752 ** We can't until we know *all* devices below us can support it.
752 ** (Something in device configuration header tells us). 753 ** (Something in device configuration header tells us).
753 */ 754 */
754 __raw_writel(0x0000004f, dino_dev->hba.base_addr+DINO_PCICMD); 755 __raw_writel(0x0000004f, dino_dev->hba.base_addr+DINO_PCICMD);
755 756
756 /* Somewhere, the PCI spec says give devices 1 second 757 /* Somewhere, the PCI spec says give devices 1 second
757 ** to recover from the #RESET being de-asserted. 758 ** to recover from the #RESET being de-asserted.
758 ** Experience shows most devices only need 10ms. 759 ** Experience shows most devices only need 10ms.
759 ** This short-cut speeds up booting significantly. 760 ** This short-cut speeds up booting significantly.
760 */ 761 */
761 mdelay(pci_post_reset_delay); 762 mdelay(pci_post_reset_delay);
762 } 763 }
763 764
764 static int __init 765 static int __init
765 dino_bridge_init(struct dino_device *dino_dev, const char *name) 766 dino_bridge_init(struct dino_device *dino_dev, const char *name)
766 { 767 {
767 unsigned long io_addr; 768 unsigned long io_addr;
768 int result, i, count=0; 769 int result, i, count=0;
769 struct resource *res, *prevres = NULL; 770 struct resource *res, *prevres = NULL;
770 /* 771 /*
771 * Decoding IO_ADDR_EN only works for Built-in Dino 772 * Decoding IO_ADDR_EN only works for Built-in Dino
772 * since PDC has already initialized this. 773 * since PDC has already initialized this.
773 */ 774 */
774 775
775 io_addr = __raw_readl(dino_dev->hba.base_addr + DINO_IO_ADDR_EN); 776 io_addr = __raw_readl(dino_dev->hba.base_addr + DINO_IO_ADDR_EN);
776 if (io_addr == 0) { 777 if (io_addr == 0) {
777 printk(KERN_WARNING "%s: No PCI devices enabled.\n", name); 778 printk(KERN_WARNING "%s: No PCI devices enabled.\n", name);
778 return -ENODEV; 779 return -ENODEV;
779 } 780 }
780 781
781 res = &dino_dev->hba.lmmio_space; 782 res = &dino_dev->hba.lmmio_space;
782 for (i = 0; i < 32; i++) { 783 for (i = 0; i < 32; i++) {
783 unsigned long start, end; 784 unsigned long start, end;
784 785
785 if((io_addr & (1 << i)) == 0) 786 if((io_addr & (1 << i)) == 0)
786 continue; 787 continue;
787 788
788 start = (unsigned long)(signed int)(0xf0000000 | (i << 23)); 789 start = F_EXTEND(0xf0000000UL) | (i << 23);
789 end = start + 8 * 1024 * 1024 - 1; 790 end = start + 8 * 1024 * 1024 - 1;
790 791
791 DBG("DINO RANGE %d is at 0x%lx-0x%lx\n", count, 792 DBG("DINO RANGE %d is at 0x%lx-0x%lx\n", count,
792 start, end); 793 start, end);
793 794
794 if(prevres && prevres->end + 1 == start) { 795 if(prevres && prevres->end + 1 == start) {
795 prevres->end = end; 796 prevres->end = end;
796 } else { 797 } else {
797 if(count >= DINO_MAX_LMMIO_RESOURCES) { 798 if(count >= DINO_MAX_LMMIO_RESOURCES) {
798 printk(KERN_ERR "%s is out of resource windows for range %d (0x%lx-0x%lx)\n", name, count, start, end); 799 printk(KERN_ERR "%s is out of resource windows for range %d (0x%lx-0x%lx)\n", name, count, start, end);
799 break; 800 break;
800 } 801 }
801 prevres = res; 802 prevres = res;
802 res->start = start; 803 res->start = start;
803 res->end = end; 804 res->end = end;
804 res->flags = IORESOURCE_MEM; 805 res->flags = IORESOURCE_MEM;
805 res->name = kmalloc(64, GFP_KERNEL); 806 res->name = kmalloc(64, GFP_KERNEL);
806 if(res->name) 807 if(res->name)
807 snprintf((char *)res->name, 64, "%s LMMIO %d", 808 snprintf((char *)res->name, 64, "%s LMMIO %d",
808 name, count); 809 name, count);
809 res++; 810 res++;
810 count++; 811 count++;
811 } 812 }
812 } 813 }
813 814
814 res = &dino_dev->hba.lmmio_space; 815 res = &dino_dev->hba.lmmio_space;
815 816
816 for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) { 817 for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
817 if(res[i].flags == 0) 818 if(res[i].flags == 0)
818 break; 819 break;
819 820
820 result = ccio_request_resource(dino_dev->hba.dev, &res[i]); 821 result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
821 if (result < 0) { 822 if (result < 0) {
822 printk(KERN_ERR "%s: failed to claim PCI Bus address space %d (0x%lx-0x%lx)!\n", name, i, res[i].start, res[i].end); 823 printk(KERN_ERR "%s: failed to claim PCI Bus address space %d (0x%lx-0x%lx)!\n", name, i, res[i].start, res[i].end);
823 return result; 824 return result;
824 } 825 }
825 } 826 }
826 return 0; 827 return 0;
827 } 828 }
828 829
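The window-decoding loop above is self-contained enough to model in user space: each set bit i of IO_ADDR_EN enables an 8 MB LMMIO window at 0xf0000000 + (i << 23), and adjacent windows get merged into a single resource. A hedged sketch of that decoding (32-bit addresses; F_EXTEND omitted):

    /* Decode a Dino IO_ADDR_EN bitmask into coalesced 8 MB ranges,
     * mirroring the merge logic in dino_bridge_init() above. */
    #include <stdio.h>

    static void decode_io_addr_en(unsigned int io_addr)
    {
            unsigned long start = 0, end = 0;
            int open = 0;
            int i;

            for (i = 0; i < 32; i++) {
                    unsigned long s, e;

                    if (!(io_addr & (1u << i)))
                            continue;
                    s = 0xf0000000UL | ((unsigned long)i << 23);
                    e = s + 8 * 1024 * 1024 - 1;
                    if (open && end + 1 == s) {
                            end = e;        /* adjacent: extend window */
                    } else {
                            if (open)
                                    printf("range 0x%lx-0x%lx\n", start, end);
                            start = s;
                            end = e;
                            open = 1;
                    }
            }
            if (open)
                    printf("range 0x%lx-0x%lx\n", start, end);
    }

    int main(void)
    {
            decode_io_addr_en(0x00000007);  /* bits 0-2 -> one 24 MB range */
            return 0;
    }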
829 static int __init dino_common_init(struct parisc_device *dev, 830 static int __init dino_common_init(struct parisc_device *dev,
830 struct dino_device *dino_dev, const char *name) 831 struct dino_device *dino_dev, const char *name)
831 { 832 {
832 int status; 833 int status;
833 u32 eim; 834 u32 eim;
834 struct gsc_irq gsc_irq; 835 struct gsc_irq gsc_irq;
835 struct resource *res; 836 struct resource *res;
836 837
837 pcibios_register_hba(&dino_dev->hba); 838 pcibios_register_hba(&dino_dev->hba);
838 839
839 pci_bios = &dino_bios_ops; /* used by pci_scan_bus() */ 840 pci_bios = &dino_bios_ops; /* used by pci_scan_bus() */
840 pci_port = &dino_port_ops; 841 pci_port = &dino_port_ops;
841 842
842 /* 843 /*
843 ** Note: SMP systems can make use of IRR1/IAR1 registers 844 ** Note: SMP systems can make use of IRR1/IAR1 registers
844 ** But it won't buy much performance except in very 845 ** But it won't buy much performance except in very
845 ** specific applications/configurations. Note Dino 846 ** specific applications/configurations. Note Dino
846 ** still only has 11 IRQ input lines - just map some of them 847 ** still only has 11 IRQ input lines - just map some of them
847 ** to a different processor. 848 ** to a different processor.
848 */ 849 */
849 dev->irq = gsc_alloc_irq(&gsc_irq); 850 dev->irq = gsc_alloc_irq(&gsc_irq);
850 dino_dev->txn_addr = gsc_irq.txn_addr; 851 dino_dev->txn_addr = gsc_irq.txn_addr;
851 dino_dev->txn_data = gsc_irq.txn_data; 852 dino_dev->txn_data = gsc_irq.txn_data;
852 eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; 853 eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
853 854
854 /* 855 /*
855 ** Dino needs a PA "IRQ" to get a processor's attention. 856 ** Dino needs a PA "IRQ" to get a processor's attention.
856 ** arch/parisc/kernel/irq.c returns an EIRR bit. 857 ** arch/parisc/kernel/irq.c returns an EIRR bit.
857 */ 858 */
858 if (dev->irq < 0) { 859 if (dev->irq < 0) {
859 printk(KERN_WARNING "%s: gsc_alloc_irq() failed\n", name); 860 printk(KERN_WARNING "%s: gsc_alloc_irq() failed\n", name);
860 return 1; 861 return 1;
861 } 862 }
862 863
863 status = request_irq(dev->irq, dino_isr, 0, name, dino_dev); 864 status = request_irq(dev->irq, dino_isr, 0, name, dino_dev);
864 if (status) { 865 if (status) {
865 printk(KERN_WARNING "%s: request_irq() failed with %d\n", 866 printk(KERN_WARNING "%s: request_irq() failed with %d\n",
866 name, status); 867 name, status);
867 return 1; 868 return 1;
868 } 869 }
869 870
870 /* Support the serial port which is sometimes attached on built-in 871 /* Support the serial port which is sometimes attached on built-in
871 * Dino / Cujo chips. 872 * Dino / Cujo chips.
872 */ 873 */
873 874
874 gsc_fixup_irqs(dev, dino_dev, dino_choose_irq); 875 gsc_fixup_irqs(dev, dino_dev, dino_choose_irq);
875 876
876 /* 877 /*
877 ** This enables DINO to generate interrupts when it sees 878 ** This enables DINO to generate interrupts when it sees
878 ** any of its inputs *change*. Just asserting an IRQ 879 ** any of its inputs *change*. Just asserting an IRQ
879 ** before it's enabled (ie unmasked) isn't good enough. 880 ** before it's enabled (ie unmasked) isn't good enough.
880 */ 881 */
881 __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0); 882 __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
882 883
883 /* 884 /*
884 ** Some platforms don't clear Dino's IRR0 register at boot time. 885 ** Some platforms don't clear Dino's IRR0 register at boot time.
885 ** Reading will clear it now. 886 ** Reading will clear it now.
886 */ 887 */
887 __raw_readl(dino_dev->hba.base_addr+DINO_IRR0); 888 __raw_readl(dino_dev->hba.base_addr+DINO_IRR0);
888 889
889 /* allocate I/O Port resource region */ 890 /* allocate I/O Port resource region */
890 res = &dino_dev->hba.io_space; 891 res = &dino_dev->hba.io_space;
891 if (!is_cujo(&dev->id)) { 892 if (!is_cujo(&dev->id)) {
892 res->name = "Dino I/O Port"; 893 res->name = "Dino I/O Port";
893 } else { 894 } else {
894 res->name = "Cujo I/O Port"; 895 res->name = "Cujo I/O Port";
895 } 896 }
896 res->start = HBA_PORT_BASE(dino_dev->hba.hba_num); 897 res->start = HBA_PORT_BASE(dino_dev->hba.hba_num);
897 res->end = res->start + (HBA_PORT_SPACE_SIZE - 1); 898 res->end = res->start + (HBA_PORT_SPACE_SIZE - 1);
898 res->flags = IORESOURCE_IO; /* do not mark it busy ! */ 899 res->flags = IORESOURCE_IO; /* do not mark it busy ! */
899 if (request_resource(&ioport_resource, res) < 0) { 900 if (request_resource(&ioport_resource, res) < 0) {
900 printk(KERN_ERR "%s: request I/O Port region failed " 901 printk(KERN_ERR "%s: request I/O Port region failed "
901 "0x%lx/%lx (hpa 0x%p)\n", 902 "0x%lx/%lx (hpa 0x%p)\n",
902 name, res->start, res->end, dino_dev->hba.base_addr); 903 name, res->start, res->end, dino_dev->hba.base_addr);
903 return 1; 904 return 1;
904 } 905 }
905 906
906 return 0; 907 return 0;
907 } 908 }
908 909
909 #define CUJO_RAVEN_ADDR F_EXTEND(0xf1000000UL) 910 #define CUJO_RAVEN_ADDR F_EXTEND(0xf1000000UL)
910 #define CUJO_FIREHAWK_ADDR F_EXTEND(0xf1604000UL) 911 #define CUJO_FIREHAWK_ADDR F_EXTEND(0xf1604000UL)
911 #define CUJO_RAVEN_BADPAGE 0x01003000UL 912 #define CUJO_RAVEN_BADPAGE 0x01003000UL
912 #define CUJO_FIREHAWK_BADPAGE 0x01607000UL 913 #define CUJO_FIREHAWK_BADPAGE 0x01607000UL
913 914
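These macros rely on F_EXTEND() to widen 32-bit F-space addresses for 64-bit kernels. A simplified stand-in, assuming the usual parisc convention that I/O space sits at the very top of the 64-bit physical address space (not the exact header definition):

    /* Simplified stand-in for the parisc F_EXTEND() macro: on a 64-bit
     * kernel the 32-bit 0xfXXXXXXX I/O-space addresses live at the top
     * of the physical address space, so the upper word is all-ones. */
    #ifdef CONFIG_64BIT
    #define F_EXTEND(x)    ((unsigned long)((x) | 0xffffffff00000000UL))
    #else
    #define F_EXTEND(x)    ((unsigned long)(x))
    #endif

    /* F_EXTEND(0xf1000000UL) == 0xfffffffff1000000UL on 64-bit builds */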
914 static const char *dino_vers[] = { 915 static const char *dino_vers[] = {
915 "2.0", 916 "2.0",
916 "2.1", 917 "2.1",
917 "3.0", 918 "3.0",
918 "3.1" 919 "3.1"
919 }; 920 };
920 921
921 static const char *cujo_vers[] = { 922 static const char *cujo_vers[] = {
922 "1.0", 923 "1.0",
923 "2.0" 924 "2.0"
924 }; 925 };
925 926
926 void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp); 927 void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
927 928
928 /* 929 /*
929 ** Determine if dino should claim this chip (return 0) or not (return 1). 930 ** Determine if dino should claim this chip (return 0) or not (return 1).
930 ** If so, initialize the chip appropriately (card-mode vs bridge mode). 931 ** If so, initialize the chip appropriately (card-mode vs bridge mode).
931 ** Much of the initialization is common though. 932 ** Much of the initialization is common though.
932 */ 933 */
933 static int __init dino_probe(struct parisc_device *dev) 934 static int __init dino_probe(struct parisc_device *dev)
934 { 935 {
935 struct dino_device *dino_dev; // Dino specific control struct 936 struct dino_device *dino_dev; // Dino specific control struct
936 const char *version = "unknown"; 937 const char *version = "unknown";
937 char *name; 938 char *name;
938 int is_cujo = 0; 939 int is_cujo = 0;
939 struct pci_bus *bus; 940 struct pci_bus *bus;
940 unsigned long hpa = dev->hpa.start; 941 unsigned long hpa = dev->hpa.start;
941 942
942 name = "Dino"; 943 name = "Dino";
943 if (is_card_dino(&dev->id)) { 944 if (is_card_dino(&dev->id)) {
944 version = "3.x (card mode)"; 945 version = "3.x (card mode)";
945 } else { 946 } else {
946 if (!is_cujo(&dev->id)) { 947 if (!is_cujo(&dev->id)) {
947 if (dev->id.hversion_rev < 4) { 948 if (dev->id.hversion_rev < 4) {
948 version = dino_vers[dev->id.hversion_rev]; 949 version = dino_vers[dev->id.hversion_rev];
949 } 950 }
950 } else { 951 } else {
951 name = "Cujo"; 952 name = "Cujo";
952 is_cujo = 1; 953 is_cujo = 1;
953 if (dev->id.hversion_rev < 2) { 954 if (dev->id.hversion_rev < 2) {
954 version = cujo_vers[dev->id.hversion_rev]; 955 version = cujo_vers[dev->id.hversion_rev];
955 } 956 }
956 } 957 }
957 } 958 }
958 959
959 printk("%s version %s found at 0x%lx\n", name, version, hpa); 960 printk("%s version %s found at 0x%lx\n", name, version, hpa);
960 961
961 if (!request_mem_region(hpa, PAGE_SIZE, name)) { 962 if (!request_mem_region(hpa, PAGE_SIZE, name)) {
962 printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n", 963 printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
963 hpa); 964 hpa);
964 return 1; 965 return 1;
965 } 966 }
966 967
967 /* Check for bugs */ 968 /* Check for bugs */
968 if (is_cujo && dev->id.hversion_rev == 1) { 969 if (is_cujo && dev->id.hversion_rev == 1) {
969 #ifdef CONFIG_IOMMU_CCIO 970 #ifdef CONFIG_IOMMU_CCIO
970 printk(KERN_WARNING "Enabling Cujo 2.0 bug workaround\n"); 971 printk(KERN_WARNING "Enabling Cujo 2.0 bug workaround\n");
971 if (hpa == (unsigned long)CUJO_RAVEN_ADDR) { 972 if (hpa == (unsigned long)CUJO_RAVEN_ADDR) {
972 ccio_cujo20_fixup(dev, CUJO_RAVEN_BADPAGE); 973 ccio_cujo20_fixup(dev, CUJO_RAVEN_BADPAGE);
973 } else if (hpa == (unsigned long)CUJO_FIREHAWK_ADDR) { 974 } else if (hpa == (unsigned long)CUJO_FIREHAWK_ADDR) {
974 ccio_cujo20_fixup(dev, CUJO_FIREHAWK_BADPAGE); 975 ccio_cujo20_fixup(dev, CUJO_FIREHAWK_BADPAGE);
975 } else { 976 } else {
976 printk("Don't recognise Cujo at address 0x%lx, not enabling workaround\n", hpa); 977 printk("Don't recognise Cujo at address 0x%lx, not enabling workaround\n", hpa);
977 } 978 }
978 #endif 979 #endif
979 } else if (!is_cujo && !is_card_dino(&dev->id) && 980 } else if (!is_cujo && !is_card_dino(&dev->id) &&
980 dev->id.hversion_rev < 3) { 981 dev->id.hversion_rev < 3) {
981 printk(KERN_WARNING 982 printk(KERN_WARNING
982 "The GSCtoPCI (Dino hrev %d) bus converter found may exhibit\n" 983 "The GSCtoPCI (Dino hrev %d) bus converter found may exhibit\n"
983 "data corruption. See Service Note Numbers: A4190A-01, A4191A-01.\n" 984 "data corruption. See Service Note Numbers: A4190A-01, A4191A-01.\n"
984 "Systems shipped after Aug 20, 1997 will not exhibit this problem.\n" 985 "Systems shipped after Aug 20, 1997 will not exhibit this problem.\n"
985 "Models affected: C180, C160, C160L, B160L, and B132L workstations.\n\n", 986 "Models affected: C180, C160, C160L, B160L, and B132L workstations.\n\n",
986 dev->id.hversion_rev); 987 dev->id.hversion_rev);
987 /* REVISIT: why are C200/C240 listed in the README table but not 988 /* REVISIT: why are C200/C240 listed in the README table but not
988 ** "Models affected"? Could be an omission in the original literature. 989 ** "Models affected"? Could be an omission in the original literature.
989 */ 990 */
990 } 991 }
991 992
992 dino_dev = kzalloc(sizeof(struct dino_device), GFP_KERNEL); 993 dino_dev = kzalloc(sizeof(struct dino_device), GFP_KERNEL);
993 if (!dino_dev) { 994 if (!dino_dev) {
994 printk("dino_init_chip - couldn't alloc dino_device\n"); 995 printk("dino_init_chip - couldn't alloc dino_device\n");
995 return 1; 996 return 1;
996 } 997 }
997 998
998 dino_dev->hba.dev = dev; 999 dino_dev->hba.dev = dev;
999 dino_dev->hba.base_addr = ioremap(hpa, 4096); 1000 dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
1000 dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ 1001 dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
1001 spin_lock_init(&dino_dev->dinosaur_pen); 1002 spin_lock_init(&dino_dev->dinosaur_pen);
1002 dino_dev->hba.iommu = ccio_get_iommu(dev); 1003 dino_dev->hba.iommu = ccio_get_iommu(dev);
1003 1004
1004 if (is_card_dino(&dev->id)) { 1005 if (is_card_dino(&dev->id)) {
1005 dino_card_init(dino_dev); 1006 dino_card_init(dino_dev);
1006 } else { 1007 } else {
1007 dino_bridge_init(dino_dev, name); 1008 dino_bridge_init(dino_dev, name);
1008 } 1009 }
1009 1010
1010 if (dino_common_init(dev, dino_dev, name)) 1011 if (dino_common_init(dev, dino_dev, name))
1011 return 1; 1012 return 1;
1012 1013
1013 dev->dev.platform_data = dino_dev; 1014 dev->dev.platform_data = dino_dev;
1014 1015
1015 /* 1016 /*
1016 ** The sysdata argument is left NULL to avoid chicken/egg 1017 ** The sysdata argument is left NULL to avoid chicken/egg
1017 ** problems with the configuration accessor functions. 1018 ** problems with the configuration accessor functions.
1018 */ 1019 */
1019 bus = pci_scan_bus_parented(&dev->dev, dino_current_bus, 1020 bus = pci_scan_bus_parented(&dev->dev, dino_current_bus,
1020 &dino_cfg_ops, NULL); 1021 &dino_cfg_ops, NULL);
1021 if(bus) { 1022 if(bus) {
1022 pci_bus_add_devices(bus); 1023 pci_bus_add_devices(bus);
1023 /* This code *depends* on scanning being single threaded 1024 /* This code *depends* on scanning being single threaded
1024 * if it isn't, this global bus number count will fail 1025 * if it isn't, this global bus number count will fail
1025 */ 1026 */
1026 dino_current_bus = bus->subordinate + 1; 1027 dino_current_bus = bus->subordinate + 1;
1027 pci_bus_assign_resources(bus); 1028 pci_bus_assign_resources(bus);
1028 } else { 1029 } else {
1029 printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (probably duplicate bus number %d)\n", dev->dev.bus_id, dino_current_bus); 1030 printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (probably duplicate bus number %d)\n", dev->dev.bus_id, dino_current_bus);
1030 /* increment the bus number in case of duplicates */ 1031 /* increment the bus number in case of duplicates */
1031 dino_current_bus++; 1032 dino_current_bus++;
1032 } 1033 }
1033 dino_dev->hba.hba_bus = bus; 1034 dino_dev->hba.hba_bus = bus;
1034 return 0; 1035 return 0;
1035 } 1036 }
1036 1037
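The ioremap_nocache() change in dino_probe() above is the heart of this patch: Dino's registers live in F-space, which must be mapped uncached before __raw_readl()/__raw_writel() behave as real MMIO accesses. A minimal sketch of the pattern, assuming kernel context (DEMO_REG is a made-up offset, not a real Dino register):

    /* Sketch only: the uncached-MMIO pattern this patch enforces. */
    #include <linux/types.h>
    #include <linux/errno.h>
    #include <asm/io.h>

    #define DEMO_REG 0x10   /* illustrative offset, not a Dino register */

    static int demo_map_regs(unsigned long hpa)
    {
            void __iomem *base = ioremap_nocache(hpa, 4096);
            u32 val;

            if (!base)
                    return -ENOMEM;

            val = __raw_readl(base + DEMO_REG);   /* uncached read  */
            __raw_writel(val, base + DEMO_REG);   /* uncached write */

            iounmap(base);
            return 0;
    }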
1037 /* 1038 /*
1038 * Normally, we would just test sversion. But the Elroy PCI adapter has 1039 * Normally, we would just test sversion. But the Elroy PCI adapter has
1039 * the same sversion as Dino, so we have to check hversion as well. 1040 * the same sversion as Dino, so we have to check hversion as well.
1040 * Unfortunately, the J2240 PDC reports the wrong hversion for the first 1041 * Unfortunately, the J2240 PDC reports the wrong hversion for the first
1041 * Dino, so we have to test for Dino, Cujo and Dino-in-a-J2240. 1042 * Dino, so we have to test for Dino, Cujo and Dino-in-a-J2240.
1042 * For card-mode Dino, most machines report an sversion of 9D. But 715 1043 * For card-mode Dino, most machines report an sversion of 9D. But 715
1043 * and 725 firmware misreport it as 0x08080 for no adequately explained 1044 * and 725 firmware misreport it as 0x08080 for no adequately explained
1044 * reason. 1045 * reason.
1045 */ 1046 */
1046 static struct parisc_device_id dino_tbl[] = { 1047 static struct parisc_device_id dino_tbl[] = {
1047 { HPHW_A_DMA, HVERSION_REV_ANY_ID, 0x004, 0x0009D },/* Card-mode Dino */ 1048 { HPHW_A_DMA, HVERSION_REV_ANY_ID, 0x004, 0x0009D },/* Card-mode Dino */
1048 { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x08080 }, /* XXX */ 1049 { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x08080 }, /* XXX */
1049 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x680, 0xa }, /* Bridge-mode Dino */ 1050 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x680, 0xa }, /* Bridge-mode Dino */
1050 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x682, 0xa }, /* Bridge-mode Cujo */ 1051 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x682, 0xa }, /* Bridge-mode Cujo */
1051 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x05d, 0xa }, /* Dino in a J2240 */ 1052 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x05d, 0xa }, /* Dino in a J2240 */
1052 { 0, } 1053 { 0, }
1053 }; 1054 };
1054 1055
1055 static struct parisc_driver dino_driver = { 1056 static struct parisc_driver dino_driver = {
1056 .name = "dino", 1057 .name = "dino",
1057 .id_table = dino_tbl, 1058 .id_table = dino_tbl,
1058 .probe = dino_probe, 1059 .probe = dino_probe,
1059 }; 1060 };
1060 1061
1061 /* 1062 /*
1062 * One time initialization to let the world know Dino is here. 1063 * One time initialization to let the world know Dino is here.
1063 * This is the only routine which is NOT static. 1064 * This is the only routine which is NOT static.
1064 * Must be called exactly once before pci_init(). 1065 * Must be called exactly once before pci_init().
1065 */ 1066 */
1066 int __init dino_init(void) 1067 int __init dino_init(void)
1067 { 1068 {
1068 register_parisc_driver(&dino_driver); 1069 register_parisc_driver(&dino_driver);
1069 return 0; 1070 return 0;
1070 } 1071 }
1071 1072
1072 1073
drivers/parisc/eisa.c
1 /* 1 /*
2 * eisa.c - provide support for EISA adapters in PA-RISC machines 2 * eisa.c - provide support for EISA adapters in PA-RISC machines
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard 9 * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
10 * Copyright (c) 2001 Daniel Engstrom <5116@telia.com> 10 * Copyright (c) 2001 Daniel Engstrom <5116@telia.com>
11 * 11 *
12 * There are two distinct EISA adapters. Mongoose is found in machines 12 * There are two distinct EISA adapters. Mongoose is found in machines
13 * before the 712; then the Wax ASIC is used. To complicate matters, the 13 * before the 712; then the Wax ASIC is used. To complicate matters, the
14 * Wax ASIC also includes a PS/2 and RS-232 controller, but those are 14 * Wax ASIC also includes a PS/2 and RS-232 controller, but those are
15 * dealt with elsewhere; this file is concerned only with the EISA portions 15 * dealt with elsewhere; this file is concerned only with the EISA portions
16 * of Wax. 16 * of Wax.
17 * 17 *
18 * 18 *
19 * HINT: 19 * HINT:
20 * ----- 20 * -----
21 * To allow an ISA card to work properly in the EISA slot you need to 21 * To allow an ISA card to work properly in the EISA slot you need to
22 * set an edge trigger level. This may be done on the palo command line 22 * set an edge trigger level. This may be done on the palo command line
23 * by adding the kernel parameter "eisa_irq_edge=n,n2[,...]", with 23 * by adding the kernel parameter "eisa_irq_edge=n,n2[,...]", with
24 * n and n2 as the irq levels you want to use. 24 * n and n2 as the irq levels you want to use.
25 * 25 *
26 * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at 26 * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at
27 * irq levels 10 and 11. 27 * irq levels 10 and 11.
28 */ 28 */
29 29
30 #include <linux/init.h> 30 #include <linux/init.h>
31 #include <linux/ioport.h> 31 #include <linux/ioport.h>
32 #include <linux/interrupt.h> 32 #include <linux/interrupt.h>
33 #include <linux/kernel.h> 33 #include <linux/kernel.h>
34 #include <linux/module.h> 34 #include <linux/module.h>
35 #include <linux/pci.h> 35 #include <linux/pci.h>
36 #include <linux/sched.h> 36 #include <linux/sched.h>
37 #include <linux/spinlock.h> 37 #include <linux/spinlock.h>
38 #include <linux/eisa.h> 38 #include <linux/eisa.h>
39 39
40 #include <asm/byteorder.h> 40 #include <asm/byteorder.h>
41 #include <asm/io.h> 41 #include <asm/io.h>
42 #include <asm/hardware.h> 42 #include <asm/hardware.h>
43 #include <asm/processor.h> 43 #include <asm/processor.h>
44 #include <asm/parisc-device.h> 44 #include <asm/parisc-device.h>
45 #include <asm/delay.h> 45 #include <asm/delay.h>
46 #include <asm/eisa_bus.h> 46 #include <asm/eisa_bus.h>
47 #include <asm/eisa_eeprom.h> 47 #include <asm/eisa_eeprom.h>
48 48
49 #if 0 49 #if 0
50 #define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg ) 50 #define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg )
51 #else 51 #else
52 #define EISA_DBG(msg, arg... ) 52 #define EISA_DBG(msg, arg... )
53 #endif 53 #endif
54 54
55 #define SNAKES_EEPROM_BASE_ADDR 0xF0810400 55 #define SNAKES_EEPROM_BASE_ADDR 0xF0810400
56 #define MIRAGE_EEPROM_BASE_ADDR 0xF00C0400 56 #define MIRAGE_EEPROM_BASE_ADDR 0xF00C0400
57 57
58 static DEFINE_SPINLOCK(eisa_irq_lock); 58 static DEFINE_SPINLOCK(eisa_irq_lock);
59 59
60 void __iomem *eisa_eeprom_addr __read_mostly; 60 void __iomem *eisa_eeprom_addr __read_mostly;
61 61
62 /* We can only have one EISA adapter in the system because neither 62 /* We can only have one EISA adapter in the system because neither
63 * implementation is flexible enough to support more than one. 63 * implementation is flexible enough to support more than one.
64 */ 64 */
65 static struct eisa_ba { 65 static struct eisa_ba {
66 struct pci_hba_data hba; 66 struct pci_hba_data hba;
67 unsigned long eeprom_addr; 67 unsigned long eeprom_addr;
68 struct eisa_root_device root; 68 struct eisa_root_device root;
69 } eisa_dev; 69 } eisa_dev;
70 70
71 /* Port ops */ 71 /* Port ops */
72 72
73 static inline unsigned long eisa_permute(unsigned short port) 73 static inline unsigned long eisa_permute(unsigned short port)
74 { 74 {
75 if (port & 0x300) { 75 if (port & 0x300) {
76 return 0xfc000000 | ((port & 0xfc00) >> 6) 76 return 0xfc000000 | ((port & 0xfc00) >> 6)
77 | ((port & 0x3f8) << 9) | (port & 7); 77 | ((port & 0x3f8) << 9) | (port & 7);
78 } else { 78 } else {
79 return 0xfc000000 | port; 79 return 0xfc000000 | port;
80 } 80 }
81 } 81 }
82 82
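eisa_permute() scatters an EISA port number into the GSC address window at 0xfc000000: ports with bits 8-9 clear map linearly, while the rest have their bits shuffled into the Mongoose/Wax layout. A small host-side check of the mapping (expected values follow directly from the function above):

    #include <stdio.h>

    /* Copy of the permutation above, runnable outside the kernel. */
    static unsigned long eisa_permute(unsigned short port)
    {
            if (port & 0x300)
                    return 0xfc000000UL | ((port & 0xfc00) >> 6)
                            | ((port & 0x3f8) << 9) | (port & 7);
            return 0xfc000000UL | port;
    }

    int main(void)
    {
            printf("0x80  -> 0x%lx\n", eisa_permute(0x080)); /* 0xfc000080 */
            printf("0x3f8 -> 0x%lx\n", eisa_permute(0x3f8)); /* 0xfc07f000 */
            return 0;
    }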
83 unsigned char eisa_in8(unsigned short port) 83 unsigned char eisa_in8(unsigned short port)
84 { 84 {
85 if (EISA_bus) 85 if (EISA_bus)
86 return gsc_readb(eisa_permute(port)); 86 return gsc_readb(eisa_permute(port));
87 return 0xff; 87 return 0xff;
88 } 88 }
89 89
90 unsigned short eisa_in16(unsigned short port) 90 unsigned short eisa_in16(unsigned short port)
91 { 91 {
92 if (EISA_bus) 92 if (EISA_bus)
93 return le16_to_cpu(gsc_readw(eisa_permute(port))); 93 return le16_to_cpu(gsc_readw(eisa_permute(port)));
94 return 0xffff; 94 return 0xffff;
95 } 95 }
96 96
97 unsigned int eisa_in32(unsigned short port) 97 unsigned int eisa_in32(unsigned short port)
98 { 98 {
99 if (EISA_bus) 99 if (EISA_bus)
100 return le32_to_cpu(gsc_readl(eisa_permute(port))); 100 return le32_to_cpu(gsc_readl(eisa_permute(port)));
101 return 0xffffffff; 101 return 0xffffffff;
102 } 102 }
103 103
104 void eisa_out8(unsigned char data, unsigned short port) 104 void eisa_out8(unsigned char data, unsigned short port)
105 { 105 {
106 if (EISA_bus) 106 if (EISA_bus)
107 gsc_writeb(data, eisa_permute(port)); 107 gsc_writeb(data, eisa_permute(port));
108 } 108 }
109 109
110 void eisa_out16(unsigned short data, unsigned short port) 110 void eisa_out16(unsigned short data, unsigned short port)
111 { 111 {
112 if (EISA_bus) 112 if (EISA_bus)
113 gsc_writew(cpu_to_le16(data), eisa_permute(port)); 113 gsc_writew(cpu_to_le16(data), eisa_permute(port));
114 } 114 }
115 115
116 void eisa_out32(unsigned int data, unsigned short port) 116 void eisa_out32(unsigned int data, unsigned short port)
117 { 117 {
118 if (EISA_bus) 118 if (EISA_bus)
119 gsc_writel(cpu_to_le32(data), eisa_permute(port)); 119 gsc_writel(cpu_to_le32(data), eisa_permute(port));
120 } 120 }
121 121
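The cpu_to_le16()/le16_to_cpu() conversions in the 16- and 32-bit accessors are needed because EISA is a little-endian bus while PA-RISC is big-endian, so gsc_readw()/gsc_readl() return byte-swapped values. A host-side illustration of the 16-bit case (a stand-in for le16_to_cpu() as it behaves on a big-endian CPU):

    #include <stdio.h>
    #include <stdint.h>

    /* Swap helper standing in for le16_to_cpu() on a big-endian CPU. */
    static uint16_t le16_to_cpu_be(uint16_t v)
    {
            return (uint16_t)((v >> 8) | (v << 8));
    }

    int main(void)
    {
            /* A device register holding 0x1234 reads as 0x3412 on a
             * big-endian CPU until byte-swapped. */
            printf("0x%04x\n", le16_to_cpu_be(0x3412)); /* prints 0x1234 */
            return 0;
    }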
122 #ifndef CONFIG_PCI 122 #ifndef CONFIG_PCI
123 /* We call these directly without PCI. See asm/io.h. */ 123 /* We call these directly without PCI. See asm/io.h. */
124 EXPORT_SYMBOL(eisa_in8); 124 EXPORT_SYMBOL(eisa_in8);
125 EXPORT_SYMBOL(eisa_in16); 125 EXPORT_SYMBOL(eisa_in16);
126 EXPORT_SYMBOL(eisa_in32); 126 EXPORT_SYMBOL(eisa_in32);
127 EXPORT_SYMBOL(eisa_out8); 127 EXPORT_SYMBOL(eisa_out8);
128 EXPORT_SYMBOL(eisa_out16); 128 EXPORT_SYMBOL(eisa_out16);
129 EXPORT_SYMBOL(eisa_out32); 129 EXPORT_SYMBOL(eisa_out32);
130 #endif 130 #endif
131 131
132 /* Interrupt handling */ 132 /* Interrupt handling */
133 133
134 /* cached interrupt mask registers */ 134 /* cached interrupt mask registers */
135 static int master_mask; 135 static int master_mask;
136 static int slave_mask; 136 static int slave_mask;
137 137
138 /* the trigger level can be set with the 138 /* the trigger level can be set with the
139 * eisa_irq_edge=n,n,n command-line parameter. 139 * eisa_irq_edge=n,n,n command-line parameter.
140 * We should really read this from the EEPROM 140 * We should really read this from the EEPROM
141 * in the future. 141 * in the future.
142 */ 142 */
143 /* irq 13,8,2,1,0 must be edge */ 143 /* irq 13,8,2,1,0 must be edge */
144 static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */ 144 static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */
145 145
146 146
147 /* called by free irq */ 147 /* called by free irq */
148 static void eisa_disable_irq(unsigned int irq) 148 static void eisa_disable_irq(unsigned int irq)
149 { 149 {
150 unsigned long flags; 150 unsigned long flags;
151 151
152 EISA_DBG("disable irq %d\n", irq); 152 EISA_DBG("disable irq %d\n", irq);
153 /* just mask for now */ 153 /* just mask for now */
154 spin_lock_irqsave(&eisa_irq_lock, flags); 154 spin_lock_irqsave(&eisa_irq_lock, flags);
155 if (irq & 8) { 155 if (irq & 8) {
156 slave_mask |= (1 << (irq&7)); 156 slave_mask |= (1 << (irq&7));
157 eisa_out8(slave_mask, 0xa1); 157 eisa_out8(slave_mask, 0xa1);
158 } else { 158 } else {
159 master_mask |= (1 << (irq&7)); 159 master_mask |= (1 << (irq&7));
160 eisa_out8(master_mask, 0x21); 160 eisa_out8(master_mask, 0x21);
161 } 161 }
162 spin_unlock_irqrestore(&eisa_irq_lock, flags); 162 spin_unlock_irqrestore(&eisa_irq_lock, flags);
163 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); 163 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
164 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); 164 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
165 } 165 }
166 166
167 /* called by request irq */ 167 /* called by request irq */
168 static void eisa_enable_irq(unsigned int irq) 168 static void eisa_enable_irq(unsigned int irq)
169 { 169 {
170 unsigned long flags; 170 unsigned long flags;
171 EISA_DBG("enable irq %d\n", irq); 171 EISA_DBG("enable irq %d\n", irq);
172 172
173 spin_lock_irqsave(&eisa_irq_lock, flags); 173 spin_lock_irqsave(&eisa_irq_lock, flags);
174 if (irq & 8) { 174 if (irq & 8) {
175 slave_mask &= ~(1 << (irq&7)); 175 slave_mask &= ~(1 << (irq&7));
176 eisa_out8(slave_mask, 0xa1); 176 eisa_out8(slave_mask, 0xa1);
177 } else { 177 } else {
178 master_mask &= ~(1 << (irq&7)); 178 master_mask &= ~(1 << (irq&7));
179 eisa_out8(master_mask, 0x21); 179 eisa_out8(master_mask, 0x21);
180 } 180 }
181 spin_unlock_irqrestore(&eisa_irq_lock, flags); 181 spin_unlock_irqrestore(&eisa_irq_lock, flags);
182 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); 182 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
183 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); 183 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
184 } 184 }
185 185
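Both routines follow the cascaded-8259 convention: IRQs 8-15 sit on the slave PIC behind mask port 0xa1, IRQs 0-7 on the master behind 0x21, with one bit per IRQ within each PIC's 8-bit mask. The selection arithmetic in isolation, as a runnable sketch:

    #include <stdio.h>

    /* Given an EISA IRQ 0-15, compute which 8259 mask port and bit
     * the enable/disable routines above would touch. */
    static void pic_mask_target(unsigned int irq)
    {
            unsigned int port = (irq & 8) ? 0xa1 : 0x21;
            unsigned int bit  = 1u << (irq & 7);

            printf("irq %2u -> port 0x%02x, mask bit 0x%02x\n",
                   irq, port, bit);
    }

    int main(void)
    {
            pic_mask_target(3);   /* master PIC, bit 0x08 */
            pic_mask_target(10);  /* slave PIC,  bit 0x04 */
            return 0;
    }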
186 static unsigned int eisa_startup_irq(unsigned int irq) 186 static unsigned int eisa_startup_irq(unsigned int irq)
187 { 187 {
188 eisa_enable_irq(irq); 188 eisa_enable_irq(irq);
189 return 0; 189 return 0;
190 } 190 }
191 191
192 static struct hw_interrupt_type eisa_interrupt_type = { 192 static struct hw_interrupt_type eisa_interrupt_type = {
193 .typename = "EISA", 193 .typename = "EISA",
194 .startup = eisa_startup_irq, 194 .startup = eisa_startup_irq,
195 .shutdown = eisa_disable_irq, 195 .shutdown = eisa_disable_irq,
196 .enable = eisa_enable_irq, 196 .enable = eisa_enable_irq,
197 .disable = eisa_disable_irq, 197 .disable = eisa_disable_irq,
198 .ack = no_ack_irq, 198 .ack = no_ack_irq,
199 .end = no_end_irq, 199 .end = no_end_irq,
200 }; 200 };
201 201
202 static irqreturn_t eisa_irq(int wax_irq, void *intr_dev, struct pt_regs *regs) 202 static irqreturn_t eisa_irq(int wax_irq, void *intr_dev, struct pt_regs *regs)
203 { 203 {
204 int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */ 204 int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */
205 unsigned long flags; 205 unsigned long flags;
206 206
207 spin_lock_irqsave(&eisa_irq_lock, flags); 207 spin_lock_irqsave(&eisa_irq_lock, flags);
208 /* read IRR command */ 208 /* read IRR command */
209 eisa_out8(0x0a, 0x20); 209 eisa_out8(0x0a, 0x20);
210 eisa_out8(0x0a, 0xa0); 210 eisa_out8(0x0a, 0xa0);
211 211
212 EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n", 212 EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n",
213 irq, eisa_in8(0x20), eisa_in8(0xa0)); 213 irq, eisa_in8(0x20), eisa_in8(0xa0));
214 214
215 /* read ISR command */ 215 /* read ISR command */
216 eisa_out8(0x0a, 0x20); 216 eisa_out8(0x0a, 0x20);
217 eisa_out8(0x0a, 0xa0); 217 eisa_out8(0x0a, 0xa0);
218 EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n", 218 EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n",
219 eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1)); 219 eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1));
220 220
221 irq &= 0xf; 221 irq &= 0xf;
222 222
223 /* mask irq and write eoi */ 223 /* mask irq and write eoi */
224 if (irq & 8) { 224 if (irq & 8) {
225 slave_mask |= (1 << (irq&7)); 225 slave_mask |= (1 << (irq&7));
226 eisa_out8(slave_mask, 0xa1); 226 eisa_out8(slave_mask, 0xa1);
227 eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */ 227 eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */
228 eisa_out8(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */ 228 eisa_out8(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
229 229
230 } else { 230 } else {
231 master_mask |= (1 << (irq&7)); 231 master_mask |= (1 << (irq&7));
232 eisa_out8(master_mask, 0x21); 232 eisa_out8(master_mask, 0x21);
233 eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */ 233 eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */
234 } 234 }
235 spin_unlock_irqrestore(&eisa_irq_lock, flags); 235 spin_unlock_irqrestore(&eisa_irq_lock, flags);
236 236
237 __do_IRQ(irq, regs); 237 __do_IRQ(irq, regs);
238 238
239 spin_lock_irqsave(&eisa_irq_lock, flags); 239 spin_lock_irqsave(&eisa_irq_lock, flags);
240 /* unmask */ 240 /* unmask */
241 if (irq & 8) { 241 if (irq & 8) {
242 slave_mask &= ~(1 << (irq&7)); 242 slave_mask &= ~(1 << (irq&7));
243 eisa_out8(slave_mask, 0xa1); 243 eisa_out8(slave_mask, 0xa1);
244 } else { 244 } else {
245 master_mask &= ~(1 << (irq&7)); 245 master_mask &= ~(1 << (irq&7));
246 eisa_out8(master_mask, 0x21); 246 eisa_out8(master_mask, 0x21);
247 } 247 }
248 spin_unlock_irqrestore(&eisa_irq_lock, flags); 248 spin_unlock_irqrestore(&eisa_irq_lock, flags);
249 return IRQ_HANDLED; 249 return IRQ_HANDLED;
250 } 250 }
251 251
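The EOI writes in the handler use the 8259 "specific EOI" command, OCW2 = 0x60 | level; a slave interrupt additionally needs a specific EOI for the cascade input (IRQ2) on the master, which is the fixed 0x62 written above. Spelled out:

    #include <stdio.h>

    /* Specific-EOI encoding used by eisa_irq(): OCW2 = 0x60 | level. */
    int main(void)
    {
            unsigned int irq = 12;                  /* example slave IRQ */

            printf("slave EOI : 0x%02x -> port 0xa0\n", 0x60 | (irq & 7));
            printf("master EOI: 0x%02x -> port 0x20 (cascade IRQ2)\n",
                   0x60 | 2);
            return 0;
    }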
252 static irqreturn_t dummy_irq2_handler(int _, void *dev, struct pt_regs *regs) 252 static irqreturn_t dummy_irq2_handler(int _, void *dev, struct pt_regs *regs)
253 { 253 {
254 printk(KERN_ALERT "eisa: uhh, irq2?\n"); 254 printk(KERN_ALERT "eisa: uhh, irq2?\n");
255 return IRQ_HANDLED; 255 return IRQ_HANDLED;
256 } 256 }
257 257
258 static struct irqaction irq2_action = { 258 static struct irqaction irq2_action = {
259 .handler = dummy_irq2_handler, 259 .handler = dummy_irq2_handler,
260 .name = "cascade", 260 .name = "cascade",
261 }; 261 };
262 262
263 static void init_eisa_pic(void) 263 static void init_eisa_pic(void)
264 { 264 {
265 unsigned long flags; 265 unsigned long flags;
266 266
267 spin_lock_irqsave(&eisa_irq_lock, flags); 267 spin_lock_irqsave(&eisa_irq_lock, flags);
268 268
269 eisa_out8(0xff, 0x21); /* mask during init */ 269 eisa_out8(0xff, 0x21); /* mask during init */
270 eisa_out8(0xff, 0xa1); /* mask during init */ 270 eisa_out8(0xff, 0xa1); /* mask during init */
271 271
272 /* master pic */ 272 /* master pic */
273 eisa_out8(0x11,0x20); /* ICW1 */ 273 eisa_out8(0x11,0x20); /* ICW1 */
274 eisa_out8(0x00,0x21); /* ICW2 */ 274 eisa_out8(0x00,0x21); /* ICW2 */
275 eisa_out8(0x04,0x21); /* ICW3 */ 275 eisa_out8(0x04,0x21); /* ICW3 */
276 eisa_out8(0x01,0x21); /* ICW4 */ 276 eisa_out8(0x01,0x21); /* ICW4 */
277 eisa_out8(0x40,0x20); /* OCW2 */ 277 eisa_out8(0x40,0x20); /* OCW2 */
278 278
279 /* slave pic */ 279 /* slave pic */
280 eisa_out8(0x11,0xa0); /* ICW1 */ 280 eisa_out8(0x11,0xa0); /* ICW1 */
281 eisa_out8(0x08,0xa1); /* ICW2 */ 281 eisa_out8(0x08,0xa1); /* ICW2 */
282 eisa_out8(0x02,0xa1); /* ICW3 */ 282 eisa_out8(0x02,0xa1); /* ICW3 */
283 eisa_out8(0x01,0xa1); /* ICW4 */ 283 eisa_out8(0x01,0xa1); /* ICW4 */
284 eisa_out8(0x40,0xa0); /* OCW2 */ 284 eisa_out8(0x40,0xa0); /* OCW2 */
285 285
286 udelay(100); 286 udelay(100);
287 287
288 slave_mask = 0xff; 288 slave_mask = 0xff;
289 master_mask = 0xfb; 289 master_mask = 0xfb;
290 eisa_out8(slave_mask, 0xa1); /* OCW1 */ 290 eisa_out8(slave_mask, 0xa1); /* OCW1 */
291 eisa_out8(master_mask, 0x21); /* OCW1 */ 291 eisa_out8(master_mask, 0x21); /* OCW1 */
292 292
293 /* set up trigger level */ 293 /* set up trigger level */
294 EISA_DBG("EISA edge/level %04x\n", eisa_irq_level); 294 EISA_DBG("EISA edge/level %04x\n", eisa_irq_level);
295 295
296 eisa_out8(eisa_irq_level&0xff, 0x4d0); /* Set all irq's to edge */ 296 eisa_out8(eisa_irq_level&0xff, 0x4d0); /* Set all irq's to edge */
297 eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1); 297 eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1);
298 298
299 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); 299 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
300 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); 300 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
301 EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0)); 301 EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0));
302 EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1)); 302 EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1));
303 303
304 spin_unlock_irqrestore(&eisa_irq_lock, flags); 304 spin_unlock_irqrestore(&eisa_irq_lock, flags);
305 } 305 }
306 306
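The final two writes program the EISA edge/level control registers: port 0x4d0 holds the triggering bits for IRQs 0-7 and 0x4d1 for IRQs 8-15, with a set bit selecting level triggering. A quick sketch of how a 16-bit eisa_irq_level mask splits across them, assuming IRQs 10 and 11 were made level-triggered:

    #include <stdio.h>

    int main(void)
    {
            /* as if eisa_make_irq_level(10) and (11) had been called */
            unsigned int eisa_irq_level = (1u << 10) | (1u << 11);

            printf("ELCR0 (0x4d0) = 0x%02x\n", eisa_irq_level & 0xff);
            printf("ELCR1 (0x4d1) = 0x%02x\n", (eisa_irq_level >> 8) & 0xff);
            return 0;
    }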
307 /* Device initialisation */ 307 /* Device initialisation */
308 308
309 #define is_mongoose(dev) (dev->id.sversion == 0x00076) 309 #define is_mongoose(dev) (dev->id.sversion == 0x00076)
310 310
311 static int __devinit eisa_probe(struct parisc_device *dev) 311 static int __devinit eisa_probe(struct parisc_device *dev)
312 { 312 {
313 int i, result; 313 int i, result;
314 314
315 char *name = is_mongoose(dev) ? "Mongoose" : "Wax"; 315 char *name = is_mongoose(dev) ? "Mongoose" : "Wax";
316 316
317 printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", 317 printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n",
318 name, dev->hpa.start); 318 name, dev->hpa.start);
319 319
320 eisa_dev.hba.dev = dev; 320 eisa_dev.hba.dev = dev;
321 eisa_dev.hba.iommu = ccio_get_iommu(dev); 321 eisa_dev.hba.iommu = ccio_get_iommu(dev);
322 322
323 eisa_dev.hba.lmmio_space.name = "EISA"; 323 eisa_dev.hba.lmmio_space.name = "EISA";
324 eisa_dev.hba.lmmio_space.start = F_EXTEND(0xfc000000); 324 eisa_dev.hba.lmmio_space.start = F_EXTEND(0xfc000000);
325 eisa_dev.hba.lmmio_space.end = F_EXTEND(0xffbfffff); 325 eisa_dev.hba.lmmio_space.end = F_EXTEND(0xffbfffff);
326 eisa_dev.hba.lmmio_space.flags = IORESOURCE_MEM; 326 eisa_dev.hba.lmmio_space.flags = IORESOURCE_MEM;
327 result = ccio_request_resource(dev, &eisa_dev.hba.lmmio_space); 327 result = ccio_request_resource(dev, &eisa_dev.hba.lmmio_space);
328 if (result < 0) { 328 if (result < 0) {
329 printk(KERN_ERR "EISA: failed to claim EISA Bus address space!\n"); 329 printk(KERN_ERR "EISA: failed to claim EISA Bus address space!\n");
330 return result; 330 return result;
331 } 331 }
332 eisa_dev.hba.io_space.name = "EISA"; 332 eisa_dev.hba.io_space.name = "EISA";
333 eisa_dev.hba.io_space.start = 0; 333 eisa_dev.hba.io_space.start = 0;
334 eisa_dev.hba.io_space.end = 0xffff; 334 eisa_dev.hba.io_space.end = 0xffff;
335 eisa_dev.hba.io_space.flags = IORESOURCE_IO; 335 eisa_dev.hba.io_space.flags = IORESOURCE_IO;
336 result = request_resource(&ioport_resource, &eisa_dev.hba.io_space); 336 result = request_resource(&ioport_resource, &eisa_dev.hba.io_space);
337 if (result < 0) { 337 if (result < 0) {
338 printk(KERN_ERR "EISA: failed to claim EISA Bus port space!\n"); 338 printk(KERN_ERR "EISA: failed to claim EISA Bus port space!\n");
339 return result; 339 return result;
340 } 340 }
341 pcibios_register_hba(&eisa_dev.hba); 341 pcibios_register_hba(&eisa_dev.hba);
342 342
343 result = request_irq(dev->irq, eisa_irq, SA_SHIRQ, "EISA", &eisa_dev); 343 result = request_irq(dev->irq, eisa_irq, SA_SHIRQ, "EISA", &eisa_dev);
344 if (result) { 344 if (result) {
345 printk(KERN_ERR "EISA: request_irq failed!\n"); 345 printk(KERN_ERR "EISA: request_irq failed!\n");
346 return result; 346 return result;
347 } 347 }
348 348
349 /* Reserve IRQ2 */ 349 /* Reserve IRQ2 */
350 irq_desc[2].action = &irq2_action; 350 irq_desc[2].action = &irq2_action;
351 351
352 for (i = 0; i < 16; i++) { 352 for (i = 0; i < 16; i++) {
353 irq_desc[i].handler = &eisa_interrupt_type; 353 irq_desc[i].handler = &eisa_interrupt_type;
354 } 354 }
355 355
356 EISA_bus = 1; 356 EISA_bus = 1;
357 357
358 if (dev->num_addrs) { 358 if (dev->num_addrs) {
359 /* newer firmware hands out the eeprom address */ 359 /* newer firmware hands out the eeprom address */
360 eisa_dev.eeprom_addr = dev->addr[0]; 360 eisa_dev.eeprom_addr = dev->addr[0];
361 } else { 361 } else {
362 /* old firmware, need to figure out the box */ 362 /* old firmware, need to figure out the box */
363 if (is_mongoose(dev)) { 363 if (is_mongoose(dev)) {
364 eisa_dev.eeprom_addr = SNAKES_EEPROM_BASE_ADDR; 364 eisa_dev.eeprom_addr = SNAKES_EEPROM_BASE_ADDR;
365 } else { 365 } else {
366 eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR; 366 eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR;
367 } 367 }
368 } 368 }
369 eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); 369 eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
370 result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, 370 result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space,
371 &eisa_dev.hba.lmmio_space); 371 &eisa_dev.hba.lmmio_space);
372 init_eisa_pic(); 372 init_eisa_pic();
373 373
374 if (result >= 0) { 374 if (result >= 0) {
375 /* FIXME : Don't enumerate the bus twice. */ 375 /* FIXME : Don't enumerate the bus twice. */
376 eisa_dev.root.dev = &dev->dev; 376 eisa_dev.root.dev = &dev->dev;
377 dev->dev.driver_data = &eisa_dev.root; 377 dev->dev.driver_data = &eisa_dev.root;
378 eisa_dev.root.bus_base_addr = 0; 378 eisa_dev.root.bus_base_addr = 0;
379 eisa_dev.root.res = &eisa_dev.hba.io_space; 379 eisa_dev.root.res = &eisa_dev.hba.io_space;
380 eisa_dev.root.slots = result; 380 eisa_dev.root.slots = result;
381 eisa_dev.root.dma_mask = 0xffffffff; /* wild guess */ 381 eisa_dev.root.dma_mask = 0xffffffff; /* wild guess */
382 if (eisa_root_register (&eisa_dev.root)) { 382 if (eisa_root_register (&eisa_dev.root)) {
383 printk(KERN_ERR "EISA: Failed to register EISA root\n"); 383 printk(KERN_ERR "EISA: Failed to register EISA root\n");
384 return -1; 384 return -1;
385 } 385 }
386 } 386 }
387 387
388 return 0; 388 return 0;
389 } 389 }
390 390
391 static struct parisc_device_id eisa_tbl[] = { 391 static struct parisc_device_id eisa_tbl[] = {
392 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00076 }, /* Mongoose */ 392 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00076 }, /* Mongoose */
393 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00090 }, /* Wax EISA */ 393 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00090 }, /* Wax EISA */
394 { 0, } 394 { 0, }
395 }; 395 };
396 396
397 MODULE_DEVICE_TABLE(parisc, eisa_tbl); 397 MODULE_DEVICE_TABLE(parisc, eisa_tbl);
398 398
399 static struct parisc_driver eisa_driver = { 399 static struct parisc_driver eisa_driver = {
400 .name = "eisa_ba", 400 .name = "eisa_ba",
401 .id_table = eisa_tbl, 401 .id_table = eisa_tbl,
402 .probe = eisa_probe, 402 .probe = eisa_probe,
403 }; 403 };
404 404
405 void __init eisa_init(void) 405 void __init eisa_init(void)
406 { 406 {
407 register_parisc_driver(&eisa_driver); 407 register_parisc_driver(&eisa_driver);
408 } 408 }
409 409
410 410
411 static unsigned int eisa_irq_configured; 411 static unsigned int eisa_irq_configured;
412 void eisa_make_irq_level(int num) 412 void eisa_make_irq_level(int num)
413 { 413 {
414 if (eisa_irq_configured & (1<<num)) { 414 if (eisa_irq_configured & (1<<num)) {
415 printk(KERN_WARNING 415 printk(KERN_WARNING
416 "IRQ %d polarity configured twice (last to level)\n", 416 "IRQ %d polarity configured twice (last to level)\n",
417 num); 417 num);
418 } 418 }
419 eisa_irq_level |= (1<<num); /* set the corresponding bit */ 419 eisa_irq_level |= (1<<num); /* set the corresponding bit */
420 eisa_irq_configured |= (1<<num); /* set the corresponding bit */ 420 eisa_irq_configured |= (1<<num); /* set the corresponding bit */
421 } 421 }
422 422
423 void eisa_make_irq_edge(int num) 423 void eisa_make_irq_edge(int num)
424 { 424 {
425 if (eisa_irq_configured & (1<<num)) { 425 if (eisa_irq_configured & (1<<num)) {
426 printk(KERN_WARNING 426 printk(KERN_WARNING
427 "IRQ %d polarity configured twice (last to edge)\n", 427 "IRQ %d polarity configured twice (last to edge)\n",
428 num); 428 num);
429 } 429 }
430 eisa_irq_level &= ~(1<<num); /* clear the corresponding bit */ 430 eisa_irq_level &= ~(1<<num); /* clear the corresponding bit */
431 eisa_irq_configured |= (1<<num); /* set the corresponding bit */ 431 eisa_irq_configured |= (1<<num); /* set the corresponding bit */
432 } 432 }
433 433
434 static int __init eisa_irq_setup(char *str) 434 static int __init eisa_irq_setup(char *str)
435 { 435 {
436 char *cur = str; 436 char *cur = str;
437 int val; 437 int val;
438 438
439 EISA_DBG("IRQ setup\n"); 439 EISA_DBG("IRQ setup\n");
440 while (cur != NULL) { 440 while (cur != NULL) {
441 char *pe; 441 char *pe;
442 442
443 val = (int) simple_strtoul(cur, &pe, 0); 443 val = (int) simple_strtoul(cur, &pe, 0);
444 if (val > 15 || val < 0) { 444 if (val > 15 || val < 0) {
445 printk(KERN_ERR "eisa: EISA irq value are 0-15\n"); 445 printk(KERN_ERR "eisa: EISA irq value are 0-15\n");
446 continue; 446 continue;
447 } 447 }
448 if (val == 2) { 448 if (val == 2) {
449 val = 9; 449 val = 9;
450 } 450 }
451 eisa_make_irq_edge(val); /* clear the corresponding bit */ 451 eisa_make_irq_edge(val); /* clear the corresponding bit */
452 EISA_DBG("setting IRQ %d to edge-triggered mode\n", val); 452 EISA_DBG("setting IRQ %d to edge-triggered mode\n", val);
453 453
454 next: if ((cur = strchr(cur, ','))) { 454 next: if ((cur = strchr(cur, ','))) {
455 cur++; 455 cur++;
456 } else { 456 } else {
457 break; 457 break;
458 } 458 }
459 } 459 }
460 return 1; 460 return 1;
461 } 461 }
462 462
463 __setup("eisa_irq_edge=", eisa_irq_setup); 463 __setup("eisa_irq_edge=", eisa_irq_setup);
464 464
465 465
drivers/parisc/iosapic.c
1 /* 1 /*
2 ** I/O Sapic Driver - PCI interrupt line support 2 ** I/O Sapic Driver - PCI interrupt line support
3 ** 3 **
4 ** (c) Copyright 1999 Grant Grundler 4 ** (c) Copyright 1999 Grant Grundler
5 ** (c) Copyright 1999 Hewlett-Packard Company 5 ** (c) Copyright 1999 Hewlett-Packard Company
6 ** 6 **
7 ** This program is free software; you can redistribute it and/or modify 7 ** This program is free software; you can redistribute it and/or modify
8 ** it under the terms of the GNU General Public License as published by 8 ** it under the terms of the GNU General Public License as published by
9 ** the Free Software Foundation; either version 2 of the License, or 9 ** the Free Software Foundation; either version 2 of the License, or
10 ** (at your option) any later version. 10 ** (at your option) any later version.
11 ** 11 **
12 ** The I/O sapic driver manages the Interrupt Redirection Table which is 12 ** The I/O sapic driver manages the Interrupt Redirection Table which is
13 ** the control logic to convert PCI line based interrupts into a Message 13 ** the control logic to convert PCI line based interrupts into a Message
14 ** Signaled Interrupt (aka Transaction Based Interrupt, TBI). 14 ** Signaled Interrupt (aka Transaction Based Interrupt, TBI).
15 ** 15 **
16 ** Acronyms 16 ** Acronyms
17 ** -------- 17 ** --------
18 ** HPA Hard Physical Address (aka MMIO address) 18 ** HPA Hard Physical Address (aka MMIO address)
19 ** IRQ Interrupt ReQuest. Implies Line based interrupt. 19 ** IRQ Interrupt ReQuest. Implies Line based interrupt.
20 ** IRT Interrupt Routing Table (provided by PAT firmware) 20 ** IRT Interrupt Routing Table (provided by PAT firmware)
21 ** IRdT Interrupt Redirection Table. IRQ line to TXN ADDR/DATA 21 ** IRdT Interrupt Redirection Table. IRQ line to TXN ADDR/DATA
22 ** table which is implemented in I/O SAPIC. 22 ** table which is implemented in I/O SAPIC.
23 ** ISR Interrupt Service Routine. aka Interrupt handler. 23 ** ISR Interrupt Service Routine. aka Interrupt handler.
24 ** MSI Message Signaled Interrupt. PCI 2.2 functionality. 24 ** MSI Message Signaled Interrupt. PCI 2.2 functionality.
25 ** aka Transaction Based Interrupt (or TBI). 25 ** aka Transaction Based Interrupt (or TBI).
26 ** PA Precision Architecture. HP's RISC architecture. 26 ** PA Precision Architecture. HP's RISC architecture.
27 ** RISC Reduced Instruction Set Computer. 27 ** RISC Reduced Instruction Set Computer.
28 ** 28 **
29 ** 29 **
30 ** What's a Message Signaled Interrupt? 30 ** What's a Message Signaled Interrupt?
31 ** ------------------------------------- 31 ** -------------------------------------
32 ** MSI is a write transaction which targets a processor and is similar 32 ** MSI is a write transaction which targets a processor and is similar
33 ** to a processor write to memory or MMIO. MSIs can be generated by I/O 33 ** to a processor write to memory or MMIO. MSIs can be generated by I/O
34 ** devices as well as processors and require *architecture* to work. 34 ** devices as well as processors and require *architecture* to work.
35 ** 35 **
36 ** PA only supports MSI. So I/O subsystems must either natively generate 36 ** PA only supports MSI. So I/O subsystems must either natively generate
37 ** MSIs (e.g. GSC or HP-PB) or convert line based interrupts into MSIs 37 ** MSIs (e.g. GSC or HP-PB) or convert line based interrupts into MSIs
38 ** (e.g. PCI and EISA). IA64 supports MSIs via a "local SAPIC" which 38 ** (e.g. PCI and EISA). IA64 supports MSIs via a "local SAPIC" which
39 ** acts on behalf of a processor. 39 ** acts on behalf of a processor.
40 ** 40 **
41 ** MSI allows any I/O device to interrupt any processor. This makes 41 ** MSI allows any I/O device to interrupt any processor. This makes
42 ** load balancing of the interrupt processing possible on an SMP platform. 42 ** load balancing of the interrupt processing possible on an SMP platform.
43 ** Interrupts are also ordered WRT DMA data. It's possible on I/O 43 ** Interrupts are also ordered WRT DMA data. It's possible on I/O
44 ** coherent systems to completely eliminate PIO reads from the interrupt 44 ** coherent systems to completely eliminate PIO reads from the interrupt
45 ** path. The device and driver must be designed and implemented to 45 ** path. The device and driver must be designed and implemented to
46 ** guarantee all DMA has been issued (issues about atomicity here) 46 ** guarantee all DMA has been issued (issues about atomicity here)
47 ** before the MSI is issued. I/O status can then safely be read from 47 ** before the MSI is issued. I/O status can then safely be read from
48 ** DMA'd data by the ISR. 48 ** DMA'd data by the ISR.
49 ** 49 **
50 ** 50 **
51 ** PA Firmware 51 ** PA Firmware
52 ** ----------- 52 ** -----------
53 ** PA-RISC platforms have two fundamentally different types of firmware. 53 ** PA-RISC platforms have two fundamentally different types of firmware.
54 ** For PCI devices, "Legacy" PDC initializes the "INTERRUPT_LINE" register 54 ** For PCI devices, "Legacy" PDC initializes the "INTERRUPT_LINE" register
55 ** and BARs similar to a traditional PC BIOS. 55 ** and BARs similar to a traditional PC BIOS.
56 ** The newer "PAT" firmware supports PDC calls which return tables. 56 ** The newer "PAT" firmware supports PDC calls which return tables.
57 ** PAT firmware only initializes PCI Console and Boot interface. 57 ** PAT firmware only initializes PCI Console and Boot interface.
58 ** With these tables, the OS can program all other PCI devices. 58 ** With these tables, the OS can program all other PCI devices.
59 ** 59 **
60 ** One such PAT PDC call returns the "Interrupt Routing Table" (IRT). 60 ** One such PAT PDC call returns the "Interrupt Routing Table" (IRT).
61 ** The IRT maps each PCI slot's INTA-D "output" line to an I/O SAPIC 61 ** The IRT maps each PCI slot's INTA-D "output" line to an I/O SAPIC
62 ** input line. If the IRT is not available, this driver assumes 62 ** input line. If the IRT is not available, this driver assumes
63 ** the INTERRUPT_LINE register has been programmed by firmware. The latter 63 ** the INTERRUPT_LINE register has been programmed by firmware. The latter
64 ** case also means online addition of PCI cards can NOT be supported 64 ** case also means online addition of PCI cards can NOT be supported
65 ** even if HW support is present. 65 ** even if HW support is present.
66 ** 66 **
67 ** All platforms with PAT firmware to date (Oct 1999) use one Interrupt 67 ** All platforms with PAT firmware to date (Oct 1999) use one Interrupt
68 ** Routing Table for the entire platform. 68 ** Routing Table for the entire platform.
69 ** 69 **
70 ** Where's the iosapic? 70 ** Where's the iosapic?
71 ** -------------------- 71 ** --------------------
72 ** I/O sapic is part of the "Core Electronics Complex". And on HP platforms 72 ** I/O sapic is part of the "Core Electronics Complex". And on HP platforms
73 ** it's integrated as part of the PCI bus adapter, "lba". So no bus walk 73 ** it's integrated as part of the PCI bus adapter, "lba". So no bus walk
74 ** will discover I/O Sapic. I/O Sapic driver learns about each device 74 ** will discover I/O Sapic. I/O Sapic driver learns about each device
75 ** when lba driver advertises the presence of the I/O sapic by calling 75 ** when lba driver advertises the presence of the I/O sapic by calling
76 ** iosapic_register(). 76 ** iosapic_register().
77 ** 77 **
78 ** 78 **
79 ** IRQ handling notes 79 ** IRQ handling notes
80 ** ------------------ 80 ** ------------------
81 ** The IO-SAPIC can indicate to the CPU which interrupt was asserted. 81 ** The IO-SAPIC can indicate to the CPU which interrupt was asserted.
82 ** So, unlike the GSC-ASIC and Dino, we allocate one CPU interrupt per 82 ** So, unlike the GSC-ASIC and Dino, we allocate one CPU interrupt per
83 ** IO-SAPIC interrupt and call the device driver's handler directly. 83 ** IO-SAPIC interrupt and call the device driver's handler directly.
84 ** The IO-SAPIC driver hijacks the CPU interrupt handler so it can 84 ** The IO-SAPIC driver hijacks the CPU interrupt handler so it can
85 ** issue the End Of Interrupt command to the IO-SAPIC. 85 ** issue the End Of Interrupt command to the IO-SAPIC.
86 ** 86 **
87 ** Overview of exported iosapic functions 87 ** Overview of exported iosapic functions
88 ** -------------------------------------- 88 ** --------------------------------------
89 ** (caveat: code isn't finished yet - this is just the plan) 89 ** (caveat: code isn't finished yet - this is just the plan)
90 ** 90 **
91 ** iosapic_init: 91 ** iosapic_init:
92 ** o initialize globals (lock, etc) 92 ** o initialize globals (lock, etc)
93 ** o try to read IRT. Presence of IRT determines if this is 93 ** o try to read IRT. Presence of IRT determines if this is
94 ** a PAT platform or not. 94 ** a PAT platform or not.
95 ** 95 **
96 ** iosapic_register(): 96 ** iosapic_register():
97 ** o create iosapic_info instance data structure 97 ** o create iosapic_info instance data structure
98 ** o allocate vector_info array for this iosapic 98 ** o allocate vector_info array for this iosapic
99 ** o initialize vector_info - read corresponding IRdT? 99 ** o initialize vector_info - read corresponding IRdT?
100 ** 100 **
101 ** iosapic_xlate_pin: (only called by fixup_irq for PAT platform) 101 ** iosapic_xlate_pin: (only called by fixup_irq for PAT platform)
102 ** o intr_pin = read cfg (INTERRUPT_PIN); 102 ** o intr_pin = read cfg (INTERRUPT_PIN);
103 ** o if (device under PCI-PCI bridge) 103 ** o if (device under PCI-PCI bridge)
104 ** translate slot/pin 104 ** translate slot/pin
105 ** 105 **
106 ** iosapic_fixup_irq: 106 ** iosapic_fixup_irq:
107 ** o if PAT platform (IRT present) 107 ** o if PAT platform (IRT present)
108 ** intr_pin = iosapic_xlate_pin(isi,pcidev): 108 ** intr_pin = iosapic_xlate_pin(isi,pcidev):
109 ** intr_line = find IRT entry(isi, PCI_SLOT(pcidev), intr_pin) 109 ** intr_line = find IRT entry(isi, PCI_SLOT(pcidev), intr_pin)
110 ** save IRT entry into vector_info later 110 ** save IRT entry into vector_info later
111 ** write cfg INTERRUPT_LINE (with intr_line)? 111 ** write cfg INTERRUPT_LINE (with intr_line)?
112 ** else 112 ** else
113 ** intr_line = pcidev->irq 113 ** intr_line = pcidev->irq
114 ** IRT pointer = NULL 114 ** IRT pointer = NULL
115 ** endif 115 ** endif
116 ** o locate vector_info (needs: isi, intr_line) 116 ** o locate vector_info (needs: isi, intr_line)
117 ** o allocate processor "irq" and get txn_addr/data 117 ** o allocate processor "irq" and get txn_addr/data
118 ** o request_irq(processor_irq, iosapic_interrupt, vector_info,...) 118 ** o request_irq(processor_irq, iosapic_interrupt, vector_info,...)
119 ** 119 **
120 ** iosapic_enable_irq: 120 ** iosapic_enable_irq:
121 ** o clear any pending IRQ on that line 121 ** o clear any pending IRQ on that line
122 ** o enable IRdT - call enable_irq(vector[line]->processor_irq) 122 ** o enable IRdT - call enable_irq(vector[line]->processor_irq)
123 ** o write EOI in case line is already asserted. 123 ** o write EOI in case line is already asserted.
124 ** 124 **
125 ** iosapic_disable_irq: 125 ** iosapic_disable_irq:
126 ** o disable IRdT - call disable_irq(vector[line]->processor_irq) 126 ** o disable IRdT - call disable_irq(vector[line]->processor_irq)
127 */ 127 */
128 128
129 129
130 /* FIXME: determine which include files are really needed */ 130 /* FIXME: determine which include files are really needed */
131 #include <linux/types.h> 131 #include <linux/types.h>
132 #include <linux/kernel.h> 132 #include <linux/kernel.h>
133 #include <linux/spinlock.h> 133 #include <linux/spinlock.h>
134 #include <linux/pci.h> 134 #include <linux/pci.h>
135 #include <linux/init.h> 135 #include <linux/init.h>
136 #include <linux/slab.h> 136 #include <linux/slab.h>
137 #include <linux/interrupt.h> 137 #include <linux/interrupt.h>
138 138
139 #include <asm/byteorder.h> /* get in-line asm for swab */ 139 #include <asm/byteorder.h> /* get in-line asm for swab */
140 #include <asm/pdc.h> 140 #include <asm/pdc.h>
141 #include <asm/pdcpat.h> 141 #include <asm/pdcpat.h>
142 #include <asm/page.h> 142 #include <asm/page.h>
143 #include <asm/system.h> 143 #include <asm/system.h>
144 #include <asm/io.h> /* read/write functions */ 144 #include <asm/io.h> /* read/write functions */
145 #ifdef CONFIG_SUPERIO 145 #ifdef CONFIG_SUPERIO
146 #include <asm/superio.h> 146 #include <asm/superio.h>
147 #endif 147 #endif
148 148
149 #include <asm/iosapic.h> 149 #include <asm/iosapic.h>
150 #include "./iosapic_private.h" 150 #include "./iosapic_private.h"
151 151
152 #define MODULE_NAME "iosapic" 152 #define MODULE_NAME "iosapic"
153 153
154 /* "local" compile flags */ 154 /* "local" compile flags */
155 #undef PCI_BRIDGE_FUNCS 155 #undef PCI_BRIDGE_FUNCS
156 #undef DEBUG_IOSAPIC 156 #undef DEBUG_IOSAPIC
157 #undef DEBUG_IOSAPIC_IRT 157 #undef DEBUG_IOSAPIC_IRT
158 158
159 159
160 #ifdef DEBUG_IOSAPIC 160 #ifdef DEBUG_IOSAPIC
161 #define DBG(x...) printk(x) 161 #define DBG(x...) printk(x)
162 #else /* DEBUG_IOSAPIC */ 162 #else /* DEBUG_IOSAPIC */
163 #define DBG(x...) 163 #define DBG(x...)
164 #endif /* DEBUG_IOSAPIC */ 164 #endif /* DEBUG_IOSAPIC */
165 165
166 #ifdef DEBUG_IOSAPIC_IRT 166 #ifdef DEBUG_IOSAPIC_IRT
167 #define DBG_IRT(x...) printk(x) 167 #define DBG_IRT(x...) printk(x)
168 #else 168 #else
169 #define DBG_IRT(x...) 169 #define DBG_IRT(x...)
170 #endif 170 #endif
171 171
172 #ifdef CONFIG_64BIT 172 #ifdef CONFIG_64BIT
173 #define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa)) 173 #define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
174 #else 174 #else
175 #define COMPARE_IRTE_ADDR(irte, hpa) \ 175 #define COMPARE_IRTE_ADDR(irte, hpa) \
176 ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL)) 176 ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
177 #endif 177 #endif
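
A worked example may help with the 32-bit variant of COMPARE_IRTE_ADDR: the IRT stores a full 64-bit I/O address (presumably the sign-extended F-space address, so its upper word is all ones), while a 32-bit kernel's hpa is only 32 bits wide, so the macro widens the hpa before comparing. A minimal sketch with made-up values:

	/* Illustrative values only -- not taken from a real IRT dump */
	u64 dest_iosapic_addr = 0xfffffffffffa0000ULL;	/* as stored in the IRT */
	u32 hpa = 0xfffa0000;				/* 32-bit kernel HPA */

	/* the 32-bit compare widens hpa with the implied upper word */
	BUG_ON(dest_iosapic_addr != ((u64)hpa | 0xffffffff00000000ULL));
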
178 178
179 #define IOSAPIC_REG_SELECT 0x00 179 #define IOSAPIC_REG_SELECT 0x00
180 #define IOSAPIC_REG_WINDOW 0x10 180 #define IOSAPIC_REG_WINDOW 0x10
181 #define IOSAPIC_REG_EOI 0x40 181 #define IOSAPIC_REG_EOI 0x40
182 182
183 #define IOSAPIC_REG_VERSION 0x1 183 #define IOSAPIC_REG_VERSION 0x1
184 184
185 #define IOSAPIC_IRDT_ENTRY(idx) (0x10+(idx)*2) 185 #define IOSAPIC_IRDT_ENTRY(idx) (0x10+(idx)*2)
186 #define IOSAPIC_IRDT_ENTRY_HI(idx) (0x11+(idx)*2) 186 #define IOSAPIC_IRDT_ENTRY_HI(idx) (0x11+(idx)*2)
187 187
188 static inline unsigned int iosapic_read(void __iomem *iosapic, unsigned int reg) 188 static inline unsigned int iosapic_read(void __iomem *iosapic, unsigned int reg)
189 { 189 {
190 writel(reg, iosapic + IOSAPIC_REG_SELECT); 190 writel(reg, iosapic + IOSAPIC_REG_SELECT);
191 return readl(iosapic + IOSAPIC_REG_WINDOW); 191 return readl(iosapic + IOSAPIC_REG_WINDOW);
192 } 192 }
193 193
194 static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 val) 194 static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 val)
195 { 195 {
196 writel(reg, iosapic + IOSAPIC_REG_SELECT); 196 writel(reg, iosapic + IOSAPIC_REG_SELECT);
197 writel(val, iosapic + IOSAPIC_REG_WINDOW); 197 writel(val, iosapic + IOSAPIC_REG_WINDOW);
198 } 198 }
199 199
200 #define IOSAPIC_VERSION_MASK 0x000000ff 200 #define IOSAPIC_VERSION_MASK 0x000000ff
201 #define IOSAPIC_VERSION(ver) ((int) (ver & IOSAPIC_VERSION_MASK)) 201 #define IOSAPIC_VERSION(ver) ((int) (ver & IOSAPIC_VERSION_MASK))
202 202
203 #define IOSAPIC_MAX_ENTRY_MASK 0x00ff0000 203 #define IOSAPIC_MAX_ENTRY_MASK 0x00ff0000
204 #define IOSAPIC_MAX_ENTRY_SHIFT 0x10 204 #define IOSAPIC_MAX_ENTRY_SHIFT 0x10
205 #define IOSAPIC_IRDT_MAX_ENTRY(ver) \ 205 #define IOSAPIC_IRDT_MAX_ENTRY(ver) \
206 (int) (((ver) & IOSAPIC_MAX_ENTRY_MASK) >> IOSAPIC_MAX_ENTRY_SHIFT) 206 (int) (((ver) & IOSAPIC_MAX_ENTRY_MASK) >> IOSAPIC_MAX_ENTRY_SHIFT)
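
To make the version macros concrete, here is a worked decode of a hypothetical version word (the value 0x00170011 is invented for illustration):

	u32 ver = iosapic_read(isi->addr, IOSAPIC_REG_VERSION);

	/* with a hypothetical ver == 0x00170011:
	 *   IOSAPIC_VERSION(ver)        == 0x11 (low byte)
	 *   IOSAPIC_IRDT_MAX_ENTRY(ver) == 0x17 (bits 16-23) == 23
	 * iosapic_register() below then sizes isi_vector[] as 23 + 1 == 24.
	 */
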
207 207
208 /* bits in the "low" I/O Sapic IRdT entry */ 208 /* bits in the "low" I/O Sapic IRdT entry */
209 #define IOSAPIC_IRDT_ENABLE 0x10000 209 #define IOSAPIC_IRDT_ENABLE 0x10000
210 #define IOSAPIC_IRDT_PO_LOW 0x02000 210 #define IOSAPIC_IRDT_PO_LOW 0x02000
211 #define IOSAPIC_IRDT_LEVEL_TRIG 0x08000 211 #define IOSAPIC_IRDT_LEVEL_TRIG 0x08000
212 #define IOSAPIC_IRDT_MODE_LPRI 0x00100 212 #define IOSAPIC_IRDT_MODE_LPRI 0x00100
213 213
214 /* bits in the "high" I/O Sapic IRdT entry */ 214 /* bits in the "high" I/O Sapic IRdT entry */
215 #define IOSAPIC_IRDT_ID_EID_SHIFT 0x10 215 #define IOSAPIC_IRDT_ID_EID_SHIFT 0x10
216 216
217 217
218 static DEFINE_SPINLOCK(iosapic_lock); 218 static DEFINE_SPINLOCK(iosapic_lock);
219 219
220 static inline void iosapic_eoi(void __iomem *addr, unsigned int data) 220 static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
221 { 221 {
222 __raw_writel(data, addr); 222 __raw_writel(data, addr);
223 } 223 }
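
Note that iosapic_eoi() uses __raw_writel(), which does no byte-swapping; the conversion is presumably done once up front, when iosapic_fixup_irq() below stores vi->eoi_data = cpu_to_le32(vi->txn_data), so the EOI path can write the pre-swapped value directly:

	/* set up once in iosapic_fixup_irq() ... */
	vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
	vi->eoi_data = cpu_to_le32(vi->txn_data);	/* pre-swapped for __raw_writel() */

	/* ... so the EOI itself needs no conversion */
	iosapic_eoi(vi->eoi_addr, vi->eoi_data);
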
224 224
225 /* 225 /*
226 ** REVISIT: future platforms may have more than one IRT. 226 ** REVISIT: future platforms may have more than one IRT.
227 ** If so, the following fields would form a structure which could 227 ** If so, the following fields would form a structure which could
228 ** then be linked into a list. Names are chosen to make searching 228 ** then be linked into a list. Names are chosen to make searching
229 ** for them easy - not necessarily accurate (eg "cell"). 229 ** for them easy - not necessarily accurate (eg "cell").
230 ** 230 **
231 ** Alternative: iosapic_info could point to the IRT it's in. 231 ** Alternative: iosapic_info could point to the IRT it's in.
232 ** iosapic_register() could search a list of IRTs. 232 ** iosapic_register() could search a list of IRTs.
233 */ 233 */
234 static struct irt_entry *irt_cell; 234 static struct irt_entry *irt_cell;
235 static size_t irt_num_entry; 235 static size_t irt_num_entry;
236 236
237 static struct irt_entry *iosapic_alloc_irt(int num_entries) 237 static struct irt_entry *iosapic_alloc_irt(int num_entries)
238 { 238 {
239 unsigned long a; 239 unsigned long a;
240 240
241 /* The IRT needs to be 8-byte aligned for the PDC call. 241 /* The IRT needs to be 8-byte aligned for the PDC call.
242 * Normally kmalloc would guarantee larger alignment, but 242 * Normally kmalloc would guarantee larger alignment, but
243 * if CONFIG_DEBUG_SLAB is enabled, then we can get only 243 * if CONFIG_DEBUG_SLAB is enabled, then we can get only
244 * 4-byte alignment on 32-bit kernels 244 * 4-byte alignment on 32-bit kernels
245 */ 245 */
246 a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL); 246 a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
247 a = (a + 7UL) & ~7UL; 247 a = (a + 7UL) & ~7UL;
248 return (struct irt_entry *)a; 248 return (struct irt_entry *)a;
249 } 249 }
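
Two properties of the round-up arithmetic above are worth spelling out: the extra 8 bytes guarantee the aligned pointer still lies inside the allocation, and a failed kmalloc() survives the adjustment, since (0 + 7) & ~7 is still 0, so the NULL checks at the call sites keep working:

	/* a == ...c4 (only 4-byte aligned): (...c4 + 7) & ~7UL == ...c8 */
	/* a == 0     (kmalloc failed):      (0    + 7) & ~7UL == 0, still NULL */
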
250 250
251 /** 251 /**
252 * iosapic_load_irt - Fill in the interrupt routing table 252 * iosapic_load_irt - Fill in the interrupt routing table
253 * @cell_num: The cell number of the CPU we're currently executing on 253 * @cell_num: The cell number of the CPU we're currently executing on
254 * @irt: The address to place the new IRT at 254 * @irt: The address to place the new IRT at
255 * @return The number of entries found 255 * @return The number of entries found
256 * 256 *
257 * The "Get PCI INT Routing Table Size" option returns the number of 257 * The "Get PCI INT Routing Table Size" option returns the number of
258 * entries in the PCI interrupt routing table for the cell specified 258 * entries in the PCI interrupt routing table for the cell specified
259 * in the cell_number argument. The cell number must be for a cell 259 * in the cell_number argument. The cell number must be for a cell
260 * within the caller's protection domain. 260 * within the caller's protection domain.
261 * 261 *
262 * The "Get PCI INT Routing Table" option returns, for the cell 262 * The "Get PCI INT Routing Table" option returns, for the cell
263 * specified in the cell_number argument, the PCI interrupt routing 263 * specified in the cell_number argument, the PCI interrupt routing
264 * table in the caller-allocated memory pointed to by mem_addr. 264 * table in the caller-allocated memory pointed to by mem_addr.
265 * We assume the IRT only contains entries for I/O SAPIC and 265 * We assume the IRT only contains entries for I/O SAPIC and
266 * calculate the size based on the size of I/O sapic entries. 266 * calculate the size based on the size of I/O sapic entries.
267 * 267 *
268 * The PCI interrupt routing table entry format is derived from the 268 * The PCI interrupt routing table entry format is derived from the
269 * IA64 SAL Specification 2.4. The PCI interrupt routing table defines 269 * IA64 SAL Specification 2.4. The PCI interrupt routing table defines
270 * the routing of PCI interrupt signals between the PCI device output 270 * the routing of PCI interrupt signals between the PCI device output
271 * "pins" and the IO SAPICs' input "lines" (including core I/O PCI 271 * "pins" and the IO SAPICs' input "lines" (including core I/O PCI
272 * devices). This table does NOT include information for devices/slots 272 * devices). This table does NOT include information for devices/slots
273 * behind PCI to PCI bridges. See PCI to PCI Bridge Architecture Spec. 273 * behind PCI to PCI bridges. See PCI to PCI Bridge Architecture Spec.
274 * for the architected method of routing IRQs behind PPBs. 274 * for the architected method of routing IRQs behind PPBs.
275 */ 275 */
276 276
277 277
278 static int __init 278 static int __init
279 iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt) 279 iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt)
280 { 280 {
281 long status; /* PDC return value status */ 281 long status; /* PDC return value status */
282 struct irt_entry *table; /* start of interrupt routing tbl */ 282 struct irt_entry *table; /* start of interrupt routing tbl */
283 unsigned long num_entries = 0UL; 283 unsigned long num_entries = 0UL;
284 284
285 BUG_ON(!irt); 285 BUG_ON(!irt);
286 286
287 if (is_pdc_pat()) { 287 if (is_pdc_pat()) {
288 /* Use pat pdc routine to get interrupt routing table size */ 288 /* Use pat pdc routine to get interrupt routing table size */
289 DBG("calling get_irt_size (cell %ld)\n", cell_num); 289 DBG("calling get_irt_size (cell %ld)\n", cell_num);
290 status = pdc_pat_get_irt_size(&num_entries, cell_num); 290 status = pdc_pat_get_irt_size(&num_entries, cell_num);
291 DBG("get_irt_size: %ld\n", status); 291 DBG("get_irt_size: %ld\n", status);
292 292
293 BUG_ON(status != PDC_OK); 293 BUG_ON(status != PDC_OK);
294 BUG_ON(num_entries == 0); 294 BUG_ON(num_entries == 0);
295 295
296 /* 296 /*
297 ** allocate memory for interrupt routing table 297 ** allocate memory for interrupt routing table
298 ** This interface isn't really right. We are assuming 298 ** This interface isn't really right. We are assuming
299 ** the contents of the table are exclusively 299 ** the contents of the table are exclusively
300 ** for I/O sapic devices. 300 ** for I/O sapic devices.
301 */ 301 */
302 table = iosapic_alloc_irt(num_entries); 302 table = iosapic_alloc_irt(num_entries);
303 if (table == NULL) { 303 if (table == NULL) {
304 printk(KERN_WARNING MODULE_NAME ": read_irt : can " 304 printk(KERN_WARNING MODULE_NAME ": read_irt : can "
305 "not alloc mem for IRT\n"); 305 "not alloc mem for IRT\n");
306 return 0; 306 return 0;
307 } 307 }
308 308
309 /* get PCI INT routing table */ 309 /* get PCI INT routing table */
310 status = pdc_pat_get_irt(table, cell_num); 310 status = pdc_pat_get_irt(table, cell_num);
311 DBG("pdc_pat_get_irt: %ld\n", status); 311 DBG("pdc_pat_get_irt: %ld\n", status);
312 WARN_ON(status != PDC_OK); 312 WARN_ON(status != PDC_OK);
313 } else { 313 } else {
314 /* 314 /*
315 ** C3000/J5000 (and similar) platforms with Sprockets PDC 315 ** C3000/J5000 (and similar) platforms with Sprockets PDC
316 ** will return exactly one IRT for all iosapics. 316 ** will return exactly one IRT for all iosapics.
317 ** So if we have one, don't need to get it again. 317 ** So if we have one, don't need to get it again.
318 */ 318 */
319 if (irt_cell) 319 if (irt_cell)
320 return 0; 320 return 0;
321 321
322 /* Should be using the Elroy's HPA, but it's ignored anyway */ 322 /* Should be using the Elroy's HPA, but it's ignored anyway */
323 status = pdc_pci_irt_size(&num_entries, 0); 323 status = pdc_pci_irt_size(&num_entries, 0);
324 DBG("pdc_pci_irt_size: %ld\n", status); 324 DBG("pdc_pci_irt_size: %ld\n", status);
325 325
326 if (status != PDC_OK) { 326 if (status != PDC_OK) {
327 /* Not a "legacy" system with I/O SAPIC either */ 327 /* Not a "legacy" system with I/O SAPIC either */
328 return 0; 328 return 0;
329 } 329 }
330 330
331 BUG_ON(num_entries == 0); 331 BUG_ON(num_entries == 0);
332 332
333 table = iosapic_alloc_irt(num_entries); 333 table = iosapic_alloc_irt(num_entries);
334 if (!table) { 334 if (!table) {
335 printk(KERN_WARNING MODULE_NAME ": read_irt : can " 335 printk(KERN_WARNING MODULE_NAME ": read_irt : can "
336 "not alloc mem for IRT\n"); 336 "not alloc mem for IRT\n");
337 return 0; 337 return 0;
338 } 338 }
339 339
340 /* HPA ignored by this call too. */ 340 /* HPA ignored by this call too. */
341 status = pdc_pci_irt(num_entries, 0, table); 341 status = pdc_pci_irt(num_entries, 0, table);
342 BUG_ON(status != PDC_OK); 342 BUG_ON(status != PDC_OK);
343 } 343 }
344 344
345 /* return interrupt table address */ 345 /* return interrupt table address */
346 *irt = table; 346 *irt = table;
347 347
348 #ifdef DEBUG_IOSAPIC_IRT 348 #ifdef DEBUG_IOSAPIC_IRT
349 { 349 {
350 struct irt_entry *p = table; 350 struct irt_entry *p = table;
351 int i; 351 int i;
352 352
353 printk(MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num); 353 printk(MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
354 printk(MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n", 354 printk(MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
355 table, 355 table,
356 num_entries, 356 num_entries,
357 (int) sizeof(struct irt_entry)); 357 (int) sizeof(struct irt_entry));
358 358
359 for (i = 0 ; i < num_entries ; i++, p++) { 359 for (i = 0 ; i < num_entries ; i++, p++) {
360 printk(MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n", 360 printk(MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
361 p->entry_type, p->entry_length, p->interrupt_type, 361 p->entry_type, p->entry_length, p->interrupt_type,
362 p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id, 362 p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id,
363 p->src_seg_id, p->dest_iosapic_intin, 363 p->src_seg_id, p->dest_iosapic_intin,
364 ((u32 *) p)[2], 364 ((u32 *) p)[2],
365 ((u32 *) p)[3] 365 ((u32 *) p)[3]
366 ); 366 );
367 } 367 }
368 } 368 }
369 #endif /* DEBUG_IOSAPIC_IRT */ 369 #endif /* DEBUG_IOSAPIC_IRT */
370 370
371 return num_entries; 371 return num_entries;
372 } 372 }
373 373
374 374
375 375
376 void __init iosapic_init(void) 376 void __init iosapic_init(void)
377 { 377 {
378 unsigned long cell = 0; 378 unsigned long cell = 0;
379 379
380 DBG("iosapic_init()\n"); 380 DBG("iosapic_init()\n");
381 381
382 #ifdef __LP64__ 382 #ifdef __LP64__
383 if (is_pdc_pat()) { 383 if (is_pdc_pat()) {
384 int status; 384 int status;
385 struct pdc_pat_cell_num cell_info; 385 struct pdc_pat_cell_num cell_info;
386 386
387 status = pdc_pat_cell_get_number(&cell_info); 387 status = pdc_pat_cell_get_number(&cell_info);
388 if (status == PDC_OK) { 388 if (status == PDC_OK) {
389 cell = cell_info.cell_num; 389 cell = cell_info.cell_num;
390 } 390 }
391 } 391 }
392 #endif 392 #endif
393 393
394 /* get interrupt routing table for this cell */ 394 /* get interrupt routing table for this cell */
395 irt_num_entry = iosapic_load_irt(cell, &irt_cell); 395 irt_num_entry = iosapic_load_irt(cell, &irt_cell);
396 if (irt_num_entry == 0) 396 if (irt_num_entry == 0)
397 irt_cell = NULL; /* old PDC w/o iosapic */ 397 irt_cell = NULL; /* old PDC w/o iosapic */
398 } 398 }
399 399
400 400
401 /* 401 /*
402 ** Return the IRT entry in case we need to look something else up. 402 ** Return the IRT entry in case we need to look something else up.
403 */ 403 */
404 static struct irt_entry * 404 static struct irt_entry *
405 irt_find_irqline(struct iosapic_info *isi, u8 slot, u8 intr_pin) 405 irt_find_irqline(struct iosapic_info *isi, u8 slot, u8 intr_pin)
406 { 406 {
407 struct irt_entry *i = irt_cell; 407 struct irt_entry *i = irt_cell;
408 int cnt; /* track how many entries we've looked at */ 408 int cnt; /* track how many entries we've looked at */
409 u8 irq_devno = (slot << IRT_DEV_SHIFT) | (intr_pin-1); 409 u8 irq_devno = (slot << IRT_DEV_SHIFT) | (intr_pin-1);
410 410
411 DBG_IRT("irt_find_irqline() SLOT %d pin %d\n", slot, intr_pin); 411 DBG_IRT("irt_find_irqline() SLOT %d pin %d\n", slot, intr_pin);
412 412
413 for (cnt=0; cnt < irt_num_entry; cnt++, i++) { 413 for (cnt=0; cnt < irt_num_entry; cnt++, i++) {
414 414
415 /* 415 /*
416 ** Validate: entry_type, entry_length, interrupt_type 416 ** Validate: entry_type, entry_length, interrupt_type
417 ** 417 **
418 ** The difference between validate and compare is that the former 418 ** The difference between validate and compare is that the former
419 ** should print debug info and is not expected to "fail" 419 ** should print debug info and is not expected to "fail"
420 ** on current platforms. 420 ** on current platforms.
421 */ 421 */
422 if (i->entry_type != IRT_IOSAPIC_TYPE) { 422 if (i->entry_type != IRT_IOSAPIC_TYPE) {
423 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d type %d\n", i, cnt, i->entry_type); 423 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d type %d\n", i, cnt, i->entry_type);
424 continue; 424 continue;
425 } 425 }
426 426
427 if (i->entry_length != IRT_IOSAPIC_LENGTH) { 427 if (i->entry_length != IRT_IOSAPIC_LENGTH) {
428 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d length %d\n", i, cnt, i->entry_length); 428 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d length %d\n", i, cnt, i->entry_length);
429 continue; 429 continue;
430 } 430 }
431 431
432 if (i->interrupt_type != IRT_VECTORED_INTR) { 432 if (i->interrupt_type != IRT_VECTORED_INTR) {
433 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d interrupt_type %d\n", i, cnt, i->interrupt_type); 433 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d interrupt_type %d\n", i, cnt, i->interrupt_type);
434 continue; 434 continue;
435 } 435 }
436 436
437 if (!COMPARE_IRTE_ADDR(i, isi->isi_hpa)) 437 if (!COMPARE_IRTE_ADDR(i, isi->isi_hpa))
438 continue; 438 continue;
439 439
440 if ((i->src_bus_irq_devno & IRT_IRQ_DEVNO_MASK) != irq_devno) 440 if ((i->src_bus_irq_devno & IRT_IRQ_DEVNO_MASK) != irq_devno)
441 continue; 441 continue;
442 442
443 /* 443 /*
444 ** Ignore: src_bus_id and src_seg_id correlate with 444 ** Ignore: src_bus_id and src_seg_id correlate with
445 ** iosapic_info->isi_hpa on HP platforms. 445 ** iosapic_info->isi_hpa on HP platforms.
446 ** If needed, pass in "PFA" (aka config space addr) 446 ** If needed, pass in "PFA" (aka config space addr)
447 ** instead of slot. 447 ** instead of slot.
448 */ 448 */
449 449
450 /* Found it! */ 450 /* Found it! */
451 return i; 451 return i;
452 } 452 }
453 453
454 printk(KERN_WARNING MODULE_NAME ": 0x%lx : no IRT entry for slot %d, pin %d\n", 454 printk(KERN_WARNING MODULE_NAME ": 0x%lx : no IRT entry for slot %d, pin %d\n",
455 isi->isi_hpa, slot, intr_pin); 455 isi->isi_hpa, slot, intr_pin);
456 return NULL; 456 return NULL;
457 } 457 }
458 458
459 459
460 /* 460 /*
461 ** xlate_pin() supports the skewing of IRQ lines done by subsidiary bridges. 461 ** xlate_pin() supports the skewing of IRQ lines done by subsidiary bridges.
462 ** Legacy PDC already does this translation for us and stores it in INTR_LINE. 462 ** Legacy PDC already does this translation for us and stores it in INTR_LINE.
463 ** 463 **
464 ** PAT PDC needs to basically do what legacy PDC does: 464 ** PAT PDC needs to basically do what legacy PDC does:
465 ** o read PIN 465 ** o read PIN
466 ** o adjust PIN in case device is "behind" a PPB 466 ** o adjust PIN in case device is "behind" a PPB
467 ** (eg 4-port 100BT and SCSI/LAN "Combo Card") 467 ** (eg 4-port 100BT and SCSI/LAN "Combo Card")
468 ** o convert slot/pin to I/O SAPIC input line. 468 ** o convert slot/pin to I/O SAPIC input line.
469 ** 469 **
470 ** HP platforms only support: 470 ** HP platforms only support:
471 ** o one level of skewing for any number of PPBs 471 ** o one level of skewing for any number of PPBs
472 ** o PCI-PCI bridges only. 472 ** o PCI-PCI bridges only.
473 */ 473 */
474 static struct irt_entry * 474 static struct irt_entry *
475 iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev) 475 iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev)
476 { 476 {
477 u8 intr_pin, intr_slot; 477 u8 intr_pin, intr_slot;
478 478
479 pci_read_config_byte(pcidev, PCI_INTERRUPT_PIN, &intr_pin); 479 pci_read_config_byte(pcidev, PCI_INTERRUPT_PIN, &intr_pin);
480 480
481 DBG_IRT("iosapic_xlate_pin(%s) SLOT %d pin %d\n", 481 DBG_IRT("iosapic_xlate_pin(%s) SLOT %d pin %d\n",
482 pcidev->slot_name, PCI_SLOT(pcidev->devfn), intr_pin); 482 pcidev->slot_name, PCI_SLOT(pcidev->devfn), intr_pin);
483 483
484 if (intr_pin == 0) { 484 if (intr_pin == 0) {
485 /* The device does NOT support/use IRQ lines. */ 485 /* The device does NOT support/use IRQ lines. */
486 return NULL; 486 return NULL;
487 } 487 }
488 488
489 /* Check if pcidev behind a PPB */ 489 /* Check if pcidev behind a PPB */
490 if (NULL != pcidev->bus->self) { 490 if (NULL != pcidev->bus->self) {
491 /* Convert pcidev INTR_PIN into something we 491 /* Convert pcidev INTR_PIN into something we
492 ** can lookup in the IRT. 492 ** can lookup in the IRT.
493 */ 493 */
494 #ifdef PCI_BRIDGE_FUNCS 494 #ifdef PCI_BRIDGE_FUNCS
495 /* 495 /*
496 ** Proposal #1: 496 ** Proposal #1:
497 ** 497 **
498 ** call implementation specific translation function 498 ** call implementation specific translation function
499 ** This is architecturally "cleaner". HP-UX doesn't 499 ** This is architecturally "cleaner". HP-UX doesn't
500 ** support other secondary bus types (eg. E/ISA) directly. 500 ** support other secondary bus types (eg. E/ISA) directly.
501 ** May be needed for other processor (eg IA64) architectures 501 ** May be needed for other processor (eg IA64) architectures
502 ** or by some ambitious soul who wants to watch TV. 502 ** or by some ambitious soul who wants to watch TV.
503 */ 503 */
504 if (pci_bridge_funcs->xlate_intr_line) { 504 if (pci_bridge_funcs->xlate_intr_line) {
505 intr_pin = pci_bridge_funcs->xlate_intr_line(pcidev); 505 intr_pin = pci_bridge_funcs->xlate_intr_line(pcidev);
506 } 506 }
507 #else /* PCI_BRIDGE_FUNCS */ 507 #else /* PCI_BRIDGE_FUNCS */
508 struct pci_bus *p = pcidev->bus; 508 struct pci_bus *p = pcidev->bus;
509 /* 509 /*
510 ** Proposal #2: 510 ** Proposal #2:
511 ** The "pin" is skewed ((pin + dev - 1) % 4). 511 ** The "pin" is skewed ((pin + dev - 1) % 4).
512 ** 512 **
513 ** This isn't very clean since I/O SAPIC must assume: 513 ** This isn't very clean since I/O SAPIC must assume:
514 ** - all platforms only have PCI busses. 514 ** - all platforms only have PCI busses.
515 ** - only PCI-PCI bridge (eg not PCI-EISA, PCI-PCMCIA) 515 ** - only PCI-PCI bridge (eg not PCI-EISA, PCI-PCMCIA)
516 ** - IRQ routing is only skewed once regardless of 516 ** - IRQ routing is only skewed once regardless of
517 ** the number of PPBs between iosapic and device. 517 ** the number of PPBs between iosapic and device.
518 ** (Bit3 expansion chassis follows this rule) 518 ** (Bit3 expansion chassis follows this rule)
519 ** 519 **
520 ** Advantage is it's really easy to implement. 520 ** Advantage is it's really easy to implement.
521 */ 521 */
522 intr_pin = ((intr_pin-1)+PCI_SLOT(pcidev->devfn)) % 4; 522 intr_pin = ((intr_pin-1)+PCI_SLOT(pcidev->devfn)) % 4;
523 intr_pin++; /* convert back to INTA-D (1-4) */ 523 intr_pin++; /* convert back to INTA-D (1-4) */
524 #endif /* PCI_BRIDGE_FUNCS */ 524 #endif /* PCI_BRIDGE_FUNCS */
525 525
526 /* 526 /*
527 ** Locate the host slot of the PPB nearest the host bus 527 ** Locate the host slot of the PPB nearest the host bus
528 ** adapter. 528 ** adapter.
529 */ 529 */
530 while (NULL != p->parent->self) 530 while (NULL != p->parent->self)
531 p = p->parent; 531 p = p->parent;
532 532
533 intr_slot = PCI_SLOT(p->self->devfn); 533 intr_slot = PCI_SLOT(p->self->devfn);
534 } else { 534 } else {
535 intr_slot = PCI_SLOT(pcidev->devfn); 535 intr_slot = PCI_SLOT(pcidev->devfn);
536 } 536 }
537 DBG_IRT("iosapic_xlate_pin: bus %d slot %d pin %d\n", 537 DBG_IRT("iosapic_xlate_pin: bus %d slot %d pin %d\n",
538 pcidev->bus->secondary, intr_slot, intr_pin); 538 pcidev->bus->secondary, intr_slot, intr_pin);
539 539
540 return irt_find_irqline(isi, intr_slot, intr_pin); 540 return irt_find_irqline(isi, intr_slot, intr_pin);
541 } 541 }
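
A worked example of the Proposal #2 skew (slot and pin values invented): a device in slot 3 behind one PPB asserting INTB starts with intr_pin == 2, is skewed to ((2-1)+3) % 4 == 0, and converts back to 1-based as INTA:

	u8 intr_pin = 2;			/* INTB, 1-based */
	intr_pin = ((intr_pin - 1) + 3) % 4;	/* skew by slot number -> 0 */
	intr_pin++;				/* back to INTA-D range -> 1 == INTA */
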
542 542
543 static void iosapic_rd_irt_entry(struct vector_info *vi , u32 *dp0, u32 *dp1) 543 static void iosapic_rd_irt_entry(struct vector_info *vi , u32 *dp0, u32 *dp1)
544 { 544 {
545 struct iosapic_info *isp = vi->iosapic; 545 struct iosapic_info *isp = vi->iosapic;
546 u8 idx = vi->irqline; 546 u8 idx = vi->irqline;
547 547
548 *dp0 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY(idx)); 548 *dp0 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY(idx));
549 *dp1 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY_HI(idx)); 549 *dp1 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY_HI(idx));
550 } 550 }
551 551
552 552
553 static void iosapic_wr_irt_entry(struct vector_info *vi, u32 dp0, u32 dp1) 553 static void iosapic_wr_irt_entry(struct vector_info *vi, u32 dp0, u32 dp1)
554 { 554 {
555 struct iosapic_info *isp = vi->iosapic; 555 struct iosapic_info *isp = vi->iosapic;
556 556
557 DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %lx 0x%x 0x%x\n", 557 DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %lx 0x%x 0x%x\n",
558 vi->irqline, isp->isi_hpa, dp0, dp1); 558 vi->irqline, isp->isi_hpa, dp0, dp1);
559 559
560 iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY(vi->irqline), dp0); 560 iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY(vi->irqline), dp0);
561 561
562 /* Read the window register to flush the writes down to HW */ 562 /* Read the window register to flush the writes down to HW */
563 dp0 = readl(isp->addr+IOSAPIC_REG_WINDOW); 563 dp0 = readl(isp->addr+IOSAPIC_REG_WINDOW);
564 564
565 iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY_HI(vi->irqline), dp1); 565 iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY_HI(vi->irqline), dp1);
566 566
567 /* Read the window register to flush the writes down to HW */ 567 /* Read the window register to flush the writes down to HW */
568 dp1 = readl(isp->addr+IOSAPIC_REG_WINDOW); 568 dp1 = readl(isp->addr+IOSAPIC_REG_WINDOW);
569 } 569 }
570 570
571 /* 571 /*
572 ** set_irt prepares the data (dp0, dp1) according to the vector_info 572 ** set_irt prepares the data (dp0, dp1) according to the vector_info
573 ** and target cpu (id_eid). dp0/dp1 are then used to program I/O SAPIC 573 ** and target cpu (id_eid). dp0/dp1 are then used to program I/O SAPIC
574 ** IRdT for the given "vector" (aka IRQ line). 574 ** IRdT for the given "vector" (aka IRQ line).
575 */ 575 */
576 static void 576 static void
577 iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1) 577 iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
578 { 578 {
579 u32 mode = 0; 579 u32 mode = 0;
580 struct irt_entry *p = vi->irte; 580 struct irt_entry *p = vi->irte;
581 581
582 if ((p->polarity_trigger & IRT_PO_MASK) == IRT_ACTIVE_LO) 582 if ((p->polarity_trigger & IRT_PO_MASK) == IRT_ACTIVE_LO)
583 mode |= IOSAPIC_IRDT_PO_LOW; 583 mode |= IOSAPIC_IRDT_PO_LOW;
584 584
585 if (((p->polarity_trigger >> IRT_EL_SHIFT) & IRT_EL_MASK) == IRT_LEVEL_TRIG) 585 if (((p->polarity_trigger >> IRT_EL_SHIFT) & IRT_EL_MASK) == IRT_LEVEL_TRIG)
586 mode |= IOSAPIC_IRDT_LEVEL_TRIG; 586 mode |= IOSAPIC_IRDT_LEVEL_TRIG;
587 587
588 /* 588 /*
589 ** IA64 REVISIT 589 ** IA64 REVISIT
590 ** PA doesn't support EXTINT or LPRIO bits. 590 ** PA doesn't support EXTINT or LPRIO bits.
591 */ 591 */
592 592
593 *dp0 = mode | (u32) vi->txn_data; 593 *dp0 = mode | (u32) vi->txn_data;
594 594
595 /* 595 /*
596 ** Extracting id_eid this way isn't particularly clean, 596 ** Extracting id_eid this way isn't particularly clean,
597 ** but the encoding is the same on both PA and IA64 platforms. 597 ** but the encoding is the same on both PA and IA64 platforms.
598 */ 598 */
599 if (is_pdc_pat()) { 599 if (is_pdc_pat()) {
600 /* 600 /*
601 ** PAT PDC just hands it to us "right". 601 ** PAT PDC just hands it to us "right".
602 ** txn_addr comes from cpu_data[x].txn_addr. 602 ** txn_addr comes from cpu_data[x].txn_addr.
603 */ 603 */
604 *dp1 = (u32) (vi->txn_addr); 604 *dp1 = (u32) (vi->txn_addr);
605 } else { 605 } else {
606 /* 606 /*
607 ** eg if base_addr == 0xfffa0000, 607 ** eg if base_addr == 0xfffa0000,
608 ** we want to get 0xa0ff0000. 608 ** we want to get 0xa0ff0000.
609 ** 609 **
610 ** eid 0x0ff00000 -> 0x00ff0000 610 ** eid 0x0ff00000 -> 0x00ff0000
611 ** id 0x000ff000 -> 0xff000000 611 ** id 0x000ff000 -> 0xff000000
612 */ 612 */
613 *dp1 = (((u32)vi->txn_addr & 0x0ff00000) >> 4) | 613 *dp1 = (((u32)vi->txn_addr & 0x0ff00000) >> 4) |
614 (((u32)vi->txn_addr & 0x000ff000) << 12); 614 (((u32)vi->txn_addr & 0x000ff000) << 12);
615 } 615 }
616 DBG_IRT("iosapic_set_irt_data(): 0x%x 0x%x\n", *dp0, *dp1); 616 DBG_IRT("iosapic_set_irt_data(): 0x%x 0x%x\n", *dp0, *dp1);
617 } 617 }
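
The legacy branch's masks and shifts can be checked against the example in the comment; with txn_addr == 0xfffa0000:

	/* eid: 0xfffa0000 & 0x0ff00000 == 0x0ff00000, >> 4  -> 0x00ff0000 */
	/* id:  0xfffa0000 & 0x000ff000 == 0x000a0000, << 12 -> 0xa0000000 */
	/* dp1: 0x00ff0000 | 0xa0000000 == 0xa0ff0000, matching the comment */
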
618 618
619 619
620 static struct vector_info *iosapic_get_vector(unsigned int irq) 620 static struct vector_info *iosapic_get_vector(unsigned int irq)
621 { 621 {
622 return irq_desc[irq].handler_data; 622 return irq_desc[irq].handler_data;
623 } 623 }
624 624
625 static void iosapic_disable_irq(unsigned int irq) 625 static void iosapic_disable_irq(unsigned int irq)
626 { 626 {
627 unsigned long flags; 627 unsigned long flags;
628 struct vector_info *vi = iosapic_get_vector(irq); 628 struct vector_info *vi = iosapic_get_vector(irq);
629 u32 d0, d1; 629 u32 d0, d1;
630 630
631 spin_lock_irqsave(&iosapic_lock, flags); 631 spin_lock_irqsave(&iosapic_lock, flags);
632 iosapic_rd_irt_entry(vi, &d0, &d1); 632 iosapic_rd_irt_entry(vi, &d0, &d1);
633 d0 |= IOSAPIC_IRDT_ENABLE; 633 d0 |= IOSAPIC_IRDT_ENABLE;
634 iosapic_wr_irt_entry(vi, d0, d1); 634 iosapic_wr_irt_entry(vi, d0, d1);
635 spin_unlock_irqrestore(&iosapic_lock, flags); 635 spin_unlock_irqrestore(&iosapic_lock, flags);
636 } 636 }
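
It may look odd that iosapic_disable_irq() sets (rather than clears) IOSAPIC_IRDT_ENABLE. Judging from its use here, and from the fact that iosapic_enable_irq() rebuilds d0 without the bit, the bit behaves as a mask bit in the IRdT entry: set means delivery is suppressed. The macro name, not the logic, is the misleading part.
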
637 637
638 static void iosapic_enable_irq(unsigned int irq) 638 static void iosapic_enable_irq(unsigned int irq)
639 { 639 {
640 struct vector_info *vi = iosapic_get_vector(irq); 640 struct vector_info *vi = iosapic_get_vector(irq);
641 u32 d0, d1; 641 u32 d0, d1;
642 642
643 /* data is initialized by fixup_irq */ 643 /* data is initialized by fixup_irq */
644 WARN_ON(vi->txn_irq == 0); 644 WARN_ON(vi->txn_irq == 0);
645 645
646 iosapic_set_irt_data(vi, &d0, &d1); 646 iosapic_set_irt_data(vi, &d0, &d1);
647 iosapic_wr_irt_entry(vi, d0, d1); 647 iosapic_wr_irt_entry(vi, d0, d1);
648 648
649 #ifdef DEBUG_IOSAPIC_IRT 649 #ifdef DEBUG_IOSAPIC_IRT
650 { 650 {
651 u32 *t = (u32 *) ((ulong) vi->eoi_addr & ~0xffUL); 651 u32 *t = (u32 *) ((ulong) vi->eoi_addr & ~0xffUL);
652 printk("iosapic_enable_irq(): regs %p", vi->eoi_addr); 652 printk("iosapic_enable_irq(): regs %p", vi->eoi_addr);
653 for ( ; t < vi->eoi_addr; t++) 653 for ( ; t < vi->eoi_addr; t++)
654 printk(" %x", readl(t)); 654 printk(" %x", readl(t));
655 printk("\n"); 655 printk("\n");
656 } 656 }
657 657
658 printk("iosapic_enable_irq(): sel "); 658 printk("iosapic_enable_irq(): sel ");
659 { 659 {
660 struct iosapic_info *isp = vi->iosapic; 660 struct iosapic_info *isp = vi->iosapic;
661 661
662 for (d0=0x10; d0<0x1e; d0++) { 662 for (d0=0x10; d0<0x1e; d0++) {
663 d1 = iosapic_read(isp->addr, d0); 663 d1 = iosapic_read(isp->addr, d0);
664 printk(" %x", d1); 664 printk(" %x", d1);
665 } 665 }
666 } 666 }
667 printk("\n"); 667 printk("\n");
668 #endif 668 #endif
669 669
670 /* 670 /*
671 * Issuing the I/O SAPIC an EOI causes an interrupt IFF the IRQ line 671 * Issuing the I/O SAPIC an EOI causes an interrupt IFF the IRQ line
672 * is asserted. An IRQ generally should not be asserted when a driver 672 * is asserted. An IRQ generally should not be asserted when a driver
673 * enables its IRQ, as that can lead to "interesting" race conditions 673 * enables its IRQ, as that can lead to "interesting" race conditions
674 * in the driver initialization sequence. 674 * in the driver initialization sequence.
675 */ 675 */
676 DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq, 676 DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
677 vi->eoi_addr, vi->eoi_data); 677 vi->eoi_addr, vi->eoi_data);
678 iosapic_eoi(vi->eoi_addr, vi->eoi_data); 678 iosapic_eoi(vi->eoi_addr, vi->eoi_data);
679 } 679 }
680 680
681 /* 681 /*
682 * PARISC only supports PCI devices below the I/O SAPIC. 682 * PARISC only supports PCI devices below the I/O SAPIC.
683 * PCI only supports level-triggered interrupts so IRQ lines can be shared; 683 * PCI only supports level-triggered interrupts so IRQ lines can be shared;
684 * ergo the I/O SAPIC must always issue an EOI on parisc. 684 * ergo the I/O SAPIC must always issue an EOI on parisc.
685 * 685 *
686 * i386/ia64 support ISA devices and have to deal with 686 * i386/ia64 support ISA devices and have to deal with
687 * edge-triggered interrupts too. 687 * edge-triggered interrupts too.
688 */ 688 */
689 static void iosapic_end_irq(unsigned int irq) 689 static void iosapic_end_irq(unsigned int irq)
690 { 690 {
691 struct vector_info *vi = iosapic_get_vector(irq); 691 struct vector_info *vi = iosapic_get_vector(irq);
692 DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq, 692 DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
693 vi->eoi_addr, vi->eoi_data); 693 vi->eoi_addr, vi->eoi_data);
694 iosapic_eoi(vi->eoi_addr, vi->eoi_data); 694 iosapic_eoi(vi->eoi_addr, vi->eoi_data);
695 } 695 }
696 696
697 static unsigned int iosapic_startup_irq(unsigned int irq) 697 static unsigned int iosapic_startup_irq(unsigned int irq)
698 { 698 {
699 iosapic_enable_irq(irq); 699 iosapic_enable_irq(irq);
700 return 0; 700 return 0;
701 } 701 }
702 702
703 #ifdef CONFIG_SMP 703 #ifdef CONFIG_SMP
704 static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest) 704 static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest)
705 { 705 {
706 struct vector_info *vi = iosapic_get_vector(irq); 706 struct vector_info *vi = iosapic_get_vector(irq);
707 u32 d0, d1, dummy_d0; 707 u32 d0, d1, dummy_d0;
708 unsigned long flags; 708 unsigned long flags;
709 709
710 if (cpu_check_affinity(irq, &dest)) 710 if (cpu_check_affinity(irq, &dest))
711 return; 711 return;
712 712
713 vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest)); 713 vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest));
714 714
715 spin_lock_irqsave(&iosapic_lock, flags); 715 spin_lock_irqsave(&iosapic_lock, flags);
716 /* d1 contains the destination CPU, so we only want to set that 716 /* d1 contains the destination CPU, so we only want to set that
717 * entry */ 717 * entry */
718 iosapic_rd_irt_entry(vi, &d0, &d1); 718 iosapic_rd_irt_entry(vi, &d0, &d1);
719 iosapic_set_irt_data(vi, &dummy_d0, &d1); 719 iosapic_set_irt_data(vi, &dummy_d0, &d1);
720 iosapic_wr_irt_entry(vi, d0, d1); 720 iosapic_wr_irt_entry(vi, d0, d1);
721 spin_unlock_irqrestore(&iosapic_lock, flags); 721 spin_unlock_irqrestore(&iosapic_lock, flags);
722 } 722 }
723 #endif 723 #endif
724 724
725 static struct hw_interrupt_type iosapic_interrupt_type = { 725 static struct hw_interrupt_type iosapic_interrupt_type = {
726 .typename = "IO-SAPIC-level", 726 .typename = "IO-SAPIC-level",
727 .startup = iosapic_startup_irq, 727 .startup = iosapic_startup_irq,
728 .shutdown = iosapic_disable_irq, 728 .shutdown = iosapic_disable_irq,
729 .enable = iosapic_enable_irq, 729 .enable = iosapic_enable_irq,
730 .disable = iosapic_disable_irq, 730 .disable = iosapic_disable_irq,
731 .ack = no_ack_irq, 731 .ack = no_ack_irq,
732 .end = iosapic_end_irq, 732 .end = iosapic_end_irq,
733 #ifdef CONFIG_SMP 733 #ifdef CONFIG_SMP
734 .set_affinity = iosapic_set_affinity_irq, 734 .set_affinity = iosapic_set_affinity_irq,
735 #endif 735 #endif
736 }; 736 };
737 737
738 int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) 738 int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
739 { 739 {
740 struct iosapic_info *isi = isi_obj; 740 struct iosapic_info *isi = isi_obj;
741 struct irt_entry *irte = NULL; /* only used if PAT PDC */ 741 struct irt_entry *irte = NULL; /* only used if PAT PDC */
742 struct vector_info *vi; 742 struct vector_info *vi;
743 int isi_line; /* line used by device */ 743 int isi_line; /* line used by device */
744 744
745 if (!isi) { 745 if (!isi) {
746 printk(KERN_WARNING MODULE_NAME ": hpa not registered for %s\n", 746 printk(KERN_WARNING MODULE_NAME ": hpa not registered for %s\n",
747 pci_name(pcidev)); 747 pci_name(pcidev));
748 return -1; 748 return -1;
749 } 749 }
750 750
751 #ifdef CONFIG_SUPERIO 751 #ifdef CONFIG_SUPERIO
752 /* 752 /*
753 * HACK ALERT! (non-compliant PCI device support) 753 * HACK ALERT! (non-compliant PCI device support)
754 * 754 *
755 * All SuckyIO interrupts are routed through the PICs on function 1. 755 * All SuckyIO interrupts are routed through the PICs on function 1.
756 * But the SuckyIO OHCI USB controller gets an IRT entry anyway because 756 * But the SuckyIO OHCI USB controller gets an IRT entry anyway because
757 * it advertises INT D for INT_PIN. Use that IRT entry to get the 757 * it advertises INT D for INT_PIN. Use that IRT entry to get the
758 * SuckyIO interrupt routing for PICs on function 1 (*BLEECCHH*). 758 * SuckyIO interrupt routing for PICs on function 1 (*BLEECCHH*).
759 */ 759 */
760 if (is_superio_device(pcidev)) { 760 if (is_superio_device(pcidev)) {
761 /* We must call superio_fixup_irq() to register the pdev */ 761 /* We must call superio_fixup_irq() to register the pdev */
762 pcidev->irq = superio_fixup_irq(pcidev); 762 pcidev->irq = superio_fixup_irq(pcidev);
763 763
764 /* Don't return if need to program the IOSAPIC's IRT... */ 764 /* Don't return if need to program the IOSAPIC's IRT... */
765 if (PCI_FUNC(pcidev->devfn) != SUPERIO_USB_FN) 765 if (PCI_FUNC(pcidev->devfn) != SUPERIO_USB_FN)
766 return pcidev->irq; 766 return pcidev->irq;
767 } 767 }
768 #endif /* CONFIG_SUPERIO */ 768 #endif /* CONFIG_SUPERIO */
769 769
770 /* lookup IRT entry for isi/slot/pin set */ 770 /* lookup IRT entry for isi/slot/pin set */
771 irte = iosapic_xlate_pin(isi, pcidev); 771 irte = iosapic_xlate_pin(isi, pcidev);
772 if (!irte) { 772 if (!irte) {
773 printk("iosapic: no IRTE for %s (IRQ not connected?)\n", 773 printk("iosapic: no IRTE for %s (IRQ not connected?)\n",
774 pci_name(pcidev)); 774 pci_name(pcidev));
775 return -1; 775 return -1;
776 } 776 }
777 DBG_IRT("iosapic_fixup_irq(): irte %p %x %x %x %x %x %x %x %x\n", 777 DBG_IRT("iosapic_fixup_irq(): irte %p %x %x %x %x %x %x %x %x\n",
778 irte, 778 irte,
779 irte->entry_type, 779 irte->entry_type,
780 irte->entry_length, 780 irte->entry_length,
781 irte->polarity_trigger, 781 irte->polarity_trigger,
782 irte->src_bus_irq_devno, 782 irte->src_bus_irq_devno,
783 irte->src_bus_id, 783 irte->src_bus_id,
784 irte->src_seg_id, 784 irte->src_seg_id,
785 irte->dest_iosapic_intin, 785 irte->dest_iosapic_intin,
786 (u32) irte->dest_iosapic_addr); 786 (u32) irte->dest_iosapic_addr);
787 isi_line = irte->dest_iosapic_intin; 787 isi_line = irte->dest_iosapic_intin;
788 788
789 /* get vector info for this input line */ 789 /* get vector info for this input line */
790 vi = isi->isi_vector + isi_line; 790 vi = isi->isi_vector + isi_line;
791 DBG_IRT("iosapic_fixup_irq: line %d vi 0x%p\n", isi_line, vi); 791 DBG_IRT("iosapic_fixup_irq: line %d vi 0x%p\n", isi_line, vi);
792 792
793 /* If this IRQ line has already been setup, skip it */ 793 /* If this IRQ line has already been setup, skip it */
794 if (vi->irte) 794 if (vi->irte)
795 goto out; 795 goto out;
796 796
797 vi->irte = irte; 797 vi->irte = irte;
798 798
799 /* 799 /*
800 * Allocate processor IRQ 800 * Allocate processor IRQ
801 * 801 *
802 * XXX/FIXME The txn_alloc_irq() code and related code should be 802 * XXX/FIXME The txn_alloc_irq() code and related code should be
803 * moved to enable_irq(). That way we only allocate processor IRQ 803 * moved to enable_irq(). That way we only allocate processor IRQ
804 * bits for devices that actually have drivers claiming them. 804 * bits for devices that actually have drivers claiming them.
805 * Right now we assign an IRQ to every PCI device present, 805 * Right now we assign an IRQ to every PCI device present,
806 * regardless of whether it's used or not. 806 * regardless of whether it's used or not.
807 */ 807 */
808 vi->txn_irq = txn_alloc_irq(8); 808 vi->txn_irq = txn_alloc_irq(8);
809 809
810 if (vi->txn_irq < 0) 810 if (vi->txn_irq < 0)
811 panic("I/O sapic: couldn't get TXN IRQ\n"); 811 panic("I/O sapic: couldn't get TXN IRQ\n");
812 812
813 /* enable_irq() will use txn_* to program IRdT */ 813 /* enable_irq() will use txn_* to program IRdT */
814 vi->txn_addr = txn_alloc_addr(vi->txn_irq); 814 vi->txn_addr = txn_alloc_addr(vi->txn_irq);
815 vi->txn_data = txn_alloc_data(vi->txn_irq); 815 vi->txn_data = txn_alloc_data(vi->txn_irq);
816 816
817 vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; 817 vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
818 vi->eoi_data = cpu_to_le32(vi->txn_data); 818 vi->eoi_data = cpu_to_le32(vi->txn_data);
819 819
820 cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); 820 cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi);
821 821
822 out: 822 out:
823 pcidev->irq = vi->txn_irq; 823 pcidev->irq = vi->txn_irq;
824 824
825 DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n", 825 DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n",
826 PCI_SLOT(pcidev->devfn), PCI_FUNC(pcidev->devfn), 826 PCI_SLOT(pcidev->devfn), PCI_FUNC(pcidev->devfn),
827 pcidev->vendor, pcidev->device, isi_line, pcidev->irq); 827 pcidev->vendor, pcidev->device, isi_line, pcidev->irq);
828 828
829 return pcidev->irq; 829 return pcidev->irq;
830 } 830 }
831 831
832 832
833 /* 833 /*
834 ** squirrel away the I/O Sapic Version 834 ** squirrel away the I/O Sapic Version
835 */ 835 */
836 static unsigned int 836 static unsigned int
837 iosapic_rd_version(struct iosapic_info *isi) 837 iosapic_rd_version(struct iosapic_info *isi)
838 { 838 {
839 return iosapic_read(isi->addr, IOSAPIC_REG_VERSION); 839 return iosapic_read(isi->addr, IOSAPIC_REG_VERSION);
840 } 840 }
841 841
842 842
843 /* 843 /*
844 ** iosapic_register() is called by "drivers" with an integrated I/O SAPIC. 844 ** iosapic_register() is called by "drivers" with an integrated I/O SAPIC.
845 ** Caller must be certain they have an I/O SAPIC and know its MMIO address. 845 ** Caller must be certain they have an I/O SAPIC and know its MMIO address.
846 ** 846 **
847 ** o allocate iosapic_info and add it to the list 847 ** o allocate iosapic_info and add it to the list
848 ** o read iosapic version and squirrel that away 848 ** o read iosapic version and squirrel that away
849 ** o read size of IRdT. 849 ** o read size of IRdT.
850 ** o allocate and initialize isi_vector[] 850 ** o allocate and initialize isi_vector[]
851 ** o allocate irq region 851 ** o allocate irq region
852 */ 852 */
853 void *iosapic_register(unsigned long hpa) 853 void *iosapic_register(unsigned long hpa)
854 { 854 {
855 struct iosapic_info *isi = NULL; 855 struct iosapic_info *isi = NULL;
856 struct irt_entry *irte = irt_cell; 856 struct irt_entry *irte = irt_cell;
857 struct vector_info *vip; 857 struct vector_info *vip;
858 int cnt; /* track how many entries we've looked at */ 858 int cnt; /* track how many entries we've looked at */
859 859
860 /* 860 /*
861 * Astro-based platforms can only support PCI OLARD if they implement 861 * Astro-based platforms can only support PCI OLARD if they implement
862 * PAT PDC. Legacy PDC omits LBAs with no PCI devices from the IRT. 862 * PAT PDC. Legacy PDC omits LBAs with no PCI devices from the IRT.
863 * Search the IRT and ignore iosapics which aren't in it. 863 * Search the IRT and ignore iosapics which aren't in it.
864 */ 864 */
865 for (cnt=0; cnt < irt_num_entry; cnt++, irte++) { 865 for (cnt=0; cnt < irt_num_entry; cnt++, irte++) {
866 WARN_ON(IRT_IOSAPIC_TYPE != irte->entry_type); 866 WARN_ON(IRT_IOSAPIC_TYPE != irte->entry_type);
867 if (COMPARE_IRTE_ADDR(irte, hpa)) 867 if (COMPARE_IRTE_ADDR(irte, hpa))
868 break; 868 break;
869 } 869 }
870 870
871 if (cnt >= irt_num_entry) { 871 if (cnt >= irt_num_entry) {
872 DBG("iosapic_register() ignoring 0x%lx (NOT FOUND)\n", hpa); 872 DBG("iosapic_register() ignoring 0x%lx (NOT FOUND)\n", hpa);
873 return NULL; 873 return NULL;
874 } 874 }
875 875
876 isi = (struct iosapic_info *)kzalloc(sizeof(struct iosapic_info), GFP_KERNEL); 876 isi = (struct iosapic_info *)kzalloc(sizeof(struct iosapic_info), GFP_KERNEL);
877 if (!isi) { 877 if (!isi) {
878 BUG(); 878 BUG();
879 return NULL; 879 return NULL;
880 } 880 }
881 881
882 isi->addr = ioremap(hpa, 4096); 882 isi->addr = ioremap_nocache(hpa, 4096);
883 isi->isi_hpa = hpa; 883 isi->isi_hpa = hpa;
884 isi->isi_version = iosapic_rd_version(isi); 884 isi->isi_version = iosapic_rd_version(isi);
885 isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1; 885 isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
886 886
887 vip = isi->isi_vector = (struct vector_info *) 887 vip = isi->isi_vector = (struct vector_info *)
888 kzalloc(sizeof(struct vector_info) * isi->isi_num_vectors, GFP_KERNEL); 888 kzalloc(sizeof(struct vector_info) * isi->isi_num_vectors, GFP_KERNEL);
889 if (vip == NULL) { 889 if (vip == NULL) {
890 kfree(isi); 890 kfree(isi);
891 return NULL; 891 return NULL;
892 } 892 }
893 893
894 for (cnt=0; cnt < isi->isi_num_vectors; cnt++, vip++) { 894 for (cnt=0; cnt < isi->isi_num_vectors; cnt++, vip++) {
895 vip->irqline = (unsigned char) cnt; 895 vip->irqline = (unsigned char) cnt;
896 vip->iosapic = isi; 896 vip->iosapic = isi;
897 } 897 }
898 return isi; 898 return isi;
899 } 899 }
900 900
901 901
902 #ifdef DEBUG_IOSAPIC 902 #ifdef DEBUG_IOSAPIC
903 903
904 static void 904 static void
905 iosapic_prt_irt(void *irt, long num_entry) 905 iosapic_prt_irt(void *irt, long num_entry)
906 { 906 {
907 unsigned int i, *irp = (unsigned int *) irt; 907 unsigned int i, *irp = (unsigned int *) irt;
908 908
909 909
910 printk(KERN_DEBUG MODULE_NAME ": Interrupt Routing Table (%lx entries)\n", num_entry); 910 printk(KERN_DEBUG MODULE_NAME ": Interrupt Routing Table (%lx entries)\n", num_entry);
911 911
912 for (i=0; i<num_entry; i++, irp += 4) { 912 for (i=0; i<num_entry; i++, irp += 4) {
913 printk(KERN_DEBUG "%p : %2d %.8x %.8x %.8x %.8x\n", 913 printk(KERN_DEBUG "%p : %2d %.8x %.8x %.8x %.8x\n",
914 irp, i, irp[0], irp[1], irp[2], irp[3]); 914 irp, i, irp[0], irp[1], irp[2], irp[3]);
915 } 915 }
916 } 916 }
917 917
918 918
919 static void 919 static void
920 iosapic_prt_vi(struct vector_info *vi) 920 iosapic_prt_vi(struct vector_info *vi)
921 { 921 {
922 printk(KERN_DEBUG MODULE_NAME ": vector_info[%d] is at %p\n", vi->irqline, vi); 922 printk(KERN_DEBUG MODULE_NAME ": vector_info[%d] is at %p\n", vi->irqline, vi);
923 printk(KERN_DEBUG "\t\tstatus: %.4x\n", vi->status); 923 printk(KERN_DEBUG "\t\tstatus: %.4x\n", vi->status);
924 printk(KERN_DEBUG "\t\ttxn_irq: %d\n", vi->txn_irq); 924 printk(KERN_DEBUG "\t\ttxn_irq: %d\n", vi->txn_irq);
925 printk(KERN_DEBUG "\t\ttxn_addr: %lx\n", vi->txn_addr); 925 printk(KERN_DEBUG "\t\ttxn_addr: %lx\n", vi->txn_addr);
926 printk(KERN_DEBUG "\t\ttxn_data: %lx\n", vi->txn_data); 926 printk(KERN_DEBUG "\t\ttxn_data: %lx\n", vi->txn_data);
927 printk(KERN_DEBUG "\t\teoi_addr: %p\n", vi->eoi_addr); 927 printk(KERN_DEBUG "\t\teoi_addr: %p\n", vi->eoi_addr);
928 printk(KERN_DEBUG "\t\teoi_data: %x\n", vi->eoi_data); 928 printk(KERN_DEBUG "\t\teoi_data: %x\n", vi->eoi_data);
929 } 929 }
930 930
931 931
932 static void 932 static void
933 iosapic_prt_isi(struct iosapic_info *isi) 933 iosapic_prt_isi(struct iosapic_info *isi)
934 { 934 {
935 printk(KERN_DEBUG MODULE_NAME ": io_sapic_info at %p\n", isi); 935 printk(KERN_DEBUG MODULE_NAME ": io_sapic_info at %p\n", isi);
936 printk(KERN_DEBUG "\t\tisi_hpa: %lx\n", isi->isi_hpa); 936 printk(KERN_DEBUG "\t\tisi_hpa: %lx\n", isi->isi_hpa);
937 printk(KERN_DEBUG "\t\tisi_status: %x\n", isi->isi_status); 937 printk(KERN_DEBUG "\t\tisi_status: %x\n", isi->isi_status);
938 printk(KERN_DEBUG "\t\tisi_version: %x\n", isi->isi_version); 938 printk(KERN_DEBUG "\t\tisi_version: %x\n", isi->isi_version);
939 printk(KERN_DEBUG "\t\tisi_vector: %p\n", isi->isi_vector); 939 printk(KERN_DEBUG "\t\tisi_vector: %p\n", isi->isi_vector);
940 } 940 }
941 #endif /* DEBUG_IOSAPIC */ 941 #endif /* DEBUG_IOSAPIC */
942 942
drivers/parisc/lba_pci.c
1 /* 1 /*
2 ** 2 **
3 ** PCI Lower Bus Adapter (LBA) manager 3 ** PCI Lower Bus Adapter (LBA) manager
4 ** 4 **
5 ** (c) Copyright 1999,2000 Grant Grundler 5 ** (c) Copyright 1999,2000 Grant Grundler
6 ** (c) Copyright 1999,2000 Hewlett-Packard Company 6 ** (c) Copyright 1999,2000 Hewlett-Packard Company
7 ** 7 **
8 ** This program is free software; you can redistribute it and/or modify 8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by 9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or 10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version. 11 ** (at your option) any later version.
12 ** 12 **
13 ** 13 **
14 ** This module primarily provides access to PCI bus (config/IOport 14 ** This module primarily provides access to PCI bus (config/IOport
15 ** spaces) on platforms with an SBA/LBA chipset. A/B/C/J/L/N-class 15 ** spaces) on platforms with an SBA/LBA chipset. A/B/C/J/L/N-class
16 ** with 4-digit model numbers - eg C3000 (and A400...sigh). 16 ** with 4-digit model numbers - eg C3000 (and A400...sigh).
17 ** 17 **
18 ** LBA driver isn't as simple as the Dino driver because: 18 ** LBA driver isn't as simple as the Dino driver because:
19 ** (a) this chip has substantial bug fixes between revisions 19 ** (a) this chip has substantial bug fixes between revisions
20 ** (Only one Dino bug has a software workaround :^( ) 20 ** (Only one Dino bug has a software workaround :^( )
21 ** (b) has more options which we don't (yet) support (DMA hints, OLARD) 21 ** (b) has more options which we don't (yet) support (DMA hints, OLARD)
22 ** (c) IRQ support lives in the I/O SAPIC driver (not with PCI driver) 22 ** (c) IRQ support lives in the I/O SAPIC driver (not with PCI driver)
23 ** (d) play nicely with both PAT and "Legacy" PA-RISC firmware (PDC). 23 ** (d) play nicely with both PAT and "Legacy" PA-RISC firmware (PDC).
24 ** (dino only deals with "Legacy" PDC) 24 ** (dino only deals with "Legacy" PDC)
25 ** 25 **
26 ** LBA driver passes the I/O SAPIC HPA to the I/O SAPIC driver. 26 ** LBA driver passes the I/O SAPIC HPA to the I/O SAPIC driver.
27 ** (I/O SAPIC is integrated into the LBA chip). 27 ** (I/O SAPIC is integrated into the LBA chip).
28 ** 28 **
29 ** FIXME: Add support to SBA and LBA drivers for DMA hint sets 29 ** FIXME: Add support to SBA and LBA drivers for DMA hint sets
30 ** FIXME: Add support for PCI card hot-plug (OLARD). 30 ** FIXME: Add support for PCI card hot-plug (OLARD).
31 */ 31 */
32 32
33 #include <linux/delay.h> 33 #include <linux/delay.h>
34 #include <linux/types.h> 34 #include <linux/types.h>
35 #include <linux/kernel.h> 35 #include <linux/kernel.h>
36 #include <linux/spinlock.h> 36 #include <linux/spinlock.h>
37 #include <linux/init.h> /* for __init and __devinit */ 37 #include <linux/init.h> /* for __init and __devinit */
38 #include <linux/pci.h> 38 #include <linux/pci.h>
39 #include <linux/ioport.h> 39 #include <linux/ioport.h>
40 #include <linux/slab.h> 40 #include <linux/slab.h>
41 #include <linux/smp_lock.h> 41 #include <linux/smp_lock.h>
42 42
43 #include <asm/byteorder.h> 43 #include <asm/byteorder.h>
44 #include <asm/pdc.h> 44 #include <asm/pdc.h>
45 #include <asm/pdcpat.h> 45 #include <asm/pdcpat.h>
46 #include <asm/page.h> 46 #include <asm/page.h>
47 #include <asm/system.h> 47 #include <asm/system.h>
48 48
49 #include <asm/hardware.h> /* for register_parisc_driver() stuff */ 49 #include <asm/hardware.h> /* for register_parisc_driver() stuff */
50 #include <asm/parisc-device.h> 50 #include <asm/parisc-device.h>
51 #include <asm/iosapic.h> /* for iosapic_register() */ 51 #include <asm/iosapic.h> /* for iosapic_register() */
52 #include <asm/io.h> /* read/write stuff */ 52 #include <asm/io.h> /* read/write stuff */
53 53
54 #undef DEBUG_LBA /* general stuff */ 54 #undef DEBUG_LBA /* general stuff */
55 #undef DEBUG_LBA_PORT /* debug I/O Port access */ 55 #undef DEBUG_LBA_PORT /* debug I/O Port access */
56 #undef DEBUG_LBA_CFG /* debug Config Space Access (ie PCI Bus walk) */ 56 #undef DEBUG_LBA_CFG /* debug Config Space Access (ie PCI Bus walk) */
57 #undef DEBUG_LBA_PAT /* debug PCI Resource Mgt code - PDC PAT only */ 57 #undef DEBUG_LBA_PAT /* debug PCI Resource Mgt code - PDC PAT only */
58 58
59 #undef FBB_SUPPORT /* Fast Back-Back xfers - NOT READY YET */ 59 #undef FBB_SUPPORT /* Fast Back-Back xfers - NOT READY YET */
60 60
61 61
62 #ifdef DEBUG_LBA 62 #ifdef DEBUG_LBA
63 #define DBG(x...) printk(x) 63 #define DBG(x...) printk(x)
64 #else 64 #else
65 #define DBG(x...) 65 #define DBG(x...)
66 #endif 66 #endif
67 67
68 #ifdef DEBUG_LBA_PORT 68 #ifdef DEBUG_LBA_PORT
69 #define DBG_PORT(x...) printk(x) 69 #define DBG_PORT(x...) printk(x)
70 #else 70 #else
71 #define DBG_PORT(x...) 71 #define DBG_PORT(x...)
72 #endif 72 #endif
73 73
74 #ifdef DEBUG_LBA_CFG 74 #ifdef DEBUG_LBA_CFG
75 #define DBG_CFG(x...) printk(x) 75 #define DBG_CFG(x...) printk(x)
76 #else 76 #else
77 #define DBG_CFG(x...) 77 #define DBG_CFG(x...)
78 #endif 78 #endif
79 79
80 #ifdef DEBUG_LBA_PAT 80 #ifdef DEBUG_LBA_PAT
81 #define DBG_PAT(x...) printk(x) 81 #define DBG_PAT(x...) printk(x)
82 #else 82 #else
83 #define DBG_PAT(x...) 83 #define DBG_PAT(x...)
84 #endif 84 #endif
85 85
86 86
87 /* 87 /*
88 ** Config accessor functions only pass in the 8-bit bus number and not 88 ** Config accessor functions only pass in the 8-bit bus number and not
89 ** the 8-bit "PCI Segment" number. Each LBA will be assigned a PCI bus 89 ** the 8-bit "PCI Segment" number. Each LBA will be assigned a PCI bus
90 ** number based on what firmware wrote into the scratch register. 90 ** number based on what firmware wrote into the scratch register.
91 ** 91 **
92 ** The "secondary" bus number is set to this before calling 92 ** The "secondary" bus number is set to this before calling
93 ** pci_register_ops(). If any PPB's are present, the scan will 93 ** pci_register_ops(). If any PPB's are present, the scan will
94 ** discover them and update the "secondary" and "subordinate" 94 ** discover them and update the "secondary" and "subordinate"
95 ** fields in the pci_bus structure. 95 ** fields in the pci_bus structure.
96 ** 96 **
97 ** Changes in the configuration *may* result in a different 97 ** Changes in the configuration *may* result in a different
98 ** bus number for each LBA depending on what firmware does. 98 ** bus number for each LBA depending on what firmware does.
99 */ 99 */
100 100
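A quick sketch of what the comment above implies, using the LBA_FW_SCRATCH offset and READ_REG32 helper defined below. Treating the low byte of the scratch register as the firmware-assigned bus number is an assumption for illustration, not something this diff states:

	/* Illustrative only: recover the bus number firmware stashed away. */
	u8 fw_bus_num = READ_REG32(d->hba.base_addr + LBA_FW_SCRATCH) & 0xff;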
101 #define MODULE_NAME "LBA" 101 #define MODULE_NAME "LBA"
102 102
103 #define LBA_FUNC_ID 0x0000 /* function id */ 103 #define LBA_FUNC_ID 0x0000 /* function id */
104 #define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */ 104 #define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
105 #define LBA_CAPABLE 0x0030 /* capabilities register */ 105 #define LBA_CAPABLE 0x0030 /* capabilities register */
106 106
107 #define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */ 107 #define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */
108 #define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */ 108 #define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */
109 109
110 #define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */ 110 #define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */
111 #define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */ 111 #define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */
112 #define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */ 112 #define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */
113 113
114 #define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */ 114 #define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */
115 #define LBA_ARB_PRI 0x0088 /* firmware sets this. */ 115 #define LBA_ARB_PRI 0x0088 /* firmware sets this. */
116 #define LBA_ARB_MODE 0x0090 /* firmware sets this. */ 116 #define LBA_ARB_MODE 0x0090 /* firmware sets this. */
117 #define LBA_ARB_MTLT 0x0098 /* firmware sets this. */ 117 #define LBA_ARB_MTLT 0x0098 /* firmware sets this. */
118 118
119 #define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */ 119 #define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
120 120
121 #define LBA_STAT_CTL 0x0108 /* Status & Control */ 121 #define LBA_STAT_CTL 0x0108 /* Status & Control */
122 #define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */ 122 #define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
123 #define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */ 123 #define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
124 #define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */ 124 #define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
125 #define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */ 125 #define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
126 126
127 #define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */ 127 #define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
128 #define LBA_LMMIO_MASK 0x0208 128 #define LBA_LMMIO_MASK 0x0208
129 129
130 #define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */ 130 #define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */
131 #define LBA_GMMIO_MASK 0x0218 131 #define LBA_GMMIO_MASK 0x0218
132 132
133 #define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */ 133 #define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */
134 #define LBA_WLMMIO_MASK 0x0228 134 #define LBA_WLMMIO_MASK 0x0228
135 135
136 #define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */ 136 #define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */
137 #define LBA_WGMMIO_MASK 0x0238 137 #define LBA_WGMMIO_MASK 0x0238
138 138
139 #define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */ 139 #define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */
140 #define LBA_IOS_MASK 0x0248 140 #define LBA_IOS_MASK 0x0248
141 141
142 #define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */ 142 #define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */
143 #define LBA_ELMMIO_MASK 0x0258 143 #define LBA_ELMMIO_MASK 0x0258
144 144
145 #define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */ 145 #define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */
146 #define LBA_EIOS_MASK 0x0268 146 #define LBA_EIOS_MASK 0x0268
147 147
148 #define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */ 148 #define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */
149 #define LBA_DMA_CTL 0x0278 /* firmware sets this */ 149 #define LBA_DMA_CTL 0x0278 /* firmware sets this */
150 150
151 #define LBA_IBASE 0x0300 /* SBA DMA support */ 151 #define LBA_IBASE 0x0300 /* SBA DMA support */
152 #define LBA_IMASK 0x0308 152 #define LBA_IMASK 0x0308
153 153
154 /* FIXME: ignore DMA Hint stuff until we can measure performance */ 154 /* FIXME: ignore DMA Hint stuff until we can measure performance */
155 #define LBA_HINT_CFG 0x0310 155 #define LBA_HINT_CFG 0x0310
156 #define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */ 156 #define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
157 157
158 #define LBA_BUS_MODE 0x0620 158 #define LBA_BUS_MODE 0x0620
159 159
160 /* ERROR regs are needed for config cycle kluges */ 160 /* ERROR regs are needed for config cycle kluges */
161 #define LBA_ERROR_CONFIG 0x0680 161 #define LBA_ERROR_CONFIG 0x0680
162 #define LBA_SMART_MODE 0x20 162 #define LBA_SMART_MODE 0x20
163 #define LBA_ERROR_STATUS 0x0688 163 #define LBA_ERROR_STATUS 0x0688
164 #define LBA_ROPE_CTL 0x06A0 164 #define LBA_ROPE_CTL 0x06A0
165 165
166 #define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */ 166 #define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
167 167
168 /* non-postable I/O port space, densely packed */ 168 /* non-postable I/O port space, densely packed */
169 #define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL) 169 #define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
170 static void __iomem *astro_iop_base __read_mostly; 170 static void __iomem *astro_iop_base __read_mostly;
171 171
172 #define ELROY_HVERS 0x782 172 #define ELROY_HVERS 0x782
173 #define MERCURY_HVERS 0x783 173 #define MERCURY_HVERS 0x783
174 #define QUICKSILVER_HVERS 0x784 174 #define QUICKSILVER_HVERS 0x784
175 175
176 static inline int IS_ELROY(struct parisc_device *d) 176 static inline int IS_ELROY(struct parisc_device *d)
177 { 177 {
178 return (d->id.hversion == ELROY_HVERS); 178 return (d->id.hversion == ELROY_HVERS);
179 } 179 }
180 180
181 static inline int IS_MERCURY(struct parisc_device *d) 181 static inline int IS_MERCURY(struct parisc_device *d)
182 { 182 {
183 return (d->id.hversion == MERCURY_HVERS); 183 return (d->id.hversion == MERCURY_HVERS);
184 } 184 }
185 185
186 static inline int IS_QUICKSILVER(struct parisc_device *d) 186 static inline int IS_QUICKSILVER(struct parisc_device *d)
187 { 187 {
188 return (d->id.hversion == QUICKSILVER_HVERS); 188 return (d->id.hversion == QUICKSILVER_HVERS);
189 } 189 }
190 190
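The three predicates above let the probe path pick per-chip behavior. A hedged sketch of that dispatch (the variable names are assumptions; the split mirrors the elroy_cfg_ops/mercury_cfg_ops pair defined later in this file):

	/* Illustrative dispatch on chip type, not the driver's literal probe code. */
	struct pci_ops *cfg_ops;
	if (IS_ELROY(dev))
		cfg_ops = &elroy_cfg_ops;	/* carries rev-specific workarounds */
	else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev))
		cfg_ops = &mercury_cfg_ops;	/* TR4.0 semantics, no workarounds */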
191 191
192 /* 192 /*
193 ** lba_device: Per instance Elroy data structure 193 ** lba_device: Per instance Elroy data structure
194 */ 194 */
195 struct lba_device { 195 struct lba_device {
196 struct pci_hba_data hba; 196 struct pci_hba_data hba;
197 197
198 spinlock_t lba_lock; 198 spinlock_t lba_lock;
199 void *iosapic_obj; 199 void *iosapic_obj;
200 200
201 #ifdef CONFIG_64BIT 201 #ifdef CONFIG_64BIT
202 void __iomem * iop_base; /* PA_VIEW - for IO port accessor funcs */ 202 void __iomem * iop_base; /* PA_VIEW - for IO port accessor funcs */
203 #endif 203 #endif
204 204
205 int flags; /* state/functionality enabled */ 205 int flags; /* state/functionality enabled */
206 int hw_rev; /* HW revision of chip */ 206 int hw_rev; /* HW revision of chip */
207 }; 207 };
208 208
209 209
210 static u32 lba_t32; 210 static u32 lba_t32;
211 211
212 /* lba flags */ 212 /* lba flags */
213 #define LBA_FLAG_SKIP_PROBE 0x10 213 #define LBA_FLAG_SKIP_PROBE 0x10
214 214
215 #define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE) 215 #define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)
216 216
217 217
218 /* Looks nice and keeps the compiler happy */ 218 /* Looks nice and keeps the compiler happy */
219 #define LBA_DEV(d) ((struct lba_device *) (d)) 219 #define LBA_DEV(d) ((struct lba_device *) (d))
220 220
221 221
222 /* 222 /*
223 ** Only allow 8 subsidiary busses per LBA 223 ** Only allow 8 subsidiary busses per LBA
224 ** Problem is the PCI bus numbering is globally shared. 224 ** Problem is the PCI bus numbering is globally shared.
225 */ 225 */
226 #define LBA_MAX_NUM_BUSES 8 226 #define LBA_MAX_NUM_BUSES 8
227 227
228 /************************************ 228 /************************************
229 * LBA register read and write support 229 * LBA register read and write support
230 * 230 *
231 * BE WARNED: register writes are posted. 231 * BE WARNED: register writes are posted.
232 * (ie follow writes which must reach HW with a read) 232 * (ie follow writes which must reach HW with a read)
233 */ 233 */
234 #define READ_U8(addr) __raw_readb(addr) 234 #define READ_U8(addr) __raw_readb(addr)
235 #define READ_U16(addr) __raw_readw(addr) 235 #define READ_U16(addr) __raw_readw(addr)
236 #define READ_U32(addr) __raw_readl(addr) 236 #define READ_U32(addr) __raw_readl(addr)
237 #define WRITE_U8(value, addr) __raw_writeb(value, addr) 237 #define WRITE_U8(value, addr) __raw_writeb(value, addr)
238 #define WRITE_U16(value, addr) __raw_writew(value, addr) 238 #define WRITE_U16(value, addr) __raw_writew(value, addr)
239 #define WRITE_U32(value, addr) __raw_writel(value, addr) 239 #define WRITE_U32(value, addr) __raw_writel(value, addr)
240 240
241 #define READ_REG8(addr) readb(addr) 241 #define READ_REG8(addr) readb(addr)
242 #define READ_REG16(addr) readw(addr) 242 #define READ_REG16(addr) readw(addr)
243 #define READ_REG32(addr) readl(addr) 243 #define READ_REG32(addr) readl(addr)
244 #define READ_REG64(addr) readq(addr) 244 #define READ_REG64(addr) readq(addr)
245 #define WRITE_REG8(value, addr) writeb(value, addr) 245 #define WRITE_REG8(value, addr) writeb(value, addr)
246 #define WRITE_REG16(value, addr) writew(value, addr) 246 #define WRITE_REG16(value, addr) writew(value, addr)
247 #define WRITE_REG32(value, addr) writel(value, addr) 247 #define WRITE_REG32(value, addr) writel(value, addr)
248 248
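Because register writes are posted, the driver's idiom is write-then-read-back whenever a write must reach the chip before the code proceeds. A minimal sketch, assuming a struct lba_device *d in scope:

	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);	/* flush posted write */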
249 249
250 #define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8)) 250 #define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8))
251 #define LBA_CFG_BUS(tok) ((u8) ((tok)>>16)) 251 #define LBA_CFG_BUS(tok) ((u8) ((tok)>>16))
252 #define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f) 252 #define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f)
253 #define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7) 253 #define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)
254 254
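A worked example of the token encoding, assuming the standard devfn packing (device number in bits 7:3, function in bits 2:0):

	/* bus 0x01, device 0x03, function 0x02 => devfn = (3 << 3) | 2 = 0x1a */
	u32 tok = LBA_CFG_TOK(0x01, 0x1a);	/* 0x00011a00 */
	u8 b  = LBA_CFG_BUS(tok);		/* 0x01 */
	u8 dv = LBA_CFG_DEV(tok);		/* 0x03 */
	u8 fn = LBA_CFG_FUNC(tok);		/* 0x02 */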
255 255
256 /* 256 /*
257 ** Extract LBA (Rope) number from HPA 257 ** Extract LBA (Rope) number from HPA
258 ** REVISIT: 16 ropes for Stretch/Ike? 258 ** REVISIT: 16 ropes for Stretch/Ike?
259 */ 259 */
260 #define ROPES_PER_IOC 8 260 #define ROPES_PER_IOC 8
261 #define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1)) 261 #define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
262 262
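For example, if consecutive ropes sit 0x2000 apart in the HPA map (an assumption consistent with the shift by 13 above), hypothetical HPAs decode as:

	int r0 = LBA_NUM(0xfed30000UL);	/* rope 0 */
	int r1 = LBA_NUM(0xfed32000UL);	/* rope 1 */
	int r7 = LBA_NUM(0xfed3e000UL);	/* rope 7 */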
263 263
264 static void 264 static void
265 lba_dump_res(struct resource *r, int d) 265 lba_dump_res(struct resource *r, int d)
266 { 266 {
267 int i; 267 int i;
268 268
269 if (NULL == r) 269 if (NULL == r)
270 return; 270 return;
271 271
272 printk(KERN_DEBUG "(%p)", r->parent); 272 printk(KERN_DEBUG "(%p)", r->parent);
273 for (i = d; i ; --i) printk(" "); 273 for (i = d; i ; --i) printk(" ");
274 printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r, r->start, r->end, r->flags); 274 printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r, r->start, r->end, r->flags);
275 lba_dump_res(r->child, d+2); 275 lba_dump_res(r->child, d+2);
276 lba_dump_res(r->sibling, d); 276 lba_dump_res(r->sibling, d);
277 } 277 }
278 278
279 279
280 /* 280 /*
281 ** LBA rev 2.0, 2.1, 2.2, and 3.0 bus walks require a complex 281 ** LBA rev 2.0, 2.1, 2.2, and 3.0 bus walks require a complex
282 ** workaround for cfg cycles: 282 ** workaround for cfg cycles:
283 ** -- preserve LBA state 283 ** -- preserve LBA state
284 ** -- prevent any DMA from occurring 284 ** -- prevent any DMA from occurring
285 ** -- turn on smart mode 285 ** -- turn on smart mode
286 ** -- probe with config writes before doing config reads 286 ** -- probe with config writes before doing config reads
287 ** -- check ERROR_STATUS 287 ** -- check ERROR_STATUS
288 ** -- clear ERROR_STATUS 288 ** -- clear ERROR_STATUS
289 ** -- restore LBA state 289 ** -- restore LBA state
290 ** 290 **
291 ** The workaround is only used for device discovery. 291 ** The workaround is only used for device discovery.
292 */ 292 */
293 293
294 static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d) 294 static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
295 { 295 {
296 u8 first_bus = d->hba.hba_bus->secondary; 296 u8 first_bus = d->hba.hba_bus->secondary;
297 u8 last_sub_bus = d->hba.hba_bus->subordinate; 297 u8 last_sub_bus = d->hba.hba_bus->subordinate;
298 298
299 if ((bus < first_bus) || 299 if ((bus < first_bus) ||
300 (bus > last_sub_bus) || 300 (bus > last_sub_bus) ||
301 ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) { 301 ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
302 return 0; 302 return 0;
303 } 303 }
304 304
305 return 1; 305 return 1;
306 } 306 }
307 307
308 308
309 309
310 #define LBA_CFG_SETUP(d, tok) { \ 310 #define LBA_CFG_SETUP(d, tok) { \
311 /* Save contents of error config register. */ \ 311 /* Save contents of error config register. */ \
312 error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG); \ 312 error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG); \
313 \ 313 \
314 /* Save contents of status control register. */ \ 314 /* Save contents of status control register. */ \
315 status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); \ 315 status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); \
316 \ 316 \
317 /* For LBA rev 2.0, 2.1, 2.2, and 3.0, we must disable DMA \ 317 /* For LBA rev 2.0, 2.1, 2.2, and 3.0, we must disable DMA \
318 ** arbitration for full bus walks. \ 318 ** arbitration for full bus walks. \
319 */ \ 319 */ \
320 /* Save contents of arb mask register. */ \ 320 /* Save contents of arb mask register. */ \
321 arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK); \ 321 arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK); \
322 \ 322 \
323 /* \ 323 /* \
324 * Turn off all device arbitration bits (i.e. everything \ 324 * Turn off all device arbitration bits (i.e. everything \
325 * except arbitration enable bit). \ 325 * except arbitration enable bit). \
326 */ \ 326 */ \
327 WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK); \ 327 WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK); \
328 \ 328 \
329 /* \ 329 /* \
330 * Set the smart mode bit so that master aborts don't cause \ 330 * Set the smart mode bit so that master aborts don't cause \
331 * LBA to go into PCI fatal mode (required). \ 331 * LBA to go into PCI fatal mode (required). \
332 */ \ 332 */ \
333 WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \ 333 WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \
334 } 334 }
335 335
336 336
337 #define LBA_CFG_PROBE(d, tok) { \ 337 #define LBA_CFG_PROBE(d, tok) { \
338 /* \ 338 /* \
339 * Setup Vendor ID write and read back the address register \ 339 * Setup Vendor ID write and read back the address register \
340 * to make sure that LBA is the bus master. \ 340 * to make sure that LBA is the bus master. \
341 */ \ 341 */ \
342 WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\ 342 WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\
343 /* \ 343 /* \
344 * Read address register to ensure that LBA is the bus master, \ 344 * Read address register to ensure that LBA is the bus master, \
345 * which implies that DMA traffic has stopped when DMA arb is off. \ 345 * which implies that DMA traffic has stopped when DMA arb is off. \
346 */ \ 346 */ \
347 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ 347 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
348 /* \ 348 /* \
349 * Generate a cfg write cycle (will have no effect on \ 349 * Generate a cfg write cycle (will have no effect on \
350 * Vendor ID register since read-only). \ 350 * Vendor ID register since read-only). \
351 */ \ 351 */ \
352 WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA); \ 352 WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA); \
353 /* \ 353 /* \
354 * Make sure write has completed before proceeding further, \ 354 * Make sure write has completed before proceeding further, \
355 * i.e. before setting clear enable. \ 355 * i.e. before setting clear enable. \
356 */ \ 356 */ \
357 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ 357 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
358 } 358 }
359 359
360 360
361 /* 361 /*
362 * HPREVISIT: 362 * HPREVISIT:
363 * -- Can't tell if config cycle got the error. 363 * -- Can't tell if config cycle got the error.
364 * 364 *
365 * OV bit is broken until rev 4.0, so can't use OV bit and 365 * OV bit is broken until rev 4.0, so can't use OV bit and
366 * LBA_ERROR_LOG_ADDR to tell if error belongs to config cycle. 366 * LBA_ERROR_LOG_ADDR to tell if error belongs to config cycle.
367 * 367 *
368 * As of rev 4.0, no longer need the error check. 368 * As of rev 4.0, no longer need the error check.
369 * 369 *
370 * -- Even if we could tell, we still want to return -1 370 * -- Even if we could tell, we still want to return -1
371 * for **ANY** error (not just master abort). 371 * for **ANY** error (not just master abort).
372 * 372 *
373 * -- Only clear non-fatal errors (we don't want to bring 373 * -- Only clear non-fatal errors (we don't want to bring
374 * LBA out of pci-fatal mode). 374 * LBA out of pci-fatal mode).
375 * 375 *
376 * Actually, there is still a race in which 376 * Actually, there is still a race in which
377 * we could be clearing a fatal error. We will 377 * we could be clearing a fatal error. We will
378 * live with this during our initial bus walk 378 * live with this during our initial bus walk
379 * until rev 4.0 (no driver activity during 379 * until rev 4.0 (no driver activity during
380 * initial bus walk). The initial bus walk 380 * initial bus walk). The initial bus walk
381 * has race conditions concerning the use of 381 * has race conditions concerning the use of
382 * smart mode as well. 382 * smart mode as well.
383 */ 383 */
384 384
385 #define LBA_MASTER_ABORT_ERROR 0xc 385 #define LBA_MASTER_ABORT_ERROR 0xc
386 #define LBA_FATAL_ERROR 0x10 386 #define LBA_FATAL_ERROR 0x10
387 387
388 #define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) { \ 388 #define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) { \
389 u32 error_status = 0; \ 389 u32 error_status = 0; \
390 /* \ 390 /* \
391 * Set clear enable (CE) bit. Unset by HW when new \ 391 * Set clear enable (CE) bit. Unset by HW when new \
392 * errors are logged -- LBA HW ERS section 14.3.3). \ 392 * errors are logged -- LBA HW ERS section 14.3.3). \
393 */ \ 393 */ \
394 WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \ 394 WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
395 error_status = READ_REG32(base + LBA_ERROR_STATUS); \ 395 error_status = READ_REG32(base + LBA_ERROR_STATUS); \
396 if ((error_status & 0x1f) != 0) { \ 396 if ((error_status & 0x1f) != 0) { \
397 /* \ 397 /* \
398 * Fail the config read request. \ 398 * Fail the config read request. \
399 */ \ 399 */ \
400 error = 1; \ 400 error = 1; \
401 if ((error_status & LBA_FATAL_ERROR) == 0) { \ 401 if ((error_status & LBA_FATAL_ERROR) == 0) { \
402 /* \ 402 /* \
403 * Clear error status (if fatal bit not set) by setting \ 403 * Clear error status (if fatal bit not set) by setting \
404 * clear error log bit (CL). \ 404 * clear error log bit (CL). \
405 */ \ 405 */ \
406 WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \ 406 WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
407 } \ 407 } \
408 } \ 408 } \
409 } 409 }
410 410
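To make the bit tests above concrete, here is a sketch of how the logged-error field decodes; LBA_MASTER_ABORT_ERROR (0xc) identifies master aborts within those bits. `base` stands for the LBA register base, as in the macro:

	u32 es = READ_REG32(base + LBA_ERROR_STATUS);
	int logged = (es & 0x1f) != 0;		/* any logged error fails the cycle */
	int fatal = (es & LBA_FATAL_ERROR) != 0; /* 0x10: don't clear the log */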
411 #define LBA_CFG_TR4_ADDR_SETUP(d, addr) \ 411 #define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
412 WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); 412 WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);
413 413
414 #define LBA_CFG_ADDR_SETUP(d, addr) { \ 414 #define LBA_CFG_ADDR_SETUP(d, addr) { \
415 WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ 415 WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
416 /* \ 416 /* \
417 * Read address register to ensure that LBA is the bus master, \ 417 * Read address register to ensure that LBA is the bus master, \
418 * which implies that DMA traffic has stopped when DMA arb is off. \ 418 * which implies that DMA traffic has stopped when DMA arb is off. \
419 */ \ 419 */ \
420 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \ 420 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
421 } 421 }
422 422
423 423
424 #define LBA_CFG_RESTORE(d, base) { \ 424 #define LBA_CFG_RESTORE(d, base) { \
425 /* \ 425 /* \
426 * Restore status control register (turn off clear enable). \ 426 * Restore status control register (turn off clear enable). \
427 */ \ 427 */ \
428 WRITE_REG32(status_control, base + LBA_STAT_CTL); \ 428 WRITE_REG32(status_control, base + LBA_STAT_CTL); \
429 /* \ 429 /* \
430 * Restore error config register (turn off smart mode). \ 430 * Restore error config register (turn off smart mode). \
431 */ \ 431 */ \
432 WRITE_REG32(error_config, base + LBA_ERROR_CONFIG); \ 432 WRITE_REG32(error_config, base + LBA_ERROR_CONFIG); \
433 /* \ 433 /* \
434 * Restore arb mask register (reenables DMA arbitration). \ 434 * Restore arb mask register (reenables DMA arbitration). \
435 */ \ 435 */ \
436 WRITE_REG32(arb_mask, base + LBA_ARB_MASK); \ 436 WRITE_REG32(arb_mask, base + LBA_ARB_MASK); \
437 } 437 }
438 438
439 439
440 440
441 static unsigned int 441 static unsigned int
442 lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size) 442 lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
443 { 443 {
444 u32 data = ~0U; 444 u32 data = ~0U;
445 int error = 0; 445 int error = 0;
446 u32 arb_mask = 0; /* used by LBA_CFG_SETUP/RESTORE */ 446 u32 arb_mask = 0; /* used by LBA_CFG_SETUP/RESTORE */
447 u32 error_config = 0; /* used by LBA_CFG_SETUP/RESTORE */ 447 u32 error_config = 0; /* used by LBA_CFG_SETUP/RESTORE */
448 u32 status_control = 0; /* used by LBA_CFG_SETUP/RESTORE */ 448 u32 status_control = 0; /* used by LBA_CFG_SETUP/RESTORE */
449 449
450 LBA_CFG_SETUP(d, tok); 450 LBA_CFG_SETUP(d, tok);
451 LBA_CFG_PROBE(d, tok); 451 LBA_CFG_PROBE(d, tok);
452 LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error); 452 LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
453 if (!error) { 453 if (!error) {
454 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; 454 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
455 455
456 LBA_CFG_ADDR_SETUP(d, tok | reg); 456 LBA_CFG_ADDR_SETUP(d, tok | reg);
457 switch (size) { 457 switch (size) {
458 case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break; 458 case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
459 case 2: data = (u32) READ_REG16(data_reg+ (reg & 2)); break; 459 case 2: data = (u32) READ_REG16(data_reg+ (reg & 2)); break;
460 case 4: data = READ_REG32(data_reg); break; 460 case 4: data = READ_REG32(data_reg); break;
461 } 461 }
462 } 462 }
463 LBA_CFG_RESTORE(d, d->hba.base_addr); 463 LBA_CFG_RESTORE(d, d->hba.base_addr);
464 return(data); 464 return(data);
465 } 465 }
466 466
467 467
468 static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data) 468 static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
469 { 469 {
470 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge)); 470 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
471 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; 471 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
472 u32 tok = LBA_CFG_TOK(local_bus, devfn); 472 u32 tok = LBA_CFG_TOK(local_bus, devfn);
473 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; 473 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
474 474
475 if ((pos > 255) || (devfn > 255)) 475 if ((pos > 255) || (devfn > 255))
476 return -EINVAL; 476 return -EINVAL;
477 477
478 /* FIXME: B2K/C3600 workaround is to always use the old method... */ 478 /* FIXME: B2K/C3600 workaround is to always use the old method... */
479 /* if (!LBA_SKIP_PROBE(d)) */ { 479 /* if (!LBA_SKIP_PROBE(d)) */ {
480 /* original - Generate config cycle on broken elroy 480 /* original - Generate config cycle on broken elroy
481 with risk we will miss PCI bus errors. */ 481 with risk we will miss PCI bus errors. */
482 *data = lba_rd_cfg(d, tok, pos, size); 482 *data = lba_rd_cfg(d, tok, pos, size);
483 DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __FUNCTION__, tok, pos, *data); 483 DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __FUNCTION__, tok, pos, *data);
484 return 0; 484 return 0;
485 } 485 }
486 486
487 if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) { 487 if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) {
488 DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __FUNCTION__, tok, pos); 488 DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __FUNCTION__, tok, pos);
489 /* either don't want to look or know device isn't present. */ 489 /* either don't want to look or know device isn't present. */
490 *data = ~0U; 490 *data = ~0U;
491 return(0); 491 return(0);
492 } 492 }
493 493
494 /* Basic Algorithm 494 /* Basic Algorithm
495 ** Should only get here on fully working LBA rev. 495 ** Should only get here on fully working LBA rev.
496 ** This is how simple the code should have been. 496 ** This is how simple the code should have been.
497 */ 497 */
498 LBA_CFG_ADDR_SETUP(d, tok | pos); 498 LBA_CFG_ADDR_SETUP(d, tok | pos);
499 switch(size) { 499 switch(size) {
500 case 1: *data = READ_REG8 (data_reg + (pos & 3)); break; 500 case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
501 case 2: *data = READ_REG16(data_reg + (pos & 2)); break; 501 case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
502 case 4: *data = READ_REG32(data_reg); break; 502 case 4: *data = READ_REG32(data_reg); break;
503 } 503 }
504 DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data); 504 DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data);
505 return 0; 505 return 0;
506 } 506 }
507 507
508 508
509 static void 509 static void
510 lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size) 510 lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
511 { 511 {
512 int error = 0; 512 int error = 0;
513 u32 arb_mask = 0; 513 u32 arb_mask = 0;
514 u32 error_config = 0; 514 u32 error_config = 0;
515 u32 status_control = 0; 515 u32 status_control = 0;
516 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; 516 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
517 517
518 LBA_CFG_SETUP(d, tok); 518 LBA_CFG_SETUP(d, tok);
519 LBA_CFG_ADDR_SETUP(d, tok | reg); 519 LBA_CFG_ADDR_SETUP(d, tok | reg);
520 switch (size) { 520 switch (size) {
521 case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break; 521 case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
522 case 2: WRITE_REG16(data, data_reg + (reg & 2)); break; 522 case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
523 case 4: WRITE_REG32(data, data_reg); break; 523 case 4: WRITE_REG32(data, data_reg); break;
524 } 524 }
525 LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error); 525 LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
526 LBA_CFG_RESTORE(d, d->hba.base_addr); 526 LBA_CFG_RESTORE(d, d->hba.base_addr);
527 } 527 }
528 528
529 529
530 /* 530 /*
531 * LBA 4.0 config write code implements non-postable semantics 531 * LBA 4.0 config write code implements non-postable semantics
532 * by doing a read of CONFIG ADDR after the write. 532 * by doing a read of CONFIG ADDR after the write.
533 */ 533 */
534 534
535 static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data) 535 static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
536 { 536 {
537 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge)); 537 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
538 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; 538 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
539 u32 tok = LBA_CFG_TOK(local_bus,devfn); 539 u32 tok = LBA_CFG_TOK(local_bus,devfn);
540 540
541 if ((pos > 255) || (devfn > 255)) 541 if ((pos > 255) || (devfn > 255))
542 return -EINVAL; 542 return -EINVAL;
543 543
544 if (!LBA_SKIP_PROBE(d)) { 544 if (!LBA_SKIP_PROBE(d)) {
545 /* Original Workaround */ 545 /* Original Workaround */
546 lba_wr_cfg(d, tok, pos, (u32) data, size); 546 lba_wr_cfg(d, tok, pos, (u32) data, size);
547 DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data); 547 DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data);
548 return 0; 548 return 0;
549 } 549 }
550 550
551 if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) { 551 if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
552 DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __FUNCTION__, tok, pos,data); 552 DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __FUNCTION__, tok, pos,data);
553 return 1; /* New Workaround */ 553 return 1; /* New Workaround */
554 } 554 }
555 555
556 DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data); 556 DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
557 557
558 /* Basic Algorithm */ 558 /* Basic Algorithm */
559 LBA_CFG_ADDR_SETUP(d, tok | pos); 559 LBA_CFG_ADDR_SETUP(d, tok | pos);
560 switch(size) { 560 switch(size) {
561 case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3)); 561 case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
562 break; 562 break;
563 case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2)); 563 case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
564 break; 564 break;
565 case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA); 565 case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
566 break; 566 break;
567 } 567 }
568 /* flush posted write */ 568 /* flush posted write */
569 lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR); 569 lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
570 return 0; 570 return 0;
571 } 571 }
572 572
573 573
574 static struct pci_ops elroy_cfg_ops = { 574 static struct pci_ops elroy_cfg_ops = {
575 .read = elroy_cfg_read, 575 .read = elroy_cfg_read,
576 .write = elroy_cfg_write, 576 .write = elroy_cfg_write,
577 }; 577 };
578 578
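These ops are what ultimately gets handed to the generic PCI core during probe. A hedged sketch using pci_scan_bus_parented(), which exists in kernels of this vintage; the lba_dev expression and bus number are assumptions for illustration:

	/* Illustrative only: attach the accessors to a root-bus scan. */
	struct pci_bus *bus = pci_scan_bus_parented(&dev->dev,
			lba_dev->hba.bus_num.start, &elroy_cfg_ops, NULL);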
579 /* 579 /*
580 * The mercury_cfg_ops are slightly misnamed; they're also used for Elroy 580 * The mercury_cfg_ops are slightly misnamed; they're also used for Elroy
581 ** TR4.0, as no additional bugs were found in this area between Elroy and 581 ** TR4.0, as no additional bugs were found in this area between Elroy and
582 ** Mercury. 582 ** Mercury.
583 */ 583 */
584 584
585 static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data) 585 static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
586 { 586 {
587 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge)); 587 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
588 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; 588 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
589 u32 tok = LBA_CFG_TOK(local_bus, devfn); 589 u32 tok = LBA_CFG_TOK(local_bus, devfn);
590 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; 590 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
591 591
592 if ((pos > 255) || (devfn > 255)) 592 if ((pos > 255) || (devfn > 255))
593 return -EINVAL; 593 return -EINVAL;
594 594
595 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos); 595 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
596 switch(size) { 596 switch(size) {
597 case 1: 597 case 1:
598 *data = READ_REG8(data_reg + (pos & 3)); 598 *data = READ_REG8(data_reg + (pos & 3));
599 break; 599 break;
600 case 2: 600 case 2:
601 *data = READ_REG16(data_reg + (pos & 2)); 601 *data = READ_REG16(data_reg + (pos & 2));
602 break; 602 break;
603 case 4: 603 case 4:
604 *data = READ_REG32(data_reg); 604 *data = READ_REG32(data_reg);
605 break; 605 break;
606 } 606 }
607 607
608 DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data); 608 DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
609 return 0; 609 return 0;
610 } 610 }
611 611
612 /* 612 /*
613 * LBA 4.0 config write code implements non-postable semantics 613 * LBA 4.0 config write code implements non-postable semantics
614 * by doing a read of CONFIG ADDR after the write. 614 * by doing a read of CONFIG ADDR after the write.
615 */ 615 */
616 616
617 static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data) 617 static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
618 { 618 {
619 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge)); 619 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
620 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA; 620 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
621 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary; 621 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
622 u32 tok = LBA_CFG_TOK(local_bus,devfn); 622 u32 tok = LBA_CFG_TOK(local_bus,devfn);
623 623
624 if ((pos > 255) || (devfn > 255)) 624 if ((pos > 255) || (devfn > 255))
625 return -EINVAL; 625 return -EINVAL;
626 626
627 DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __FUNCTION__, tok, pos, data); 627 DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __FUNCTION__, tok, pos, data);
628 628
629 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos); 629 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
630 switch(size) { 630 switch(size) {
631 case 1: 631 case 1:
632 WRITE_REG8 (data, data_reg + (pos & 3)); 632 WRITE_REG8 (data, data_reg + (pos & 3));
633 break; 633 break;
634 case 2: 634 case 2:
635 WRITE_REG16(data, data_reg + (pos & 2)); 635 WRITE_REG16(data, data_reg + (pos & 2));
636 break; 636 break;
637 case 4: 637 case 4:
638 WRITE_REG32(data, data_reg); 638 WRITE_REG32(data, data_reg);
639 break; 639 break;
640 } 640 }
641 641
642 /* flush posted write */ 642 /* flush posted write */
643 lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR); 643 lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
644 return 0; 644 return 0;
645 } 645 }
646 646
647 static struct pci_ops mercury_cfg_ops = { 647 static struct pci_ops mercury_cfg_ops = {
648 .read = mercury_cfg_read, 648 .read = mercury_cfg_read,
649 .write = mercury_cfg_write, 649 .write = mercury_cfg_write,
650 }; 650 };
651 651
652 652
653 static void 653 static void
654 lba_bios_init(void) 654 lba_bios_init(void)
655 { 655 {
656 DBG(MODULE_NAME ": lba_bios_init\n"); 656 DBG(MODULE_NAME ": lba_bios_init\n");
657 } 657 }
658 658
659 659
660 #ifdef CONFIG_64BIT 660 #ifdef CONFIG_64BIT
661 661
662 /* 662 /*
663 ** Determine if a device is already configured. 663 ** Determine if a device is already configured.
664 ** If so, reserve its resources. 664 ** If so, reserve its resources.
665 ** 665 **
666 ** Read PCI cfg command register and see if I/O or MMIO is enabled. 666 ** Read PCI cfg command register and see if I/O or MMIO is enabled.
667 ** PAT has to enable the devices it's using. 667 ** PAT has to enable the devices it's using.
668 ** 668 **
669 ** Note: resources are fixed up before we try to claim them. 669 ** Note: resources are fixed up before we try to claim them.
670 */ 670 */
671 static void 671 static void
672 lba_claim_dev_resources(struct pci_dev *dev) 672 lba_claim_dev_resources(struct pci_dev *dev)
673 { 673 {
674 u16 cmd; 674 u16 cmd;
675 int i, srch_flags; 675 int i, srch_flags;
676 676
677 (void) pci_read_config_word(dev, PCI_COMMAND, &cmd); 677 (void) pci_read_config_word(dev, PCI_COMMAND, &cmd);
678 678
679 srch_flags = (cmd & PCI_COMMAND_IO) ? IORESOURCE_IO : 0; 679 srch_flags = (cmd & PCI_COMMAND_IO) ? IORESOURCE_IO : 0;
680 if (cmd & PCI_COMMAND_MEMORY) 680 if (cmd & PCI_COMMAND_MEMORY)
681 srch_flags |= IORESOURCE_MEM; 681 srch_flags |= IORESOURCE_MEM;
682 682
683 if (!srch_flags) 683 if (!srch_flags)
684 return; 684 return;
685 685
686 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 686 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
687 if (dev->resource[i].flags & srch_flags) { 687 if (dev->resource[i].flags & srch_flags) {
688 pci_claim_resource(dev, i); 688 pci_claim_resource(dev, i);
689 DBG(" claimed %s %d [%lx,%lx]/%lx\n", 689 DBG(" claimed %s %d [%lx,%lx]/%lx\n",
690 pci_name(dev), i, 690 pci_name(dev), i,
691 dev->resource[i].start, 691 dev->resource[i].start,
692 dev->resource[i].end, 692 dev->resource[i].end,
693 dev->resource[i].flags 693 dev->resource[i].flags
694 ); 694 );
695 } 695 }
696 } 696 }
697 } 697 }
698 698
699 699
700 /* 700 /*
701 * truncate_pat_collision: Deal with overlaps or outright collisions 701 * truncate_pat_collision: Deal with overlaps or outright collisions
702 * between PAT PDC reported ranges. 702 * between PAT PDC reported ranges.
703 * 703 *
704 ** Broken PA8800 firmware will report an lmmio range that 704 ** Broken PA8800 firmware will report an lmmio range that
705 ** overlaps the CPU HPA. Just truncate the lmmio range. 705 ** overlaps the CPU HPA. Just truncate the lmmio range.
706 * 706 *
707 * BEWARE: conflicts with this lmmio range may be an 707 * BEWARE: conflicts with this lmmio range may be an
708 * elmmio range which is pointing down another rope. 708 * elmmio range which is pointing down another rope.
709 * 709 *
710 * FIXME: only deals with one collision per range...theoretically we 710 * FIXME: only deals with one collision per range...theoretically we
711 * could have several. Supporting more than one collision will get messy. 711 * could have several. Supporting more than one collision will get messy.
712 */ 712 */
713 static unsigned long 713 static unsigned long
714 truncate_pat_collision(struct resource *root, struct resource *new) 714 truncate_pat_collision(struct resource *root, struct resource *new)
715 { 715 {
716 unsigned long start = new->start; 716 unsigned long start = new->start;
717 unsigned long end = new->end; 717 unsigned long end = new->end;
718 struct resource *tmp = root->child; 718 struct resource *tmp = root->child;
719 719
720 if (end <= start || start < root->start || !tmp) 720 if (end <= start || start < root->start || !tmp)
721 return 0; 721 return 0;
722 722
723 /* find first overlap */ 723 /* find first overlap */
724 while (tmp && tmp->end < start) 724 while (tmp && tmp->end < start)
725 tmp = tmp->sibling; 725 tmp = tmp->sibling;
726 726
727 /* no entries overlap */ 727 /* no entries overlap */
728 if (!tmp) return 0; 728 if (!tmp) return 0;
729 729
730 /* found one that starts at or beyond the end of the new one. 730 /* found one that starts at or beyond the end of the new one.
731 ** No overlap, so nothing to do. 731 ** No overlap, so nothing to do.
732 */ 732 */
733 if (tmp->start >= end) return 0; 733 if (tmp->start >= end) return 0;
734 734
735 if (tmp->start <= start) { 735 if (tmp->start <= start) {
736 /* "front" of new one overlaps */ 736 /* "front" of new one overlaps */
737 new->start = tmp->end + 1; 737 new->start = tmp->end + 1;
738 738
739 if (tmp->end >= end) { 739 if (tmp->end >= end) {
740 /* AACCKK! totally overlaps! drop this range. */ 740 /* AACCKK! totally overlaps! drop this range. */
741 return 1; 741 return 1;
742 } 742 }
743 } 743 }
744 744
745 if (tmp->end < end ) { 745 if (tmp->end < end ) {
746 /* "end" of new one overlaps */ 746 /* "end" of new one overlaps */
747 new->end = tmp->start - 1; 747 new->end = tmp->start - 1;
748 } 748 }
749 749
750 printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] " 750 printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
751 "to [%lx,%lx]\n", 751 "to [%lx,%lx]\n",
752 start, end, 752 start, end,
753 new->start, new->end ); 753 new->start, new->end );
754 754
755 return 0; /* truncation successful */ 755 return 0; /* truncation successful */
756 } 756 }
757 757
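A worked example with hypothetical addresses: a new lmmio range [0xfa000000,0xfbffffff] colliding with an already-registered child [0xfb000000,0xfb7fffff]. Here tmp->start > start and tmp->end < end, so only the tail of the new range is clipped:

	new->end = 0xfaffffff;	/* tmp->start - 1 */
	/* new is now [0xfa000000,0xfaffffff]; returns 0 (truncation OK) */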
758 #else 758 #else
759 #define lba_claim_dev_resources(dev) do { } while (0) 759 #define lba_claim_dev_resources(dev) do { } while (0)
760 #define truncate_pat_collision(r,n) (0) 760 #define truncate_pat_collision(r,n) (0)
761 #endif 761 #endif
762 762
763 /* 763 /*
764 ** The algorithm is generic code. 764 ** The algorithm is generic code.
765 ** But it needs to access local data structures to get the IRQ base. 765 ** But it needs to access local data structures to get the IRQ base.
766 ** Could make this a "pci_fixup_irq(bus, region)" but not sure 766 ** Could make this a "pci_fixup_irq(bus, region)" but not sure
767 ** it's worth it. 767 ** it's worth it.
768 ** 768 **
769 ** Called by do_pci_scan_bus() immediately after each PCI bus is walked. 769 ** Called by do_pci_scan_bus() immediately after each PCI bus is walked.
770 ** Resources aren't allocated until recursive buswalk below HBA is completed. 770 ** Resources aren't allocated until recursive buswalk below HBA is completed.
771 */ 771 */
772 static void 772 static void
773 lba_fixup_bus(struct pci_bus *bus) 773 lba_fixup_bus(struct pci_bus *bus)
774 { 774 {
775 struct list_head *ln; 775 struct list_head *ln;
776 #ifdef FBB_SUPPORT 776 #ifdef FBB_SUPPORT
777 u16 status; 777 u16 status;
778 #endif 778 #endif
779 struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge)); 779 struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));
780 int lba_portbase = HBA_PORT_BASE(ldev->hba.hba_num); 780 int lba_portbase = HBA_PORT_BASE(ldev->hba.hba_num);
781 781
782 DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n", 782 DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
783 bus, bus->secondary, bus->bridge->platform_data); 783 bus, bus->secondary, bus->bridge->platform_data);
784 784
785 /* 785 /*
786 ** Properly set up MMIO resources for this bus. 786 ** Properly set up MMIO resources for this bus.
787 ** pci_alloc_primary_bus() mangles this. 787 ** pci_alloc_primary_bus() mangles this.
788 */ 788 */
789 if (bus->self) { 789 if (bus->self) {
790 /* PCI-PCI Bridge */ 790 /* PCI-PCI Bridge */
791 pci_read_bridge_bases(bus); 791 pci_read_bridge_bases(bus);
792 } else { 792 } else {
793 /* Host-PCI Bridge */ 793 /* Host-PCI Bridge */
794 int err, i; 794 int err, i;
795 795
796 DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n", 796 DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
797 ldev->hba.io_space.name, 797 ldev->hba.io_space.name,
798 ldev->hba.io_space.start, ldev->hba.io_space.end, 798 ldev->hba.io_space.start, ldev->hba.io_space.end,
799 ldev->hba.io_space.flags); 799 ldev->hba.io_space.flags);
800 DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n", 800 DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
801 ldev->hba.lmmio_space.name, 801 ldev->hba.lmmio_space.name,
802 ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end, 802 ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
803 ldev->hba.lmmio_space.flags); 803 ldev->hba.lmmio_space.flags);
804 804
805 err = request_resource(&ioport_resource, &(ldev->hba.io_space)); 805 err = request_resource(&ioport_resource, &(ldev->hba.io_space));
806 if (err < 0) { 806 if (err < 0) {
807 lba_dump_res(&ioport_resource, 2); 807 lba_dump_res(&ioport_resource, 2);
808 BUG(); 808 BUG();
809 } 809 }
810 /* advertise Host bridge resources to PCI bus */ 810 /* advertise Host bridge resources to PCI bus */
811 bus->resource[0] = &(ldev->hba.io_space); 811 bus->resource[0] = &(ldev->hba.io_space);
812 i = 1; 812 i = 1;
813 813
814 if (ldev->hba.elmmio_space.start) { 814 if (ldev->hba.elmmio_space.start) {
815 err = request_resource(&iomem_resource, 815 err = request_resource(&iomem_resource,
816 &(ldev->hba.elmmio_space)); 816 &(ldev->hba.elmmio_space));
817 if (err < 0) { 817 if (err < 0) {
818 818
819 printk("FAILED: lba_fixup_bus() request for " 819 printk("FAILED: lba_fixup_bus() request for "
820 "elmmio_space [%lx/%lx]\n", 820 "elmmio_space [%lx/%lx]\n",
821 ldev->hba.elmmio_space.start, 821 ldev->hba.elmmio_space.start,
822 ldev->hba.elmmio_space.end); 822 ldev->hba.elmmio_space.end);
823 823
824 /* lba_dump_res(&iomem_resource, 2); */ 824 /* lba_dump_res(&iomem_resource, 2); */
825 /* BUG(); */ 825 /* BUG(); */
826 } else 826 } else
827 bus->resource[i++] = &(ldev->hba.elmmio_space); 827 bus->resource[i++] = &(ldev->hba.elmmio_space);
828 } 828 }
829 829
830 830
831 /* Overlaps with elmmio can (and should) fail here. 831 /* Overlaps with elmmio can (and should) fail here.
832 * We will prune (or ignore) the distributed range. 832 * We will prune (or ignore) the distributed range.
833 * 833 *
834 * FIXME: SBA code should register all elmmio ranges first. 834 * FIXME: SBA code should register all elmmio ranges first.
835 * that would take care of elmmio ranges routed 835 * that would take care of elmmio ranges routed
836 * to a different rope (already discovered) from 836 * to a different rope (already discovered) from
837 * getting registered *after* LBA code has already 837 * getting registered *after* LBA code has already
838 ** registered its distributed lmmio range. 838 ** registered its distributed lmmio range.
839 */ 839 */
840 if (truncate_pat_collision(&iomem_resource, 840 if (truncate_pat_collision(&iomem_resource,
841 &(ldev->hba.lmmio_space))) { 841 &(ldev->hba.lmmio_space))) {
842 842
843 printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n", 843 printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
844 ldev->hba.lmmio_space.start, 844 ldev->hba.lmmio_space.start,
845 ldev->hba.lmmio_space.end); 845 ldev->hba.lmmio_space.end);
846 } else { 846 } else {
847 err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space)); 847 err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
848 if (err < 0) { 848 if (err < 0) {
849 printk(KERN_ERR "FAILED: lba_fixup_bus() request for " 849 printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
850 "lmmio_space [%lx/%lx]\n", 850 "lmmio_space [%lx/%lx]\n",
851 ldev->hba.lmmio_space.start, 851 ldev->hba.lmmio_space.start,
852 ldev->hba.lmmio_space.end); 852 ldev->hba.lmmio_space.end);
853 } else 853 } else
854 bus->resource[i++] = &(ldev->hba.lmmio_space); 854 bus->resource[i++] = &(ldev->hba.lmmio_space);
855 } 855 }
856 856
857 #ifdef CONFIG_64BIT 857 #ifdef CONFIG_64BIT
858 /* GMMIO is a distributed range. Every LBA/Rope gets part of it. */ 858 /* GMMIO is a distributed range. Every LBA/Rope gets part of it. */
859 if (ldev->hba.gmmio_space.flags) { 859 if (ldev->hba.gmmio_space.flags) {
860 err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space)); 860 err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
861 if (err < 0) { 861 if (err < 0) {
862 printk("FAILED: lba_fixup_bus() request for " 862 printk("FAILED: lba_fixup_bus() request for "
863 "gmmio_space [%lx/%lx]\n", 863 "gmmio_space [%lx/%lx]\n",
864 ldev->hba.gmmio_space.start, 864 ldev->hba.gmmio_space.start,
865 ldev->hba.gmmio_space.end); 865 ldev->hba.gmmio_space.end);
866 lba_dump_res(&iomem_resource, 2); 866 lba_dump_res(&iomem_resource, 2);
867 BUG(); 867 BUG();
868 } 868 }
869 bus->resource[i++] = &(ldev->hba.gmmio_space); 869 bus->resource[i++] = &(ldev->hba.gmmio_space);
870 } 870 }
871 #endif 871 #endif
872 872
873 } 873 }
874 874
875 list_for_each(ln, &bus->devices) { 875 list_for_each(ln, &bus->devices) {
876 int i; 876 int i;
877 struct pci_dev *dev = pci_dev_b(ln); 877 struct pci_dev *dev = pci_dev_b(ln);
878 878
879 DBG("lba_fixup_bus() %s\n", pci_name(dev)); 879 DBG("lba_fixup_bus() %s\n", pci_name(dev));
880 880
881 /* Virtualize Device/Bridge Resources. */ 881 /* Virtualize Device/Bridge Resources. */
882 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { 882 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
883 struct resource *res = &dev->resource[i]; 883 struct resource *res = &dev->resource[i];
884 884
885 /* If resource not allocated - skip it */ 885 /* If resource not allocated - skip it */
886 if (!res->start) 886 if (!res->start)
887 continue; 887 continue;
888 888
889 if (res->flags & IORESOURCE_IO) { 889 if (res->flags & IORESOURCE_IO) {
890 DBG("lba_fixup_bus() I/O Ports [%lx/%lx] -> ", 890 DBG("lba_fixup_bus() I/O Ports [%lx/%lx] -> ",
891 res->start, res->end); 891 res->start, res->end);
892 res->start |= lba_portbase; 892 res->start |= lba_portbase;
893 res->end |= lba_portbase; 893 res->end |= lba_portbase;
894 DBG("[%lx/%lx]\n", res->start, res->end); 894 DBG("[%lx/%lx]\n", res->start, res->end);
895 } else if (res->flags & IORESOURCE_MEM) { 895 } else if (res->flags & IORESOURCE_MEM) {
896 /* 896 /*
897 ** Convert PCI (IO_VIEW) addresses to 897 ** Convert PCI (IO_VIEW) addresses to
898 ** processor (PA_VIEW) addresses 898 ** processor (PA_VIEW) addresses
899 */ 899 */
900 DBG("lba_fixup_bus() MMIO [%lx/%lx] -> ", 900 DBG("lba_fixup_bus() MMIO [%lx/%lx] -> ",
901 res->start, res->end); 901 res->start, res->end);
902 res->start = PCI_HOST_ADDR(HBA_DATA(ldev), res->start); 902 res->start = PCI_HOST_ADDR(HBA_DATA(ldev), res->start);
903 res->end = PCI_HOST_ADDR(HBA_DATA(ldev), res->end); 903 res->end = PCI_HOST_ADDR(HBA_DATA(ldev), res->end);
904 DBG("[%lx/%lx]\n", res->start, res->end); 904 DBG("[%lx/%lx]\n", res->start, res->end);
905 } else { 905 } else {
906 DBG("lba_fixup_bus() WTF? 0x%lx [%lx/%lx] XXX", 906 DBG("lba_fixup_bus() WTF? 0x%lx [%lx/%lx] XXX",
907 res->flags, res->start, res->end); 907 res->flags, res->start, res->end);
908 } 908 }
909 } 909 }
910 910
911 #ifdef FBB_SUPPORT 911 #ifdef FBB_SUPPORT
912 /* 912 /*
913 ** If one device does not support FBB transfers, 913 ** If one device does not support FBB transfers,
914 ** no one on the bus can be allowed to use them. 914 ** no one on the bus can be allowed to use them.
915 */ 915 */
916 (void) pci_read_config_word(dev, PCI_STATUS, &status); 916 (void) pci_read_config_word(dev, PCI_STATUS, &status);
917 bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK); 917 bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
918 #endif 918 #endif
919 919
920 if (is_pdc_pat()) { 920 if (is_pdc_pat()) {
921 /* Claim resources for PDC's devices */ 921 /* Claim resources for PDC's devices */
922 lba_claim_dev_resources(dev); 922 lba_claim_dev_resources(dev);
923 } 923 }
924 924
925 /* 925 /*
926 ** P2PB's have no IRQs. ignore them. 926 ** P2PB's have no IRQs. ignore them.
927 */ 927 */
928 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) 928 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
929 continue; 929 continue;
930 930
931 /* Adjust INTERRUPT_LINE for this dev */ 931 /* Adjust INTERRUPT_LINE for this dev */
932 iosapic_fixup_irq(ldev->iosapic_obj, dev); 932 iosapic_fixup_irq(ldev->iosapic_obj, dev);
933 } 933 }
934 934
935 #ifdef FBB_SUPPORT 935 #ifdef FBB_SUPPORT
936 /* FIXME/REVISIT - finish figuring out how to set FBB on both 936 /* FIXME/REVISIT - finish figuring out how to set FBB on both
937 ** the PPB and the LBA; pci_setup_bridge() clobbers PCI_BRIDGE_CONTROL. 937 ** the PPB and the LBA; pci_setup_bridge() clobbers PCI_BRIDGE_CONTROL.
938 ** Can't fixup here anyway....garr... 938 ** Can't fixup here anyway....garr...
939 */ 939 */
940 if (fbb_enable) { 940 if (fbb_enable) {
941 if (bus->self) { 941 if (bus->self) {
942 u8 control; 942 u8 control;
943 /* enable on PPB */ 943 /* enable on PPB */
944 (void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control); 944 (void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
945 (void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK); 945 (void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);
946 946
947 } else { 947 } else {
948 /* enable on LBA */ 948 /* enable on LBA */
949 } 949 }
950 fbb_enable = PCI_COMMAND_FAST_BACK; 950 fbb_enable = PCI_COMMAND_FAST_BACK;
951 } 951 }
952 952
953 /* Lastly enable FBB/PERR/SERR on all devices too */ 953 /* Lastly enable FBB/PERR/SERR on all devices too */
954 list_for_each(ln, &bus->devices) { 954 list_for_each(ln, &bus->devices) {
955 (void) pci_read_config_word(pci_dev_b(ln), PCI_COMMAND, &status); 955 (void) pci_read_config_word(pci_dev_b(ln), PCI_COMMAND, &status);
956 status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable; 956 status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
957 (void) pci_write_config_word(pci_dev_b(ln), PCI_COMMAND, status); 957 (void) pci_write_config_word(pci_dev_b(ln), PCI_COMMAND, status);
958 } 958 }
959 #endif 959 #endif
960 } 960 }
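To make the I/O port virtualization above concrete: assuming the usual parisc convention that each HBA owns a 64KB window, i.e. HBA_PORT_BASE(n) == n << 16 (an assumption here), a BAR of 0x1000 on the HBA with hba_num 1 maps as:

	res->start = 0x1000 | HBA_PORT_BASE(1);	/* 0x11000 in the flat port space */
	res->end = 0x101f | HBA_PORT_BASE(1);	/* 0x1101f */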
961 961
962 962
963 struct pci_bios_ops lba_bios_ops = { 963 struct pci_bios_ops lba_bios_ops = {
964 .init = lba_bios_init, 964 .init = lba_bios_init,
965 .fixup_bus = lba_fixup_bus, 965 .fixup_bus = lba_fixup_bus,
966 }; 966 };
967 967
968 968
969 969
970 970
971 /******************************************************* 971 /*******************************************************
972 ** 972 **
973 ** LBA Sprockets "I/O Port" Space Accessor Functions 973 ** LBA Sprockets "I/O Port" Space Accessor Functions
974 ** 974 **
975 ** This set of accessor functions is intended for use with 975 ** This set of accessor functions is intended for use with
976 ** "legacy firmware" (ie Sprockets on Allegro/Forte boxes). 976 ** "legacy firmware" (ie Sprockets on Allegro/Forte boxes).
977 ** 977 **
978 ** Many PCI devices don't require use of I/O port space (eg Tulip, 978 ** Many PCI devices don't require use of I/O port space (eg Tulip,
979 ** NCR720) since they export the same registers to both MMIO and 979 ** NCR720) since they export the same registers to both MMIO and
980 ** I/O port space. In general I/O port space is slower than 980 ** I/O port space. In general I/O port space is slower than
981 ** MMIO since port writes can't be posted while MMIO writes can be. 981 ** MMIO since port writes can't be posted while MMIO writes can be.
982 ** 982 **
983 ********************************************************/ 983 ********************************************************/
984 984
985 #define LBA_PORT_IN(size, mask) \ 985 #define LBA_PORT_IN(size, mask) \
986 static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \ 986 static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
987 { \ 987 { \
988 u##size t; \ 988 u##size t; \
989 t = READ_REG##size(astro_iop_base + addr); \ 989 t = READ_REG##size(astro_iop_base + addr); \
990 DBG_PORT(" 0x%x\n", t); \ 990 DBG_PORT(" 0x%x\n", t); \
991 return (t); \ 991 return (t); \
992 } 992 }
993 993
994 LBA_PORT_IN( 8, 3) 994 LBA_PORT_IN( 8, 3)
995 LBA_PORT_IN(16, 2) 995 LBA_PORT_IN(16, 2)
996 LBA_PORT_IN(32, 0) 996 LBA_PORT_IN(32, 0)
997 997
998 998
999 999
1000 /* 1000 /*
1001 ** BUG X4107: Ordering broken - DMA RD return can bypass PIO WR 1001 ** BUG X4107: Ordering broken - DMA RD return can bypass PIO WR
1002 ** 1002 **
1003 ** Fixed in Elroy 2.2. The READ_U32(..., LBA_FUNC_ID) below is 1003 ** Fixed in Elroy 2.2. The READ_U32(..., LBA_FUNC_ID) below is
1004 ** to guarantee non-postable completion semantics - not to avoid X4107. 1004 ** to guarantee non-postable completion semantics - not to avoid X4107.
1005 ** The READ_U32 only guarantees the write data gets to elroy but 1005 ** The READ_U32 only guarantees the write data gets to elroy but
1006 ** not out to the PCI bus. We can't read stuff from I/O port space 1006 ** not out to the PCI bus. We can't read stuff from I/O port space
1007 ** since we don't know what has side-effects. Attempting to read 1007 ** since we don't know what has side-effects. Attempting to read
1008 ** from configuration space would be suicidal given the number of 1008 ** from configuration space would be suicidal given the number of
1009 ** bugs in that elroy functionality. 1009 ** bugs in that elroy functionality.
1010 ** 1010 **
1011 ** Description: 1011 ** Description:
1012 ** DMA read results can improperly pass PIO writes (X4107). The 1012 ** DMA read results can improperly pass PIO writes (X4107). The
1013 ** result of this bug is that if a processor modifies a location in 1013 ** result of this bug is that if a processor modifies a location in
1014 ** memory after having issued PIO writes, the PIO writes are not 1014 ** memory after having issued PIO writes, the PIO writes are not
1015 ** guaranteed to be completed before a PCI device is allowed to see 1015 ** guaranteed to be completed before a PCI device is allowed to see
1016 ** the modified data in a DMA read. 1016 ** the modified data in a DMA read.
1017 ** 1017 **
1018 ** Note that IKE bug X3719 in TR1 IKEs will result in the same 1018 ** Note that IKE bug X3719 in TR1 IKEs will result in the same
1019 ** symptom. 1019 ** symptom.
1020 ** 1020 **
1021 ** Workaround: 1021 ** Workaround:
1022 ** The workaround for this bug is to always follow a PIO write with 1022 ** The workaround for this bug is to always follow a PIO write with
1023 ** a PIO read to the same bus before starting DMA on that PCI bus. 1023 ** a PIO read to the same bus before starting DMA on that PCI bus.
1024 ** 1024 **
1025 */ 1025 */
1026 #define LBA_PORT_OUT(size, mask) \ 1026 #define LBA_PORT_OUT(size, mask) \
1027 static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \ 1027 static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
1028 { \ 1028 { \
1029 DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \ 1029 DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \
1030 WRITE_REG##size(val, astro_iop_base + addr); \ 1030 WRITE_REG##size(val, astro_iop_base + addr); \
1031 if (LBA_DEV(d)->hw_rev < 3) \ 1031 if (LBA_DEV(d)->hw_rev < 3) \
1032 lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \ 1032 lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
1033 } 1033 }
1034 1034
1035 LBA_PORT_OUT( 8, 3) 1035 LBA_PORT_OUT( 8, 3)
1036 LBA_PORT_OUT(16, 2) 1036 LBA_PORT_OUT(16, 2)
1037 LBA_PORT_OUT(32, 0) 1037 LBA_PORT_OUT(32, 0)
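The token-pasting above is dense, so for reference LBA_PORT_OUT(8, 3) expands to roughly the following (a sketch with the DBG_PORT trace omitted; WRITE_REG8, READ_U32, LBA_DEV, lba_t32 and astro_iop_base are all defined earlier in this driver):

static void lba_astro_out8(struct pci_hba_data *d, u16 addr, u8 val)
{
	/* the non-posted PIO write itself */
	WRITE_REG8(val, astro_iop_base + addr);

	/* X4107 workaround: parts older than TR2.2 (hw_rev < 3) need a
	** harmless register read to push the write out of the elroy */
	if (LBA_DEV(d)->hw_rev < 3)
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID);
}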
1038 1038
1039 1039
1040 static struct pci_port_ops lba_astro_port_ops = { 1040 static struct pci_port_ops lba_astro_port_ops = {
1041 .inb = lba_astro_in8, 1041 .inb = lba_astro_in8,
1042 .inw = lba_astro_in16, 1042 .inw = lba_astro_in16,
1043 .inl = lba_astro_in32, 1043 .inl = lba_astro_in32,
1044 .outb = lba_astro_out8, 1044 .outb = lba_astro_out8,
1045 .outw = lba_astro_out16, 1045 .outw = lba_astro_out16,
1046 .outl = lba_astro_out32 1046 .outl = lba_astro_out32
1047 }; 1047 };
1048 1048
1049 1049
1050 #ifdef CONFIG_64BIT 1050 #ifdef CONFIG_64BIT
1051 #define PIOP_TO_GMMIO(lba, addr) \ 1051 #define PIOP_TO_GMMIO(lba, addr) \
1052 ((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3)) 1052 ((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
1053 1053
1054 /******************************************************* 1054 /*******************************************************
1055 ** 1055 **
1056 ** LBA PAT "I/O Port" Space Accessor Functions 1056 ** LBA PAT "I/O Port" Space Accessor Functions
1057 ** 1057 **
1058 ** This set of accessor functions is intended for use with 1058 ** This set of accessor functions is intended for use with
1059 ** "PAT PDC" firmware (ie Prelude/Rhapsody/Piranha boxes). 1059 ** "PAT PDC" firmware (ie Prelude/Rhapsody/Piranha boxes).
1060 ** 1060 **
1061 ** This uses the PIOP space located in the first 64MB of GMMIO. 1061 ** This uses the PIOP space located in the first 64MB of GMMIO.
1062 ** Each rope gets a full 64*KB* (ie 4 bytes per page) this way. 1062 ** Each rope gets a full 64*KB* (ie 4 bytes per page) this way.
1063 ** bits 1:0 stay the same. bits 15:2 become 25:12. 1063 ** bits 1:0 stay the same. bits 15:2 become 25:12.
1064 ** Then add the base and we can generate an I/O Port cycle. 1064 ** Then add the base and we can generate an I/O Port cycle.
1065 ********************************************************/ 1065 ********************************************************/
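A worked example of the bit shuffle described above, for the 4-byte port group at I/O address 0x3F8 (assuming only that iop_base was set up by lba_pat_resources() further down):

/* PIOP_TO_GMMIO(lba, 0x3F8):
**	(0x3F8 & 0xFFFC) << 10	= 0x000FE000	(bits 15:2 -> 25:12)
**	(0x3F8 & 3)		= 0		(bits 1:0 unchanged)
** result: lba->iop_base + 0xFE000 - each aligned 4-byte port group
** lands on its own 4k page, exactly as the comment above says.
*/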
1066 #undef LBA_PORT_IN 1066 #undef LBA_PORT_IN
1067 #define LBA_PORT_IN(size, mask) \ 1067 #define LBA_PORT_IN(size, mask) \
1068 static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \ 1068 static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
1069 { \ 1069 { \
1070 u##size t; \ 1070 u##size t; \
1071 DBG_PORT("%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \ 1071 DBG_PORT("%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \
1072 t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \ 1072 t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
1073 DBG_PORT(" 0x%x\n", t); \ 1073 DBG_PORT(" 0x%x\n", t); \
1074 return (t); \ 1074 return (t); \
1075 } 1075 }
1076 1076
1077 LBA_PORT_IN( 8, 3) 1077 LBA_PORT_IN( 8, 3)
1078 LBA_PORT_IN(16, 2) 1078 LBA_PORT_IN(16, 2)
1079 LBA_PORT_IN(32, 0) 1079 LBA_PORT_IN(32, 0)
1080 1080
1081 1081
1082 #undef LBA_PORT_OUT 1082 #undef LBA_PORT_OUT
1083 #define LBA_PORT_OUT(size, mask) \ 1083 #define LBA_PORT_OUT(size, mask) \
1084 static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \ 1084 static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
1085 { \ 1085 { \
1086 void *where = (void *) PIOP_TO_GMMIO(LBA_DEV(l), addr); \ 1086 void *where = (void *) PIOP_TO_GMMIO(LBA_DEV(l), addr); \
1087 DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \ 1087 DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
1088 WRITE_REG##size(val, where); \ 1088 WRITE_REG##size(val, where); \
1089 /* flush the I/O down to the elroy at least */ \ 1089 /* flush the I/O down to the elroy at least */ \
1090 lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \ 1090 lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
1091 } 1091 }
1092 1092
1093 LBA_PORT_OUT( 8, 3) 1093 LBA_PORT_OUT( 8, 3)
1094 LBA_PORT_OUT(16, 2) 1094 LBA_PORT_OUT(16, 2)
1095 LBA_PORT_OUT(32, 0) 1095 LBA_PORT_OUT(32, 0)
1096 1096
1097 1097
1098 static struct pci_port_ops lba_pat_port_ops = { 1098 static struct pci_port_ops lba_pat_port_ops = {
1099 .inb = lba_pat_in8, 1099 .inb = lba_pat_in8,
1100 .inw = lba_pat_in16, 1100 .inw = lba_pat_in16,
1101 .inl = lba_pat_in32, 1101 .inl = lba_pat_in32,
1102 .outb = lba_pat_out8, 1102 .outb = lba_pat_out8,
1103 .outw = lba_pat_out16, 1103 .outw = lba_pat_out16,
1104 .outl = lba_pat_out32 1104 .outl = lba_pat_out32
1105 }; 1105 };
1106 1106
1107 1107
1108 1108
1109 /* 1109 /*
1110 ** Make range information from PDC available to the PCI subsystem. 1110 ** Make range information from PDC available to the PCI subsystem.
1111 ** We make the PDC call here in order to get the PCI bus range 1111 ** We make the PDC call here in order to get the PCI bus range
1112 ** numbers. The rest will get forwarded in pcibios_fixup_bus(). 1112 ** numbers. The rest will get forwarded in pcibios_fixup_bus().
1113 ** We don't have a struct pci_bus assigned to us yet. 1113 ** We don't have a struct pci_bus assigned to us yet.
1114 */ 1114 */
1115 static void 1115 static void
1116 lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) 1116 lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1117 { 1117 {
1118 unsigned long bytecnt; 1118 unsigned long bytecnt;
1119 pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */ 1119 pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */
1120 pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */ 1120 pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */
1121 long io_count; 1121 long io_count;
1122 long status; /* PDC return status */ 1122 long status; /* PDC return status */
1123 long pa_count; 1123 long pa_count;
1124 int i; 1124 int i;
1125 1125
1126 /* return cell module (IO view) */ 1126 /* return cell module (IO view) */
1127 status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index, 1127 status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
1128 PA_VIEW, & pa_pdc_cell); 1128 PA_VIEW, & pa_pdc_cell);
1129 pa_count = pa_pdc_cell.mod[1]; 1129 pa_count = pa_pdc_cell.mod[1];
1130 1130
1131 status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index, 1131 status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
1132 IO_VIEW, &io_pdc_cell); 1132 IO_VIEW, &io_pdc_cell);
1133 io_count = io_pdc_cell.mod[1]; 1133 io_count = io_pdc_cell.mod[1];
1134 1134
1135 /* We've already done this once for device discovery...*/ 1135 /* We've already done this once for device discovery...*/
1136 if (status != PDC_OK) { 1136 if (status != PDC_OK) {
1137 panic("pdc_pat_cell_module() call failed for LBA!\n"); 1137 panic("pdc_pat_cell_module() call failed for LBA!\n");
1138 } 1138 }
1139 1139
1140 if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) { 1140 if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) {
1141 panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n"); 1141 panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
1142 } 1142 }
1143 1143
1144 /* 1144 /*
1145 ** Inspect the resources PAT tells us about 1145 ** Inspect the resources PAT tells us about
1146 */ 1146 */
1147 for (i = 0; i < pa_count; i++) { 1147 for (i = 0; i < pa_count; i++) {
1148 struct { 1148 struct {
1149 unsigned long type; 1149 unsigned long type;
1150 unsigned long start; 1150 unsigned long start;
1151 unsigned long end; /* aka finish */ 1151 unsigned long end; /* aka finish */
1152 } *p, *io; 1152 } *p, *io;
1153 struct resource *r; 1153 struct resource *r;
1154 1154
1155 p = (void *) &(pa_pdc_cell.mod[2+i*3]); 1155 p = (void *) &(pa_pdc_cell.mod[2+i*3]);
1156 io = (void *) &(io_pdc_cell.mod[2+i*3]); 1156 io = (void *) &(io_pdc_cell.mod[2+i*3]);
1157 1157
1158 /* Convert the PAT range data to PCI "struct resource" */ 1158 /* Convert the PAT range data to PCI "struct resource" */
1159 switch(p->type & 0xff) { 1159 switch(p->type & 0xff) {
1160 case PAT_PBNUM: 1160 case PAT_PBNUM:
1161 lba_dev->hba.bus_num.start = p->start; 1161 lba_dev->hba.bus_num.start = p->start;
1162 lba_dev->hba.bus_num.end = p->end; 1162 lba_dev->hba.bus_num.end = p->end;
1163 break; 1163 break;
1164 1164
1165 case PAT_LMMIO: 1165 case PAT_LMMIO:
1166 /* used to fix up pre-initialized MEM BARs */ 1166 /* used to fix up pre-initialized MEM BARs */
1167 if (!lba_dev->hba.lmmio_space.start) { 1167 if (!lba_dev->hba.lmmio_space.start) {
1168 sprintf(lba_dev->hba.lmmio_name, 1168 sprintf(lba_dev->hba.lmmio_name,
1169 "PCI%02lx LMMIO", 1169 "PCI%02lx LMMIO",
1170 lba_dev->hba.bus_num.start); 1170 lba_dev->hba.bus_num.start);
1171 lba_dev->hba.lmmio_space_offset = p->start - 1171 lba_dev->hba.lmmio_space_offset = p->start -
1172 io->start; 1172 io->start;
1173 r = &lba_dev->hba.lmmio_space; 1173 r = &lba_dev->hba.lmmio_space;
1174 r->name = lba_dev->hba.lmmio_name; 1174 r->name = lba_dev->hba.lmmio_name;
1175 } else if (!lba_dev->hba.elmmio_space.start) { 1175 } else if (!lba_dev->hba.elmmio_space.start) {
1176 sprintf(lba_dev->hba.elmmio_name, 1176 sprintf(lba_dev->hba.elmmio_name,
1177 "PCI%02lx ELMMIO", 1177 "PCI%02lx ELMMIO",
1178 lba_dev->hba.bus_num.start); 1178 lba_dev->hba.bus_num.start);
1179 r = &lba_dev->hba.elmmio_space; 1179 r = &lba_dev->hba.elmmio_space;
1180 r->name = lba_dev->hba.elmmio_name; 1180 r->name = lba_dev->hba.elmmio_name;
1181 } else { 1181 } else {
1182 printk(KERN_WARNING MODULE_NAME 1182 printk(KERN_WARNING MODULE_NAME
1183 " only supports 2 LMMIO resources!\n"); 1183 " only supports 2 LMMIO resources!\n");
1184 break; 1184 break;
1185 } 1185 }
1186 1186
1187 r->start = p->start; 1187 r->start = p->start;
1188 r->end = p->end; 1188 r->end = p->end;
1189 r->flags = IORESOURCE_MEM; 1189 r->flags = IORESOURCE_MEM;
1190 r->parent = r->sibling = r->child = NULL; 1190 r->parent = r->sibling = r->child = NULL;
1191 break; 1191 break;
1192 1192
1193 case PAT_GMMIO: 1193 case PAT_GMMIO:
1194 /* MMIO space > 4GB phys addr; for 64-bit BAR */ 1194 /* MMIO space > 4GB phys addr; for 64-bit BAR */
1195 sprintf(lba_dev->hba.gmmio_name, "PCI%02lx GMMIO", 1195 sprintf(lba_dev->hba.gmmio_name, "PCI%02lx GMMIO",
1196 lba_dev->hba.bus_num.start); 1196 lba_dev->hba.bus_num.start);
1197 r = &lba_dev->hba.gmmio_space; 1197 r = &lba_dev->hba.gmmio_space;
1198 r->name = lba_dev->hba.gmmio_name; 1198 r->name = lba_dev->hba.gmmio_name;
1199 r->start = p->start; 1199 r->start = p->start;
1200 r->end = p->end; 1200 r->end = p->end;
1201 r->flags = IORESOURCE_MEM; 1201 r->flags = IORESOURCE_MEM;
1202 r->parent = r->sibling = r->child = NULL; 1202 r->parent = r->sibling = r->child = NULL;
1203 break; 1203 break;
1204 1204
1205 case PAT_NPIOP: 1205 case PAT_NPIOP:
1206 printk(KERN_WARNING MODULE_NAME 1206 printk(KERN_WARNING MODULE_NAME
1207 " range[%d] : ignoring NPIOP (0x%lx)\n", 1207 " range[%d] : ignoring NPIOP (0x%lx)\n",
1208 i, p->start); 1208 i, p->start);
1209 break; 1209 break;
1210 1210
1211 case PAT_PIOP: 1211 case PAT_PIOP:
1212 /* 1212 /*
1213 ** Postable I/O port space is per PCI host adapter. 1213 ** Postable I/O port space is per PCI host adapter.
1214 ** base of 64MB PIOP region 1214 ** base of 64MB PIOP region
1215 */ 1215 */
1216 lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024); 1216 lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);
1217 1217
1218 sprintf(lba_dev->hba.io_name, "PCI%02lx Ports", 1218 sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
1219 lba_dev->hba.bus_num.start); 1219 lba_dev->hba.bus_num.start);
1220 r = &lba_dev->hba.io_space; 1220 r = &lba_dev->hba.io_space;
1221 r->name = lba_dev->hba.io_name; 1221 r->name = lba_dev->hba.io_name;
1222 r->start = HBA_PORT_BASE(lba_dev->hba.hba_num); 1222 r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
1223 r->end = r->start + HBA_PORT_SPACE_SIZE - 1; 1223 r->end = r->start + HBA_PORT_SPACE_SIZE - 1;
1224 r->flags = IORESOURCE_IO; 1224 r->flags = IORESOURCE_IO;
1225 r->parent = r->sibling = r->child = NULL; 1225 r->parent = r->sibling = r->child = NULL;
1226 break; 1226 break;
1227 1227
1228 default: 1228 default:
1229 printk(KERN_WARNING MODULE_NAME 1229 printk(KERN_WARNING MODULE_NAME
1230 " range[%d] : unknown pat range type (0x%lx)\n", 1230 " range[%d] : unknown pat range type (0x%lx)\n",
1231 i, p->type & 0xff); 1231 i, p->type & 0xff);
1232 break; 1232 break;
1233 } 1233 }
1234 } 1234 }
1235 } 1235 }
1236 #else 1236 #else
1237 /* keep compiler from complaining about missing declarations */ 1237 /* keep compiler from complaining about missing declarations */
1238 #define lba_pat_port_ops lba_astro_port_ops 1238 #define lba_pat_port_ops lba_astro_port_ops
1239 #define lba_pat_resources(pa_dev, lba_dev) 1239 #define lba_pat_resources(pa_dev, lba_dev)
1240 #endif /* CONFIG_64BIT */ 1240 #endif /* CONFIG_64BIT */
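The anonymous-struct overlay in lba_pat_resources() is easy to misread: PDC hands back each range as three consecutive unsigned longs in mod[], with the triples starting at slot 2. A minimal standalone sketch of the same parsing idiom (the names here are invented for illustration, not driver code):

struct pat_range { unsigned long type, start, end; };

static void walk_pat_ranges(unsigned long *mod, long count)
{
	long i;

	for (i = 0; i < count; i++) {
		/* mod[0..1] are header words; mod[1] is the range count */
		struct pat_range *p = (struct pat_range *) &mod[2 + i*3];
		printk(KERN_DEBUG "range[%ld] type 0x%lx 0x%lx-0x%lx\n",
			i, p->type & 0xff, p->start, p->end);
	}
}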
1241 1241
1242 1242
1243 extern void sba_distributed_lmmio(struct parisc_device *, struct resource *); 1243 extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
1244 extern void sba_directed_lmmio(struct parisc_device *, struct resource *); 1244 extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
1245 1245
1246 1246
1247 static void 1247 static void
1248 lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) 1248 lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1249 { 1249 {
1250 struct resource *r; 1250 struct resource *r;
1251 int lba_num; 1251 int lba_num;
1252 1252
1253 lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND; 1253 lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
1254 1254
1255 /* 1255 /*
1256 ** With "legacy" firmware, the lowest byte of FW_SCRATCH 1256 ** With "legacy" firmware, the lowest byte of FW_SCRATCH
1257 ** represents bus->secondary and the second byte represents 1257 ** represents bus->secondary and the second byte represents
1258 ** bus->subordinate (i.e. highest PPB programmed by firmware). 1258 ** bus->subordinate (i.e. highest PPB programmed by firmware).
1259 ** PCI bus walk *should* end up with the same result. 1259 ** PCI bus walk *should* end up with the same result.
1260 ** FIXME: But we don't have sanity checks in PCI or LBA. 1260 ** FIXME: But we don't have sanity checks in PCI or LBA.
1261 */ 1261 */
1262 lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH); 1262 lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
1263 r = &(lba_dev->hba.bus_num); 1263 r = &(lba_dev->hba.bus_num);
1264 r->name = "LBA PCI Busses"; 1264 r->name = "LBA PCI Busses";
1265 r->start = lba_num & 0xff; 1265 r->start = lba_num & 0xff;
1266 r->end = (lba_num>>8) & 0xff; 1266 r->end = (lba_num>>8) & 0xff;
1267 1267
1268 /* Set up local PCI Bus resources - we don't need them for 1268 /* Set up local PCI Bus resources - we don't need them for
1269 ** Legacy boxes but it's nice to see in /proc/iomem. 1269 ** Legacy boxes but it's nice to see in /proc/iomem.
1270 */ 1270 */
1271 r = &(lba_dev->hba.lmmio_space); 1271 r = &(lba_dev->hba.lmmio_space);
1272 sprintf(lba_dev->hba.lmmio_name, "PCI%02lx LMMIO", 1272 sprintf(lba_dev->hba.lmmio_name, "PCI%02lx LMMIO",
1273 lba_dev->hba.bus_num.start); 1273 lba_dev->hba.bus_num.start);
1274 r->name = lba_dev->hba.lmmio_name; 1274 r->name = lba_dev->hba.lmmio_name;
1275 1275
1276 #if 1 1276 #if 1
1277 /* We want the CPU -> IO routing of addresses. 1277 /* We want the CPU -> IO routing of addresses.
1278 * The SBA BASE/MASK registers control CPU -> IO routing. 1278 * The SBA BASE/MASK registers control CPU -> IO routing.
1279 * Ask SBA what is routed to this rope/LBA. 1279 * Ask SBA what is routed to this rope/LBA.
1280 */ 1280 */
1281 sba_distributed_lmmio(pa_dev, r); 1281 sba_distributed_lmmio(pa_dev, r);
1282 #else 1282 #else
1283 /* 1283 /*
1284 * The LBA BASE/MASK registers control IO -> System routing. 1284 * The LBA BASE/MASK registers control IO -> System routing.
1285 * 1285 *
1286 * The following code works but doesn't get us what we want. 1286 * The following code works but doesn't get us what we want.
1287 * Well, only because firmware (v5.0) on C3000 doesn't program 1287 * Well, only because firmware (v5.0) on C3000 doesn't program
1288 * the LBA BASE/MASK registers to be the exact inverse of 1288 * the LBA BASE/MASK registers to be the exact inverse of
1289 * the corresponding SBA registers. Other Astro/Pluto 1289 * the corresponding SBA registers. Other Astro/Pluto
1290 * based platform firmware may do it right. 1290 * based platform firmware may do it right.
1291 * 1291 *
1292 * Should someone want to mess with MSI, they may need to 1292 * Should someone want to mess with MSI, they may need to
1293 * reprogram LBA BASE/MASK registers. Thus preserve the code 1293 * reprogram LBA BASE/MASK registers. Thus preserve the code
1294 * below until MSI is known to work on C3000/A500/N4000/RP3440. 1294 * below until MSI is known to work on C3000/A500/N4000/RP3440.
1295 * 1295 *
1296 * Using the code below, /proc/iomem shows: 1296 * Using the code below, /proc/iomem shows:
1297 * ... 1297 * ...
1298 * f0000000-f0ffffff : PCI00 LMMIO 1298 * f0000000-f0ffffff : PCI00 LMMIO
1299 * f05d0000-f05d0000 : lcd_data 1299 * f05d0000-f05d0000 : lcd_data
1300 * f05d0008-f05d0008 : lcd_cmd 1300 * f05d0008-f05d0008 : lcd_cmd
1301 * f1000000-f1ffffff : PCI01 LMMIO 1301 * f1000000-f1ffffff : PCI01 LMMIO
1302 * f4000000-f4ffffff : PCI02 LMMIO 1302 * f4000000-f4ffffff : PCI02 LMMIO
1303 * f4000000-f4001fff : sym53c8xx 1303 * f4000000-f4001fff : sym53c8xx
1304 * f4002000-f4003fff : sym53c8xx 1304 * f4002000-f4003fff : sym53c8xx
1305 * f4004000-f40043ff : sym53c8xx 1305 * f4004000-f40043ff : sym53c8xx
1306 * f4005000-f40053ff : sym53c8xx 1306 * f4005000-f40053ff : sym53c8xx
1307 * f4007000-f4007fff : ohci_hcd 1307 * f4007000-f4007fff : ohci_hcd
1308 * f4008000-f40083ff : tulip 1308 * f4008000-f40083ff : tulip
1309 * f6000000-f6ffffff : PCI03 LMMIO 1309 * f6000000-f6ffffff : PCI03 LMMIO
1310 * f8000000-fbffffff : PCI00 ELMMIO 1310 * f8000000-fbffffff : PCI00 ELMMIO
1311 * fa100000-fa4fffff : stifb mmio 1311 * fa100000-fa4fffff : stifb mmio
1312 * fb000000-fb1fffff : stifb fb 1312 * fb000000-fb1fffff : stifb fb
1313 * 1313 *
1314 * But everything listed under PCI02 actually lives under PCI00. 1314 * But everything listed under PCI02 actually lives under PCI00.
1315 * This is clearly wrong. 1315 * This is clearly wrong.
1316 * 1316 *
1317 * Asking SBA how things are routed tells the correct story: 1317 * Asking SBA how things are routed tells the correct story:
1318 * LMMIO_BASE/MASK/ROUTE f4000001 fc000000 00000000 1318 * LMMIO_BASE/MASK/ROUTE f4000001 fc000000 00000000
1319 * DIR0_BASE/MASK/ROUTE fa000001 fe000000 00000006 1319 * DIR0_BASE/MASK/ROUTE fa000001 fe000000 00000006
1320 * DIR1_BASE/MASK/ROUTE f9000001 ff000000 00000004 1320 * DIR1_BASE/MASK/ROUTE f9000001 ff000000 00000004
1321 * DIR2_BASE/MASK/ROUTE f0000000 fc000000 00000000 1321 * DIR2_BASE/MASK/ROUTE f0000000 fc000000 00000000
1322 * DIR3_BASE/MASK/ROUTE f0000000 fc000000 00000000 1322 * DIR3_BASE/MASK/ROUTE f0000000 fc000000 00000000
1323 * 1323 *
1324 * Which looks like this in /proc/iomem: 1324 * Which looks like this in /proc/iomem:
1325 * f4000000-f47fffff : PCI00 LMMIO 1325 * f4000000-f47fffff : PCI00 LMMIO
1326 * f4000000-f4001fff : sym53c8xx 1326 * f4000000-f4001fff : sym53c8xx
1327 * ...[deleted core devices - same as above]... 1327 * ...[deleted core devices - same as above]...
1328 * f4008000-f40083ff : tulip 1328 * f4008000-f40083ff : tulip
1329 * f4800000-f4ffffff : PCI01 LMMIO 1329 * f4800000-f4ffffff : PCI01 LMMIO
1330 * f6000000-f67fffff : PCI02 LMMIO 1330 * f6000000-f67fffff : PCI02 LMMIO
1331 * f7000000-f77fffff : PCI03 LMMIO 1331 * f7000000-f77fffff : PCI03 LMMIO
1332 * f9000000-f9ffffff : PCI02 ELMMIO 1332 * f9000000-f9ffffff : PCI02 ELMMIO
1333 * fa000000-fbffffff : PCI03 ELMMIO 1333 * fa000000-fbffffff : PCI03 ELMMIO
1334 * fa100000-fa4fffff : stifb mmio 1334 * fa100000-fa4fffff : stifb mmio
1335 * fb000000-fb1fffff : stifb fb 1335 * fb000000-fb1fffff : stifb fb
1336 * 1336 *
1337 * ie all built-in core devices are now correctly under PCI00. 1337 * ie all built-in core devices are now correctly under PCI00.
1338 * The "PCI02 ELMMIO" directed range is for: 1338 * The "PCI02 ELMMIO" directed range is for:
1339 * +-[02]---03.0 3Dfx Interactive, Inc. Voodoo 2 1339 * +-[02]---03.0 3Dfx Interactive, Inc. Voodoo 2
1340 * 1340 *
1341 * All is well now. 1341 * All is well now.
1342 */ 1342 */
1343 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE); 1343 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
1344 if (r->start & 1) { 1344 if (r->start & 1) {
1345 unsigned long rsize; 1345 unsigned long rsize;
1346 1346
1347 r->flags = IORESOURCE_MEM; 1347 r->flags = IORESOURCE_MEM;
1348 /* mmio_mask also clears Enable bit */ 1348 /* mmio_mask also clears Enable bit */
1349 r->start &= mmio_mask; 1349 r->start &= mmio_mask;
1350 r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start); 1350 r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
1351 rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK); 1351 rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);
1352 1352
1353 /* 1353 /*
1354 ** Each rope only gets part of the distributed range. 1354 ** Each rope only gets part of the distributed range.
1355 ** Adjust "window" for this rope. 1355 ** Adjust "window" for this rope.
1356 */ 1356 */
1357 rsize /= ROPES_PER_IOC; 1357 rsize /= ROPES_PER_IOC;
1358 r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start); 1358 r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
1359 r->end = r->start + rsize; 1359 r->end = r->start + rsize;
1360 } else { 1360 } else {
1361 r->end = r->start = 0; /* Not enabled. */ 1361 r->end = r->start = 0; /* Not enabled. */
1362 } 1362 }
1363 #endif 1363 #endif
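For concreteness, the arithmetic in the disabled branch above, fed the LMMIO_BASE/MASK values quoted in the comment block earlier in this function (f4000001/fc000000), hands each of the 8 ropes an 8MB window:

/* mask = 0xfc000000 -> rsize = ~mask = 0x03ffffff (a 64MB window)
** rsize /= ROPES_PER_IOC -> rsize = 0x007fffff (8MB - 1 per rope)
** rope N's slice: start + 0x800000*N .. start + 0x800000*(N+1) - 1
*/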
1364 1364
1365 /* 1365 /*
1366 ** "Directed" ranges are used when the "distributed range" isn't 1366 ** "Directed" ranges are used when the "distributed range" isn't
1367 ** sufficient for all devices below a given LBA. Typically devices 1367 ** sufficient for all devices below a given LBA. Typically devices
1368 ** like graphics cards or X25 may need a directed range when the 1368 ** like graphics cards or X25 may need a directed range when the
1369 ** bus has multiple slots (ie multiple devices) or the device 1369 ** bus has multiple slots (ie multiple devices) or the device
1370 ** needs more than the typical 4 or 8MB a distributed range offers. 1370 ** needs more than the typical 4 or 8MB a distributed range offers.
1371 ** 1371 **
1372 ** The main reason for ignoring it for now is the frigging complications. 1372 ** The main reason for ignoring it for now is the frigging complications.
1373 ** Directed ranges may overlap (and take precedence over) 1373 ** Directed ranges may overlap (and take precedence over)
1374 ** distributed ranges. Or a distributed range assigned to an unused 1374 ** distributed ranges. Or a distributed range assigned to an unused
1375 ** rope may be used by a directed range on a different rope. 1375 ** rope may be used by a directed range on a different rope.
1376 ** Support for graphics devices may require fixing this 1376 ** Support for graphics devices may require fixing this
1377 ** since they may be assigned a directed range which overlaps 1377 ** since they may be assigned a directed range which overlaps
1378 ** an existing (but unused portion of) distributed range. 1378 ** an existing (but unused portion of) distributed range.
1379 */ 1379 */
1380 r = &(lba_dev->hba.elmmio_space); 1380 r = &(lba_dev->hba.elmmio_space);
1381 sprintf(lba_dev->hba.elmmio_name, "PCI%02lx ELMMIO", 1381 sprintf(lba_dev->hba.elmmio_name, "PCI%02lx ELMMIO",
1382 lba_dev->hba.bus_num.start); 1382 lba_dev->hba.bus_num.start);
1383 r->name = lba_dev->hba.elmmio_name; 1383 r->name = lba_dev->hba.elmmio_name;
1384 1384
1385 #if 1 1385 #if 1
1386 /* See comment which precedes call to sba_directed_lmmio() */ 1386 /* See comment which precedes call to sba_directed_lmmio() */
1387 sba_directed_lmmio(pa_dev, r); 1387 sba_directed_lmmio(pa_dev, r);
1388 #else 1388 #else
1389 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE); 1389 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);
1390 1390
1391 if (r->start & 1) { 1391 if (r->start & 1) {
1392 unsigned long rsize; 1392 unsigned long rsize;
1393 r->flags = IORESOURCE_MEM; 1393 r->flags = IORESOURCE_MEM;
1394 /* mmio_mask also clears Enable bit */ 1394 /* mmio_mask also clears Enable bit */
1395 r->start &= mmio_mask; 1395 r->start &= mmio_mask;
1396 r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start); 1396 r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
1397 rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK); 1397 rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
1398 r->end = r->start + ~rsize; 1398 r->end = r->start + ~rsize;
1399 } 1399 }
1400 #endif 1400 #endif
1401 1401
1402 r = &(lba_dev->hba.io_space); 1402 r = &(lba_dev->hba.io_space);
1403 sprintf(lba_dev->hba.io_name, "PCI%02lx Ports", 1403 sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
1404 lba_dev->hba.bus_num.start); 1404 lba_dev->hba.bus_num.start);
1405 r->name = lba_dev->hba.io_name; 1405 r->name = lba_dev->hba.io_name;
1406 r->flags = IORESOURCE_IO; 1406 r->flags = IORESOURCE_IO;
1407 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L; 1407 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
1408 r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1)); 1408 r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));
1409 1409
1410 /* Virtualize the I/O Port space ranges */ 1410 /* Virtualize the I/O Port space ranges */
1411 lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num); 1411 lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
1412 r->start |= lba_num; 1412 r->start |= lba_num;
1413 r->end |= lba_num; 1413 r->end |= lba_num;
1414 } 1414 }
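Two bit tricks in lba_legacy_resources() merit a worked example: FW_SCRATCH packs the bus range into its low two bytes, and the port window is "virtualized" by OR-ing in a per-HBA base so every LBA owns a distinct 64KB slice of the kernel's unified port space (this assumes the usual HBA_PORT_BASE(n) == n << 16 from the parisc PCI headers):

/* FW_SCRATCH = 0x00000400:
**	bus_num.start = 0x0400 & 0xff		= 0	(secondary)
**	bus_num.end   = (0x0400 >> 8) & 0xff	= 4	(subordinate)
**
** Port virtualization for HBA #2: HBA_PORT_BASE(2) = 0x20000, so a
** hardware window of 0x0000-0xffff appears as 0x20000-0x2ffff and
** the in/out accessors can route on the high bits.
*/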
1415 1415
1416 1416
1417 /************************************************************************** 1417 /**************************************************************************
1418 ** 1418 **
1419 ** LBA initialization code (HW and SW) 1419 ** LBA initialization code (HW and SW)
1420 ** 1420 **
1421 ** o identify LBA chip itself 1421 ** o identify LBA chip itself
1422 ** o initialize LBA chip modes (HardFail) 1422 ** o initialize LBA chip modes (HardFail)
1423 ** o FIXME: initialize DMA hints for reasonable defaults 1423 ** o FIXME: initialize DMA hints for reasonable defaults
1424 ** o enable configuration functions 1424 ** o enable configuration functions
1425 ** o call pci_register_ops() to discover devs (fixup/fixup_bus get invoked) 1425 ** o call pci_register_ops() to discover devs (fixup/fixup_bus get invoked)
1426 ** 1426 **
1427 **************************************************************************/ 1427 **************************************************************************/
1428 1428
1429 static int __init 1429 static int __init
1430 lba_hw_init(struct lba_device *d) 1430 lba_hw_init(struct lba_device *d)
1431 { 1431 {
1432 u32 stat; 1432 u32 stat;
1433 u32 bus_reset; /* PDC_PAT_BUG */ 1433 u32 bus_reset; /* PDC_PAT_BUG */
1434 1434
1435 #if 0 1435 #if 0
1436 printk(KERN_DEBUG "LBA %lx STAT_CTL %Lx ERROR_CFG %Lx STATUS %Lx DMA_CTL %Lx\n", 1436 printk(KERN_DEBUG "LBA %lx STAT_CTL %Lx ERROR_CFG %Lx STATUS %Lx DMA_CTL %Lx\n",
1437 d->hba.base_addr, 1437 d->hba.base_addr,
1438 READ_REG64(d->hba.base_addr + LBA_STAT_CTL), 1438 READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
1439 READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG), 1439 READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
1440 READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS), 1440 READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
1441 READ_REG64(d->hba.base_addr + LBA_DMA_CTL) ); 1441 READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
1442 printk(KERN_DEBUG " ARB mask %Lx pri %Lx mode %Lx mtlt %Lx\n", 1442 printk(KERN_DEBUG " ARB mask %Lx pri %Lx mode %Lx mtlt %Lx\n",
1443 READ_REG64(d->hba.base_addr + LBA_ARB_MASK), 1443 READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
1444 READ_REG64(d->hba.base_addr + LBA_ARB_PRI), 1444 READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
1445 READ_REG64(d->hba.base_addr + LBA_ARB_MODE), 1445 READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
1446 READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) ); 1446 READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
1447 printk(KERN_DEBUG " HINT cfg 0x%Lx\n", 1447 printk(KERN_DEBUG " HINT cfg 0x%Lx\n",
1448 READ_REG64(d->hba.base_addr + LBA_HINT_CFG)); 1448 READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
1449 printk(KERN_DEBUG " HINT reg "); 1449 printk(KERN_DEBUG " HINT reg ");
1450 { int i; 1450 { int i;
1451 for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8) 1451 for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
1452 printk(" %Lx", READ_REG64(d->hba.base_addr + i)); 1452 printk(" %Lx", READ_REG64(d->hba.base_addr + i));
1453 } 1453 }
1454 printk("\n"); 1454 printk("\n");
1455 #endif /* DEBUG_LBA_PAT */ 1455 #endif /* DEBUG_LBA_PAT */
1456 1456
1457 #ifdef CONFIG_64BIT 1457 #ifdef CONFIG_64BIT
1458 /* 1458 /*
1459 * FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support 1459 * FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
1460 * Only N-Class and up can really make use of Get slot status. 1460 * Only N-Class and up can really make use of Get slot status.
1461 * maybe L-class too but I've never played with it there. 1461 * maybe L-class too but I've never played with it there.
1462 */ 1462 */
1463 #endif 1463 #endif
1464 1464
1465 /* PDC_PAT_BUG: exhibited in rev 40.48 on L2000 */ 1465 /* PDC_PAT_BUG: exhibited in rev 40.48 on L2000 */
1466 bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1; 1466 bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
1467 if (bus_reset) { 1467 if (bus_reset) {
1468 printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n"); 1468 printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
1469 } 1469 }
1470 1470
1471 stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG); 1471 stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
1472 if (stat & LBA_SMART_MODE) { 1472 if (stat & LBA_SMART_MODE) {
1473 printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n"); 1473 printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
1474 stat &= ~LBA_SMART_MODE; 1474 stat &= ~LBA_SMART_MODE;
1475 WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG); 1475 WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
1476 } 1476 }
1477 1477
1478 /* Set HF mode as the default (vs. -1 mode). */ 1478 /* Set HF mode as the default (vs. -1 mode). */
1479 stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); 1479 stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
1480 WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); 1480 WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
1481 1481
1482 /* 1482 /*
1483 ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal 1483 ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
1484 ** if it's not already set. If we just cleared the PCI Bus Reset 1484 ** if it's not already set. If we just cleared the PCI Bus Reset
1485 ** signal, wait a bit for the PCI devices to recover and setup. 1485 ** signal, wait a bit for the PCI devices to recover and setup.
1486 */ 1486 */
1487 if (bus_reset) 1487 if (bus_reset)
1488 mdelay(pci_post_reset_delay); 1488 mdelay(pci_post_reset_delay);
1489 1489
1490 if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) { 1490 if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
1491 /* 1491 /*
1492 ** PDC_PAT_BUG: PDC rev 40.48 on L2000. 1492 ** PDC_PAT_BUG: PDC rev 40.48 on L2000.
1493 ** B2000/C3600/J6000 also have this problem? 1493 ** B2000/C3600/J6000 also have this problem?
1494 ** 1494 **
1495 ** Elroys with hot pluggable slots don't get configured 1495 ** Elroys with hot pluggable slots don't get configured
1496 ** correctly if the slot is empty. ARB_MASK is set to 0 1496 ** correctly if the slot is empty. ARB_MASK is set to 0
1497 ** and we can't master transactions on the bus unless 1497 ** and we can't master transactions on the bus unless
1498 ** at least one bit is set. 0x3 enables the elroy and the first slot. 1498 ** at least one bit is set. 0x3 enables the elroy and the first slot.
1499 */ 1499 */
1500 printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n"); 1500 printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
1501 WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK); 1501 WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
1502 } 1502 }
1503 1503
1504 /* 1504 /*
1505 ** FIXME: Hint registers are programmed with default hint 1505 ** FIXME: Hint registers are programmed with default hint
1506 ** values by firmware. Hints should be sane even if we 1506 ** values by firmware. Hints should be sane even if we
1507 ** can't reprogram them the way drivers want. 1507 ** can't reprogram them the way drivers want.
1508 */ 1508 */
1509 return 0; 1509 return 0;
1510 } 1510 }
1511 1511
1512 1512
1513 1513
1514 /* 1514 /*
1515 ** Determine if lba should claim this chip (return 0) or not (return 1). 1515 ** Determine if lba should claim this chip (return 0) or not (return 1).
1516 ** If so, initialize the chip and tell other partners in crime they 1516 ** If so, initialize the chip and tell other partners in crime they
1517 ** have work to do. 1517 ** have work to do.
1518 */ 1518 */
1519 static int __init 1519 static int __init
1520 lba_driver_probe(struct parisc_device *dev) 1520 lba_driver_probe(struct parisc_device *dev)
1521 { 1521 {
1522 struct lba_device *lba_dev; 1522 struct lba_device *lba_dev;
1523 struct pci_bus *lba_bus; 1523 struct pci_bus *lba_bus;
1524 struct pci_ops *cfg_ops; 1524 struct pci_ops *cfg_ops;
1525 u32 func_class; 1525 u32 func_class;
1526 void *tmp_obj; 1526 void *tmp_obj;
1527 char *version; 1527 char *version;
1528 void __iomem *addr = ioremap(dev->hpa.start, 4096); 1528 void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
1529 1529
1530 /* Read HW Rev First */ 1530 /* Read HW Rev First */
1531 func_class = READ_REG32(addr + LBA_FCLASS); 1531 func_class = READ_REG32(addr + LBA_FCLASS);
1532 1532
1533 if (IS_ELROY(dev)) { 1533 if (IS_ELROY(dev)) {
1534 func_class &= 0xf; 1534 func_class &= 0xf;
1535 switch (func_class) { 1535 switch (func_class) {
1536 case 0: version = "TR1.0"; break; 1536 case 0: version = "TR1.0"; break;
1537 case 1: version = "TR2.0"; break; 1537 case 1: version = "TR2.0"; break;
1538 case 2: version = "TR2.1"; break; 1538 case 2: version = "TR2.1"; break;
1539 case 3: version = "TR2.2"; break; 1539 case 3: version = "TR2.2"; break;
1540 case 4: version = "TR3.0"; break; 1540 case 4: version = "TR3.0"; break;
1541 case 5: version = "TR4.0"; break; 1541 case 5: version = "TR4.0"; break;
1542 default: version = "TR4+"; 1542 default: version = "TR4+";
1543 } 1543 }
1544 1544
1545 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n", 1545 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
1546 MODULE_NAME, version, func_class & 0xf, dev->hpa.start); 1546 MODULE_NAME, version, func_class & 0xf, dev->hpa.start);
1547 1547
1548 if (func_class < 2) { 1548 if (func_class < 2) {
1549 printk(KERN_WARNING "Can't support LBA older than " 1549 printk(KERN_WARNING "Can't support LBA older than "
1550 "TR2.1 - continuing under adversity.\n"); 1550 "TR2.1 - continuing under adversity.\n");
1551 } 1551 }
1552 1552
1553 #if 0 1553 #if 0
1554 /* Elroy TR4.0 should work with simple algorithm. 1554 /* Elroy TR4.0 should work with simple algorithm.
1555 But it doesn't. Still missing something. *sigh* 1555 But it doesn't. Still missing something. *sigh*
1556 */ 1556 */
1557 if (func_class > 4) { 1557 if (func_class > 4) {
1558 cfg_ops = &mercury_cfg_ops; 1558 cfg_ops = &mercury_cfg_ops;
1559 } else 1559 } else
1560 #endif 1560 #endif
1561 { 1561 {
1562 cfg_ops = &elroy_cfg_ops; 1562 cfg_ops = &elroy_cfg_ops;
1563 } 1563 }
1564 1564
1565 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) { 1565 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
1566 func_class &= 0xff; 1566 func_class &= 0xff;
1567 version = kmalloc(8, GFP_KERNEL); /* "TR15.15" + NUL worst case */ 1567 version = kmalloc(8, GFP_KERNEL); /* "TR15.15" + NUL worst case */
1568 snprintf(version, 8, "TR%d.%d",(func_class >> 4),(func_class & 0xf)); 1568 snprintf(version, 8, "TR%d.%d",(func_class >> 4),(func_class & 0xf));
1569 /* We could use one printk for both Elroy and Mercury, 1569 /* We could use one printk for both Elroy and Mercury,
1570 * were it not for the different mask applied to func_class. 1570 * were it not for the different mask applied to func_class.
1571 */ 1571 */
1572 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n", 1572 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
1573 MODULE_NAME, version, func_class & 0xff, dev->hpa.start); 1573 MODULE_NAME, version, func_class & 0xff, dev->hpa.start);
1574 cfg_ops = &mercury_cfg_ops; 1574 cfg_ops = &mercury_cfg_ops;
1575 } else { 1575 } else {
1576 printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa.start); 1576 printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa.start);
1577 return -ENODEV; 1577 return -ENODEV;
1578 } 1578 }
1579 1579
1580 /* 1580 /*
1581 ** Tell the I/O SAPIC driver we have an IRQ handler/region. 1581 ** Tell the I/O SAPIC driver we have an IRQ handler/region.
1582 */ 1582 */
1583 tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE); 1583 tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);
1584 1584
1585 /* NOTE: PCI devices (e.g. 103c:1005 graphics card) which don't 1585 /* NOTE: PCI devices (e.g. 103c:1005 graphics card) which don't
1586 ** have an IRT entry will get NULL back from iosapic code. 1586 ** have an IRT entry will get NULL back from iosapic code.
1587 */ 1587 */
1588 1588
1589 lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL); 1589 lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
1590 if (!lba_dev) { 1590 if (!lba_dev) {
1591 printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n"); 1591 printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
1592 return(1); 1592 return(1);
1593 } 1593 }
1594 1594
1595 1595
1596 /* ---------- First : initialize data we already have --------- */ 1596 /* ---------- First : initialize data we already have --------- */
1597 1597
1598 lba_dev->hw_rev = func_class; 1598 lba_dev->hw_rev = func_class;
1599 lba_dev->hba.base_addr = addr; 1599 lba_dev->hba.base_addr = addr;
1600 lba_dev->hba.dev = dev; 1600 lba_dev->hba.dev = dev;
1601 lba_dev->iosapic_obj = tmp_obj; /* save interrupt handle */ 1601 lba_dev->iosapic_obj = tmp_obj; /* save interrupt handle */
1602 lba_dev->hba.iommu = sba_get_iommu(dev); /* get iommu data */ 1602 lba_dev->hba.iommu = sba_get_iommu(dev); /* get iommu data */
1603 1603
1604 /* ------------ Second : initialize common stuff ---------- */ 1604 /* ------------ Second : initialize common stuff ---------- */
1605 pci_bios = &lba_bios_ops; 1605 pci_bios = &lba_bios_ops;
1606 pcibios_register_hba(HBA_DATA(lba_dev)); 1606 pcibios_register_hba(HBA_DATA(lba_dev));
1607 spin_lock_init(&lba_dev->lba_lock); 1607 spin_lock_init(&lba_dev->lba_lock);
1608 1608
1609 if (lba_hw_init(lba_dev)) 1609 if (lba_hw_init(lba_dev))
1610 return(1); 1610 return(1);
1611 1611
1612 /* ---------- Third : setup I/O Port and MMIO resources --------- */ 1612 /* ---------- Third : setup I/O Port and MMIO resources --------- */
1613 1613
1614 if (is_pdc_pat()) { 1614 if (is_pdc_pat()) {
1615 /* PDC PAT firmware uses PIOP region of GMMIO space. */ 1615 /* PDC PAT firmware uses PIOP region of GMMIO space. */
1616 pci_port = &lba_pat_port_ops; 1616 pci_port = &lba_pat_port_ops;
1617 /* Go ask PDC PAT what resources this LBA has */ 1617 /* Go ask PDC PAT what resources this LBA has */
1618 lba_pat_resources(dev, lba_dev); 1618 lba_pat_resources(dev, lba_dev);
1619 } else { 1619 } else {
1620 if (!astro_iop_base) { 1620 if (!astro_iop_base) {
1621 /* Sprockets PDC uses NPIOP region */ 1621 /* Sprockets PDC uses NPIOP region */
1622 astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024); 1622 astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
1623 pci_port = &lba_astro_port_ops; 1623 pci_port = &lba_astro_port_ops;
1624 } 1624 }
1625 1625
1626 /* Poke the chip a bit for /proc output */ 1626 /* Poke the chip a bit for /proc output */
1627 lba_legacy_resources(dev, lba_dev); 1627 lba_legacy_resources(dev, lba_dev);
1628 } 1628 }
1629 1629
1630 /* 1630 /*
1631 ** Tell PCI support another PCI bus was found. 1631 ** Tell PCI support another PCI bus was found.
1632 ** Walks PCI bus for us too. 1632 ** Walks PCI bus for us too.
1633 */ 1633 */
1634 dev->dev.platform_data = lba_dev; 1634 dev->dev.platform_data = lba_dev;
1635 lba_bus = lba_dev->hba.hba_bus = 1635 lba_bus = lba_dev->hba.hba_bus =
1636 pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start, 1636 pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
1637 cfg_ops, NULL); 1637 cfg_ops, NULL);
1638 if (lba_bus) 1638 if (lba_bus)
1639 pci_bus_add_devices(lba_bus); 1639 pci_bus_add_devices(lba_bus);
1640 1640
1641 /* This is in lieu of calling pci_assign_unassigned_resources() */ 1641 /* This is in lieu of calling pci_assign_unassigned_resources() */
1642 if (is_pdc_pat()) { 1642 if (is_pdc_pat()) {
1643 /* assign resources to un-initialized devices */ 1643 /* assign resources to un-initialized devices */
1644 1644
1645 DBG_PAT("LBA pci_bus_size_bridges()\n"); 1645 DBG_PAT("LBA pci_bus_size_bridges()\n");
1646 pci_bus_size_bridges(lba_bus); 1646 pci_bus_size_bridges(lba_bus);
1647 1647
1648 DBG_PAT("LBA pci_bus_assign_resources()\n"); 1648 DBG_PAT("LBA pci_bus_assign_resources()\n");
1649 pci_bus_assign_resources(lba_bus); 1649 pci_bus_assign_resources(lba_bus);
1650 1650
1651 #ifdef DEBUG_LBA_PAT 1651 #ifdef DEBUG_LBA_PAT
1652 DBG_PAT("\nLBA PIOP resource tree\n"); 1652 DBG_PAT("\nLBA PIOP resource tree\n");
1653 lba_dump_res(&lba_dev->hba.io_space, 2); 1653 lba_dump_res(&lba_dev->hba.io_space, 2);
1654 DBG_PAT("\nLBA LMMIO resource tree\n"); 1654 DBG_PAT("\nLBA LMMIO resource tree\n");
1655 lba_dump_res(&lba_dev->hba.lmmio_space, 2); 1655 lba_dump_res(&lba_dev->hba.lmmio_space, 2);
1656 #endif 1656 #endif
1657 } 1657 }
1658 pci_enable_bridges(lba_bus); 1658 pci_enable_bridges(lba_bus);
1659 1659
1660 1660
1661 /* 1661 /*
1662 ** Once PCI register ops has walked the bus, access to config 1662 ** Once PCI register ops has walked the bus, access to config
1663 ** space is restricted. Avoids master aborts on config cycles. 1663 ** space is restricted. Avoids master aborts on config cycles.
1664 ** Early LBA revs go fatal on *any* master abort. 1664 ** Early LBA revs go fatal on *any* master abort.
1665 */ 1665 */
1666 if (cfg_ops == &elroy_cfg_ops) { 1666 if (cfg_ops == &elroy_cfg_ops) {
1667 lba_dev->flags |= LBA_FLAG_SKIP_PROBE; 1667 lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
1668 } 1668 }
1669 1669
1670 /* Whew! Finally done! Tell services we got this one covered. */ 1670 /* Whew! Finally done! Tell services we got this one covered. */
1671 return 0; 1671 return 0;
1672 } 1672 }
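As a quick check on the Mercury/Quicksilver version decode above: func_class carries the revision as two nibbles, so a value of 0x21 prints as TR2.1. A worked example (not driver code):

u32 func_class = 0x21;		/* example value read from LBA_FCLASS */
char version[8];

snprintf(version, sizeof(version), "TR%d.%d",
	(func_class >> 4) & 0xf,	/* major: 2 */
	func_class & 0xf);		/* minor: 1 */
/* version now holds "TR2.1" */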
1673 1673
1674 static struct parisc_device_id lba_tbl[] = { 1674 static struct parisc_device_id lba_tbl[] = {
1675 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa }, 1675 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
1676 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa }, 1676 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
1677 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa }, 1677 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
1678 { 0, } 1678 { 0, }
1679 }; 1679 };
1680 1680
1681 static struct parisc_driver lba_driver = { 1681 static struct parisc_driver lba_driver = {
1682 .name = MODULE_NAME, 1682 .name = MODULE_NAME,
1683 .id_table = lba_tbl, 1683 .id_table = lba_tbl,
1684 .probe = lba_driver_probe, 1684 .probe = lba_driver_probe,
1685 }; 1685 };
1686 1686
1687 /* 1687 /*
1688 ** One time initialization to let the world know the LBA was found. 1688 ** One time initialization to let the world know the LBA was found.
1689 ** Must be called exactly once before pci_init(). 1689 ** Must be called exactly once before pci_init().
1690 */ 1690 */
1691 void __init lba_init(void) 1691 void __init lba_init(void)
1692 { 1692 {
1693 register_parisc_driver(&lba_driver); 1693 register_parisc_driver(&lba_driver);
1694 } 1694 }
1695 1695
1696 /* 1696 /*
1697 ** Initialize the IBASE/IMASK registers for LBA (Elroy). 1697 ** Initialize the IBASE/IMASK registers for LBA (Elroy).
1698 ** Only called from sba_iommu.c in order to route ranges (MMIO vs DMA). 1698 ** Only called from sba_iommu.c in order to route ranges (MMIO vs DMA).
1699 ** sba_iommu is responsible for locking (none needed at init time). 1699 ** sba_iommu is responsible for locking (none needed at init time).
1700 */ 1700 */
1701 void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask) 1701 void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
1702 { 1702 {
1703 void __iomem * base_addr = ioremap(lba->hpa.start, 4096); 1703 void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);
1704 1704
1705 imask <<= 2; /* adjust for hints - 2 more bits */ 1705 imask <<= 2; /* adjust for hints - 2 more bits */
1706 1706
1707 /* Make sure we aren't trying to set bits that aren't writeable. */ 1707 /* Make sure we aren't trying to set bits that aren't writeable. */
1708 WARN_ON((ibase & 0x001fffff) != 0); 1708 WARN_ON((ibase & 0x001fffff) != 0);
1709 WARN_ON((imask & 0x001fffff) != 0); 1709 WARN_ON((imask & 0x001fffff) != 0);
1710 1710
1711 DBG("%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask); 1711 DBG("%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask);
1712 WRITE_REG32( imask, base_addr + LBA_IMASK); 1712 WRITE_REG32( imask, base_addr + LBA_IMASK);
1713 WRITE_REG32( ibase, base_addr + LBA_IBASE); 1713 WRITE_REG32( ibase, base_addr + LBA_IBASE);
1714 iounmap(base_addr); 1714 iounmap(base_addr);
1715 } 1715 }
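lba_set_iregs() is a compact instance of the pattern this commit enforces throughout: map the F-space registers uncached for the short time they are needed, then unmap. A minimal sketch of the idiom (hpa_start, value and REG_OFFSET are placeholders, not driver symbols):

/* Cached accesses to parisc I/O space can be stale or coalesced,
** hence ioremap_nocache() rather than plain ioremap(). */
void __iomem *base = ioremap_nocache(hpa_start, 4096);

if (base) {
	WRITE_REG32(value, base + REG_OFFSET);
	iounmap(base);
}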
1716 1716
1717 1717
drivers/parisc/sba_iommu.c
1 /* 1 /*
2 ** System Bus Adapter (SBA) I/O MMU manager 2 ** System Bus Adapter (SBA) I/O MMU manager
3 ** 3 **
4 ** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org> 4 ** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
5 ** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com> 5 ** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
6 ** (c) Copyright 2000-2004 Hewlett-Packard Company 6 ** (c) Copyright 2000-2004 Hewlett-Packard Company
7 ** 7 **
8 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) 8 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
9 ** 9 **
10 ** This program is free software; you can redistribute it and/or modify 10 ** This program is free software; you can redistribute it and/or modify
11 ** it under the terms of the GNU General Public License as published by 11 ** it under the terms of the GNU General Public License as published by
12 ** the Free Software Foundation; either version 2 of the License, or 12 ** the Free Software Foundation; either version 2 of the License, or
13 ** (at your option) any later version. 13 ** (at your option) any later version.
14 ** 14 **
15 ** 15 **
16 ** This module initializes the IOC (I/O Controller) found on B1000/C3000/ 16 ** This module initializes the IOC (I/O Controller) found on B1000/C3000/
17 ** J5000/J7000/N-class/L-class machines and their successors. 17 ** J5000/J7000/N-class/L-class machines and their successors.
18 ** 18 **
19 ** FIXME: add DMA hint support programming in both sba and lba modules. 19 ** FIXME: add DMA hint support programming in both sba and lba modules.
20 */ 20 */
21 21
22 #include <linux/config.h> 22 #include <linux/config.h>
23 #include <linux/types.h> 23 #include <linux/types.h>
24 #include <linux/kernel.h> 24 #include <linux/kernel.h>
25 #include <linux/spinlock.h> 25 #include <linux/spinlock.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 28
29 #include <linux/mm.h> 29 #include <linux/mm.h>
30 #include <linux/string.h> 30 #include <linux/string.h>
31 #include <linux/pci.h> 31 #include <linux/pci.h>
32 32
33 #include <asm/byteorder.h> 33 #include <asm/byteorder.h>
34 #include <asm/io.h> 34 #include <asm/io.h>
35 #include <asm/dma.h> /* for DMA_CHUNK_SIZE */ 35 #include <asm/dma.h> /* for DMA_CHUNK_SIZE */
36 36
37 #include <asm/hardware.h> /* for register_parisc_driver() stuff */ 37 #include <asm/hardware.h> /* for register_parisc_driver() stuff */
38 38
39 #include <linux/proc_fs.h> 39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h> 40 #include <linux/seq_file.h>
41 41
42 #include <asm/runway.h> /* for proc_runway_root */ 42 #include <asm/runway.h> /* for proc_runway_root */
43 #include <asm/pdc.h> /* for PDC_MODEL_* */ 43 #include <asm/pdc.h> /* for PDC_MODEL_* */
44 #include <asm/pdcpat.h> /* for is_pdc_pat() */ 44 #include <asm/pdcpat.h> /* for is_pdc_pat() */
45 #include <asm/parisc-device.h> 45 #include <asm/parisc-device.h>
46 46
47 47
48 /* declared in arch/parisc/kernel/setup.c */ 48 /* declared in arch/parisc/kernel/setup.c */
49 extern struct proc_dir_entry * proc_mckinley_root; 49 extern struct proc_dir_entry * proc_mckinley_root;
50 50
51 #define MODULE_NAME "SBA" 51 #define MODULE_NAME "SBA"
52 52
53 #ifdef CONFIG_PROC_FS 53 #ifdef CONFIG_PROC_FS
54 /* depends on proc fs support. But costs CPU performance */ 54 /* depends on proc fs support. But costs CPU performance */
55 #undef SBA_COLLECT_STATS 55 #undef SBA_COLLECT_STATS
56 #endif 56 #endif
57 57
58 /* 58 /*
59 ** The number of debug flags is a clue - this code is fragile. 59 ** The number of debug flags is a clue - this code is fragile.
60 ** Don't even think about messing with it unless you have 60 ** Don't even think about messing with it unless you have
61 ** plenty of 710's to sacrifice to the computer gods. :^) 61 ** plenty of 710's to sacrifice to the computer gods. :^)
62 */ 62 */
63 #undef DEBUG_SBA_INIT 63 #undef DEBUG_SBA_INIT
64 #undef DEBUG_SBA_RUN 64 #undef DEBUG_SBA_RUN
65 #undef DEBUG_SBA_RUN_SG 65 #undef DEBUG_SBA_RUN_SG
66 #undef DEBUG_SBA_RESOURCE 66 #undef DEBUG_SBA_RESOURCE
67 #undef ASSERT_PDIR_SANITY 67 #undef ASSERT_PDIR_SANITY
68 #undef DEBUG_LARGE_SG_ENTRIES 68 #undef DEBUG_LARGE_SG_ENTRIES
69 #undef DEBUG_DMB_TRAP 69 #undef DEBUG_DMB_TRAP
70 70
71 #ifdef DEBUG_SBA_INIT 71 #ifdef DEBUG_SBA_INIT
72 #define DBG_INIT(x...) printk(x) 72 #define DBG_INIT(x...) printk(x)
73 #else 73 #else
74 #define DBG_INIT(x...) 74 #define DBG_INIT(x...)
75 #endif 75 #endif
76 76
77 #ifdef DEBUG_SBA_RUN 77 #ifdef DEBUG_SBA_RUN
78 #define DBG_RUN(x...) printk(x) 78 #define DBG_RUN(x...) printk(x)
79 #else 79 #else
80 #define DBG_RUN(x...) 80 #define DBG_RUN(x...)
81 #endif 81 #endif
82 82
83 #ifdef DEBUG_SBA_RUN_SG 83 #ifdef DEBUG_SBA_RUN_SG
84 #define DBG_RUN_SG(x...) printk(x) 84 #define DBG_RUN_SG(x...) printk(x)
85 #else 85 #else
86 #define DBG_RUN_SG(x...) 86 #define DBG_RUN_SG(x...)
87 #endif 87 #endif
88 88
89 89
90 #ifdef DEBUG_SBA_RESOURCE 90 #ifdef DEBUG_SBA_RESOURCE
91 #define DBG_RES(x...) printk(x) 91 #define DBG_RES(x...) printk(x)
92 #else 92 #else
93 #define DBG_RES(x...) 93 #define DBG_RES(x...)
94 #endif 94 #endif
95 95
96 #if defined(CONFIG_64BIT) 96 #if defined(CONFIG_64BIT)
97 /* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */ 97 /* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
98 #define ZX1_SUPPORT 98 #define ZX1_SUPPORT
99 #endif 99 #endif
100 100
101 #define SBA_INLINE __inline__ 101 #define SBA_INLINE __inline__
102 102
103 103
104 /* 104 /*
105 ** The number of pdir entries to "free" before issuing 105 ** The number of pdir entries to "free" before issuing
106 ** a read of the PCOM register to flush out PCOM writes. 106 ** a read of the PCOM register to flush out PCOM writes.
107 ** Interacts with allocation granularity (ie 4 or 8 entries 107 ** Interacts with allocation granularity (ie 4 or 8 entries
108 ** allocated and free'd/purged at a time might make this 108 ** allocated and free'd/purged at a time might make this
109 ** less interesting). 109 ** less interesting).
110 */ 110 */
111 #define DELAYED_RESOURCE_CNT 16 111 #define DELAYED_RESOURCE_CNT 16
112 112
113 #define DEFAULT_DMA_HINT_REG 0 113 #define DEFAULT_DMA_HINT_REG 0
114 114
115 #define ASTRO_RUNWAY_PORT 0x582 115 #define ASTRO_RUNWAY_PORT 0x582
116 #define IKE_MERCED_PORT 0x803 116 #define IKE_MERCED_PORT 0x803
117 #define REO_MERCED_PORT 0x804 117 #define REO_MERCED_PORT 0x804
118 #define REOG_MERCED_PORT 0x805 118 #define REOG_MERCED_PORT 0x805
119 #define PLUTO_MCKINLEY_PORT 0x880 119 #define PLUTO_MCKINLEY_PORT 0x880
120 120
121 #define SBA_FUNC_ID 0x0000 /* function id */ 121 #define SBA_FUNC_ID 0x0000 /* function id */
122 #define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */ 122 #define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
123 123
124 #define IS_ASTRO(id) ((id)->hversion == ASTRO_RUNWAY_PORT) 124 #define IS_ASTRO(id) ((id)->hversion == ASTRO_RUNWAY_PORT)
125 #define IS_IKE(id) ((id)->hversion == IKE_MERCED_PORT) 125 #define IS_IKE(id) ((id)->hversion == IKE_MERCED_PORT)
126 #define IS_PLUTO(id) ((id)->hversion == PLUTO_MCKINLEY_PORT) 126 #define IS_PLUTO(id) ((id)->hversion == PLUTO_MCKINLEY_PORT)
127 127
128 #define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */ 128 #define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
129 129
130 #define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE) 130 #define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
131 #define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE) 131 #define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
132 /* Ike's IOC's occupy functions 2 and 3 */ 132 /* Ike's IOC's occupy functions 2 and 3 */
133 #define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE) 133 #define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
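
/*
** Illustration only (not part of the original driver): IKE_IOC_OFFSET()
** maps IOC number p onto Ike functions 2 and 3, so IOC 0 sits at
** 2 * 4096 = 0x2000 and IOC 1 at 3 * 4096 = 0x3000 from the chip base.
*/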

#define IOC_CTRL        0x8     /* IOC_CTRL offset */
#define IOC_CTRL_TC     (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE     (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_DE     (1 << 2) /* Dillon Enable */
#define IOC_CTRL_RM     (1 << 8) /* Real Mode */
#define IOC_CTRL_NC     (1 << 9) /* Non Coherent Mode */
#define IOC_CTRL_D4     (1 << 11) /* Disable 4-byte coalescing */
#define IOC_CTRL_DD     (1 << 13) /* Disable distr. LMMIO range coalescing */

#define MAX_IOC         2       /* per Ike. Pluto/Astro only have 1. */

#define ROPES_PER_IOC   8       /* per Ike half or Pluto/Astro */


/*
** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
** Firmware programs this stuff. Don't touch it.
*/
#define LMMIO_DIRECT0_BASE  0x300
#define LMMIO_DIRECT0_MASK  0x308
#define LMMIO_DIRECT0_ROUTE 0x310

#define LMMIO_DIST_BASE  0x360
#define LMMIO_DIST_MASK  0x368
#define LMMIO_DIST_ROUTE 0x370

#define IOS_DIST_BASE   0x390
#define IOS_DIST_MASK   0x398
#define IOS_DIST_ROUTE  0x3A0

#define IOS_DIRECT_BASE  0x3C0
#define IOS_DIRECT_MASK  0x3C8
#define IOS_DIRECT_ROUTE 0x3D0

/*
** Offsets into I/O TLB (Function 2 and 3 on Ike)
*/
#define ROPE0_CTL       0x200   /* "regbus pci0" */
#define ROPE1_CTL       0x208
#define ROPE2_CTL       0x210
#define ROPE3_CTL       0x218
#define ROPE4_CTL       0x220
#define ROPE5_CTL       0x228
#define ROPE6_CTL       0x230
#define ROPE7_CTL       0x238

#define HF_ENABLE       0x40


#define IOC_IBASE       0x300   /* IO TLB */
#define IOC_IMASK       0x308
#define IOC_PCOM        0x310
#define IOC_TCNFG       0x318
#define IOC_PDIR_BASE   0x320

/* AGP GART driver looks for this */
#define SBA_IOMMU_COOKIE        0x0000badbadc0ffeeUL


/*
** The IOC supports 4/8/16/64KB page sizes (see the TCNFG register).
** It's safer (avoids memory corruption) to keep DMA page mappings
** the same size as the VM PAGE_SIZE.
**
** We really can't avoid generating a new mapping for each
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
*/
#define IOVP_SIZE       PAGE_SIZE
#define IOVP_SHIFT      PAGE_SHIFT
#define IOVP_MASK       PAGE_MASK

#define SBA_PERF_CFG    0x708   /* Performance Counter stuff */
#define SBA_PERF_MASK1  0x718
#define SBA_PERF_MASK2  0x730


/*
** Offsets into PCI Performance Counters (functions 12 and 13)
** Controlled by PERF registers in functions 2 and 3 respectively.
*/
#define SBA_PERF_CNT1   0x200
#define SBA_PERF_CNT2   0x208
#define SBA_PERF_CNT3   0x210


struct ioc {
        void __iomem    *ioc_hpa;       /* I/O MMU base address */
        char            *res_map;       /* resource map, bit == pdir entry */
        u64             *pdir_base;     /* physical base address */
        unsigned long   ibase;  /* pdir IOV Space base - shared w/lba_pci */
        unsigned long   imask;  /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
        unsigned long   iovp_mask;      /* help convert IOVA to IOVP */
#endif
        unsigned long   *res_hint;      /* next avail IOVP - circular search */
        spinlock_t      res_lock;
        unsigned int    res_bitshift;   /* from the LEFT! */
        unsigned int    res_size;       /* size of resource map in bytes */
#ifdef SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
        unsigned long   hint_mask_pdir; /* bits used for DMA hints */
        unsigned int    hint_shift_pdir;
#endif
#if DELAYED_RESOURCE_CNT > 0
        int             saved_cnt;
        struct sba_dma_pair {
                dma_addr_t      iova;
                size_t          size;
        } saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef SBA_COLLECT_STATS
#define SBA_SEARCH_SAMPLE       0x100
        unsigned long avg_search[SBA_SEARCH_SAMPLE];
        unsigned long avg_idx;  /* current index into avg_search */
        unsigned long used_pages;
        unsigned long msingle_calls;
        unsigned long msingle_pages;
        unsigned long msg_calls;
        unsigned long msg_pages;
        unsigned long usingle_calls;
        unsigned long usingle_pages;
        unsigned long usg_calls;
        unsigned long usg_pages;
#endif

        /* STUFF we don't need in the performance path */
        unsigned int    pdir_size; /* in bytes, determined by IOV Space size */
};

struct sba_device {
        struct sba_device       *next;  /* list of SBAs in system */
        struct parisc_device    *dev;   /* dev found in bus walk */
        struct parisc_device_id *iodc;  /* data about dev from firmware */
        const char              *name;
        void __iomem            *sba_hpa; /* base address */
        spinlock_t              sba_lock;
        unsigned int            flags;  /* state/functionality enabled */
        unsigned int            hw_rev; /* HW revision of chip */

        struct resource         chip_resv; /* MMIO reserved for chip */
        struct resource         iommu_resv; /* MMIO reserved for iommu */

        unsigned int            num_ioc; /* number of on-board IOCs */
        struct ioc              ioc[MAX_IOC];
};


static struct sba_device *sba_list;

static unsigned long ioc_needs_fdc = 0;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef SBA_AGP_SUPPORT
static int reserve_sba_gart = 1;
#endif

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
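
/*
** Illustration only: ROUNDUP() rounds x up to the next multiple of y,
** where y must be a power of two; adding y-1 and masking with ~(y-1)
** clears the low-order bits.  E.g. ROUNDUP(0x1234, 0x1000) == 0x2000,
** while ROUNDUP(0x1000, 0x1000) stays 0x1000.
*/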


/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (i.e. follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)        le32_to_cpu(__raw_readl(addr))
#define READ_REG64(addr)        le64_to_cpu(__raw_readq(addr))
#define WRITE_REG32(val, addr)  __raw_writel(cpu_to_le32(val), addr)
#define WRITE_REG64(val, addr)  __raw_writeq(cpu_to_le64(val), addr)

#ifdef CONFIG_64BIT
#define READ_REG(addr)          READ_REG64(addr)
#define WRITE_REG(value, addr)  WRITE_REG64(value, addr)
#else
#define READ_REG(addr)          READ_REG32(addr)
#define WRITE_REG(value, addr)  WRITE_REG32(value, addr)
#endif
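
/*
** Sketch only (not part of this driver): since writes are posted, a
** write that must reach the chip before execution continues is chased
** with a read of the same register.  A helper might look like:
*/
#if 0
static void sba_write_and_flush(u64 val, void __iomem *addr)
{
        WRITE_REG(val, addr);   /* posted: may still be in flight */
        READ_REG(addr);         /* reading back forces completion */
}
#endif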

#ifdef DEBUG_SBA_INIT

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
        DBG_INIT("SBA at 0x%p\n", hpa);
        DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
        DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
        DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
        DBG_INIT("\n");
        DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
        DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
        DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
        DBG_INIT("IO TLB at 0x%p\n", hpa);
        DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
        DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
        DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
        DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
        DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif  /* DEBUG_SBA_INIT */


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
        /* start printing from lowest pde in rval */
        u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
        unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
        uint rcnt;

        printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
                msg,
                rptr, pide & (BITS_PER_LONG - 1), *rptr);

        rcnt = 0;
        while (rcnt < BITS_PER_LONG) {
                printk(KERN_DEBUG "%s %2d %p %016Lx\n",
                        (rcnt == (pide & (BITS_PER_LONG - 1)))
                                ? "    -->" : "       ",
                        rcnt, ptr, *ptr );
                rcnt++;
                ptr++;
        }
        printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify that the resource map and pdir state are consistent.
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
        u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
        u32 *rptr = (u32 *) ioc->res_map;       /* resource map ptr */
        u64 *pptr = ioc->pdir_base;             /* pdir ptr */
        uint pide = 0;

        while (rptr < rptr_end) {
                u32 rval = *rptr;
                int rcnt = 32;  /* number of bits we might check */

                while (rcnt) {
                        /* Get last byte and highest bit from that */
                        u32 pde = ((u32) (((char *)pptr)[7])) << 24;
                        if ((rval ^ pde) & 0x80000000)
                        {
                                /*
                                ** BUMMER!  -- res_map != pdir --
                                ** Dump rval and matching pdir entries
                                */
                                sba_dump_pdir_entry(ioc, msg, pide);
                                return(1);
                        }
                        rcnt--;
                        rval <<= 1;     /* try the next bit */
                        pptr++;
                        pide++;
                }
                rptr++; /* look at next word of res_map */
        }
        /* It'd be nice if we always got here :^) */
        return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * Print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
        while (nents-- > 0) {
                printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
                                nents,
                                (unsigned long) sg_dma_address(startsg),
                                sg_dma_len(startsg),
                                sg_virt_addr(startsg), startsg->length);
                startsg++;
        }
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*  I/O Pdir Resource Management
*
*  Bits set in the resource map are in use.
*  Each bit can represent a number of pages.
*  LSbs represent lower addresses (IOVAs).
*
***************************************************************/
#define PAGES_PER_RANGE 1       /* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif
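
/*
** Illustration only, with assumed values: under ZX1_SUPPORT, if ibase
** were 0xc0000000 and iovp_mask 0x3fffffff, then
** SBA_IOVA(ioc, 0x5000, 0x234, 0) == 0xc0005234 and
** SBA_IOVP(ioc, 0xc0005234) == 0x5234 -- SBA_IOVP() simply strips the
** ibase bits back off.  The real values come from firmware.
*/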

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
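
/*
** Illustration only: RESMAP_MASK() builds an n-bit mask aligned to the
** *left* edge of the word, matching the "from the LEFT!" res_bitshift
** convention.  On a 64-bit kernel, RESMAP_MASK(4) ==
** 0xf000000000000000UL; shifting it right by res_bitshift slides the
** candidate window across the word one alignment step at a time.
*/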


/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
{
        unsigned long *res_ptr = ioc->res_hint;
        unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
        unsigned long pide = ~0UL;

        if (bits_wanted > (BITS_PER_LONG/2)) {
                /* Search word at a time - no mask needed */
                for(; res_ptr < res_end; ++res_ptr) {
                        if (*res_ptr == 0) {
                                *res_ptr = RESMAP_MASK(bits_wanted);
                                pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
                                pide <<= 3;     /* convert to bit address */
                                break;
                        }
                }
                /* point to the next word on next pass */
                res_ptr++;
                ioc->res_bitshift = 0;
        } else {
                /*
                ** Search the resource bit map on well-aligned values.
                ** "o" is the alignment.
                ** We need the alignment to invalidate I/O TLB using
                ** SBA HW features in the unmap path.
                */
                unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
                uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
                unsigned long mask;

                if (bitshiftcnt >= BITS_PER_LONG) {
                        bitshiftcnt = 0;
                        res_ptr++;
                }
                mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

                DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
                while(res_ptr < res_end)
                {
                        DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
                        WARN_ON(mask == 0);
                        if(((*res_ptr) & mask) == 0) {
                                *res_ptr |= mask;     /* mark resources busy! */
                                pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
                                pide <<= 3;     /* convert to bit address */
                                pide += bitshiftcnt;
                                break;
                        }
                        mask >>= o;
                        bitshiftcnt += o;
                        if (mask == 0) {
                                mask = RESMAP_MASK(bits_wanted);
                                bitshiftcnt=0;
                                res_ptr++;
                        }
                }
                /* look in the same word on the next pass */
                ioc->res_bitshift = bitshiftcnt + bits_wanted;
        }

        /* wrapped ? */
        if (res_end <= res_ptr) {
                ioc->res_hint = (unsigned long *) ioc->res_map;
                ioc->res_bitshift = 0;
        } else {
                ioc->res_hint = res_ptr;
        }
        return (pide);
}
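
/*
** Worked example, values assumed: with 4kB pages, bits_wanted = 3
** gives o = 1 << get_order(3 << 12) = 4, so the mask is only tried at
** bit offsets 0, 4, 8, ... within each word.  That alignment is what
** later allows sba_mark_invalid() to purge the whole range with one
** power-of-two sized PCOM write.
*/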


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits
 * in the resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
        unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
        unsigned long cr_start = mfctl(16);
#endif
        unsigned long pide;

        pide = sba_search_bitmap(ioc, pages_needed);
        if (pide >= (ioc->res_size << 3)) {
                pide = sba_search_bitmap(ioc, pages_needed);
                if (pide >= (ioc->res_size << 3))
                        panic("%s: I/O MMU @ %p is out of mapping resources\n",
                              __FILE__, ioc->ioc_hpa);
        }

#ifdef ASSERT_PDIR_SANITY
        /* verify the first enable bit is clear */
        if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
                sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
        }
#endif

        DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
                __FUNCTION__, size, pages_needed, pide,
                (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
                ioc->res_bitshift );

#ifdef SBA_COLLECT_STATS
        {
                unsigned long cr_end = mfctl(16);
                unsigned long tmp = cr_end - cr_start;
                /* check for roll over */
                cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
        }
        ioc->avg_search[ioc->avg_idx++] = cr_start;
        ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

        ioc->used_pages += pages_needed;
#endif

        return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes the mapping covers.
 *
 * Clear bits in the ioc's resource map.
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
        unsigned long iovp = SBA_IOVP(ioc, iova);
        unsigned int pide = PDIR_INDEX(iovp);
        unsigned int ridx = pide >> 3;  /* convert bit to byte address */
        unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

        int bits_not_wanted = size >> IOVP_SHIFT;

        /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
        unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

        DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
                __FUNCTION__, (uint) iova, size,
                bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
        ioc->used_pages -= bits_not_wanted;
#endif

        *res_ptr &= ~m;
}


/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0).
 * Using the bass-ackwards HP bit numbering, each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, but supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|        U               |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */

void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
                  unsigned long hint)
{
        u64 pa;                 /* physical address */
        register unsigned ci;   /* coherent index */

        pa = virt_to_phys(vba);
        pa &= IOVP_MASK;

        mtsp(sid,1);
        asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
        pa |= (ci >> 12) & 0xff;  /* move CI (8 bits) into lowest byte */

        pa |= 0x8000000000000000ULL;    /* set "valid" bit */
        *pdir_ptr = cpu_to_le64(pa);    /* swap and store into I/O Pdir */

        /*
         * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
         * (bit #61, big endian), we have to flush and sync every time
         * IO-PDIR is changed in Ike/Astro.
         */
        if (ioc_needs_fdc)
                asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
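
/*
** Worked example (values assumed): a 4kB kernel page at physical
** 0x12345000 whose LCI result puts 0x5a in the low byte produces
**     0x8000000000000000 | 0x12345000 | 0x5a == 0x800000001234505a
** which cpu_to_le64() byte-swaps on store, leaving the "valid" byte
** (0x80) at offset 7 -- exactly where sba_mark_invalid() and the
** ASSERT_PDIR_SANITY checks look for it.
*/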


/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
        u32 iovp = (u32) SBA_IOVP(ioc, iova);
        u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
        /* Assert first pdir entry is set.
        **
        ** Even though this is a big-endian machine, the entries
        ** in the iopdir are little endian. That's why we look at
        ** the byte at +7 instead of at +0.
        */
        if (0x80 != (((u8 *) pdir_ptr)[7])) {
                sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
        }
#endif

        if (byte_cnt > IOVP_SIZE)
        {
#if 0
                unsigned long entries_per_cacheline = ioc_needs_fdc ?
                                L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
                                        - (unsigned long) pdir_ptr
                                : 262144;
#endif

                /* set "size" field for PCOM */
                iovp |= get_order(byte_cnt) + PAGE_SHIFT;

                do {
                        /* clear I/O Pdir entry "valid" bit first */
                        ((u8 *) pdir_ptr)[7] = 0;
                        if (ioc_needs_fdc) {
                                asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
                                entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
                        }
                        pdir_ptr++;
                        byte_cnt -= IOVP_SIZE;
                } while (byte_cnt > IOVP_SIZE);
        } else
                iovp |= IOVP_SHIFT;     /* set "size" field for PCOM */

        /*
        ** clear I/O PDIR entry "valid" bit.
        ** We have to R/M/W the cacheline regardless of how much of the
        ** pdir entry we clobber.
        ** The rest of the entry would be useful for debugging if we
        ** could dump core on HPMC.
        */
        ((u8 *) pdir_ptr)[7] = 0;
        if (ioc_needs_fdc)
                asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

        WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
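
/*
** Worked example (4kB pages assumed): purging a 16kB mapping gives
** get_order(16384) + PAGE_SHIFT = 2 + 12 = 14, so the low bits of the
** address written to IOC_PCOM encode log2 of the purge size
** (1 << 14 == 16kB).  This is why the allocator keeps ranges
** size-aligned.
*/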

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask: number of address bits this PCI device can handle
 *
 * See Documentation/DMA-mapping.txt
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
        struct ioc *ioc;

        if (dev == NULL) {
                printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
                BUG();
                return(0);
        }

        /* Documentation/DMA-mapping.txt tells drivers to try 64-bit first,
         * then fall back to 32-bit if that fails.
         * We are just "encouraging" 32-bit DMA masks here since we can
         * never allow IOMMU bypass unless we add special support for ZX1.
         */
        if (mask > ~0U)
                return 0;

        ioc = GET_IOC(dev);

        /*
         * Check if the mask is >= the current max IO Virt Address.
         * The max IO Virt address will *always* be < 30 bits.
         */
        return((int)(mask >= (ioc->ibase - 1 +
                        (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}
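
/*
** Illustration only, sizes assumed: the bound above is
** ibase + IOV space size - 1.  A 512kB pdir holds 65536 u64 entries,
** i.e. 65536 * 4kB == 256MB of IOV space -- comfortably below the
** 30-bit ceiling the comment promises.
*/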


/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
               enum dma_data_direction direction)
{
        struct ioc *ioc;
        unsigned long flags;
        dma_addr_t iovp;
        dma_addr_t offset;
        u64 *pdir_start;
        int pide;

        ioc = GET_IOC(dev);

        /* save offset bits */
        offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

        /* round up to nearest IOVP_SIZE */
        size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

        spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
        sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
        ioc->msingle_calls++;
        ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
        pide = sba_alloc_range(ioc, size);
        iovp = (dma_addr_t) pide << IOVP_SHIFT;

        DBG_RUN("%s() 0x%p -> 0x%lx\n",
                __FUNCTION__, addr, (long) iovp | offset);

        pdir_start = &(ioc->pdir_base[pide]);

        while (size > 0) {
                sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

                DBG_RUN("        pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
                        pdir_start,
                        (u8) (((u8 *) pdir_start)[7]),
                        (u8) (((u8 *) pdir_start)[6]),
                        (u8) (((u8 *) pdir_start)[5]),
                        (u8) (((u8 *) pdir_start)[4]),
                        (u8) (((u8 *) pdir_start)[3]),
                        (u8) (((u8 *) pdir_start)[2]),
                        (u8) (((u8 *) pdir_start)[1]),
                        (u8) (((u8 *) pdir_start)[0])
                        );

                addr += IOVP_SIZE;
                size -= IOVP_SIZE;
                pdir_start++;
        }

        /* force FDC ops in io_pdir_entry() to be visible to IOMMU */
        if (ioc_needs_fdc)
                asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
        sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
        spin_unlock_irqrestore(&ioc->res_lock, flags);

        /* form complete address */
        return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
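
/*
** Usage sketch only -- "dev", "buf" and "len" are assumed to exist in
** some hypothetical caller; real drivers go through the DMA API
** rather than calling these static routines directly.
*/
#if 0
        dma_addr_t iova;

        iova = sba_map_single(dev, buf, len, DMA_TO_DEVICE);
        /* ... program the device with "iova" and wait for the DMA ... */
        sba_unmap_single(dev, iova, len, DMA_TO_DEVICE);
#endif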


/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
                 enum dma_data_direction direction)
{
        struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
        struct sba_dma_pair *d;
#endif
        unsigned long flags;
        dma_addr_t offset;

        DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);

        ioc = GET_IOC(dev);
        offset = iova & ~IOVP_MASK;
        iova ^= offset;        /* clear offset bits */
        size += offset;
        size = ROUNDUP(size, IOVP_SIZE);

        spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
        ioc->usingle_calls++;
        ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

        sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
        /* Delaying when we re-use an IO Pdir entry reduces the number
         * of MMIO reads needed to flush writes to the PCOM register.
         */
        d = &(ioc->saved[ioc->saved_cnt]);
        d->iova = iova;
        d->size = size;
        if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
                int cnt = ioc->saved_cnt;
                while (cnt--) {
                        sba_free_range(ioc, d->iova, d->size);
                        d--;
                }
                ioc->saved_cnt = 0;

                READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
        }
#else /* DELAYED_RESOURCE_CNT == 0 */
        sba_free_range(ioc, iova, size);

        /* If fdc's were issued, force fdc's to be visible now */
        if (ioc_needs_fdc)
                asm volatile("sync" : : );

        READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

        spin_unlock_irqrestore(&ioc->res_lock, flags);

        /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
        ** For Astro based systems this isn't a big deal WRT performance.
        ** As long as 2.4 kernels copyin/copyout data from/to userspace,
        ** we don't need the syncdma. The issue here is I/O MMU cachelines
        ** are *not* coherent in all cases. May be hwrev dependent.
        ** Need to investigate more.
        asm volatile("syncdma");
        */
}
1018 1018
1019 1019
1020 /** 1020 /**
1021 * sba_alloc_consistent - allocate/map shared mem for DMA 1021 * sba_alloc_consistent - allocate/map shared mem for DMA
1022 * @hwdev: instance of PCI owned by the driver that's asking. 1022 * @hwdev: instance of PCI owned by the driver that's asking.
1023 * @size: number of bytes mapped in driver buffer. 1023 * @size: number of bytes mapped in driver buffer.
1024 * @dma_handle: IOVA of new buffer. 1024 * @dma_handle: IOVA of new buffer.
1025 * 1025 *
1026 * See Documentation/DMA-mapping.txt 1026 * See Documentation/DMA-mapping.txt
1027 */ 1027 */
1028 static void *sba_alloc_consistent(struct device *hwdev, size_t size, 1028 static void *sba_alloc_consistent(struct device *hwdev, size_t size,
1029 dma_addr_t *dma_handle, gfp_t gfp) 1029 dma_addr_t *dma_handle, gfp_t gfp)
1030 { 1030 {
1031 void *ret; 1031 void *ret;
1032 1032
1033 if (!hwdev) { 1033 if (!hwdev) {
1034 /* only support PCI */ 1034 /* only support PCI */
1035 *dma_handle = 0; 1035 *dma_handle = 0;
1036 return 0; 1036 return 0;
1037 } 1037 }
1038 1038
1039 ret = (void *) __get_free_pages(gfp, get_order(size)); 1039 ret = (void *) __get_free_pages(gfp, get_order(size));
1040 1040
1041 if (ret) { 1041 if (ret) {
1042 memset(ret, 0, size); 1042 memset(ret, 0, size);
1043 *dma_handle = sba_map_single(hwdev, ret, size, 0); 1043 *dma_handle = sba_map_single(hwdev, ret, size, 0);
1044 } 1044 }
1045 1045
1046 return ret; 1046 return ret;
1047 } 1047 }
1048 1048
1049 1049
1050 /** 1050 /**
1051 * sba_free_consistent - free/unmap shared mem for DMA 1051 * sba_free_consistent - free/unmap shared mem for DMA
1052 * @hwdev: instance of PCI owned by the driver that's asking. 1052 * @hwdev: instance of PCI owned by the driver that's asking.
1053 * @size: number of bytes mapped in driver buffer. 1053 * @size: number of bytes mapped in driver buffer.
1054 * @vaddr: virtual address of "consistent" buffer. 1054 * @vaddr: virtual address of "consistent" buffer.
1055 * @dma_handle: IO virtual address (IOVA) of "consistent" buffer. 1055 * @dma_handle: IO virtual address (IOVA) of "consistent" buffer.
1056 * 1056 *
1057 * See Documentation/DMA-mapping.txt 1057 * See Documentation/DMA-mapping.txt
1058 */ 1058 */
1059 static void 1059 static void
1060 sba_free_consistent(struct device *hwdev, size_t size, void *vaddr, 1060 sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
1061 dma_addr_t dma_handle) 1061 dma_addr_t dma_handle)
1062 { 1062 {
1063 sba_unmap_single(hwdev, dma_handle, size, 0); 1063 sba_unmap_single(hwdev, dma_handle, size, 0);
1064 free_pages((unsigned long) vaddr, get_order(size)); 1064 free_pages((unsigned long) vaddr, get_order(size));
1065 } 1065 }
1066 1066
1067 1067
1068 /* 1068 /*
1069 ** Since 0 is a valid pdir_base index value, can't use that 1069 ** Since 0 is a valid pdir_base index value, can't use that
1070 ** to determine if a value is valid or not. Use a flag to indicate 1070 ** to determine if a value is valid or not. Use a flag to indicate
1071 ** the SG list entry contains a valid pdir index. 1071 ** the SG list entry contains a valid pdir index.
1072 */ 1072 */
1073 #define PIDE_FLAG 0x80000000UL 1073 #define PIDE_FLAG 0x80000000UL
1074 1074
1075 #ifdef SBA_COLLECT_STATS 1075 #ifdef SBA_COLLECT_STATS
1076 #define IOMMU_MAP_STATS 1076 #define IOMMU_MAP_STATS
1077 #endif 1077 #endif
1078 #include "iommu-helpers.h" 1078 #include "iommu-helpers.h"
1079 1079
1080 #ifdef DEBUG_LARGE_SG_ENTRIES 1080 #ifdef DEBUG_LARGE_SG_ENTRIES
1081 int dump_run_sg = 0; 1081 int dump_run_sg = 0;
1082 #endif 1082 #endif
1083 1083
1084 1084
1085 /** 1085 /**
1086 * sba_map_sg - map Scatter/Gather list 1086 * sba_map_sg - map Scatter/Gather list
1087 * @dev: instance of PCI owned by the driver that's asking. 1087 * @dev: instance of PCI owned by the driver that's asking.
1088 * @sglist: array of buffer/length pairs 1088 * @sglist: array of buffer/length pairs
1089 * @nents: number of entries in list 1089 * @nents: number of entries in list
1090 * @direction: R/W or both. 1090 * @direction: R/W or both.
1091 * 1091 *
1092 * See Documentation/DMA-mapping.txt 1092 * See Documentation/DMA-mapping.txt
1093 */ 1093 */
1094 static int 1094 static int
1095 sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 1095 sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
1096 enum dma_data_direction direction) 1096 enum dma_data_direction direction)
1097 { 1097 {
1098 struct ioc *ioc; 1098 struct ioc *ioc;
1099 int coalesced, filled = 0; 1099 int coalesced, filled = 0;
1100 unsigned long flags; 1100 unsigned long flags;
1101 1101
1102 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); 1102 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
1103 1103
1104 ioc = GET_IOC(dev); 1104 ioc = GET_IOC(dev);
1105 1105
1106 /* Fast path single entry scatterlists. */ 1106 /* Fast path single entry scatterlists. */
1107 if (nents == 1) { 1107 if (nents == 1) {
1108 sg_dma_address(sglist) = sba_map_single(dev, 1108 sg_dma_address(sglist) = sba_map_single(dev,
1109 (void *)sg_virt_addr(sglist), 1109 (void *)sg_virt_addr(sglist),
1110 sglist->length, direction); 1110 sglist->length, direction);
1111 sg_dma_len(sglist) = sglist->length; 1111 sg_dma_len(sglist) = sglist->length;
1112 return 1; 1112 return 1;
1113 } 1113 }
1114 1114
1115 spin_lock_irqsave(&ioc->res_lock, flags); 1115 spin_lock_irqsave(&ioc->res_lock, flags);
1116 1116
1117 #ifdef ASSERT_PDIR_SANITY 1117 #ifdef ASSERT_PDIR_SANITY
1118 if (sba_check_pdir(ioc,"Check before sba_map_sg()")) 1118 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
1119 { 1119 {
1120 sba_dump_sg(ioc, sglist, nents); 1120 sba_dump_sg(ioc, sglist, nents);
1121 panic("Check before sba_map_sg()"); 1121 panic("Check before sba_map_sg()");
1122 } 1122 }
1123 #endif 1123 #endif
1124 1124
1125 #ifdef SBA_COLLECT_STATS 1125 #ifdef SBA_COLLECT_STATS
1126 ioc->msg_calls++; 1126 ioc->msg_calls++;
1127 #endif 1127 #endif
1128 1128
1129 /* 1129 /*
1130 ** First coalesce the chunks and allocate I/O pdir space 1130 ** First coalesce the chunks and allocate I/O pdir space
1131 ** 1131 **
1132 ** If this is one DMA stream, we can properly map using the 1132 ** If this is one DMA stream, we can properly map using the
1133 ** correct virtual address associated with each DMA page. 1133 ** correct virtual address associated with each DMA page.
1134 ** w/o this association, we wouldn't have coherent DMA! 1134 ** w/o this association, we wouldn't have coherent DMA!
1135 ** Access to the virtual address is what forces a two pass algorithm. 1135 ** Access to the virtual address is what forces a two pass algorithm.
1136 */ 1136 */
1137 coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range); 1137 coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
1138 1138
1139 /* 1139 /*
1140 ** Program the I/O Pdir 1140 ** Program the I/O Pdir
1141 ** 1141 **
1142 ** map the virtual addresses to the I/O Pdir 1142 ** map the virtual addresses to the I/O Pdir
1143 ** o dma_address will contain the pdir index 1143 ** o dma_address will contain the pdir index
1144 ** o dma_len will contain the number of bytes to map 1144 ** o dma_len will contain the number of bytes to map
1145 ** o address contains the virtual address. 1145 ** o address contains the virtual address.
1146 */ 1146 */
1147 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry); 1147 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
1148 1148
1149 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */ 1149 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */
1150 if (ioc_needs_fdc) 1150 if (ioc_needs_fdc)
1151 asm volatile("sync" : : ); 1151 asm volatile("sync" : : );
1152 1152
1153 #ifdef ASSERT_PDIR_SANITY 1153 #ifdef ASSERT_PDIR_SANITY
1154 if (sba_check_pdir(ioc,"Check after sba_map_sg()")) 1154 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1155 { 1155 {
1156 sba_dump_sg(ioc, sglist, nents); 1156 sba_dump_sg(ioc, sglist, nents);
1157 panic("Check after sba_map_sg()\n"); 1157 panic("Check after sba_map_sg()\n");
1158 } 1158 }
1159 #endif 1159 #endif
1160 1160
1161 spin_unlock_irqrestore(&ioc->res_lock, flags); 1161 spin_unlock_irqrestore(&ioc->res_lock, flags);
1162 1162
1163 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); 1163 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
1164 1164
1165 return filled; 1165 return filled;
1166 } 1166 }
1167 1167
1168 1168
1169 /** 1169 /**
1170 * sba_unmap_sg - unmap Scatter/Gather list 1170 * sba_unmap_sg - unmap Scatter/Gather list
1171 * @dev: instance of PCI owned by the driver that's asking. 1171 * @dev: instance of PCI owned by the driver that's asking.
1172 * @sglist: array of buffer/length pairs 1172 * @sglist: array of buffer/length pairs
1173 * @nents: number of entries in list 1173 * @nents: number of entries in list
1174 * @direction: R/W or both. 1174 * @direction: R/W or both.
1175 * 1175 *
1176 * See Documentation/DMA-mapping.txt 1176 * See Documentation/DMA-mapping.txt
1177 */ 1177 */
1178 static void 1178 static void
1179 sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 1179 sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
1180 enum dma_data_direction direction) 1180 enum dma_data_direction direction)
1181 { 1181 {
1182 struct ioc *ioc; 1182 struct ioc *ioc;
1183 #ifdef ASSERT_PDIR_SANITY 1183 #ifdef ASSERT_PDIR_SANITY
1184 unsigned long flags; 1184 unsigned long flags;
1185 #endif 1185 #endif
1186 1186
1187 DBG_RUN_SG("%s() START %d entries, %p,%x\n", 1187 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1188 __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length); 1188 __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
1189 1189
1190 ioc = GET_IOC(dev); 1190 ioc = GET_IOC(dev);
1191 1191
1192 #ifdef SBA_COLLECT_STATS 1192 #ifdef SBA_COLLECT_STATS
1193 ioc->usg_calls++; 1193 ioc->usg_calls++;
1194 #endif 1194 #endif
1195 1195
1196 #ifdef ASSERT_PDIR_SANITY 1196 #ifdef ASSERT_PDIR_SANITY
1197 spin_lock_irqsave(&ioc->res_lock, flags); 1197 spin_lock_irqsave(&ioc->res_lock, flags);
1198 sba_check_pdir(ioc,"Check before sba_unmap_sg()"); 1198 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1199 spin_unlock_irqrestore(&ioc->res_lock, flags); 1199 spin_unlock_irqrestore(&ioc->res_lock, flags);
1200 #endif 1200 #endif
1201 1201
1202 while (sg_dma_len(sglist) && nents--) { 1202 while (sg_dma_len(sglist) && nents--) {
1203 1203
1204 sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction); 1204 sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
1205 #ifdef SBA_COLLECT_STATS 1205 #ifdef SBA_COLLECT_STATS
1206 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; 1206 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1207 ioc->usingle_calls--; /* kluge since call is unmap_sg() */ 1207 ioc->usingle_calls--; /* kluge since call is unmap_sg() */
1208 #endif 1208 #endif
1209 ++sglist; 1209 ++sglist;
1210 } 1210 }
1211 1211
1212 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); 1212 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
1213 1213
1214 #ifdef ASSERT_PDIR_SANITY 1214 #ifdef ASSERT_PDIR_SANITY
1215 spin_lock_irqsave(&ioc->res_lock, flags); 1215 spin_lock_irqsave(&ioc->res_lock, flags);
1216 sba_check_pdir(ioc,"Check after sba_unmap_sg()"); 1216 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1217 spin_unlock_irqrestore(&ioc->res_lock, flags); 1217 spin_unlock_irqrestore(&ioc->res_lock, flags);
1218 #endif 1218 #endif
1219 1219
1220 } 1220 }
1221 1221
1222 static struct hppa_dma_ops sba_ops = { 1222 static struct hppa_dma_ops sba_ops = {
1223 .dma_supported = sba_dma_supported, 1223 .dma_supported = sba_dma_supported,
1224 .alloc_consistent = sba_alloc_consistent, 1224 .alloc_consistent = sba_alloc_consistent,
1225 .alloc_noncoherent = sba_alloc_consistent, 1225 .alloc_noncoherent = sba_alloc_consistent,
1226 .free_consistent = sba_free_consistent, 1226 .free_consistent = sba_free_consistent,
1227 .map_single = sba_map_single, 1227 .map_single = sba_map_single,
1228 .unmap_single = sba_unmap_single, 1228 .unmap_single = sba_unmap_single,
1229 .map_sg = sba_map_sg, 1229 .map_sg = sba_map_sg,
1230 .unmap_sg = sba_unmap_sg, 1230 .unmap_sg = sba_unmap_sg,
1231 .dma_sync_single_for_cpu = NULL, 1231 .dma_sync_single_for_cpu = NULL,
1232 .dma_sync_single_for_device = NULL, 1232 .dma_sync_single_for_device = NULL,
1233 .dma_sync_sg_for_cpu = NULL, 1233 .dma_sync_sg_for_cpu = NULL,
1234 .dma_sync_sg_for_device = NULL, 1234 .dma_sync_sg_for_device = NULL,
1235 }; 1235 };
1236 1236
1237 1237
1238 /************************************************************************** 1238 /**************************************************************************
1239 ** 1239 **
1240 ** SBA PAT PDC support 1240 ** SBA PAT PDC support
1241 ** 1241 **
1242 ** o call pdc_pat_cell_module() 1242 ** o call pdc_pat_cell_module()
1243 ** o store ranges in PCI "resource" structures 1243 ** o store ranges in PCI "resource" structures
1244 ** 1244 **
1245 **************************************************************************/ 1245 **************************************************************************/
1246 1246
1247 static void 1247 static void
1248 sba_get_pat_resources(struct sba_device *sba_dev) 1248 sba_get_pat_resources(struct sba_device *sba_dev)
1249 { 1249 {
1250 #if 0 1250 #if 0
1251 /* 1251 /*
1252 ** TODO/REVISIT/FIXME: support for directed ranges requires calls to 1252 ** TODO/REVISIT/FIXME: support for directed ranges requires calls to
1253 ** PAT PDC to program the SBA/LBA directed range registers...this 1253 ** PAT PDC to program the SBA/LBA directed range registers...this
1254 ** burden may fall on the LBA code since it directly supports the 1254 ** burden may fall on the LBA code since it directly supports the
1255 ** PCI subsystem. It's not clear yet. - ggg 1255 ** PCI subsystem. It's not clear yet. - ggg
1256 */ 1256 */
1257 PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp); 1257 PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
1258 FIXME : ??? 1258 FIXME : ???
1259 PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp); 1259 PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
1260 Tells where the dvi bits are located in the address. 1260 Tells where the dvi bits are located in the address.
1261 PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp); 1261 PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1262 FIXME : ??? 1262 FIXME : ???
1263 #endif 1263 #endif
1264 } 1264 }
1265 1265
1266 1266
1267 /************************************************************** 1267 /**************************************************************
1268 * 1268 *
1269 * Initialization and claim 1269 * Initialization and claim
1270 * 1270 *
1271 ***************************************************************/ 1271 ***************************************************************/
1272 #define PIRANHA_ADDR_MASK 0x00160000UL /* bit 17,18,20 */ 1272 #define PIRANHA_ADDR_MASK 0x00160000UL /* bit 17,18,20 */
1273 #define PIRANHA_ADDR_VAL 0x00060000UL /* bit 17,18 on */ 1273 #define PIRANHA_ADDR_VAL 0x00060000UL /* bit 17,18 on */
1274 static void * 1274 static void *
1275 sba_alloc_pdir(unsigned int pdir_size) 1275 sba_alloc_pdir(unsigned int pdir_size)
1276 { 1276 {
1277 unsigned long pdir_base; 1277 unsigned long pdir_base;
1278 unsigned long pdir_order = get_order(pdir_size); 1278 unsigned long pdir_order = get_order(pdir_size);
1279 1279
1280 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order); 1280 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
1281 if (NULL == (void *) pdir_base) { 1281 if (NULL == (void *) pdir_base) {
1282 panic("%s() could not allocate I/O Page Table\n", 1282 panic("%s() could not allocate I/O Page Table\n",
1283 __FUNCTION__); 1283 __FUNCTION__);
1284 } 1284 }
1285 1285
1286 /* If this is not PA8700 (PCX-W2) 1286 /* If this is not PA8700 (PCX-W2)
1287 ** OR newer than ver 2.2 1287 ** OR newer than ver 2.2
1288 ** OR in a system that doesn't need VINDEX bits from SBA, 1288 ** OR in a system that doesn't need VINDEX bits from SBA,
1289 ** 1289 **
1290 ** then we aren't exposed to the HW bug. 1290 ** then we aren't exposed to the HW bug.
1291 */ 1291 */
1292 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13 1292 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
1293 || (boot_cpu_data.pdc.versions > 0x202) 1293 || (boot_cpu_data.pdc.versions > 0x202)
1294 || (boot_cpu_data.pdc.capabilities & 0x08L) ) 1294 || (boot_cpu_data.pdc.capabilities & 0x08L) )
1295 return (void *) pdir_base; 1295 return (void *) pdir_base;
1296 1296
1297 /* 1297 /*
1298 * PA8700 (PCX-W2, aka piranha) silent data corruption fix 1298 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
1299 * 1299 *
1300 * An interaction between PA8700 CPU (Ver 2.2 or older) and 1300 * An interaction between PA8700 CPU (Ver 2.2 or older) and
1301 * Ike/Astro can cause silent data corruption. This is only 1301 * Ike/Astro can cause silent data corruption. This is only
1302 * a problem if the I/O PDIR is located in memory such that 1302 * a problem if the I/O PDIR is located in memory such that
1303 * (little-endian) bits 17 and 18 are on and bit 20 is off. 1303 * (little-endian) bits 17 and 18 are on and bit 20 is off.
1304 * 1304 *
1305 * Since the max IO Pdir size is 2MB, by cleverly allocating the 1305 * Since the max IO Pdir size is 2MB, by cleverly allocating the
1306 * right physical address, we can either avoid (IOPDIR <= 1MB) 1306 * right physical address, we can either avoid (IOPDIR <= 1MB)
1307 * or minimize (2MB IO Pdir) the problem if we restrict the 1307 * or minimize (2MB IO Pdir) the problem if we restrict the
1308 * IO Pdir to a maximum size of 2MB-128K (1920K). 1308 * IO Pdir to a maximum size of 2MB-128K (1920K).
1309 * 1309 *
1310 * Because we always allocate 2^N-sized IO pdirs, the "bad" 1310 * Because we always allocate 2^N-sized IO pdirs, the "bad"
1311 * region, if present at all, will be the last 128K. That's 1311 * region, if present at all, will be the last 128K. That's
1312 * easy to test for. 1312 * easy to test for.
1313 * 1313 *
1314 */ 1314 */
1315 if (pdir_order <= (19-12)) { 1315 if (pdir_order <= (19-12)) {
1316 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) { 1316 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
1317 /* allocate a new one on 512k alignment */ 1317 /* allocate a new one on 512k alignment */
1318 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12)); 1318 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
1319 /* release original */ 1319 /* release original */
1320 free_pages(pdir_base, pdir_order); 1320 free_pages(pdir_base, pdir_order);
1321 1321
1322 pdir_base = new_pdir; 1322 pdir_base = new_pdir;
1323 1323
1324 /* release excess */ 1324 /* release excess */
1325 while (pdir_order < (19-12)) { 1325 while (pdir_order < (19-12)) {
1326 new_pdir += pdir_size; 1326 new_pdir += pdir_size;
1327 free_pages(new_pdir, pdir_order); 1327 free_pages(new_pdir, pdir_order);
1328 pdir_order += 1; 1328 pdir_order += 1;
1329 pdir_size <<= 1; 1329 pdir_size <<= 1;
1330 } 1330 }
1331 } 1331 }
1332 } else { 1332 } else {
1333 /* 1333 /*
1334 ** 1MB or 2MB Pdir 1334 ** 1MB or 2MB Pdir
1335 ** Needs to be aligned on an "odd" 1MB boundary. 1335 ** Needs to be aligned on an "odd" 1MB boundary.
1336 */ 1336 */
1337 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */ 1337 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */
1338 1338
1339 /* release original */ 1339 /* release original */
1340 free_pages(pdir_base, pdir_order); 1340 free_pages(pdir_base, pdir_order);
1341 1341
1342 /* release first 1MB */ 1342 /* release first 1MB */
1343 free_pages(new_pdir, 20-12); 1343 free_pages(new_pdir, 20-12);
1344 1344
1345 pdir_base = new_pdir + 1024*1024; 1345 pdir_base = new_pdir + 1024*1024;
1346 1346
1347 if (pdir_order > (20-12)) { 1347 if (pdir_order > (20-12)) {
1348 /* 1348 /*
1349 ** 2MB Pdir. 1349 ** 2MB Pdir.
1350 ** 1350 **
1351 ** Flag tells init_bitmap() to mark bad 128k as used 1351 ** Flag tells init_bitmap() to mark bad 128k as used
1352 ** and to reduce the size by 128k. 1352 ** and to reduce the size by 128k.
1353 */ 1353 */
1354 piranha_bad_128k = 1; 1354 piranha_bad_128k = 1;
1355 1355
1356 new_pdir += 3*1024*1024; 1356 new_pdir += 3*1024*1024;
1357 /* release last 1MB */ 1357 /* release last 1MB */
1358 free_pages(new_pdir, 20-12); 1358 free_pages(new_pdir, 20-12);
1359 1359
1360 /* release unusable 128KB */ 1360 /* release unusable 128KB */
1361 free_pages(new_pdir - 128*1024, 17-12); 1361 free_pages(new_pdir - 128*1024, 17-12);
1362 1362
1363 pdir_size -= 128*1024; 1363 pdir_size -= 128*1024;
1364 } 1364 }
1365 } 1365 }
1366 1366
1367 memset((void *) pdir_base, 0, pdir_size); 1367 memset((void *) pdir_base, 0, pdir_size);
1368 return (void *) pdir_base; 1368 return (void *) pdir_base;
1369 } 1369 }
1370 1370
1371 static struct device *next_device(struct klist_iter *i) 1371 static struct device *next_device(struct klist_iter *i)
1372 { 1372 {
1373 struct klist_node * n = klist_next(i); 1373 struct klist_node * n = klist_next(i);
1374 return n ? container_of(n, struct device, knode_parent) : NULL; 1374 return n ? container_of(n, struct device, knode_parent) : NULL;
1375 } 1375 }
1376 1376
1377 /* setup Mercury or Elroy IBASE/IMASK registers. */ 1377 /* setup Mercury or Elroy IBASE/IMASK registers. */
1378 static void 1378 static void
1379 setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num) 1379 setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1380 { 1380 {
1381 /* lba_set_iregs() is in drivers/parisc/lba_pci.c */ 1381 /* lba_set_iregs() is in drivers/parisc/lba_pci.c */
1382 extern void lba_set_iregs(struct parisc_device *, u32, u32); 1382 extern void lba_set_iregs(struct parisc_device *, u32, u32);
1383 struct device *dev; 1383 struct device *dev;
1384 struct klist_iter i; 1384 struct klist_iter i;
1385 1385
1386 klist_iter_init(&sba->dev.klist_children, &i); 1386 klist_iter_init(&sba->dev.klist_children, &i);
1387 while ((dev = next_device(&i))) { 1387 while ((dev = next_device(&i))) {
1388 struct parisc_device *lba = to_parisc_device(dev); 1388 struct parisc_device *lba = to_parisc_device(dev);
1389 int rope_num = (lba->hpa.start >> 13) & 0xf; 1389 int rope_num = (lba->hpa.start >> 13) & 0xf;
1390 if (rope_num >> 3 == ioc_num) 1390 if (rope_num >> 3 == ioc_num)
1391 lba_set_iregs(lba, ioc->ibase, ioc->imask); 1391 lba_set_iregs(lba, ioc->ibase, ioc->imask);
1392 } 1392 }
1393 klist_iter_exit(&i); 1393 klist_iter_exit(&i);
1394 } 1394 }
1395 1395
1396 static void 1396 static void
1397 sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) 1397 sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1398 { 1398 {
1399 u32 iova_space_mask; 1399 u32 iova_space_mask;
1400 u32 iova_space_size; 1400 u32 iova_space_size;
1401 int iov_order, tcnfg; 1401 int iov_order, tcnfg;
1402 #ifdef SBA_AGP_SUPPORT 1402 #ifdef SBA_AGP_SUPPORT
1403 int agp_found = 0; 1403 int agp_found = 0;
1404 #endif 1404 #endif
1405 /* 1405 /*
1406 ** Firmware programs the base and size of a "safe IOVA space" 1406 ** Firmware programs the base and size of a "safe IOVA space"
1407 ** (one that doesn't overlap memory or LMMIO space) in the 1407 ** (one that doesn't overlap memory or LMMIO space) in the
1408 ** IBASE and IMASK registers. 1408 ** IBASE and IMASK registers.
1409 */ 1409 */
1410 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); 1410 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
1411 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; 1411 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1412 1412
1413 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { 1413 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1414 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n"); 1414 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
1415 iova_space_size /= 2; 1415 iova_space_size /= 2;
1416 } 1416 }
1417 1417
1418 /* 1418 /*
1419 ** iov_order is always based on a 1GB IOVA space since we want to 1419 ** iov_order is always based on a 1GB IOVA space since we want to
1420 ** turn on the other half for AGP GART. 1420 ** turn on the other half for AGP GART.
1421 */ 1421 */
1422 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT)); 1422 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
1423 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64); 1423 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1424 1424
1425 DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits)\n", 1425 DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits)\n",
1426 __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20, 1426 __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
1427 iov_order + PAGE_SHIFT); 1427 iov_order + PAGE_SHIFT);
1428 1428
1429 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, 1429 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1430 get_order(ioc->pdir_size)); 1430 get_order(ioc->pdir_size));
1431 if (!ioc->pdir_base) 1431 if (!ioc->pdir_base)
1432 panic("Couldn't allocate I/O Page Table\n"); 1432 panic("Couldn't allocate I/O Page Table\n");
1433 1433
1434 memset(ioc->pdir_base, 0, ioc->pdir_size); 1434 memset(ioc->pdir_base, 0, ioc->pdir_size);
1435 1435
1436 DBG_INIT("%s() pdir %p size %x\n", 1436 DBG_INIT("%s() pdir %p size %x\n",
1437 __FUNCTION__, ioc->pdir_base, ioc->pdir_size); 1437 __FUNCTION__, ioc->pdir_base, ioc->pdir_size);
1438 1438
1439 #ifdef SBA_HINT_SUPPORT 1439 #ifdef SBA_HINT_SUPPORT
1440 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; 1440 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1441 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); 1441 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1442 1442
1443 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n", 1443 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1444 ioc->hint_shift_pdir, ioc->hint_mask_pdir); 1444 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1445 #endif 1445 #endif
1446 1446
1447 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base); 1447 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1448 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); 1448 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1449 1449
1450 /* build IMASK for IOC and Elroy */ 1450 /* build IMASK for IOC and Elroy */
1451 iova_space_mask = 0xffffffff; 1451 iova_space_mask = 0xffffffff;
1452 iova_space_mask <<= (iov_order + PAGE_SHIFT); 1452 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1453 ioc->imask = iova_space_mask; 1453 ioc->imask = iova_space_mask;
1454 #ifdef ZX1_SUPPORT 1454 #ifdef ZX1_SUPPORT
1455 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); 1455 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1456 #endif 1456 #endif
1457 sba_dump_tlb(ioc->ioc_hpa); 1457 sba_dump_tlb(ioc->ioc_hpa);
1458 1458
1459 setup_ibase_imask(sba, ioc, ioc_num); 1459 setup_ibase_imask(sba, ioc, ioc_num);
1460 1460
1461 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK); 1461 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1462 1462
1463 #ifdef CONFIG_64BIT 1463 #ifdef CONFIG_64BIT
1464 /* 1464 /*
1465 ** Setting the upper bits makes checking for bypass addresses 1465 ** Setting the upper bits makes checking for bypass addresses
1466 ** a little faster later on. 1466 ** a little faster later on.
1467 */ 1467 */
1468 ioc->imask |= 0xFFFFFFFF00000000UL; 1468 ioc->imask |= 0xFFFFFFFF00000000UL;
1469 #endif 1469 #endif
1470 1470
1471 /* Set I/O PDIR Page size to system page size */ 1471 /* Set I/O PDIR Page size to system page size */
1472 switch (PAGE_SHIFT) { 1472 switch (PAGE_SHIFT) {
1473 case 12: tcnfg = 0; break; /* 4K */ 1473 case 12: tcnfg = 0; break; /* 4K */
1474 case 13: tcnfg = 1; break; /* 8K */ 1474 case 13: tcnfg = 1; break; /* 8K */
1475 case 14: tcnfg = 2; break; /* 16K */ 1475 case 14: tcnfg = 2; break; /* 16K */
1476 case 16: tcnfg = 3; break; /* 64K */ 1476 case 16: tcnfg = 3; break; /* 64K */
1477 default: 1477 default:
1478 panic(__FILE__ ": Unsupported system page size %d", 1478 panic(__FILE__ ": Unsupported system page size %d",
1479 1 << PAGE_SHIFT); 1479 1 << PAGE_SHIFT);
1480 break; 1480 break;
1481 } 1481 }
1482 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); 1482 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1483 1483
1484 /* 1484 /*
1485 ** Program the IOC's ibase and enable IOVA translation 1485 ** Program the IOC's ibase and enable IOVA translation
1486 ** Bit zero == enable bit. 1486 ** Bit zero == enable bit.
1487 */ 1487 */
1488 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); 1488 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1489 1489
1490 /* 1490 /*
1491 ** Clear I/O TLB of any possible entries. 1491 ** Clear I/O TLB of any possible entries.
1492 ** (Yes. This is a bit paranoid...but so what) 1492 ** (Yes. This is a bit paranoid...but so what)
1493 */ 1493 */
1494 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM); 1494 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1495 1495
1496 #ifdef SBA_AGP_SUPPORT 1496 #ifdef SBA_AGP_SUPPORT
1497 /* 1497 /*
1498 ** If an AGP device is present, only use half of the IOV space 1498 ** If an AGP device is present, only use half of the IOV space
1499 ** for PCI DMA. Unfortunately we can't know ahead of time 1499 ** for PCI DMA. Unfortunately we can't know ahead of time
1500 ** whether GART support will actually be used, for now we 1500 ** whether GART support will actually be used, for now we
1501 ** can just key on any AGP device found in the system. 1501 ** can just key on any AGP device found in the system.
1502 ** We program the next pdir index after we stop w/ a key for 1502 ** We program the next pdir index after we stop w/ a key for
1503 ** the GART code to handshake on. 1503 ** the GART code to handshake on.
1504 */ 1504 */
1505 device=NULL; 1505 device=NULL;
1506 for (lba = sba->child; lba; lba = lba->sibling) { 1506 for (lba = sba->child; lba; lba = lba->sibling) {
1507 if (IS_QUICKSILVER(lba)) 1507 if (IS_QUICKSILVER(lba))
1508 break; 1508 break;
1509 } 1509 }
1510 1510
1511 if (lba) { 1511 if (lba) {
1512 DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__); 1512 DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__);
1513 ioc->pdir_size /= 2; 1513 ioc->pdir_size /= 2;
1514 ((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE; 1514 ((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE;
1515 } else { 1515 } else {
1516 DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__); 1516 DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__);
1517 } 1517 }
1518 #endif /* SBA_AGP_SUPPORT */ 1518 #endif /* SBA_AGP_SUPPORT */
1519 1519
1520 } 1520 }
1521 1521
1522 static void 1522 static void
1523 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) 1523 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1524 { 1524 {
1525 u32 iova_space_size, iova_space_mask; 1525 u32 iova_space_size, iova_space_mask;
1526 unsigned int pdir_size, iov_order; 1526 unsigned int pdir_size, iov_order;
1527 1527
1528 /* 1528 /*
1529 ** Determine IOVA Space size from memory size. 1529 ** Determine IOVA Space size from memory size.
1530 ** 1530 **
1531 ** Ideally, PCI drivers would register the maximum number 1531 ** Ideally, PCI drivers would register the maximum number
1532 ** of DMA they can have outstanding for each device they 1532 ** of DMA they can have outstanding for each device they
1533 ** own. Next best thing would be to guess how much DMA 1533 ** own. Next best thing would be to guess how much DMA
1534 ** can be outstanding based on PCI Class/sub-class. Both 1534 ** can be outstanding based on PCI Class/sub-class. Both
1535 ** methods still require some "extra" to support PCI 1535 ** methods still require some "extra" to support PCI
1536 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD). 1536 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
1537 ** 1537 **
1538 ** While we have 32 bits of "IOVA" space, the top two bits are used 1538 ** While we have 32 bits of "IOVA" space, the top two bits are used
1539 ** for DMA hints - ergo only 30 bits max. 1539 ** for DMA hints - ergo only 30 bits max.
1540 */ 1540 */
1541 1541
1542 iova_space_size = (u32) (num_physpages/global_ioc_cnt); 1542 iova_space_size = (u32) (num_physpages/global_ioc_cnt);
1543 1543
1544 /* limit IOVA space size to 1MB-1GB */ 1544 /* limit IOVA space size to 1MB-1GB */
1545 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) { 1545 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1546 iova_space_size = 1 << (20 - PAGE_SHIFT); 1546 iova_space_size = 1 << (20 - PAGE_SHIFT);
1547 } 1547 }
1548 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) { 1548 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1549 iova_space_size = 1 << (30 - PAGE_SHIFT); 1549 iova_space_size = 1 << (30 - PAGE_SHIFT);
1550 } 1550 }
1551 1551
1552 /* 1552 /*
1553 ** iova space must be log2() in size. 1553 ** iova space must be log2() in size.
1554 ** thus, pdir/res_map will also be log2(). 1554 ** thus, pdir/res_map will also be log2().
1555 ** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced) 1555 ** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
1556 */ 1556 */
1557 iov_order = get_order(iova_space_size << PAGE_SHIFT); 1557 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1558 1558
1559 /* iova_space_size is now bytes, not pages */ 1559 /* iova_space_size is now bytes, not pages */
1560 iova_space_size = 1 << (iov_order + PAGE_SHIFT); 1560 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1561 1561
1562 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64); 1562 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1563 1563
1564 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n", 1564 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1565 __FUNCTION__, 1565 __FUNCTION__,
1566 ioc->ioc_hpa, 1566 ioc->ioc_hpa,
1567 (unsigned long) num_physpages >> (20 - PAGE_SHIFT), 1567 (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
1568 iova_space_size>>20, 1568 iova_space_size>>20,
1569 iov_order + PAGE_SHIFT); 1569 iov_order + PAGE_SHIFT);
1570 1570
1571 ioc->pdir_base = sba_alloc_pdir(pdir_size); 1571 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1572 1572
1573 DBG_INIT("%s() pdir %p size %x\n", 1573 DBG_INIT("%s() pdir %p size %x\n",
1574 __FUNCTION__, ioc->pdir_base, pdir_size); 1574 __FUNCTION__, ioc->pdir_base, pdir_size);
1575 1575
1576 #ifdef SBA_HINT_SUPPORT 1576 #ifdef SBA_HINT_SUPPORT
1577 /* FIXME : DMA HINTs not used */ 1577 /* FIXME : DMA HINTs not used */
1578 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; 1578 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1579 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); 1579 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1580 1580
1581 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n", 1581 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1582 ioc->hint_shift_pdir, ioc->hint_mask_pdir); 1582 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1583 #endif 1583 #endif
1584 1584
1585 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); 1585 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1586 1586
1587 /* build IMASK for IOC and Elroy */ 1587 /* build IMASK for IOC and Elroy */
1588 iova_space_mask = 0xffffffff; 1588 iova_space_mask = 0xffffffff;
1589 iova_space_mask <<= (iov_order + PAGE_SHIFT); 1589 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1590 1590
1591 /* 1591 /*
1592 ** On C3000 w/512MB mem, HP-UX 10.20 reports: 1592 ** On C3000 w/512MB mem, HP-UX 10.20 reports:
1593 ** ibase=0, imask=0xFE000000, size=0x2000000. 1593 ** ibase=0, imask=0xFE000000, size=0x2000000.
1594 */ 1594 */
1595 ioc->ibase = 0; 1595 ioc->ibase = 0;
1596 ioc->imask = iova_space_mask; /* save it */ 1596 ioc->imask = iova_space_mask; /* save it */
1597 #ifdef ZX1_SUPPORT 1597 #ifdef ZX1_SUPPORT
1598 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); 1598 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1599 #endif 1599 #endif
1600 1600
1601 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n", 1601 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1602 __FUNCTION__, ioc->ibase, ioc->imask); 1602 __FUNCTION__, ioc->ibase, ioc->imask);
1603 1603
1604 /* 1604 /*
1605 ** FIXME: Hint registers are programmed with default hint 1605 ** FIXME: Hint registers are programmed with default hint
1606 ** values during boot, so hints should be sane even if we 1606 ** values during boot, so hints should be sane even if we
1607 ** can't reprogram them the way drivers want. 1607 ** can't reprogram them the way drivers want.
1608 */ 1608 */
1609 1609
1610 setup_ibase_imask(sba, ioc, ioc_num); 1610 setup_ibase_imask(sba, ioc, ioc_num);
1611 1611
1612 /* 1612 /*
1613 ** Program the IOC's ibase and enable IOVA translation 1613 ** Program the IOC's ibase and enable IOVA translation
1614 */ 1614 */
1615 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); 1615 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1616 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK); 1616 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1617 1617
1618 /* Set I/O PDIR Page size to 4K */ 1618 /* Set I/O PDIR Page size to 4K */
1619 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG); 1619 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
1620 1620
1621 /* 1621 /*
1622 ** Clear I/O TLB of any possible entries. 1622 ** Clear I/O TLB of any possible entries.
1623 ** (Yes. This is a bit paranoid...but so what) 1623 ** (Yes. This is a bit paranoid...but so what)
1624 */ 1624 */
1625 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM); 1625 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1626 1626
1627 ioc->ibase = 0; /* used by SBA_IOVA and related macros */ 1627 ioc->ibase = 0; /* used by SBA_IOVA and related macros */
1628 1628
1629 DBG_INIT("%s() DONE\n", __FUNCTION__); 1629 DBG_INIT("%s() DONE\n", __FUNCTION__);
1630 } 1630 }
1631 1631
1632 1632
1633 1633
1634 /************************************************************************** 1634 /**************************************************************************
1635 ** 1635 **
1636 ** SBA initialization code (HW and SW) 1636 ** SBA initialization code (HW and SW)
1637 ** 1637 **
1638 ** o identify SBA chip itself 1638 ** o identify SBA chip itself
1639 ** o initialize SBA chip modes (HardFail) 1639 ** o initialize SBA chip modes (HardFail)
1641 ** o FIXME: initialize DMA hints for reasonable defaults 1641 ** o FIXME: initialize DMA hints for reasonable defaults
1642 ** 1642 **
1643 **************************************************************************/ 1643 **************************************************************************/
1644 1644
1645 static void __iomem *ioc_remap(struct sba_device *sba_dev, int offset) 1645 static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
1646 { 1646 {
1647 return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE); 1647 return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
1648 } 1648 }
1649 1649
1650 static void sba_hw_init(struct sba_device *sba_dev) 1650 static void sba_hw_init(struct sba_device *sba_dev)
1651 { 1651 {
1652 int i; 1652 int i;
1653 int num_ioc; 1653 int num_ioc;
1654 u64 ioc_ctl; 1654 u64 ioc_ctl;
1655 1655
1656 if (!is_pdc_pat()) { 1656 if (!is_pdc_pat()) {
1657 /* Shutdown the USB controller on Astro-based workstations. 1657 /* Shutdown the USB controller on Astro-based workstations.
1658 ** Once we reprogram the IOMMU, the next DMA performed by 1658 ** Once we reprogram the IOMMU, the next DMA performed by
1659 ** USB will HPMC the box. USB is only enabled if a 1659 ** USB will HPMC the box. USB is only enabled if a
1660 ** keyboard is present and found. 1660 ** keyboard is present and found.
1661 ** 1661 **
1662 ** With serial console, j6k v5.0 firmware says: 1662 ** With serial console, j6k v5.0 firmware says:
1663 ** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7 1663 ** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
1664 ** 1664 **
1665 ** FIXME: Using GFX+USB console at power up but direct 1665 ** FIXME: Using GFX+USB console at power up but direct
1666 ** linux to serial console is still broken. 1666 ** linux to serial console is still broken.
1667 ** USB could generate DMA so we must reset USB. 1667 ** USB could generate DMA so we must reset USB.
1668 ** The proper sequence would be: 1668 ** The proper sequence would be:
1669 ** o block console output 1669 ** o block console output
1670 ** o reset USB device 1670 ** o reset USB device
1671 ** o reprogram serial port 1671 ** o reprogram serial port
1672 ** o unblock console output 1672 ** o unblock console output
1673 */ 1673 */
1674 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) { 1674 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
1675 pdc_io_reset_devices(); 1675 pdc_io_reset_devices();
1676 } 1676 }
1677 1677
1678 } 1678 }
1679 1679
1680 1680
1681 #if 0 1681 #if 0
1682 printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa, 1682 printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1683 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class); 1683 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
1684 1684
1685 /* 1685 /*
1686 ** Need to deal with DMA from LAN. 1686 ** Need to deal with DMA from LAN.
1687 ** Maybe use page zero boot device as a handle to talk 1687 ** Maybe use page zero boot device as a handle to talk
1688 ** to PDC about which device to shutdown. 1688 ** to PDC about which device to shutdown.
1689 ** 1689 **
1690 ** Netbooting, j6k v5.0 firmware says: 1690 ** Netbooting, j6k v5.0 firmware says:
1691 ** mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002 1691 ** mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
1692 ** ARGH! invalid class. 1692 ** ARGH! invalid class.
1693 */ 1693 */
1694 if ((PAGE0->mem_boot.cl_class != CL_RANDOM) 1694 if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
1695 && (PAGE0->mem_boot.cl_class != CL_SEQU)) { 1695 && (PAGE0->mem_boot.cl_class != CL_SEQU)) {
1696 pdc_io_reset(); 1696 pdc_io_reset();
1697 } 1697 }
1698 #endif 1698 #endif
1699 1699
1700 if (!IS_PLUTO(sba_dev->iodc)) { 1700 if (!IS_PLUTO(sba_dev->iodc)) {
1701 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL); 1701 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1702 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->", 1702 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1703 __FUNCTION__, sba_dev->sba_hpa, ioc_ctl); 1703 __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
1704 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE); 1704 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
1705 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC; 1705 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
1706 /* j6700 v1.6 firmware sets 0x294f */ 1706 /* j6700 v1.6 firmware sets 0x294f */
1707 /* A500 firmware sets 0x4d */ 1707 /* A500 firmware sets 0x4d */
1708 1708
1709 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL); 1709 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
1710 1710
1711 #ifdef DEBUG_SBA_INIT 1711 #ifdef DEBUG_SBA_INIT
1712 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL); 1712 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
1713 DBG_INIT(" 0x%Lx\n", ioc_ctl); 1713 DBG_INIT(" 0x%Lx\n", ioc_ctl);
1714 #endif 1714 #endif
1715 } /* if !PLUTO */ 1715 } /* if !PLUTO */
1716 1716
1717 if (IS_ASTRO(sba_dev->iodc)) { 1717 if (IS_ASTRO(sba_dev->iodc)) {
1718 int err; 1718 int err;
1719 /* PAT_PDC (L-class) also reports the same goofy base */ 1719 /* PAT_PDC (L-class) also reports the same goofy base */
1720 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET); 1720 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1721 num_ioc = 1; 1721 num_ioc = 1;
1722 1722
1723 sba_dev->chip_resv.name = "Astro Intr Ack"; 1723 sba_dev->chip_resv.name = "Astro Intr Ack";
1724 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL; 1724 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
1725 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ; 1725 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
1726 err = request_resource(&iomem_resource, &(sba_dev->chip_resv)); 1726 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1727 BUG_ON(err < 0); 1727 BUG_ON(err < 0);
1728 1728
1729 } else if (IS_PLUTO(sba_dev->iodc)) { 1729 } else if (IS_PLUTO(sba_dev->iodc)) {
1730 int err; 1730 int err;
1731 1731
1732 /* We use a negative value for IOC HPA so it gets 1732 /* We use a negative value for IOC HPA so it gets
1733 * corrected when we add it with IKE's IOC offset. 1733 * corrected when we add it with IKE's IOC offset.
1734 * Doesn't look clean, but it's less code. 1734 * Doesn't look clean, but it's less code.
1735 */ 1735 */
1736 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET); 1736 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1737 num_ioc = 1; 1737 num_ioc = 1;
1738 1738
1739 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA"; 1739 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
1740 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL; 1740 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
1741 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1); 1741 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
1742 err = request_resource(&iomem_resource, &(sba_dev->chip_resv)); 1742 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1743 WARN_ON(err < 0); 1743 WARN_ON(err < 0);
1744 1744
1745 sba_dev->iommu_resv.name = "IOVA Space"; 1745 sba_dev->iommu_resv.name = "IOVA Space";
1746 sba_dev->iommu_resv.start = 0x40000000UL; 1746 sba_dev->iommu_resv.start = 0x40000000UL;
1747 sba_dev->iommu_resv.end = 0x50000000UL - 1; 1747 sba_dev->iommu_resv.end = 0x50000000UL - 1;
1748 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv)); 1748 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1749 WARN_ON(err < 0); 1749 WARN_ON(err < 0);
1750 } else { 1750 } else {
1751 /* IS_IKE (i.e. N-class, L3000, L1500) */ 1751 /* IS_IKE (i.e. N-class, L3000, L1500) */
1752 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0)); 1752 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1753 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1)); 1753 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1754 num_ioc = 2; 1754 num_ioc = 2;
1755 1755
1756 /* TODO - LOOKUP Ike/Stretch chipset mem map */ 1756 /* TODO - LOOKUP Ike/Stretch chipset mem map */
1757 } 1757 }
1758 /* XXX: What about Reo? */ 1758 /* XXX: What about Reo? */
1759 1759
1760 sba_dev->num_ioc = num_ioc; 1760 sba_dev->num_ioc = num_ioc;
1761 for (i = 0; i < num_ioc; i++) { 1761 for (i = 0; i < num_ioc; i++) {
1762 /* 1762 /*
1763 ** Make sure the box crashes if we get any errors on a rope. 1763 ** Make sure the box crashes if we get any errors on a rope.
1764 */ 1764 */
1765 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL); 1765 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
1766 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL); 1766 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
1767 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL); 1767 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
1768 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL); 1768 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
1769 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL); 1769 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
1770 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL); 1770 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
1771 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL); 1771 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
1772 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL); 1772 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1773 1773
1774 /* flush out the writes */ 1774 /* flush out the writes */
1775 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL); 1775 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1776 1776
1777 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n", 1777 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1778 i, 1778 i,
1779 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40), 1779 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1780 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50) 1780 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1781 ); 1781 );
1782 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n", 1782 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
1783 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108), 1783 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1784 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400) 1784 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1785 ); 1785 );
1786 1786
1787 if (IS_PLUTO(sba_dev->iodc)) { 1787 if (IS_PLUTO(sba_dev->iodc)) {
1788 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i); 1788 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1789 } else { 1789 } else {
1790 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i); 1790 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1791 } 1791 }
1792 } 1792 }
1793 } 1793 }
1794 1794
1795 static void 1795 static void
1796 sba_common_init(struct sba_device *sba_dev) 1796 sba_common_init(struct sba_device *sba_dev)
1797 { 1797 {
1798 int i; 1798 int i;
1799 1799
1800 /* add this one to the head of the list (order doesn't matter) 1800 /* add this one to the head of the list (order doesn't matter)
1801 ** This will be useful for debugging - especially if we get coredumps 1801 ** This will be useful for debugging - especially if we get coredumps
1802 */ 1802 */
1803 sba_dev->next = sba_list; 1803 sba_dev->next = sba_list;
1804 sba_list = sba_dev; 1804 sba_list = sba_dev;
1805 1805
1806 for(i=0; i< sba_dev->num_ioc; i++) { 1806 for(i=0; i< sba_dev->num_ioc; i++) {
1807 int res_size; 1807 int res_size;
1808 #ifdef DEBUG_DMB_TRAP 1808 #ifdef DEBUG_DMB_TRAP
1809 extern void iterate_pages(unsigned long , unsigned long , 1809 extern void iterate_pages(unsigned long , unsigned long ,
1810 void (*)(pte_t * , unsigned long), 1810 void (*)(pte_t * , unsigned long),
1811 unsigned long ); 1811 unsigned long );
1812 void set_data_memory_break(pte_t * , unsigned long); 1812 void set_data_memory_break(pte_t * , unsigned long);
1813 #endif 1813 #endif
1814 /* resource map size dictated by pdir_size */ 1814 /* resource map size dictated by pdir_size */
1815 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */ 1815 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
1816 1816
1817 /* Second part of PIRANHA BUG */ 1817 /* Second part of PIRANHA BUG */
1818 if (piranha_bad_128k) { 1818 if (piranha_bad_128k) {
1819 res_size -= (128*1024)/sizeof(u64); 1819 res_size -= (128*1024)/sizeof(u64);
1820 } 1820 }
1821 1821
1822 res_size >>= 3; /* convert bit count to byte count */ 1822 res_size >>= 3; /* convert bit count to byte count */
1823 DBG_INIT("%s() res_size 0x%x\n", 1823 DBG_INIT("%s() res_size 0x%x\n",
1824 __FUNCTION__, res_size); 1824 __FUNCTION__, res_size);
1825 1825
1826 sba_dev->ioc[i].res_size = res_size; 1826 sba_dev->ioc[i].res_size = res_size;
1827 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size)); 1827 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1828 1828
1829 #ifdef DEBUG_DMB_TRAP 1829 #ifdef DEBUG_DMB_TRAP
1830 iterate_pages( sba_dev->ioc[i].res_map, res_size, 1830 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1831 set_data_memory_break, 0); 1831 set_data_memory_break, 0);
1832 #endif 1832 #endif
1833 1833
1834 if (NULL == sba_dev->ioc[i].res_map) 1834 if (NULL == sba_dev->ioc[i].res_map)
1835 { 1835 {
1836 panic("%s:%s() could not allocate resource map\n", 1836 panic("%s:%s() could not allocate resource map\n",
1837 __FILE__, __FUNCTION__ ); 1837 __FILE__, __FUNCTION__ );
1838 } 1838 }
1839 1839
1840 memset(sba_dev->ioc[i].res_map, 0, res_size); 1840 memset(sba_dev->ioc[i].res_map, 0, res_size);
1841 /* next available IOVP - circular search */ 1841 /* next available IOVP - circular search */
1842 sba_dev->ioc[i].res_hint = (unsigned long *) 1842 sba_dev->ioc[i].res_hint = (unsigned long *)
1843 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]); 1843 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1844 1844
1845 #ifdef ASSERT_PDIR_SANITY 1845 #ifdef ASSERT_PDIR_SANITY
1846 /* Mark first bit busy - ie no IOVA 0 */ 1846 /* Mark first bit busy - ie no IOVA 0 */
1847 sba_dev->ioc[i].res_map[0] = 0x80; 1847 sba_dev->ioc[i].res_map[0] = 0x80;
1848 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL; 1848 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1849 #endif 1849 #endif
1850 1850
1851 /* Third (and last) part of PIRANHA BUG */ 1851 /* Third (and last) part of PIRANHA BUG */
1852 if (piranha_bad_128k) { 1852 if (piranha_bad_128k) {
1853 /* region from +1408K to +1536K is unusable. */ 1853 /* region from +1408K to +1536K is unusable. */
1854 1854
1855 int idx_start = (1408*1024/sizeof(u64)) >> 3; 1855 int idx_start = (1408*1024/sizeof(u64)) >> 3;
1856 int idx_end = (1536*1024/sizeof(u64)) >> 3; 1856 int idx_end = (1536*1024/sizeof(u64)) >> 3;
1857 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]); 1857 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1858 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]); 1858 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1859 1859
1860 /* mark that part of the io pdir busy */ 1860 /* mark that part of the io pdir busy */
1861 while (p_start < p_end) 1861 while (p_start < p_end)
1862 *p_start++ = -1; 1862 *p_start++ = -1;
1863 1863
1864 } 1864 }
1865 1865
1866 #ifdef DEBUG_DMB_TRAP 1866 #ifdef DEBUG_DMB_TRAP
1867 iterate_pages( sba_dev->ioc[i].res_map, res_size, 1867 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1868 set_data_memory_break, 0); 1868 set_data_memory_break, 0);
1869 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size, 1869 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1870 set_data_memory_break, 0); 1870 set_data_memory_break, 0);
1871 #endif 1871 #endif
1872 1872
1873 DBG_INIT("%s() %d res_map %x %p\n", 1873 DBG_INIT("%s() %d res_map %x %p\n",
1874 __FUNCTION__, i, res_size, sba_dev->ioc[i].res_map); 1874 __FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
1875 } 1875 }
1876 1876
1877 spin_lock_init(&sba_dev->sba_lock); 1877 spin_lock_init(&sba_dev->sba_lock);
1878 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC; 1878 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
1879 1879
1880 #ifdef DEBUG_SBA_INIT 1880 #ifdef DEBUG_SBA_INIT
1881 /* 1881 /*
1882 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set 1882 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
1883 * (bit #61, big endian), we have to flush and sync every time 1883 * (bit #61, big endian), we have to flush and sync every time
1884 * IO-PDIR is changed in Ike/Astro. 1884 * IO-PDIR is changed in Ike/Astro.
1885 */ 1885 */
1886 if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) { 1886 if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
1887 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n"); 1887 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
1888 } else { 1888 } else {
1889 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n"); 1889 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
1890 } 1890 }
1891 #endif 1891 #endif
1892 } 1892 }
1893 1893
1894 #ifdef CONFIG_PROC_FS 1894 #ifdef CONFIG_PROC_FS
1895 static int sba_proc_info(struct seq_file *m, void *p) 1895 static int sba_proc_info(struct seq_file *m, void *p)
1896 { 1896 {
1897 struct sba_device *sba_dev = sba_list; 1897 struct sba_device *sba_dev = sba_list;
1898 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ 1898 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
1899 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ 1899 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
1900 #ifdef SBA_COLLECT_STATS 1900 #ifdef SBA_COLLECT_STATS
1901 unsigned long avg = 0, min, max; 1901 unsigned long avg = 0, min, max;
1902 #endif 1902 #endif
1903 int i, len = 0; 1903 int i, len = 0;
1904 1904
1905 len += seq_printf(m, "%s rev %d.%d\n", 1905 len += seq_printf(m, "%s rev %d.%d\n",
1906 sba_dev->name, 1906 sba_dev->name,
1907 (sba_dev->hw_rev & 0x7) + 1, 1907 (sba_dev->hw_rev & 0x7) + 1,
1908 (sba_dev->hw_rev & 0x18) >> 3 1908 (sba_dev->hw_rev & 0x18) >> 3
1909 ); 1909 );
1910 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n", 1910 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1911 (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ 1911 (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
1912 total_pages); 1912 total_pages);
1913 1913
1914 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n", 1914 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1915 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ 1915 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
1916 1916
1917 len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n", 1917 len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
1918 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE), 1918 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
1919 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK), 1919 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
1920 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE) 1920 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
1921 ); 1921 );
1922 1922
1923 for (i=0; i<4; i++) 1923 for (i=0; i<4; i++)
1924 len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i, 1924 len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
1925 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18), 1925 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
1926 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18), 1926 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
1927 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18) 1927 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
1928 ); 1928 );
1929 1929
1930 #ifdef SBA_COLLECT_STATS 1930 #ifdef SBA_COLLECT_STATS
1931 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n", 1931 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1932 total_pages - ioc->used_pages, ioc->used_pages, 1932 total_pages - ioc->used_pages, ioc->used_pages,
1933 (int) (ioc->used_pages * 100 / total_pages)); 1933 (int) (ioc->used_pages * 100 / total_pages));
1934 1934
1935 min = max = ioc->avg_search[0]; 1935 min = max = ioc->avg_search[0];
1936 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { 1936 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1937 avg += ioc->avg_search[i]; 1937 avg += ioc->avg_search[i];
1938 if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; 1938 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1939 if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; 1939 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1940 } 1940 }
1941 avg /= SBA_SEARCH_SAMPLE; 1941 avg /= SBA_SEARCH_SAMPLE;
1942 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", 1942 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1943 min, avg, max); 1943 min, avg, max);
1944 1944
1945 len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n", 1945 len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1946 ioc->msingle_calls, ioc->msingle_pages, 1946 ioc->msingle_calls, ioc->msingle_pages,
1947 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls)); 1947 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1948 1948
1949 /* KLUGE - unmap_sg calls unmap_single for each mapped page */ 1949 /* KLUGE - unmap_sg calls unmap_single for each mapped page */
1950 min = ioc->usingle_calls; 1950 min = ioc->usingle_calls;
1951 max = ioc->usingle_pages - ioc->usg_pages; 1951 max = ioc->usingle_pages - ioc->usg_pages;
1952 len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", 1952 len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1953 min, max, (int) ((max * 1000)/min)); 1953 min, max, (int) ((max * 1000)/min));
1954 1954
1955 len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", 1955 len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1956 ioc->msg_calls, ioc->msg_pages, 1956 ioc->msg_calls, ioc->msg_pages,
1957 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls)); 1957 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
1958 1958
1959 len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", 1959 len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1960 ioc->usg_calls, ioc->usg_pages, 1960 ioc->usg_calls, ioc->usg_pages,
1961 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); 1961 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
1962 #endif 1962 #endif
1963 1963
1964 return 0; 1964 return 0;
1965 } 1965 }
1966 1966
1967 static int 1967 static int
1968 sba_proc_open(struct inode *i, struct file *f) 1968 sba_proc_open(struct inode *i, struct file *f)
1969 { 1969 {
1970 return single_open(f, &sba_proc_info, NULL); 1970 return single_open(f, &sba_proc_info, NULL);
1971 } 1971 }
1972 1972
1973 static struct file_operations sba_proc_fops = { 1973 static struct file_operations sba_proc_fops = {
1974 .owner = THIS_MODULE, 1974 .owner = THIS_MODULE,
1975 .open = sba_proc_open, 1975 .open = sba_proc_open,
1976 .read = seq_read, 1976 .read = seq_read,
1977 .llseek = seq_lseek, 1977 .llseek = seq_lseek,
1978 .release = single_release, 1978 .release = single_release,
1979 }; 1979 };
1980 1980
1981 static int 1981 static int
1982 sba_proc_bitmap_info(struct seq_file *m, void *p) 1982 sba_proc_bitmap_info(struct seq_file *m, void *p)
1983 { 1983 {
1984 struct sba_device *sba_dev = sba_list; 1984 struct sba_device *sba_dev = sba_list;
1985 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ 1985 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
1986 unsigned int *res_ptr = (unsigned int *)ioc->res_map; 1986 unsigned int *res_ptr = (unsigned int *)ioc->res_map;
1987 int i, len = 0; 1987 int i, len = 0;
1988 1988
1989 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) { 1989 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
1990 if ((i & 7) == 0) 1990 if ((i & 7) == 0)
1991 len += seq_printf(m, "\n "); 1991 len += seq_printf(m, "\n ");
1992 len += seq_printf(m, " %08x", *res_ptr); 1992 len += seq_printf(m, " %08x", *res_ptr);
1993 } 1993 }
1994 len += seq_printf(m, "\n"); 1994 len += seq_printf(m, "\n");
1995 1995
1996 return 0; 1996 return 0;
1997 } 1997 }
1998 1998
1999 static int 1999 static int
2000 sba_proc_bitmap_open(struct inode *i, struct file *f) 2000 sba_proc_bitmap_open(struct inode *i, struct file *f)
2001 { 2001 {
2002 return single_open(f, &sba_proc_bitmap_info, NULL); 2002 return single_open(f, &sba_proc_bitmap_info, NULL);
2003 } 2003 }
2004 2004
2005 static struct file_operations sba_proc_bitmap_fops = { 2005 static struct file_operations sba_proc_bitmap_fops = {
2006 .owner = THIS_MODULE, 2006 .owner = THIS_MODULE,
2007 .open = sba_proc_bitmap_open, 2007 .open = sba_proc_bitmap_open,
2008 .read = seq_read, 2008 .read = seq_read,
2009 .llseek = seq_lseek, 2009 .llseek = seq_lseek,
2010 .release = single_release, 2010 .release = single_release,
2011 }; 2011 };
2012 #endif /* CONFIG_PROC_FS */ 2012 #endif /* CONFIG_PROC_FS */
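
Both proc entries above follow the same single_open() idiom: a show callback that prints everything in one pass, wrapped by seq_read/seq_lseek/single_release. A minimal, self-contained version of that idiom against the same 2.6-era procfs API (the entry name "demo" and its output are invented for illustration):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *p)
{
	seq_printf(m, "hello from a single_open() seq_file\n");
	return 0;
}

static int demo_open(struct inode *i, struct file *f)
{
	return single_open(f, demo_show, NULL);
}

static struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	struct proc_dir_entry *e = create_proc_entry("demo", 0, NULL);

	if (e)
		e->proc_fops = &demo_fops;
	return 0;
}
module_init(demo_init);
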
2013 2013
2014 static struct parisc_device_id sba_tbl[] = { 2014 static struct parisc_device_id sba_tbl[] = {
2015 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb }, 2015 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
2016 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc }, 2016 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
2017 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc }, 2017 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
2018 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc }, 2018 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
2019 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc }, 2019 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
2020 { 0, } 2020 { 0, }
2021 }; 2021 };
2022 2022
2023 int sba_driver_callback(struct parisc_device *); 2023 int sba_driver_callback(struct parisc_device *);
2024 2024
2025 static struct parisc_driver sba_driver = { 2025 static struct parisc_driver sba_driver = {
2026 .name = MODULE_NAME, 2026 .name = MODULE_NAME,
2027 .id_table = sba_tbl, 2027 .id_table = sba_tbl,
2028 .probe = sba_driver_callback, 2028 .probe = sba_driver_callback,
2029 }; 2029 };
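
For context, the parisc bus core walks tables like sba_tbl above and compares each entry field-by-field against the device, with the *_ANY_ID constants acting as wildcards. A simplified, self-contained sketch of that comparison; the field widths, the wildcard value, and the concrete numbers for HPHW_IOA (12) and ASTRO_RUNWAY_PORT (0x582) are assumptions made so the example runs, not authoritative values:

#include <stdio.h>

#define HVERSION_REV_ANY_ID 0xff	/* assumed wildcard value */

struct parisc_device_id {
	unsigned char  hw_type;
	unsigned char  hversion_rev;
	unsigned short hversion;
	unsigned int   sversion;
};

static int id_matches(const struct parisc_device_id *id,
		      unsigned char hw_type, unsigned char rev,
		      unsigned short hversion, unsigned int sversion)
{
	return id->hw_type == hw_type &&
	       (id->hversion_rev == HVERSION_REV_ANY_ID ||
		id->hversion_rev == rev) &&
	       id->hversion == hversion &&
	       id->sversion == sversion;
}

int main(void)
{
	/* Astro entry from sba_tbl; 12 and 0x582 are assumed stand-ins
	 * for HPHW_IOA and ASTRO_RUNWAY_PORT. */
	struct parisc_device_id astro = { 12, HVERSION_REV_ANY_ID, 0x582, 0xb };

	printf("astro rev 3 matches: %d\n",
	       id_matches(&astro, 12, 3, 0x582, 0xb));
	return 0;
}
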
2030 2030
2031 /* 2031 /*
2032 ** Determine if sba should claim this chip (return 0) or not (return nonzero). 2032 ** Determine if sba should claim this chip (return 0) or not (return nonzero).
2033 ** If so, initialize the chip and tell other partners in crime they 2033 ** If so, initialize the chip and tell other partners in crime they
2034 ** have work to do. 2034 ** have work to do.
2035 */ 2035 */
2036 int 2036 int
2037 sba_driver_callback(struct parisc_device *dev) 2037 sba_driver_callback(struct parisc_device *dev)
2038 { 2038 {
2039 struct sba_device *sba_dev; 2039 struct sba_device *sba_dev;
2040 u32 func_class; 2040 u32 func_class;
2041 int i; 2041 int i;
2042 char *version; 2042 char *version;
2043 void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE); 2043 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
2044 struct proc_dir_entry *info_entry, *bitmap_entry, *root; 2044 struct proc_dir_entry *info_entry, *bitmap_entry, *root;
2045 2045
2046 sba_dump_ranges(sba_addr); 2046 sba_dump_ranges(sba_addr);
2047 2047
2048 /* Read HW Rev First */ 2048 /* Read HW Rev First */
2049 func_class = READ_REG(sba_addr + SBA_FCLASS); 2049 func_class = READ_REG(sba_addr + SBA_FCLASS);
2050 2050
2051 if (IS_ASTRO(&dev->id)) { 2051 if (IS_ASTRO(&dev->id)) {
2052 unsigned long fclass; 2052 unsigned long fclass;
2053 static char astro_rev[]="Astro ?.?"; 2053 static char astro_rev[]="Astro ?.?";
2054 2054
2055 /* Astro is broken...Read HW Rev First */ 2055 /* Astro is broken...Read HW Rev First */
2056 fclass = READ_REG(sba_addr); 2056 fclass = READ_REG(sba_addr);
2057 2057
2058 astro_rev[6] = '1' + (char) (fclass & 0x7); 2058 astro_rev[6] = '1' + (char) (fclass & 0x7);
2059 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3); 2059 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
2060 version = astro_rev; 2060 version = astro_rev;
2061 2061
2062 } else if (IS_IKE(&dev->id)) { 2062 } else if (IS_IKE(&dev->id)) {
2063 static char ike_rev[] = "Ike rev ?"; 2063 static char ike_rev[] = "Ike rev ?";
2064 ike_rev[8] = '0' + (char) (func_class & 0xff); 2064 ike_rev[8] = '0' + (char) (func_class & 0xff);
2065 version = ike_rev; 2065 version = ike_rev;
2066 } else if (IS_PLUTO(&dev->id)) { 2066 } else if (IS_PLUTO(&dev->id)) {
2067 static char pluto_rev[]="Pluto ?.?"; 2067 static char pluto_rev[]="Pluto ?.?";
2068 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4); 2068 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
2069 pluto_rev[8] = '0' + (char) (func_class & 0x0f); 2069 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
2070 version = pluto_rev; 2070 version = pluto_rev;
2071 } else { 2071 } else {
2072 static char reo_rev[] = "REO rev ?"; 2072 static char reo_rev[] = "REO rev ?";
2073 reo_rev[8] = '0' + (char) (func_class & 0xff); 2073 reo_rev[8] = '0' + (char) (func_class & 0xff);
2074 version = reo_rev; 2074 version = reo_rev;
2075 } 2075 }
2076 2076
2077 if (!global_ioc_cnt) { 2077 if (!global_ioc_cnt) {
2078 global_ioc_cnt = count_parisc_driver(&sba_driver); 2078 global_ioc_cnt = count_parisc_driver(&sba_driver);
2079 2079
2080 /* Astro and Pluto have one IOC per SBA */ 2080 /* Astro and Pluto have one IOC per SBA */
2081 if ((!IS_ASTRO(&dev->id)) && (!IS_PLUTO(&dev->id))) 2081 if ((!IS_ASTRO(&dev->id)) && (!IS_PLUTO(&dev->id)))
2082 global_ioc_cnt *= 2; 2082 global_ioc_cnt *= 2;
2083 } 2083 }
2084 2084
2085 printk(KERN_INFO "%s found %s at 0x%lx\n", 2085 printk(KERN_INFO "%s found %s at 0x%lx\n",
2086 MODULE_NAME, version, dev->hpa.start); 2086 MODULE_NAME, version, dev->hpa.start);
2087 2087
2088 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL); 2088 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
2089 if (!sba_dev) { 2089 if (!sba_dev) {
2090 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n"); 2090 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
2091 return -ENOMEM; 2091 return -ENOMEM;
2092 } 2092 }
2093 2093
2094 parisc_set_drvdata(dev, sba_dev); 2094 parisc_set_drvdata(dev, sba_dev);
2095 2095
2096 for(i=0; i<MAX_IOC; i++) 2096 for(i=0; i<MAX_IOC; i++)
2097 spin_lock_init(&(sba_dev->ioc[i].res_lock)); 2097 spin_lock_init(&(sba_dev->ioc[i].res_lock));
2098 2098
2099 sba_dev->dev = dev; 2099 sba_dev->dev = dev;
2100 sba_dev->hw_rev = func_class; 2100 sba_dev->hw_rev = func_class;
2101 sba_dev->iodc = &dev->id; 2101 sba_dev->iodc = &dev->id;
2102 sba_dev->name = dev->name; 2102 sba_dev->name = dev->name;
2103 sba_dev->sba_hpa = sba_addr; 2103 sba_dev->sba_hpa = sba_addr;
2104 2104
2105 sba_get_pat_resources(sba_dev); 2105 sba_get_pat_resources(sba_dev);
2106 sba_hw_init(sba_dev); 2106 sba_hw_init(sba_dev);
2107 sba_common_init(sba_dev); 2107 sba_common_init(sba_dev);
2108 2108
2109 hppa_dma_ops = &sba_ops; 2109 hppa_dma_ops = &sba_ops;
2110 2110
2111 #ifdef CONFIG_PROC_FS 2111 #ifdef CONFIG_PROC_FS
2112 switch (dev->id.hversion) { 2112 switch (dev->id.hversion) {
2113 case PLUTO_MCKINLEY_PORT: 2113 case PLUTO_MCKINLEY_PORT:
2114 root = proc_mckinley_root; 2114 root = proc_mckinley_root;
2115 break; 2115 break;
2116 case ASTRO_RUNWAY_PORT: 2116 case ASTRO_RUNWAY_PORT:
2117 case IKE_MERCED_PORT: 2117 case IKE_MERCED_PORT:
2118 default: 2118 default:
2119 root = proc_runway_root; 2119 root = proc_runway_root;
2120 break; 2120 break;
2121 } 2121 }
2122 2122
2123 info_entry = create_proc_entry("sba_iommu", 0, root); 2123 info_entry = create_proc_entry("sba_iommu", 0, root);
2124 bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root); 2124 bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root);
2125 2125
2126 if (info_entry) 2126 if (info_entry)
2127 info_entry->proc_fops = &sba_proc_fops; 2127 info_entry->proc_fops = &sba_proc_fops;
2128 2128
2129 if (bitmap_entry) 2129 if (bitmap_entry)
2130 bitmap_entry->proc_fops = &sba_proc_bitmap_fops; 2130 bitmap_entry->proc_fops = &sba_proc_bitmap_fops;
2131 #endif 2131 #endif
2132 2132
2133 parisc_vmerge_boundary = IOVP_SIZE; 2133 parisc_vmerge_boundary = IOVP_SIZE;
2134 parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG; 2134 parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;
2135 parisc_has_iommu(); 2135 parisc_has_iommu();
2136 return 0; 2136 return 0;
2137 } 2137 }
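
The probe above is one of the call sites this commit converts: the SBA function registers live in F-space, so the mapping must be uncached before any READ_REG. A reduced sketch of the pattern in isolation; the base address, length, and register offset below are invented for illustration:

#include <linux/types.h>
#include <asm/io.h>

#define DEMO_FSPACE_HPA	0xfffffffff4000000UL	/* hypothetical F-space base */
#define DEMO_REG	0x30			/* hypothetical register     */

static u32 demo_read_fspace_reg(void)
{
	void __iomem *base = ioremap_nocache(DEMO_FSPACE_HPA, 0x1000);
	u32 val;

	if (!base)
		return 0;
	val = __raw_readl(base + DEMO_REG);	/* uncached MMIO load */
	iounmap(base);
	return val;
}
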
2138 2138
2139 /* 2139 /*
2140 ** One-time initialization to let the world know the SBA was found. 2140 ** One-time initialization to let the world know the SBA was found.
2141 ** This is the only routine which is NOT static. 2141 ** This is the only routine which is NOT static.
2142 ** Must be called exactly once before pci_init(). 2142 ** Must be called exactly once before pci_init().
2143 */ 2143 */
2144 void __init sba_init(void) 2144 void __init sba_init(void)
2145 { 2145 {
2146 register_parisc_driver(&sba_driver); 2146 register_parisc_driver(&sba_driver);
2147 } 2147 }
2148 2148
2149 2149
2150 /** 2150 /**
2151 * sba_get_iommu - Assign the iommu pointer for the pci bus controller. 2151 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
2152 * @pci_hba: The parisc device. 2152 * @pci_hba: The parisc device.
2153 * 2153 *
2154 * Returns the appropriate IOMMU data for the given parisc PCI controller. 2154 * Returns the appropriate IOMMU data for the given parisc PCI controller.
2155 * This is cached and used later for PCI DMA Mapping. 2155 * This is cached and used later for PCI DMA Mapping.
2156 */ 2156 */
2157 void * sba_get_iommu(struct parisc_device *pci_hba) 2157 void * sba_get_iommu(struct parisc_device *pci_hba)
2158 { 2158 {
2159 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2159 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2160 struct sba_device *sba = sba_dev->dev.driver_data; 2160 struct sba_device *sba = sba_dev->dev.driver_data;
2161 char t = sba_dev->id.hw_type; 2161 char t = sba_dev->id.hw_type;
2162 int iocnum = (pci_hba->hw_path >> 3); /* IOC # */ 2162 int iocnum = (pci_hba->hw_path >> 3); /* IOC # */
2163 2163
2164 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT)); 2164 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
2165 2165
2166 return &(sba->ioc[iocnum]); 2166 return &(sba->ioc[iocnum]);
2167 } 2167 }
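
A worked example of the hw_path arithmetic used above and in the two LMMIO helpers below: with ROPES_PER_IOC == 8, the low three bits of hw_path select the rope within an IOC and the remaining bits select the IOC. The sample value is invented:

#include <stdio.h>

#define ROPES_PER_IOC 8

int main(void)
{
	int hw_path = 0x0d;				/* hypothetical */
	int iocnum  = hw_path >> 3;			/* IOC #  -> 1  */
	int rope    = hw_path & (ROPES_PER_IOC - 1);	/* rope # -> 5  */

	printf("hw_path 0x%x: IOC %d, rope %d\n", hw_path, iocnum, rope);
	return 0;
}
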
2168 2168
2169 2169
2170 /** 2170 /**
2171 * sba_directed_lmmio - return first directed LMMIO range routed to rope 2171 * sba_directed_lmmio - return first directed LMMIO range routed to rope
2172 * @pci_hba: The parisc device. 2172 * @pci_hba: The parisc device.
2173 * @r: resource PCI host controller wants start/end fields assigned. 2173 * @r: resource PCI host controller wants start/end fields assigned.
2174 * 2174 *
2175 * For the given parisc PCI controller, determine if any direct ranges 2175 * For the given parisc PCI controller, determine if any direct ranges
2176 * are routed down the corresponding rope. 2176 * are routed down the corresponding rope.
2177 */ 2177 */
2178 void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r) 2178 void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2179 { 2179 {
2180 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2180 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2181 struct sba_device *sba = sba_dev->dev.driver_data; 2181 struct sba_device *sba = sba_dev->dev.driver_data;
2182 char t = sba_dev->id.hw_type; 2182 char t = sba_dev->id.hw_type;
2183 int i; 2183 int i;
2184 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ 2184 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
2185 2185
2186 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT)); 2186 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2187 2187
2188 r->start = r->end = 0; 2188 r->start = r->end = 0;
2189 2189
2190 /* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */ 2190 /* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
2191 for (i=0; i<4; i++) { 2191 for (i=0; i<4; i++) {
2192 int base, size; 2192 int base, size;
2193 void __iomem *reg = sba->sba_hpa + i*0x18; 2193 void __iomem *reg = sba->sba_hpa + i*0x18;
2194 2194
2195 base = READ_REG32(reg + LMMIO_DIRECT0_BASE); 2195 base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
2196 if ((base & 1) == 0) 2196 if ((base & 1) == 0)
2197 continue; /* not enabled */ 2197 continue; /* not enabled */
2198 2198
2199 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE); 2199 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);
2200 2200
2201 if ((size & (ROPES_PER_IOC-1)) != rope) 2201 if ((size & (ROPES_PER_IOC-1)) != rope)
2202 continue; /* directed down different rope */ 2202 continue; /* directed down different rope */
2203 2203
2204 r->start = (base & ~1UL) | PCI_F_EXTEND; 2204 r->start = (base & ~1UL) | PCI_F_EXTEND;
2205 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK); 2205 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
2206 r->end = r->start + size; 2206 r->end = r->start + size;
2207 } 2207 }
2208 } 2208 }
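
To make the register decode above concrete, here is the same arithmetic on invented register values: bit 0 of the BASE register is the enable bit, the low bits of ROUTE name the rope, and the usable window size is the bitwise complement of MASK. PCI_F_EXTEND is assumed here to be 0xffffffff00000000, the constant that lifts a 32-bit LMMIO address into F-space:

#include <stdio.h>
#include <stdint.h>

#define PCI_F_EXTEND 0xffffffff00000000ULL	/* assumption */

int main(void)
{
	uint32_t base_reg = 0xfa000001;	/* enabled; base 0xfa000000 */
	uint32_t mask_reg = 0xff800000;	/* complement = 8 MB - 1    */

	if (base_reg & 1) {
		uint64_t start = (base_reg & ~1ULL) | PCI_F_EXTEND;
		uint32_t size  = ~mask_reg;

		printf("directed range 0x%016llx - 0x%016llx\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + size));
	}
	return 0;
}
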
2209 2209
2210 2210
2211 /** 2211 /**
2212 * sba_distributed_lmmio - return portion of distributed LMMIO range 2212 * sba_distributed_lmmio - return portion of distributed LMMIO range
2213 * @pci_hba: The parisc device. 2213 * @pci_hba: The parisc device.
2214 * @r: resource PCI host controller wants start/end fields assigned. 2214 * @r: resource PCI host controller wants start/end fields assigned.
2215 * 2215 *
2216 * For the given parisc PCI controller, return portion of distributed LMMIO 2216 * For the given parisc PCI controller, return portion of distributed LMMIO
2217 * range. The distributed LMMIO is always present and it's just a question 2217 * range. The distributed LMMIO is always present and it's just a question
2218 * of the base address and size of the range. 2218 * of the base address and size of the range.
2219 */ 2219 */
2220 void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r ) 2220 void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
2221 { 2221 {
2222 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2222 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2223 struct sba_device *sba = sba_dev->dev.driver_data; 2223 struct sba_device *sba = sba_dev->dev.driver_data;
2224 char t = sba_dev->id.hw_type; 2224 char t = sba_dev->id.hw_type;
2225 int base, size; 2225 int base, size;
2226 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ 2226 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
2227 2227
2228 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT)); 2228 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2229 2229
2230 r->start = r->end = 0; 2230 r->start = r->end = 0;
2231 2231
2232 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE); 2232 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
2233 if ((base & 1) == 0) { 2233 if ((base & 1) == 0) {
2234 BUG(); /* Gah! Distr Range wasn't enabled! */ 2234 BUG(); /* Gah! Distr Range wasn't enabled! */
2235 return; 2235 return;
2236 } 2236 }
2237 2237
2238 r->start = (base & ~1UL) | PCI_F_EXTEND; 2238 r->start = (base & ~1UL) | PCI_F_EXTEND;
2239 2239
2240 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC; 2240 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
2241 r->start += rope * (size + 1); /* adjust base for this rope */ 2241 r->start += rope * (size + 1); /* adjust base for this rope */
2242 r->end = r->start + size; 2242 r->end = r->start + size;
2243 } 2243 }
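
And the distributed-range arithmetic above, again on invented values: the whole window (the complement of LMMIO_DIST_MASK) is split evenly across ROPES_PER_IOC ropes, each rope receiving a slice of size + 1 bytes starting rope * (size + 1) past the base. Same PCI_F_EXTEND assumption as in the previous sketch:

#include <stdio.h>
#include <stdint.h>

#define ROPES_PER_IOC 8
#define PCI_F_EXTEND  0xffffffff00000000ULL	/* assumption */

int main(void)
{
	uint32_t base_reg = 0xf4000001;	/* enabled; base 0xf4000000 */
	uint32_t mask_reg = 0xff000000;	/* 16 MB total window       */
	int rope = 3;			/* hypothetical rope #      */

	uint64_t start = (base_reg & ~1ULL) | PCI_F_EXTEND;
	uint32_t size  = (~mask_reg) / ROPES_PER_IOC;	/* per-rope span - 1 */

	start += (uint64_t)rope * (size + 1);
	printf("rope %d gets 0x%016llx - 0x%016llx\n", rope,
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return 0;
}
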
2244 2244
drivers/parisc/superio.c
1 /* National Semiconductor NS87560UBD Super I/O controller used in 1 /* National Semiconductor NS87560UBD Super I/O controller used in
2 * HP [BCJ]x000 workstations. 2 * HP [BCJ]x000 workstations.
3 * 3 *
4 * This chip is a horrid piece of engineering, and National 4 * This chip is a horrid piece of engineering, and National
5 * denies any knowledge of its existence. Thus no datasheet is 5 * denies any knowledge of its existence. Thus no datasheet is
6 * available off www.national.com. 6 * available off www.national.com.
7 * 7 *
8 * (C) Copyright 2000 Linuxcare, Inc. 8 * (C) Copyright 2000 Linuxcare, Inc.
9 * (C) Copyright 2000 Linuxcare Canada, Inc. 9 * (C) Copyright 2000 Linuxcare Canada, Inc.
10 * (C) Copyright 2000 Martin K. Petersen <mkp@linuxcare.com> 10 * (C) Copyright 2000 Martin K. Petersen <mkp@linuxcare.com>
11 * (C) Copyright 2000 Alex deVries <alex@onefishtwo.ca> 11 * (C) Copyright 2000 Alex deVries <alex@onefishtwo.ca>
12 * (C) Copyright 2001 John Marvin <jsm fc hp com> 12 * (C) Copyright 2001 John Marvin <jsm fc hp com>
13 * (C) Copyright 2003 Grant Grundler <grundler parisc-linux org> 13 * (C) Copyright 2003 Grant Grundler <grundler parisc-linux org>
14 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org> 14 * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
15 * (C) Copyright 2006 Helge Deller <deller@gmx.de>
15 * 16 *
16 * This program is free software; you can redistribute it and/or 17 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as 18 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of 19 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version. 20 * the License, or (at your option) any later version.
20 * 21 *
21 * The initial version of this is by Martin Peterson. Alex deVries 22 * The initial version of this is by Martin Peterson. Alex deVries
22 * has spent a bit of time trying to coax it into working. 23 * has spent a bit of time trying to coax it into working.
23 * 24 *
24 * Major changes to get basic interrupt infrastructure working to 25 * Major changes to get basic interrupt infrastructure working to
25 * hopefully be able to support all SuperIO devices. Currently 26 * hopefully be able to support all SuperIO devices. Currently
26 * works with serial. -- John Marvin <jsm@fc.hp.com> 27 * works with serial. -- John Marvin <jsm@fc.hp.com>
27 * 28 *
28 * Converted superio_init() to be a PCI_FIXUP_FINAL callee. 29 * Converted superio_init() to be a PCI_FIXUP_FINAL callee.
29 * -- Kyle McMartin <kyle@parisc-linux.org> 30 * -- Kyle McMartin <kyle@parisc-linux.org>
30 */ 31 */
31 32
32 33
33 /* NOTES: 34 /* NOTES:
34 * 35 *
35 * Function 0 is an IDE controller. It is identical to a PC87415 IDE 36 * Function 0 is an IDE controller. It is identical to a PC87415 IDE
36 * controller (and identifies itself as such). 37 * controller (and identifies itself as such).
37 * 38 *
38 * Function 1 is a "Legacy I/O" controller. Under this function is a 39 * Function 1 is a "Legacy I/O" controller. Under this function is a
39 * whole mess of legacy I/O peripherals. Of course, HP hasn't enabled 40 * whole mess of legacy I/O peripherals. Of course, HP hasn't enabled
40 * all the functionality in hardware, but the following is available: 41 * all the functionality in hardware, but the following is available:
41 * 42 *
42 * Two 16550A compatible serial controllers 43 * Two 16550A compatible serial controllers
43 * An IEEE 1284 compatible parallel port 44 * An IEEE 1284 compatible parallel port
44 * A floppy disk controller 45 * A floppy disk controller
45 * 46 *
46 * Function 2 is a USB controller. 47 * Function 2 is a USB controller.
47 * 48 *
48 * We must be incredibly careful during initialization. Since all 49 * We must be incredibly careful during initialization. Since all
49 * interrupts are routed through function 1 (which is not allowed by 50 * interrupts are routed through function 1 (which is not allowed by
50 * the PCI spec), we need to program the PICs on the legacy I/O port 51 * the PCI spec), we need to program the PICs on the legacy I/O port
51 * *before* we attempt to set up IDE and USB. @#$!& 52 * *before* we attempt to set up IDE and USB. @#$!&
52 * 53 *
53 * According to HP, devices are only enabled by firmware if they have 54 * According to HP, devices are only enabled by firmware if they have
54 * a physical device connected. 55 * a physical device connected.
55 * 56 *
56 * Configuration register bits: 57 * Configuration register bits:
57 * 0x5A: FDC, SP1, IDE1, SP2, IDE2, PAR, Reserved, P92 58 * 0x5A: FDC, SP1, IDE1, SP2, IDE2, PAR, Reserved, P92
58 * 0x5B: RTC, 8259, 8254, DMA1, DMA2, KBC, P61, APM 59 * 0x5B: RTC, 8259, 8254, DMA1, DMA2, KBC, P61, APM
59 * 60 *
60 */ 61 */
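
A hedged sketch of how the enable bits described above could be inspected from a driver, assuming the 0x5A/0x5B configuration offsets given in the comment; the helper name is invented and the per-bit layout is left to the comment rather than decoded here:

#include <linux/kernel.h>
#include <linux/pci.h>

static void superio_dump_enables(struct pci_dev *pdev)
{
	u8 r5a, r5b;

	pci_read_config_byte(pdev, 0x5a, &r5a);
	pci_read_config_byte(pdev, 0x5b, &r5b);

	printk(KERN_DEBUG "SuperIO 0x5A=0x%02x (FDC/SP1/IDE1/...), "
	       "0x5B=0x%02x (RTC/8259/8254/...)\n", r5a, r5b);
}
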
61 62
62 #include <linux/errno.h> 63 #include <linux/errno.h>
63 #include <linux/init.h> 64 #include <linux/init.h>
64 #include <linux/module.h> 65 #include <linux/module.h>
65 #include <linux/types.h> 66 #include <linux/types.h>
66 #include <linux/interrupt.h> 67 #include <linux/interrupt.h>
67 #include <linux/ioport.h> 68 #include <linux/ioport.h>
68 #include <linux/serial.h> 69 #include <linux/serial.h>
69 #include <linux/pci.h> 70 #include <linux/pci.h>
70 #include <linux/parport.h> 71 #include <linux/parport.h>
71 #include <linux/parport_pc.h> 72 #include <linux/parport_pc.h>
72 #include <linux/termios.h> 73 #include <linux/termios.h>
73 #include <linux/tty.h> 74 #include <linux/tty.h>
74 #include <linux/serial_core.h> 75 #include <linux/serial_core.h>
75 #include <linux/delay.h> 76 #include <linux/delay.h>
76 77
77 #include <asm/io.h> 78 #include <asm/io.h>
78 #include <asm/hardware.h> 79 #include <asm/hardware.h>
79 #include <asm/superio.h> 80 #include <asm/superio.h>
80 81
81 static struct superio_device sio_dev; 82 static struct superio_device sio_dev;
82 83
83 84
84 #undef DEBUG_SUPERIO_INIT 85 #undef DEBUG_SUPERIO_INIT
85 86
86 #ifdef DEBUG_SUPERIO_INIT 87 #ifdef DEBUG_SUPERIO_INIT
87 #define DBG_INIT(x...) printk(x) 88 #define DBG_INIT(x...) printk(x)
88 #else 89 #else
89 #define DBG_INIT(x...) 90 #define DBG_INIT(x...)
90 #endif 91 #endif
91 92
92 #define SUPERIO "SuperIO" 93 #define SUPERIO "SuperIO"
93 #define PFX SUPERIO ": " 94 #define PFX SUPERIO ": "
94 95
95 static irqreturn_t 96 static irqreturn_t
96 superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs) 97 superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
97 { 98 {
98 u8 results; 99 u8 results;
99 u8 local_irq; 100 u8 local_irq;
100 101
101 /* Poll the 8259 to see if there's an interrupt. */ 102 /* Poll the 8259 to see if there's an interrupt. */
102 outb (OCW3_POLL,IC_PIC1+0); 103 outb (OCW3_POLL,IC_PIC1+0);
103 104
104 results = inb(IC_PIC1+0); 105 results = inb(IC_PIC1+0);
105 106
106 /* 107 /*
107 * Bit 7: 1 = active Interrupt; 0 = no Interrupt pending 108 * Bit 7: 1 = active Interrupt; 0 = no Interrupt pending
108 * Bits 6-3: zero 109 * Bits 6-3: zero
109 * Bits 2-0: highest priority, active requesting interrupt ID (0-7) 110 * Bits 2-0: highest priority, active requesting interrupt ID (0-7)
110 */ 111 */
111 if ((results & 0x80) == 0) { 112 if ((results & 0x80) == 0) {
112 /* I suspect "spurious" interrupts come from unmasking an IRQ. 113 /* I suspect "spurious" interrupts come from unmasking an IRQ.
113 * Nothing is actually pending, so report IRQ_NONE and 114 * Nothing is actually pending, so report IRQ_NONE and
114 * let the caller decide what to do. 115 * let the caller decide what to do.
115 */ 116 */
116 return IRQ_NONE; 117 return IRQ_NONE;
117 } 118 }
118 119
119 /* Check to see which device is interrupting */ 120 /* Check to see which device is interrupting */
120 local_irq = results & 0x0f; 121 local_irq = results & 0x0f;
121 122
122 if (local_irq == 2 || local_irq > 7) { 123 if (local_irq == 2 || local_irq > 7) {
123 printk(KERN_ERR PFX "slave interrupted!\n"); 124 printk(KERN_ERR PFX "slave interrupted!\n");
124 return IRQ_HANDLED; 125 return IRQ_HANDLED;
125 } 126 }
126 127
127 if (local_irq == 7) { 128 if (local_irq == 7) {
128 129
129 /* Could be spurious. Check in service bits */ 130 /* Could be spurious. Check in service bits */
130 131
131 outb(OCW3_ISR,IC_PIC1+0); 132 outb(OCW3_ISR,IC_PIC1+0);
132 results = inb(IC_PIC1+0); 133 results = inb(IC_PIC1+0);
133 if ((results & 0x80) == 0) { /* if ISR7 not set: spurious */ 134 if ((results & 0x80) == 0) { /* if ISR7 not set: spurious */
134 printk(KERN_WARNING PFX "spurious interrupt!\n"); 135 printk(KERN_WARNING PFX "spurious interrupt!\n");
135 return IRQ_HANDLED; 136 return IRQ_HANDLED;
136 } 137 }
137 } 138 }
138 139
139 /* Call the appropriate device's interrupt */ 140 /* Call the appropriate device's interrupt */
140 __do_IRQ(local_irq, regs); 141 __do_IRQ(local_irq, regs);
141 142
142 /* set EOI - forces a new interrupt if a lower priority device 143 /* set EOI - forces a new interrupt if a lower priority device
143 * still needs service. 144 * still needs service.
144 */ 145 */
145 outb((OCW2_SEOI|local_irq),IC_PIC1 + 0); 146 outb((OCW2_SEOI|local_irq),IC_PIC1 + 0);
146 return IRQ_HANDLED; 147 return IRQ_HANDLED;
147 } 148 }
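
The poll handshake above is the standard 8259 one: write OCW3_POLL, then read back a byte whose top bit says whether anything is pending and whose low bits carry the winning IRQ number. A tiny standalone decode of that byte (the sample value is invented):

#include <stdio.h>

int main(void)
{
	unsigned char results = 0x83;	/* hypothetical poll response */

	if (results & 0x80)		/* bit 7: interrupt pending   */
		printf("IRQ %d requesting service\n", results & 0x07);
	else
		printf("nothing pending (spurious poll)\n");
	return 0;
}
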
148 149
149 /* Initialize Super I/O device */ 150 /* Initialize Super I/O device */
150 static void 151 static void
151 superio_init(struct pci_dev *pcidev) 152 superio_init(struct pci_dev *pcidev)
152 { 153 {
153 struct superio_device *sio = &sio_dev; 154 struct superio_device *sio = &sio_dev;
154 struct pci_dev *pdev = sio->lio_pdev; 155 struct pci_dev *pdev = sio->lio_pdev;
155 u16 word; 156 u16 word;
156 157
157 if (sio->suckyio_irq_enabled) 158 if (sio->suckyio_irq_enabled)
158 return; 159 return;
159 160
160 BUG_ON(!pdev); 161 BUG_ON(!pdev);
161 BUG_ON(!sio->usb_pdev); 162 BUG_ON(!sio->usb_pdev);
162 163
163 /* use the IRQ iosapic found for USB INT D... */ 164 /* use the IRQ iosapic found for USB INT D... */
164 pdev->irq = sio->usb_pdev->irq; 165 pdev->irq = sio->usb_pdev->irq;
165 166
166 /* ...then properly fixup the USB to point at suckyio PIC */ 167 /* ...then properly fixup the USB to point at suckyio PIC */
167 sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev); 168 sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);
168 169
169 printk(KERN_INFO PFX "Found NS87560 Legacy I/O device at %s (IRQ %i)\n", 170 printk(KERN_INFO PFX "Found NS87560 Legacy I/O device at %s (IRQ %i)\n",
170 pci_name(pdev), pdev->irq); 171 pci_name(pdev), pdev->irq);
171 172
172 pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base); 173 pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
173 sio->sp1_base &= ~1; 174 sio->sp1_base &= ~1;
174 printk(KERN_INFO PFX "Serial port 1 at 0x%x\n", sio->sp1_base); 175 printk(KERN_INFO PFX "Serial port 1 at 0x%x\n", sio->sp1_base);
175 176
176 pci_read_config_dword (pdev, SIO_SP2BAR, &sio->sp2_base); 177 pci_read_config_dword (pdev, SIO_SP2BAR, &sio->sp2_base);
177 sio->sp2_base &= ~1; 178 sio->sp2_base &= ~1;
178 printk(KERN_INFO PFX "Serial port 2 at 0x%x\n", sio->sp2_base); 179 printk(KERN_INFO PFX "Serial port 2 at 0x%x\n", sio->sp2_base);
179 180
180 pci_read_config_dword (pdev, SIO_PPBAR, &sio->pp_base); 181 pci_read_config_dword (pdev, SIO_PPBAR, &sio->pp_base);
181 sio->pp_base &= ~1; 182 sio->pp_base &= ~1;
182 printk(KERN_INFO PFX "Parallel port at 0x%x\n", sio->pp_base); 183 printk(KERN_INFO PFX "Parallel port at 0x%x\n", sio->pp_base);
183 184
184 pci_read_config_dword (pdev, SIO_FDCBAR, &sio->fdc_base); 185 pci_read_config_dword (pdev, SIO_FDCBAR, &sio->fdc_base);
185 sio->fdc_base &= ~1; 186 sio->fdc_base &= ~1;
186 printk(KERN_INFO PFX "Floppy controller at 0x%x\n", sio->fdc_base); 187 printk(KERN_INFO PFX "Floppy controller at 0x%x\n", sio->fdc_base);
187 pci_read_config_dword (pdev, SIO_ACPIBAR, &sio->acpi_base); 188 pci_read_config_dword (pdev, SIO_ACPIBAR, &sio->acpi_base);
188 sio->acpi_base &= ~1; 189 sio->acpi_base &= ~1;
189 printk(KERN_INFO PFX "ACPI at 0x%x\n", sio->acpi_base); 190 printk(KERN_INFO PFX "ACPI at 0x%x\n", sio->acpi_base);
190 191
191 request_region (IC_PIC1, 0x1f, "pic1"); 192 request_region (IC_PIC1, 0x1f, "pic1");
192 request_region (IC_PIC2, 0x1f, "pic2"); 193 request_region (IC_PIC2, 0x1f, "pic2");
193 request_region (sio->acpi_base, 0x1f, "acpi"); 194 request_region (sio->acpi_base, 0x1f, "acpi");
194 195
195 /* Enable the legacy I/O function */ 196 /* Enable the legacy I/O function */
196 pci_read_config_word (pdev, PCI_COMMAND, &word); 197 pci_read_config_word (pdev, PCI_COMMAND, &word);
197 word |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_IO; 198 word |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_IO;
198 pci_write_config_word (pdev, PCI_COMMAND, word); 199 pci_write_config_word (pdev, PCI_COMMAND, word);
199 200
200 pci_set_master (pdev); 201 pci_set_master (pdev);
201 pci_enable_device(pdev); 202 pci_enable_device(pdev);
202 203
203 /* 204 /*
204 * Next project is programming the onboard interrupt controllers. 205 * Next project is programming the onboard interrupt controllers.
205 * PDC hasn't done this for us, since it's using polled I/O. 206 * PDC hasn't done this for us, since it's using polled I/O.
206 * 207 *
207 * XXX Use dword writes to avoid bugs in Elroy or Suckyio Config 208 * XXX Use dword writes to avoid bugs in Elroy or Suckyio Config
208 * space access. PCI is by nature a 32-bit bus and config 209 * space access. PCI is by nature a 32-bit bus and config
209 * space can be sensitive to that. 210 * space can be sensitive to that.
210 */ 211 */
211 212
212 /* 0x64 - 0x67 : 213 /* 0x64 - 0x67 :
213 DMA Rtg 2 214 DMA Rtg 2
214 DMA Rtg 3 215 DMA Rtg 3
215 DMA Chan Ctl 216 DMA Chan Ctl
216 TRIGGER_1 == 0x82 USB & IDE level triggered, rest to edge 217 TRIGGER_1 == 0x82 USB & IDE level triggered, rest to edge
217 */ 218 */
218 pci_write_config_dword (pdev, 0x64, 0x82000000U); 219 pci_write_config_dword (pdev, 0x64, 0x82000000U);
219 220
220 /* 0x68 - 0x6b : 221 /* 0x68 - 0x6b :
221 TRIGGER_2 == 0x00 all edge triggered (not used) 222 TRIGGER_2 == 0x00 all edge triggered (not used)
222 CFG_IR_SER == 0x43 SerPort1 = IRQ3, SerPort2 = IRQ4 223 CFG_IR_SER == 0x43 SerPort1 = IRQ3, SerPort2 = IRQ4
223 CFG_IR_PF == 0x65 ParPort = IRQ5, FloppyCtlr = IRQ6 224 CFG_IR_PF == 0x65 ParPort = IRQ5, FloppyCtlr = IRQ6
224 CFG_IR_IDE == 0x07 IDE1 = IRQ7, reserved 225 CFG_IR_IDE == 0x07 IDE1 = IRQ7, reserved
225 */ 226 */
226 pci_write_config_dword (pdev, TRIGGER_2, 0x07654300U); 227 pci_write_config_dword (pdev, TRIGGER_2, 0x07654300U);
227 228
228 /* 0x6c - 0x6f : 229 /* 0x6c - 0x6f :
229 CFG_IR_INTAB == 0x00 230 CFG_IR_INTAB == 0x00
230 CFG_IR_INTCD == 0x10 USB = IRQ1 231 CFG_IR_INTCD == 0x10 USB = IRQ1
231 CFG_IR_PS2 == 0x00 232 CFG_IR_PS2 == 0x00
232 CFG_IR_FXBUS == 0x00 233 CFG_IR_FXBUS == 0x00
233 */ 234 */
234 pci_write_config_dword (pdev, CFG_IR_INTAB, 0x00001000U); 235 pci_write_config_dword (pdev, CFG_IR_INTAB, 0x00001000U);
235 236
236 /* 0x70 - 0x73 : 237 /* 0x70 - 0x73 :
237 CFG_IR_USB == 0x00 not used. USB is connected to INTD. 238 CFG_IR_USB == 0x00 not used. USB is connected to INTD.
238 CFG_IR_ACPI == 0x00 not used. 239 CFG_IR_ACPI == 0x00 not used.
239 DMA Priority == 0x4c88 Power on default value. NFC. 240 DMA Priority == 0x4c88 Power on default value. NFC.
240 */ 241 */
241 pci_write_config_dword (pdev, CFG_IR_USB, 0x4c880000U); 242 pci_write_config_dword (pdev, CFG_IR_USB, 0x4c880000U);
242 243
243 /* PIC1 Initialization Command Word register programming */ 244 /* PIC1 Initialization Command Word register programming */
244 outb (0x11,IC_PIC1+0); /* ICW1: ICW4 write req | ICW1 */ 245 outb (0x11,IC_PIC1+0); /* ICW1: ICW4 write req | ICW1 */
245 outb (0x00,IC_PIC1+1); /* ICW2: interrupt vector table - not used */ 246 outb (0x00,IC_PIC1+1); /* ICW2: interrupt vector table - not used */
246 outb (0x04,IC_PIC1+1); /* ICW3: Cascade */ 247 outb (0x04,IC_PIC1+1); /* ICW3: Cascade */
247 outb (0x01,IC_PIC1+1); /* ICW4: x86 mode */ 248 outb (0x01,IC_PIC1+1); /* ICW4: x86 mode */
248 249
249 /* PIC1 Program Operational Control Words */ 250 /* PIC1 Program Operational Control Words */
250 outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */ 251 outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
251 outb (0xc2,IC_PIC1+0); /* OCW2: priority (3-7,0-2) */ 252 outb (0xc2,IC_PIC1+0); /* OCW2: priority (3-7,0-2) */
252 253
253 /* PIC2 Initialization Command Word register programming */ 254 /* PIC2 Initialization Command Word register programming */
254 outb (0x11,IC_PIC2+0); /* ICW1: ICW4 write req | ICW1 */ 255 outb (0x11,IC_PIC2+0); /* ICW1: ICW4 write req | ICW1 */
255 outb (0x00,IC_PIC2+1); /* ICW2: N/A */ 256 outb (0x00,IC_PIC2+1); /* ICW2: N/A */
256 outb (0x02,IC_PIC2+1); /* ICW3: Slave ID code */ 257 outb (0x02,IC_PIC2+1); /* ICW3: Slave ID code */
257 outb (0x01,IC_PIC2+1); /* ICW4: x86 mode */ 258 outb (0x01,IC_PIC2+1); /* ICW4: x86 mode */
258 259
259 /* Program Operational Control Words */ 260 /* Program Operational Control Words */
260 outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */ 261 outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
261 outb (0x68,IC_PIC1+0); /* OCW3: OCW3 select | ESMM | SMM */ 262 outb (0x68,IC_PIC1+0); /* OCW3: OCW3 select | ESMM | SMM */
262 263
263 /* Write master mask reg */ 264 /* Write master mask reg */
264 outb (0xff,IC_PIC1+1); 265 outb (0xff,IC_PIC1+1);
265 266
266 /* Setup USB power regulation */ 267 /* Setup USB power regulation */
267 outb(1, sio->acpi_base + USB_REG_CR); 268 outb(1, sio->acpi_base + USB_REG_CR);
268 if (inb(sio->acpi_base + USB_REG_CR) & 1) 269 if (inb(sio->acpi_base + USB_REG_CR) & 1)
269 printk(KERN_INFO PFX "USB regulator enabled\n"); 270 printk(KERN_INFO PFX "USB regulator enabled\n");
270 else 271 else
271 printk(KERN_ERR PFX "USB regulator not initialized!\n"); 272 printk(KERN_ERR PFX "USB regulator not initialized!\n");
272 273
273 if (request_irq(pdev->irq, superio_interrupt, SA_INTERRUPT, 274 if (request_irq(pdev->irq, superio_interrupt, SA_INTERRUPT,
274 SUPERIO, (void *)sio)) { 275 SUPERIO, (void *)sio)) {
275 276
276 printk(KERN_ERR PFX "could not get irq\n"); 277 printk(KERN_ERR PFX "could not get irq\n");
277 BUG(); 278 BUG();
278 return; 279 return;
279 } 280 }
280 281
281 sio->suckyio_irq_enabled = 1; 282 sio->suckyio_irq_enabled = 1;
282 } 283 }
283 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init); 284 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
284 285
285 static void superio_disable_irq(unsigned int irq) 286 static void superio_disable_irq(unsigned int irq)
286 { 287 {
287 u8 r8; 288 u8 r8;
288 289
289 if ((irq < 1) || (irq == 2) || (irq > 7)) { 290 if ((irq < 1) || (irq == 2) || (irq > 7)) {
290 printk(KERN_ERR PFX "Illegal irq number.\n"); 291 printk(KERN_ERR PFX "Illegal irq number.\n");
291 BUG(); 292 BUG();
292 return; 293 return;
293 } 294 }
294 295
295 /* Mask interrupt */ 296 /* Mask interrupt */
296 297
297 r8 = inb(IC_PIC1+1); 298 r8 = inb(IC_PIC1+1);
298 r8 |= (1 << irq); 299 r8 |= (1 << irq);
299 outb (r8,IC_PIC1+1); 300 outb (r8,IC_PIC1+1);
300 } 301 }
301 302
302 static void superio_enable_irq(unsigned int irq) 303 static void superio_enable_irq(unsigned int irq)
303 { 304 {
304 u8 r8; 305 u8 r8;
305 306
306 if ((irq < 1) || (irq == 2) || (irq > 7)) { 307 if ((irq < 1) || (irq == 2) || (irq > 7)) {
307 printk(KERN_ERR PFX "Illegal irq number (%d).\n", irq); 308 printk(KERN_ERR PFX "Illegal irq number (%d).\n", irq);
308 BUG(); 309 BUG();
309 return; 310 return;
310 } 311 }
311 312
312 /* Unmask interrupt */ 313 /* Unmask interrupt */
313 r8 = inb(IC_PIC1+1); 314 r8 = inb(IC_PIC1+1);
314 r8 &= ~(1 << irq); 315 r8 &= ~(1 << irq);
315 outb (r8,IC_PIC1+1); 316 outb (r8,IC_PIC1+1);
316 } 317 }
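
The two helpers above are plain OCW1 mask-register bit twiddling: set bit n to mask IRQ n, clear it to unmask. Shown on an ordinary byte (IRQ 4 is serial port 2 per the CFG_IR_SER programming in superio_init()):

#include <stdio.h>

int main(void)
{
	unsigned char mask = 0xff;	/* all masked, as programmed at init */

	mask &= ~(1 << 4);		/* unmask IRQ4 */
	printf("after enable:  0x%02x\n", mask);	/* 0xef */

	mask |= (1 << 4);		/* mask IRQ4 again */
	printf("after disable: 0x%02x\n", mask);	/* 0xff */
	return 0;
}
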
317 318
318 static unsigned int superio_startup_irq(unsigned int irq) 319 static unsigned int superio_startup_irq(unsigned int irq)
319 { 320 {
320 superio_enable_irq(irq); 321 superio_enable_irq(irq);
321 return 0; 322 return 0;
322 } 323 }
323 324
324 static struct hw_interrupt_type superio_interrupt_type = { 325 static struct hw_interrupt_type superio_interrupt_type = {
325 .typename = SUPERIO, 326 .typename = SUPERIO,
326 .startup = superio_startup_irq, 327 .startup = superio_startup_irq,
327 .shutdown = superio_disable_irq, 328 .shutdown = superio_disable_irq,
328 .enable = superio_enable_irq, 329 .enable = superio_enable_irq,
329 .disable = superio_disable_irq, 330 .disable = superio_disable_irq,
330 .ack = no_ack_irq, 331 .ack = no_ack_irq,
331 .end = no_end_irq, 332 .end = no_end_irq,
332 }; 333 };
333 334
334 #ifdef DEBUG_SUPERIO_INIT 335 #ifdef DEBUG_SUPERIO_INIT
335 static unsigned short expected_device[3] = { 336 static unsigned short expected_device[3] = {
336 PCI_DEVICE_ID_NS_87415, 337 PCI_DEVICE_ID_NS_87415,
337 PCI_DEVICE_ID_NS_87560_LIO, 338 PCI_DEVICE_ID_NS_87560_LIO,
338 PCI_DEVICE_ID_NS_87560_USB 339 PCI_DEVICE_ID_NS_87560_USB
339 }; 340 };
340 #endif 341 #endif
341 342
342 int superio_fixup_irq(struct pci_dev *pcidev) 343 int superio_fixup_irq(struct pci_dev *pcidev)
343 { 344 {
344 int local_irq, i; 345 int local_irq, i;
345 346
346 #ifdef DEBUG_SUPERIO_INIT 347 #ifdef DEBUG_SUPERIO_INIT
347 int fn; 348 int fn;
348 fn = PCI_FUNC(pcidev->devfn); 349 fn = PCI_FUNC(pcidev->devfn);
349 350
350 /* Verify the function number matches the expected device id. */ 351 /* Verify the function number matches the expected device id. */
351 if (expected_device[fn] != pcidev->device) { 352 if (expected_device[fn] != pcidev->device) {
352 BUG(); 353 BUG();
353 return -1; 354 return -1;
354 } 355 }
355 printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %p\n", 356 printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %p\n",
356 pci_name(pcidev), 357 pci_name(pcidev),
357 pcidev->vendor, pcidev->device, 358 pcidev->vendor, pcidev->device,
358 __builtin_return_address(0)); 359 __builtin_return_address(0));
359 #endif 360 #endif
360 361
361 for (i = 0; i < 16; i++) { 362 for (i = 0; i < 16; i++) {
362 irq_desc[i].handler = &superio_interrupt_type; 363 irq_desc[i].handler = &superio_interrupt_type;
363 } 364 }
364 365
365 /* 366 /*
366 * We don't allocate a SuperIO irq for the legacy IO function, 367 * We don't allocate a SuperIO irq for the legacy IO function,
367 * since it is a "bridge". Instead, we will allocate IRQs for 368 * since it is a "bridge". Instead, we will allocate IRQs for
368 * each legacy device as they are initialized. 369 * each legacy device as they are initialized.
369 */ 370 */
370 371
371 switch(pcidev->device) { 372 switch(pcidev->device) {
372 case PCI_DEVICE_ID_NS_87415: /* Function 0 */ 373 case PCI_DEVICE_ID_NS_87415: /* Function 0 */
373 local_irq = IDE_IRQ; 374 local_irq = IDE_IRQ;
374 break; 375 break;
375 case PCI_DEVICE_ID_NS_87560_LIO: /* Function 1 */ 376 case PCI_DEVICE_ID_NS_87560_LIO: /* Function 1 */
376 sio_dev.lio_pdev = pcidev; /* save for superio_init() */ 377 sio_dev.lio_pdev = pcidev; /* save for superio_init() */
377 return -1; 378 return -1;
378 case PCI_DEVICE_ID_NS_87560_USB: /* Function 2 */ 379 case PCI_DEVICE_ID_NS_87560_USB: /* Function 2 */
379 sio_dev.usb_pdev = pcidev; /* save for superio_init() */ 380 sio_dev.usb_pdev = pcidev; /* save for superio_init() */
380 local_irq = USB_IRQ; 381 local_irq = USB_IRQ;
381 break; 382 break;
382 default: 383 default:
383 local_irq = -1; 384 local_irq = -1;
384 BUG(); 385 BUG();
385 break; 386 break;
386 } 387 }
387 388
388 return local_irq; 389 return local_irq;
389 } 390 }
390 391
391 static struct uart_port serial[] = {
392 {
393 .iotype = UPIO_PORT,
394 .line = 0,
395 .type = PORT_16550A,
396 .uartclk = 115200*16,
397 .fifosize = 16,
398 },
399 {
400 .iotype = UPIO_PORT,
401 .line = 1,
402 .type = PORT_16550A,
403 .uartclk = 115200*16,
404 .fifosize = 16,
405 }
406 };
407
408 static void __devinit superio_serial_init(void) 392 static void __devinit superio_serial_init(void)
409 { 393 {
410 #ifdef CONFIG_SERIAL_8250 394 #ifdef CONFIG_SERIAL_8250
411 int retval; 395 int retval;
412 396 struct uart_port serial_port;
413 serial[0].iobase = sio_dev.sp1_base;
414 serial[0].irq = SP1_IRQ;
415 spin_lock_init(&serial[0].lock);
416 397
417 retval = early_serial_setup(&serial[0]); 398 memset(&serial_port, 0, sizeof(serial_port));
399 serial_port.iotype = UPIO_PORT;
400 serial_port.type = PORT_16550A;
401 serial_port.uartclk = 115200*16;
402 serial_port.fifosize = 16;
403 spin_lock_init(&serial_port.lock);
404
405 /* serial port #1 */
406 serial_port.iobase = sio_dev.sp1_base;
407 serial_port.irq = SP1_IRQ;
408 serial_port.line = 0;
409 retval = early_serial_setup(&serial_port);
418 if (retval < 0) { 410 if (retval < 0) {
419 printk(KERN_WARNING PFX "Register Serial #0 failed.\n"); 411 printk(KERN_WARNING PFX "Register Serial #0 failed.\n");
420 return; 412 return;
421 } 413 }
422 414
423 serial[1].iobase = sio_dev.sp2_base; 415 /* serial port #2 */
424 serial[1].irq = SP2_IRQ; 416 serial_port.iobase = sio_dev.sp2_base;
425 spin_lock_init(&serial[1].lock); 417 serial_port.irq = SP2_IRQ;
426 retval = early_serial_setup(&serial[1]); 418 serial_port.line = 1;
427 419 retval = early_serial_setup(&serial_port);
428 if (retval < 0) 420 if (retval < 0)
429 printk(KERN_WARNING PFX "Register Serial #1 failed.\n"); 421 printk(KERN_WARNING PFX "Register Serial #1 failed.\n");
430 #endif /* CONFIG_SERIAL_8250 */ 422 #endif /* CONFIG_SERIAL_8250 */
431 } 423 }
432 424
433 425
434 static void __devinit superio_parport_init(void) 426 static void __devinit superio_parport_init(void)
435 { 427 {
436 #ifdef CONFIG_PARPORT_PC 428 #ifdef CONFIG_PARPORT_PC
437 if (!parport_pc_probe_port(sio_dev.pp_base, 429 if (!parport_pc_probe_port(sio_dev.pp_base,
438 0 /*base_hi*/, 430 0 /*base_hi*/,
439 PAR_IRQ, 431 PAR_IRQ,
440 PARPORT_DMA_NONE /* dma */, 432 PARPORT_DMA_NONE /* dma */,
441 NULL /*struct pci_dev* */) ) 433 NULL /*struct pci_dev* */) )
442 434
443 printk(KERN_WARNING PFX "Probing parallel port failed.\n"); 435 printk(KERN_WARNING PFX "Probing parallel port failed.\n");
444 #endif /* CONFIG_PARPORT_PC */ 436 #endif /* CONFIG_PARPORT_PC */
445 } 437 }
446 438
447 439
448 static void superio_fixup_pci(struct pci_dev *pdev) 440 static void superio_fixup_pci(struct pci_dev *pdev)
449 { 441 {
450 u8 prog; 442 u8 prog;
451 443
452 pdev->class |= 0x5; 444 pdev->class |= 0x5;
453 pci_write_config_byte(pdev, PCI_CLASS_PROG, pdev->class); 445 pci_write_config_byte(pdev, PCI_CLASS_PROG, pdev->class);
454 446
455 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); 447 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
456 printk("PCI: Enabled native mode for NS87415 (pif=0x%x)\n", prog); 448 printk("PCI: Enabled native mode for NS87415 (pif=0x%x)\n", prog);
457 } 449 }
458 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci); 450 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci);
459 451
460 452
461 static int __devinit 453 static int __devinit
462 superio_probe(struct pci_dev *dev, const struct pci_device_id *id) 454 superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
463 { 455 {
464 struct superio_device *sio = &sio_dev; 456 struct superio_device *sio = &sio_dev;
465 457
466 /* 458 /*
467 ** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a 459 ** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a
468 ** superio_probe(00:0e.1) ven 0x100b dev 0xe sv 0x0 sd 0x0 class 0x68000 460 ** superio_probe(00:0e.1) ven 0x100b dev 0xe sv 0x0 sd 0x0 class 0x68000
469 ** superio_probe(00:0e.2) ven 0x100b dev 0x12 sv 0x0 sd 0x0 class 0xc0310 461 ** superio_probe(00:0e.2) ven 0x100b dev 0x12 sv 0x0 sd 0x0 class 0xc0310
470 */ 462 */
471 DBG_INIT("superio_probe(%s) ven 0x%x dev 0x%x sv 0x%x sd 0x%x class 0x%x\n", 463 DBG_INIT("superio_probe(%s) ven 0x%x dev 0x%x sv 0x%x sd 0x%x class 0x%x\n",
472 pci_name(dev), 464 pci_name(dev),
473 dev->vendor, dev->device, 465 dev->vendor, dev->device,
474 dev->subsystem_vendor, dev->subsystem_device, 466 dev->subsystem_vendor, dev->subsystem_device,
475 dev->class); 467 dev->class);
476 468
477 BUG_ON(!sio->suckyio_irq_enabled); /* Enabled by PCI_FIXUP_FINAL */ 469 BUG_ON(!sio->suckyio_irq_enabled); /* Enabled by PCI_FIXUP_FINAL */
478 470
479 if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */ 471 if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */
480 superio_parport_init(); 472 superio_parport_init();
481 superio_serial_init(); 473 superio_serial_init();
482 /* REVISIT XXX : superio_fdc_init() ? */ 474 /* REVISIT XXX : superio_fdc_init() ? */
483 return 0; 475 return 0;
484 } else if (dev->device == PCI_DEVICE_ID_NS_87415) { /* Function 0 */ 476 } else if (dev->device == PCI_DEVICE_ID_NS_87415) { /* Function 0 */
485 DBG_INIT("superio_probe: ignoring IDE 87415\n"); 477 DBG_INIT("superio_probe: ignoring IDE 87415\n");
486 } else if (dev->device == PCI_DEVICE_ID_NS_87560_USB) { /* Function 2 */ 478 } else if (dev->device == PCI_DEVICE_ID_NS_87560_USB) { /* Function 2 */
487 DBG_INIT("superio_probe: ignoring USB OHCI controller\n"); 479 DBG_INIT("superio_probe: ignoring USB OHCI controller\n");
488 } else { 480 } else {
489 DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n"); 481 DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n");
490 } 482 }
491 483
492 /* Let appropriate other driver claim this device. */ 484 /* Let appropriate other driver claim this device. */
493 return -ENODEV; 485 return -ENODEV;
494 } 486 }
495 487
496 static struct pci_device_id superio_tbl[] = { 488 static struct pci_device_id superio_tbl[] = {
497 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO) }, 489 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO) },
498 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_USB) }, 490 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_USB) },
499 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415) }, 491 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415) },
500 { 0, } 492 { 0, }
501 }; 493 };
502 494
503 static struct pci_driver superio_driver = { 495 static struct pci_driver superio_driver = {
504 .name = SUPERIO, 496 .name = SUPERIO,
505 .id_table = superio_tbl, 497 .id_table = superio_tbl,
506 .probe = superio_probe, 498 .probe = superio_probe,
507 }; 499 };
508 500
509 static int __init superio_modinit(void) 501 static int __init superio_modinit(void)
drivers/scsi/lasi700.c
1 /* -*- mode: c; c-basic-offset: 8 -*- */ 1 /* -*- mode: c; c-basic-offset: 8 -*- */
2 2
3 /* PARISC LASI driver for the 53c700 chip 3 /* PARISC LASI driver for the 53c700 chip
4 * 4 *
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com 5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **----------------------------------------------------------------------------- 6 **-----------------------------------------------------------------------------
7 ** 7 **
8 ** This program is free software; you can redistribute it and/or modify 8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by 9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or 10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version. 11 ** (at your option) any later version.
12 ** 12 **
13 ** This program is distributed in the hope that it will be useful, 13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of 14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details. 16 ** GNU General Public License for more details.
17 ** 17 **
18 ** You should have received a copy of the GNU General Public License 18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software 19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 ** 21 **
22 **----------------------------------------------------------------------------- 22 **-----------------------------------------------------------------------------
23 */ 23 */
24 24
25 /* 25 /*
26 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently 26 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
27 * debugging this driver on the parisc architecture and suggesting 27 * debugging this driver on the parisc architecture and suggesting
28 * many improvements and bug fixes. 28 * many improvements and bug fixes.
29 * 29 *
30 * Thanks also go to Linuxcare Inc. for providing several PARISC 30 * Thanks also go to Linuxcare Inc. for providing several PARISC
31 * machines for me to debug the driver on. 31 * machines for me to debug the driver on.
32 */ 32 */
33 33
34 #include <linux/kernel.h> 34 #include <linux/kernel.h>
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <linux/init.h> 36 #include <linux/init.h>
37 #include <linux/types.h> 37 #include <linux/types.h>
38 #include <linux/stat.h> 38 #include <linux/stat.h>
39 #include <linux/mm.h> 39 #include <linux/mm.h>
40 #include <linux/blkdev.h> 40 #include <linux/blkdev.h>
41 #include <linux/sched.h> 41 #include <linux/sched.h>
42 #include <linux/ioport.h> 42 #include <linux/ioport.h>
43 #include <linux/dma-mapping.h> 43 #include <linux/dma-mapping.h>
44 44
45 #include <asm/page.h> 45 #include <asm/page.h>
46 #include <asm/pgtable.h> 46 #include <asm/pgtable.h>
47 #include <asm/irq.h> 47 #include <asm/irq.h>
48 #include <asm/hardware.h> 48 #include <asm/hardware.h>
49 #include <asm/parisc-device.h> 49 #include <asm/parisc-device.h>
50 #include <asm/delay.h> 50 #include <asm/delay.h>
51 51
52 #include <scsi/scsi_host.h> 52 #include <scsi/scsi_host.h>
53 #include <scsi/scsi_device.h> 53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_transport.h> 54 #include <scsi/scsi_transport.h>
55 #include <scsi/scsi_transport_spi.h> 55 #include <scsi/scsi_transport_spi.h>
56 56
57 #include "53c700.h" 57 #include "53c700.h"
58 58
59 MODULE_AUTHOR("James Bottomley"); 59 MODULE_AUTHOR("James Bottomley");
60 MODULE_DESCRIPTION("lasi700 SCSI Driver"); 60 MODULE_DESCRIPTION("lasi700 SCSI Driver");
61 MODULE_LICENSE("GPL"); 61 MODULE_LICENSE("GPL");
62 62
63 #define LASI_700_SVERSION 0x00071 63 #define LASI_700_SVERSION 0x00071
64 #define LASI_710_SVERSION 0x00082 64 #define LASI_710_SVERSION 0x00082
65 65
66 #define LASI700_ID_TABLE { \ 66 #define LASI700_ID_TABLE { \
67 .hw_type = HPHW_FIO, \ 67 .hw_type = HPHW_FIO, \
68 .sversion = LASI_700_SVERSION, \ 68 .sversion = LASI_700_SVERSION, \
69 .hversion = HVERSION_ANY_ID, \ 69 .hversion = HVERSION_ANY_ID, \
70 .hversion_rev = HVERSION_REV_ANY_ID, \ 70 .hversion_rev = HVERSION_REV_ANY_ID, \
71 } 71 }
72 72
73 #define LASI710_ID_TABLE { \ 73 #define LASI710_ID_TABLE { \
74 .hw_type = HPHW_FIO, \ 74 .hw_type = HPHW_FIO, \
75 .sversion = LASI_710_SVERSION, \ 75 .sversion = LASI_710_SVERSION, \
76 .hversion = HVERSION_ANY_ID, \ 76 .hversion = HVERSION_ANY_ID, \
77 .hversion_rev = HVERSION_REV_ANY_ID, \ 77 .hversion_rev = HVERSION_REV_ANY_ID, \
78 } 78 }
79 79
80 #define LASI700_CLOCK 25 80 #define LASI700_CLOCK 25
81 #define LASI710_CLOCK 40 81 #define LASI710_CLOCK 40
82 #define LASI_SCSI_CORE_OFFSET 0x100 82 #define LASI_SCSI_CORE_OFFSET 0x100
83 83
84 static struct parisc_device_id lasi700_ids[] = { 84 static struct parisc_device_id lasi700_ids[] = {
85 LASI700_ID_TABLE, 85 LASI700_ID_TABLE,
86 LASI710_ID_TABLE, 86 LASI710_ID_TABLE,
87 { 0 } 87 { 0 }
88 }; 88 };
89 89
90 static struct scsi_host_template lasi700_template = { 90 static struct scsi_host_template lasi700_template = {
91 .name = "LASI SCSI 53c700", 91 .name = "LASI SCSI 53c700",
92 .proc_name = "lasi700", 92 .proc_name = "lasi700",
93 .this_id = 7, 93 .this_id = 7,
94 .module = THIS_MODULE, 94 .module = THIS_MODULE,
95 }; 95 };
96 MODULE_DEVICE_TABLE(parisc, lasi700_ids); 96 MODULE_DEVICE_TABLE(parisc, lasi700_ids);
97 97
98 static int __init 98 static int __init
99 lasi700_probe(struct parisc_device *dev) 99 lasi700_probe(struct parisc_device *dev)
100 { 100 {
101 unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET; 101 unsigned long base = dev->hpa.start + LASI_SCSI_CORE_OFFSET;
102 struct NCR_700_Host_Parameters *hostdata; 102 struct NCR_700_Host_Parameters *hostdata;
103 struct Scsi_Host *host; 103 struct Scsi_Host *host;
104 104
105 hostdata = kmalloc(sizeof(*hostdata), GFP_KERNEL); 105 hostdata = kmalloc(sizeof(*hostdata), GFP_KERNEL);
106 if (!hostdata) { 106 if (!hostdata) {
107 printk(KERN_ERR "%s: Failed to allocate host data\n", 107 printk(KERN_ERR "%s: Failed to allocate host data\n",
108 dev->dev.bus_id); 108 dev->dev.bus_id);
109 return -ENOMEM; 109 return -ENOMEM;
110 } 110 }
111 memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters)); 111 memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
112 112
113 hostdata->dev = &dev->dev; 113 hostdata->dev = &dev->dev;
114 dma_set_mask(&dev->dev, DMA_32BIT_MASK); 114 dma_set_mask(&dev->dev, DMA_32BIT_MASK);
115 hostdata->base = ioremap(base, 0x100); 115 hostdata->base = ioremap_nocache(base, 0x100);
116 hostdata->differential = 0; 116 hostdata->differential = 0;
117 117
118 if (dev->id.sversion == LASI_700_SVERSION) { 118 if (dev->id.sversion == LASI_700_SVERSION) {
119 hostdata->clock = LASI700_CLOCK; 119 hostdata->clock = LASI700_CLOCK;
120 hostdata->force_le_on_be = 1; 120 hostdata->force_le_on_be = 1;
121 } else { 121 } else {
122 hostdata->clock = LASI710_CLOCK; 122 hostdata->clock = LASI710_CLOCK;
123 hostdata->force_le_on_be = 0; 123 hostdata->force_le_on_be = 0;
124 hostdata->chip710 = 1; 124 hostdata->chip710 = 1;
125 hostdata->dmode_extra = DMODE_FC2; 125 hostdata->dmode_extra = DMODE_FC2;
126 } 126 }
127 127
128 host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); 128 host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev);
129 if (!host) 129 if (!host)
130 goto out_kfree; 130 goto out_kfree;
131 host->this_id = 7; 131 host->this_id = 7;
132 host->base = base; 132 host->base = base;
133 host->irq = dev->irq; 133 host->irq = dev->irq;
134 if(request_irq(dev->irq, NCR_700_intr, SA_SHIRQ, "lasi700", host)) { 134 if(request_irq(dev->irq, NCR_700_intr, SA_SHIRQ, "lasi700", host)) {
135 printk(KERN_ERR "lasi700: request_irq failed!\n"); 135 printk(KERN_ERR "lasi700: request_irq failed!\n");
136 goto out_put_host; 136 goto out_put_host;
137 } 137 }
138 138
139 dev_set_drvdata(&dev->dev, host); 139 dev_set_drvdata(&dev->dev, host);
140 scsi_scan_host(host); 140 scsi_scan_host(host);
141 141
142 return 0; 142 return 0;
143 143
144 out_put_host: 144 out_put_host:
145 scsi_host_put(host); 145 scsi_host_put(host);
146 out_kfree: 146 out_kfree:
147 iounmap(hostdata->base); 147 iounmap(hostdata->base);
148 kfree(hostdata); 148 kfree(hostdata);
149 return -ENODEV; 149 return -ENODEV;
150 } 150 }
151 151
152 static int __exit 152 static int __exit
153 lasi700_driver_remove(struct parisc_device *dev) 153 lasi700_driver_remove(struct parisc_device *dev)
154 { 154 {
155 struct Scsi_Host *host = dev_get_drvdata(&dev->dev); 155 struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
156 struct NCR_700_Host_Parameters *hostdata = 156 struct NCR_700_Host_Parameters *hostdata =
157 (struct NCR_700_Host_Parameters *)host->hostdata[0]; 157 (struct NCR_700_Host_Parameters *)host->hostdata[0];
158 158
159 scsi_remove_host(host); 159 scsi_remove_host(host);
160 NCR_700_release(host); 160 NCR_700_release(host);
161 free_irq(host->irq, host); 161 free_irq(host->irq, host);
162 iounmap(hostdata->base); 162 iounmap(hostdata->base);
163 kfree(hostdata); 163 kfree(hostdata);
164 164
165 return 0; 165 return 0;
166 } 166 }
167 167
168 static struct parisc_driver lasi700_driver = { 168 static struct parisc_driver lasi700_driver = {
169 .name = "lasi_scsi", 169 .name = "lasi_scsi",
170 .id_table = lasi700_ids, 170 .id_table = lasi700_ids,
171 .probe = lasi700_probe, 171 .probe = lasi700_probe,
172 .remove = __devexit_p(lasi700_driver_remove), 172 .remove = __devexit_p(lasi700_driver_remove),
173 }; 173 };
174 174
175 static int __init 175 static int __init
176 lasi700_init(void) 176 lasi700_init(void)
177 { 177 {
178 return register_parisc_driver(&lasi700_driver); 178 return register_parisc_driver(&lasi700_driver);
179 } 179 }
180 180
181 static void __exit 181 static void __exit
182 lasi700_exit(void) 182 lasi700_exit(void)
183 { 183 {
184 unregister_parisc_driver(&lasi700_driver); 184 unregister_parisc_driver(&lasi700_driver);
185 } 185 }
186 186
187 module_init(lasi700_init); 187 module_init(lasi700_init);
188 module_exit(lasi700_exit); 188 module_exit(lasi700_exit);
189 189
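
The one functional change in lasi700.c is the hostdata->base mapping: it now comes from ioremap_nocache() rather than ioremap(), so accesses to the chip's I/O-space registers bypass the cache. A minimal sketch of the pattern, not taken from the driver, written as a helper for illustration only:

	/* Illustrative only: map the 53c700 register window uncached. */
	static void __iomem *map_core_regs(struct parisc_device *dev)
	{
		/* I/O-space registers must be accessed uncached, hence
		 * ioremap_nocache() rather than plain ioremap(). */
		return ioremap_nocache(dev->hpa.start + LASI_SCSI_CORE_OFFSET,
				       0x100);
	}

The error path and lasi700_driver_remove() undo the mapping with iounmap(), which this commit leaves unchanged.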
drivers/scsi/zalon.c
1 /* 1 /*
2 * Zalon 53c7xx device driver. 2 * Zalon 53c7xx device driver.
3 * By Richard Hirst (rhirst@linuxcare.com) 3 * By Richard Hirst (rhirst@linuxcare.com)
4 */ 4 */
5 5
6 #include <linux/init.h> 6 #include <linux/init.h>
7 #include <linux/interrupt.h> 7 #include <linux/interrupt.h>
8 #include <linux/module.h> 8 #include <linux/module.h>
9 #include <linux/types.h> 9 #include <linux/types.h>
10 #include <asm/hardware.h> 10 #include <asm/hardware.h>
11 #include <asm/io.h> 11 #include <asm/io.h>
12 12
13 #include "../parisc/gsc.h" 13 #include "../parisc/gsc.h"
14 14
15 #include "ncr53c8xx.h" 15 #include "ncr53c8xx.h"
16 16
17 MODULE_AUTHOR("Richard Hirst"); 17 MODULE_AUTHOR("Richard Hirst");
18 MODULE_DESCRIPTION("Bluefish/Zalon 720 SCSI Driver"); 18 MODULE_DESCRIPTION("Bluefish/Zalon 720 SCSI Driver");
19 MODULE_LICENSE("GPL"); 19 MODULE_LICENSE("GPL");
20 20
21 #define GSC_SCSI_ZALON_OFFSET 0x800 21 #define GSC_SCSI_ZALON_OFFSET 0x800
22 22
23 #define IO_MODULE_EIM (1*4) 23 #define IO_MODULE_EIM (1*4)
24 #define IO_MODULE_DC_ADATA (2*4) 24 #define IO_MODULE_DC_ADATA (2*4)
25 #define IO_MODULE_II_CDATA (3*4) 25 #define IO_MODULE_II_CDATA (3*4)
26 #define IO_MODULE_IO_COMMAND (12*4) 26 #define IO_MODULE_IO_COMMAND (12*4)
27 #define IO_MODULE_IO_STATUS (13*4) 27 #define IO_MODULE_IO_STATUS (13*4)
28 28
29 #define IOSTATUS_RY 0x40 29 #define IOSTATUS_RY 0x40
30 #define IOSTATUS_FE 0x80 30 #define IOSTATUS_FE 0x80
31 #define IOIIDATA_SMINT5L 0x40000000 31 #define IOIIDATA_SMINT5L 0x40000000
32 #define IOIIDATA_MINT5EN 0x20000000 32 #define IOIIDATA_MINT5EN 0x20000000
33 #define IOIIDATA_PACKEN 0x10000000 33 #define IOIIDATA_PACKEN 0x10000000
34 #define IOIIDATA_PREFETCHEN 0x08000000 34 #define IOIIDATA_PREFETCHEN 0x08000000
35 #define IOIIDATA_IOII 0x00000020 35 #define IOIIDATA_IOII 0x00000020
36 36
37 #define CMD_RESET 5 37 #define CMD_RESET 5
38 38
39 static struct ncr_chip zalon720_chip __initdata = { 39 static struct ncr_chip zalon720_chip __initdata = {
40 .revision_id = 0x0f, 40 .revision_id = 0x0f,
41 .burst_max = 3, 41 .burst_max = 3,
42 .offset_max = 8, 42 .offset_max = 8,
43 .nr_divisor = 4, 43 .nr_divisor = 4,
44 .features = FE_WIDE | FE_DIFF | FE_EHP| FE_MUX | FE_EA, 44 .features = FE_WIDE | FE_DIFF | FE_EHP| FE_MUX | FE_EA,
45 }; 45 };
46 46
47 47
48 48
49 #if 0 49 #if 0
50 /* FIXME: 50 /* FIXME:
51 * Is this function dead code? or is someone planning on using it in the 51 * Is this function dead code? or is someone planning on using it in the
52 * future. The clock = (int) pdc_result[16] does not look correct to 52 * future. The clock = (int) pdc_result[16] does not look correct to
53 * me ... I think it should be iodc_data[16]. Since this cause a compile 53 * me ... I think it should be iodc_data[16]. Since this cause a compile
54 * error with the new encapsulated PDC, I'm not compiling in this function. 54 * error with the new encapsulated PDC, I'm not compiling in this function.
55 * - RB 55 * - RB
56 */ 56 */
57 /* poke SCSI clock out of iodc data */ 57 /* poke SCSI clock out of iodc data */
58 58
59 static u8 iodc_data[32] __attribute__ ((aligned (64))); 59 static u8 iodc_data[32] __attribute__ ((aligned (64)));
60 static unsigned long pdc_result[32] __attribute__ ((aligned (16))) ={0,0,0,0}; 60 static unsigned long pdc_result[32] __attribute__ ((aligned (16))) ={0,0,0,0};
61 61
62 static int 62 static int
63 lasi_scsi_clock(void * hpa, int defaultclock) 63 lasi_scsi_clock(void * hpa, int defaultclock)
64 { 64 {
65 int clock, status; 65 int clock, status;
66 66
67 status = pdc_iodc_read(&pdc_result, hpa, 0, &iodc_data, 32 ); 67 status = pdc_iodc_read(&pdc_result, hpa, 0, &iodc_data, 32 );
68 if (status == PDC_RET_OK) { 68 if (status == PDC_RET_OK) {
69 clock = (int) pdc_result[16]; 69 clock = (int) pdc_result[16];
70 } else { 70 } else {
71 printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __FUNCTION__, status); 71 printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __FUNCTION__, status);
72 clock = defaultclock; 72 clock = defaultclock;
73 } 73 }
74 74
75 printk(KERN_DEBUG "%s: SCSI clock %d\n", __FUNCTION__, clock); 75 printk(KERN_DEBUG "%s: SCSI clock %d\n", __FUNCTION__, clock);
76 return clock; 76 return clock;
77 } 77 }
78 #endif 78 #endif
79 79
80 static struct scsi_host_template zalon7xx_template = { 80 static struct scsi_host_template zalon7xx_template = {
81 .module = THIS_MODULE, 81 .module = THIS_MODULE,
82 .proc_name = "zalon7xx", 82 .proc_name = "zalon7xx",
83 }; 83 };
84 84
85 static int __init 85 static int __init
86 zalon_probe(struct parisc_device *dev) 86 zalon_probe(struct parisc_device *dev)
87 { 87 {
88 struct gsc_irq gsc_irq; 88 struct gsc_irq gsc_irq;
89 u32 zalon_vers; 89 u32 zalon_vers;
90 int error = -ENODEV; 90 int error = -ENODEV;
91 void __iomem *zalon = ioremap(dev->hpa.start, 4096); 91 void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096);
92 void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET; 92 void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
93 static int unit = 0; 93 static int unit = 0;
94 struct Scsi_Host *host; 94 struct Scsi_Host *host;
95 struct ncr_device device; 95 struct ncr_device device;
96 96
97 __raw_writel(CMD_RESET, zalon + IO_MODULE_IO_COMMAND); 97 __raw_writel(CMD_RESET, zalon + IO_MODULE_IO_COMMAND);
98 while (!(__raw_readl(zalon + IO_MODULE_IO_STATUS) & IOSTATUS_RY)) 98 while (!(__raw_readl(zalon + IO_MODULE_IO_STATUS) & IOSTATUS_RY))
99 cpu_relax(); 99 cpu_relax();
100 __raw_writel(IOIIDATA_MINT5EN | IOIIDATA_PACKEN | IOIIDATA_PREFETCHEN, 100 __raw_writel(IOIIDATA_MINT5EN | IOIIDATA_PACKEN | IOIIDATA_PREFETCHEN,
101 zalon + IO_MODULE_II_CDATA); 101 zalon + IO_MODULE_II_CDATA);
102 102
103 /* XXX: Save the Zalon version for bug workarounds? */ 103 /* XXX: Save the Zalon version for bug workarounds? */
104 zalon_vers = (__raw_readl(zalon + IO_MODULE_II_CDATA) >> 24) & 0x07; 104 zalon_vers = (__raw_readl(zalon + IO_MODULE_II_CDATA) >> 24) & 0x07;
105 105
106 /* Set up the interrupts first. 106 /* Set up the interrupts first.
107 ** Later on, request_irq() will register the handler. 107 ** Later on, request_irq() will register the handler.
108 */ 108 */
109 dev->irq = gsc_alloc_irq(&gsc_irq); 109 dev->irq = gsc_alloc_irq(&gsc_irq);
110 110
111 printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __FUNCTION__, 111 printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __FUNCTION__,
112 zalon_vers, dev->irq); 112 zalon_vers, dev->irq);
113 113
114 __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM); 114 __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM);
115 115
116 if (zalon_vers == 0) 116 if (zalon_vers == 0)
117 printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __FUNCTION__); 117 printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __FUNCTION__);
118 118
119 memset(&device, 0, sizeof(struct ncr_device)); 119 memset(&device, 0, sizeof(struct ncr_device));
120 120
121 /* The following three are needed before any other access. */ 121 /* The following three are needed before any other access. */
122 __raw_writeb(0x20, io_port + 0x38); /* DCNTL_REG, EA */ 122 __raw_writeb(0x20, io_port + 0x38); /* DCNTL_REG, EA */
123 __raw_writeb(0x04, io_port + 0x1b); /* CTEST0_REG, EHP */ 123 __raw_writeb(0x04, io_port + 0x1b); /* CTEST0_REG, EHP */
124 __raw_writeb(0x80, io_port + 0x22); /* CTEST4_REG, MUX */ 124 __raw_writeb(0x80, io_port + 0x22); /* CTEST4_REG, MUX */
125 125
126 /* Initialise ncr_device structure with items required by ncr_attach. */ 126 /* Initialise ncr_device structure with items required by ncr_attach. */
127 device.chip = zalon720_chip; 127 device.chip = zalon720_chip;
128 device.host_id = 7; 128 device.host_id = 7;
129 device.dev = &dev->dev; 129 device.dev = &dev->dev;
130 device.slot.base = dev->hpa.start + GSC_SCSI_ZALON_OFFSET; 130 device.slot.base = dev->hpa.start + GSC_SCSI_ZALON_OFFSET;
131 device.slot.base_v = io_port; 131 device.slot.base_v = io_port;
132 device.slot.irq = dev->irq; 132 device.slot.irq = dev->irq;
133 device.differential = 2; 133 device.differential = 2;
134 134
135 host = ncr_attach(&zalon7xx_template, unit, &device); 135 host = ncr_attach(&zalon7xx_template, unit, &device);
136 if (!host) 136 if (!host)
137 goto fail; 137 goto fail;
138 138
139 if (request_irq(dev->irq, ncr53c8xx_intr, SA_SHIRQ, "zalon", host)) { 139 if (request_irq(dev->irq, ncr53c8xx_intr, SA_SHIRQ, "zalon", host)) {
140 printk(KERN_ERR "%s: irq problem with %d, detaching\n", 140 printk(KERN_ERR "%s: irq problem with %d, detaching\n",
141 dev->dev.bus_id, dev->irq); 141 dev->dev.bus_id, dev->irq);
142 goto fail; 142 goto fail;
143 } 143 }
144 144
145 unit++; 145 unit++;
146 146
147 dev_set_drvdata(&dev->dev, host); 147 dev_set_drvdata(&dev->dev, host);
148 148
149 error = scsi_add_host(host, &dev->dev); 149 error = scsi_add_host(host, &dev->dev);
150 if (error) 150 if (error)
151 goto fail_free_irq; 151 goto fail_free_irq;
152 152
153 scsi_scan_host(host); 153 scsi_scan_host(host);
154 return 0; 154 return 0;
155 155
156 fail_free_irq: 156 fail_free_irq:
157 free_irq(dev->irq, host); 157 free_irq(dev->irq, host);
158 fail: 158 fail:
159 ncr53c8xx_release(host); 159 ncr53c8xx_release(host);
160 return error; 160 return error;
161 } 161 }
162 162
163 static struct parisc_device_id zalon_tbl[] = { 163 static struct parisc_device_id zalon_tbl[] = {
164 { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00089 }, 164 { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00089 },
165 { 0, } 165 { 0, }
166 }; 166 };
167 167
168 MODULE_DEVICE_TABLE(parisc, zalon_tbl); 168 MODULE_DEVICE_TABLE(parisc, zalon_tbl);
169 169
170 static int __exit zalon_remove(struct parisc_device *dev) 170 static int __exit zalon_remove(struct parisc_device *dev)
171 { 171 {
172 struct Scsi_Host *host = dev_get_drvdata(&dev->dev); 172 struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
173 173
174 scsi_remove_host(host); 174 scsi_remove_host(host);
175 ncr53c8xx_release(host); 175 ncr53c8xx_release(host);
176 free_irq(dev->irq, host); 176 free_irq(dev->irq, host);
177 177
178 return 0; 178 return 0;
179 } 179 }
180 180
181 static struct parisc_driver zalon_driver = { 181 static struct parisc_driver zalon_driver = {
182 .name = "zalon", 182 .name = "zalon",
183 .id_table = zalon_tbl, 183 .id_table = zalon_tbl,
184 .probe = zalon_probe, 184 .probe = zalon_probe,
185 .remove = __devexit_p(zalon_remove), 185 .remove = __devexit_p(zalon_remove),
186 }; 186 };
187 187
188 static int __init zalon7xx_init(void) 188 static int __init zalon7xx_init(void)
189 { 189 {
190 int ret = ncr53c8xx_init(); 190 int ret = ncr53c8xx_init();
191 if (!ret) 191 if (!ret)
192 ret = register_parisc_driver(&zalon_driver); 192 ret = register_parisc_driver(&zalon_driver);
193 if (ret) 193 if (ret)
194 ncr53c8xx_exit(); 194 ncr53c8xx_exit();
195 return ret; 195 return ret;
196 } 196 }
197 197
198 static void __exit zalon7xx_exit(void) 198 static void __exit zalon7xx_exit(void)
199 { 199 {
200 unregister_parisc_driver(&zalon_driver); 200 unregister_parisc_driver(&zalon_driver);
201 ncr53c8xx_exit(); 201 ncr53c8xx_exit();
202 } 202 }
203 203
204 module_init(zalon7xx_init); 204 module_init(zalon7xx_init);
205 module_exit(zalon7xx_exit); 205 module_exit(zalon7xx_exit);
206 206
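
zalon_probe() begins by resetting the Zalon I/O module and then spinning, with cpu_relax(), until the status register reports ready. A sketch of that ready-poll idiom, reusing the driver's own register macros; the bounded retry count is an illustrative addition, since the committed code spins indefinitely:

	/* Illustrative only: reset the module and poll for IOSTATUS_RY. */
	static int zalon_wait_ready(void __iomem *zalon)
	{
		int retries = 1000000;	/* arbitrary bound, not in the driver */

		__raw_writel(CMD_RESET, zalon + IO_MODULE_IO_COMMAND);
		while (!(__raw_readl(zalon + IO_MODULE_IO_STATUS) & IOSTATUS_RY)) {
			if (--retries == 0)
				return -ETIMEDOUT;
			cpu_relax();
		}
		return 0;
	}

The __raw_ accessors perform raw, unswapped 32-bit loads and stores, which is what this big-endian hardware expects through the uncached mapping.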
drivers/serial/8250_gsc.c
1 /* 1 /*
2 * Serial Device Initialisation for Lasi/Asp/Wax/Dino 2 * Serial Device Initialisation for Lasi/Asp/Wax/Dino
3 * 3 *
4 * (c) Copyright Matthew Wilcox <willy@debian.org> 2001-2002 4 * (c) Copyright Matthew Wilcox <willy@debian.org> 2001-2002
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 */ 10 */
11 11
12 #include <linux/errno.h> 12 #include <linux/errno.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 #include <linux/ioport.h> 15 #include <linux/ioport.h>
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/serial_core.h> 17 #include <linux/serial_core.h>
18 #include <linux/signal.h> 18 #include <linux/signal.h>
19 #include <linux/slab.h> 19 #include <linux/slab.h>
20 #include <linux/types.h> 20 #include <linux/types.h>
21 21
22 #include <asm/hardware.h> 22 #include <asm/hardware.h>
23 #include <asm/parisc-device.h> 23 #include <asm/parisc-device.h>
24 #include <asm/io.h> 24 #include <asm/io.h>
25 #include <asm/serial.h> /* for LASI_BASE_BAUD */ 25 #include <asm/serial.h> /* for LASI_BASE_BAUD */
26 26
27 #include "8250.h" 27 #include "8250.h"
28 28
29 static int __init 29 static int __init
30 serial_init_chip(struct parisc_device *dev) 30 serial_init_chip(struct parisc_device *dev)
31 { 31 {
32 struct uart_port port; 32 struct uart_port port;
33 unsigned long address; 33 unsigned long address;
34 int err; 34 int err;
35 35
36 if (!dev->irq) { 36 if (!dev->irq) {
37 /* We find some unattached serial ports by walking native 37 /* We find some unattached serial ports by walking native
38 * busses. These should be silently ignored. Otherwise, 38 * busses. These should be silently ignored. Otherwise,
39 * what we have here is a missing parent device, so tell 39 * what we have here is a missing parent device, so tell
40 * the user what they're missing. 40 * the user what they're missing.
41 */ 41 */
42 if (parisc_parent(dev)->id.hw_type != HPHW_IOA) { 42 if (parisc_parent(dev)->id.hw_type != HPHW_IOA) {
43 printk(KERN_INFO "Serial: device 0x%lx not configured.\n" 43 printk(KERN_INFO "Serial: device 0x%lx not configured.\n"
44 "Enable support for Wax, Lasi, Asp or Dino.\n", 44 "Enable support for Wax, Lasi, Asp or Dino.\n",
45 dev->hpa.start); 45 dev->hpa.start);
46 } 46 }
47 return -ENODEV; 47 return -ENODEV;
48 } 48 }
49 49
50 address = dev->hpa.start; 50 address = dev->hpa.start;
51 if (dev->id.sversion != 0x8d) { 51 if (dev->id.sversion != 0x8d) {
52 address += 0x800; 52 address += 0x800;
53 } 53 }
54 54
55 memset(&port, 0, sizeof(struct uart_port)); 55 memset(&port, 0, sizeof(port));
56 port.mapbase = address; 56 port.iotype = UPIO_MEM;
57 port.irq = dev->irq; 57 port.uartclk = LASI_BASE_BAUD * 16;
58 port.iotype = UPIO_MEM; 58 port.mapbase = address;
59 port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; 59 port.membase = ioremap_nocache(address, 16);
60 port.uartclk = LASI_BASE_BAUD * 16; 60 port.irq = dev->irq;
61 port.dev = &dev->dev; 61 port.flags = UPF_BOOT_AUTOCONF;
62 port.dev = &dev->dev;
62 63
63 err = serial8250_register_port(&port); 64 err = serial8250_register_port(&port);
64 if (err < 0) { 65 if (err < 0) {
65 printk(KERN_WARNING "serial8250_register_port returned error %d\n", err); 66 printk(KERN_WARNING "serial8250_register_port returned error %d\n", err);
66 return err; 67 return err;
67 } 68 }
68 69
69 return 0; 70 return 0;
70 } 71 }
71 72
72 static struct parisc_device_id serial_tbl[] = { 73 static struct parisc_device_id serial_tbl[] = {
73 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, 74 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
74 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, 75 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
75 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, 76 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
76 { 0 } 77 { 0 }
77 }; 78 };
78 79
79 /* Hack. Some machines have SERIAL_0 attached to Lasi and SERIAL_1 80 /* Hack. Some machines have SERIAL_0 attached to Lasi and SERIAL_1
80 * attached to Dino. Unfortunately, Dino appears before Lasi in the device 81 * attached to Dino. Unfortunately, Dino appears before Lasi in the device
81 * tree. To ensure that ttyS0 == SERIAL_0, we register two drivers; one 82 * tree. To ensure that ttyS0 == SERIAL_0, we register two drivers; one
82 * which only knows about Lasi and then a second which will find all the 83 * which only knows about Lasi and then a second which will find all the
83 * other serial ports. HPUX ignores this problem. 84 * other serial ports. HPUX ignores this problem.
84 */ 85 */
85 static struct parisc_device_id lasi_tbl[] = { 86 static struct parisc_device_id lasi_tbl[] = {
86 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03B, 0x0008C }, /* C1xx/C1xxL */ 87 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03B, 0x0008C }, /* C1xx/C1xxL */
87 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03C, 0x0008C }, /* B132L */ 88 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03C, 0x0008C }, /* B132L */
88 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03D, 0x0008C }, /* B160L */ 89 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03D, 0x0008C }, /* B160L */
89 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03E, 0x0008C }, /* B132L+ */ 90 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03E, 0x0008C }, /* B132L+ */
90 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03F, 0x0008C }, /* B180L+ */ 91 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x03F, 0x0008C }, /* B180L+ */
91 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x046, 0x0008C }, /* Rocky2 120 */ 92 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x046, 0x0008C }, /* Rocky2 120 */
92 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x047, 0x0008C }, /* Rocky2 150 */ 93 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x047, 0x0008C }, /* Rocky2 150 */
93 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x04E, 0x0008C }, /* Kiji L2 132 */ 94 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x04E, 0x0008C }, /* Kiji L2 132 */
94 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x056, 0x0008C }, /* Raven+ */ 95 { HPHW_FIO, HVERSION_REV_ANY_ID, 0x056, 0x0008C }, /* Raven+ */
95 { 0 } 96 { 0 }
96 }; 97 };
97 98
98 99
99 MODULE_DEVICE_TABLE(parisc, serial_tbl); 100 MODULE_DEVICE_TABLE(parisc, serial_tbl);
100 101
101 static struct parisc_driver lasi_driver = { 102 static struct parisc_driver lasi_driver = {
102 .name = "serial_1", 103 .name = "serial_1",
103 .id_table = lasi_tbl, 104 .id_table = lasi_tbl,
104 .probe = serial_init_chip, 105 .probe = serial_init_chip,
105 }; 106 };
106 107
107 static struct parisc_driver serial_driver = { 108 static struct parisc_driver serial_driver = {
108 .name = "serial", 109 .name = "serial",
109 .id_table = serial_tbl, 110 .id_table = serial_tbl,
110 .probe = serial_init_chip, 111 .probe = serial_init_chip,
111 }; 112 };
112 113
113 int __init probe_serial_gsc(void) 114 int __init probe_serial_gsc(void)
114 { 115 {
115 register_parisc_driver(&lasi_driver); 116 register_parisc_driver(&lasi_driver);
116 register_parisc_driver(&serial_driver); 117 register_parisc_driver(&serial_driver);
117 return 0; 118 return 0;
118 } 119 }
119 120
120 module_init(probe_serial_gsc); 121 module_init(probe_serial_gsc);
121 122
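
The 8250_gsc change goes a step further than the other files: the old code passed UPF_IOREMAP and let the serial core create the mapping with plain (cached) ioremap(), while the new code maps the 16-byte register window itself with ioremap_nocache() and hands the serial core a ready-made membase. A sketch of the resulting setup, with a NULL check added for illustration (the committed code does not check the mapping):

	memset(&port, 0, sizeof(port));
	port.iotype  = UPIO_MEM;
	port.uartclk = LASI_BASE_BAUD * 16;
	port.mapbase = address;
	port.membase = ioremap_nocache(address, 16);	/* uncached MMIO */
	if (!port.membase)
		return -ENOMEM;			/* illustrative; not in the diff */
	port.irq     = dev->irq;
	port.flags   = UPF_BOOT_AUTOCONF;	/* UPF_IOREMAP dropped */
	port.dev     = &dev->dev;
	err = serial8250_register_port(&port);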
drivers/serial/mux.c
1 /* 1 /*
2 ** mux.c: 2 ** mux.c:
3 ** serial driver for the Mux console found in some PA-RISC servers. 3 ** serial driver for the Mux console found in some PA-RISC servers.
4 ** 4 **
5 ** (c) Copyright 2002 Ryan Bradetich 5 ** (c) Copyright 2002 Ryan Bradetich
6 ** (c) Copyright 2002 Hewlett-Packard Company 6 ** (c) Copyright 2002 Hewlett-Packard Company
7 ** 7 **
8 ** This program is free software; you can redistribute it and/or modify 8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by 9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or 10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version. 11 ** (at your option) any later version.
12 ** 12 **
13 ** This driver currently only supports the console (port 0) on the MUX. 13 ** This driver currently only supports the console (port 0) on the MUX.
14 ** Additional work will be needed on this driver to enable the full 14 ** Additional work will be needed on this driver to enable the full
15 ** functionality of the MUX. 15 ** functionality of the MUX.
16 ** 16 **
17 */ 17 */
18 18
19 #include <linux/config.h> 19 #include <linux/config.h>
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/tty.h> 21 #include <linux/tty.h>
22 #include <linux/ioport.h> 22 #include <linux/ioport.h>
23 #include <linux/init.h> 23 #include <linux/init.h>
24 #include <linux/serial.h> 24 #include <linux/serial.h>
25 #include <linux/console.h> 25 #include <linux/console.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 #include <linux/delay.h> /* for udelay */ 27 #include <linux/delay.h> /* for udelay */
28 #include <linux/device.h> 28 #include <linux/device.h>
29 #include <asm/io.h> 29 #include <asm/io.h>
30 #include <asm/irq.h> 30 #include <asm/irq.h>
31 #include <asm/parisc-device.h> 31 #include <asm/parisc-device.h>
32 32
33 #ifdef CONFIG_MAGIC_SYSRQ 33 #ifdef CONFIG_MAGIC_SYSRQ
34 #include <linux/sysrq.h> 34 #include <linux/sysrq.h>
35 #define SUPPORT_SYSRQ 35 #define SUPPORT_SYSRQ
36 #endif 36 #endif
37 37
38 #include <linux/serial_core.h> 38 #include <linux/serial_core.h>
39 39
40 #define MUX_OFFSET 0x800 40 #define MUX_OFFSET 0x800
41 #define MUX_LINE_OFFSET 0x80 41 #define MUX_LINE_OFFSET 0x80
42 42
43 #define MUX_FIFO_SIZE 255 43 #define MUX_FIFO_SIZE 255
44 #define MUX_POLL_DELAY (30 * HZ / 1000) 44 #define MUX_POLL_DELAY (30 * HZ / 1000)
45 45
46 #define IO_DATA_REG_OFFSET 0x3c 46 #define IO_DATA_REG_OFFSET 0x3c
47 #define IO_DCOUNT_REG_OFFSET 0x40 47 #define IO_DCOUNT_REG_OFFSET 0x40
48 48
49 #define MUX_EOFIFO(status) ((status & 0xF000) == 0xF000) 49 #define MUX_EOFIFO(status) ((status & 0xF000) == 0xF000)
50 #define MUX_STATUS(status) ((status & 0xF000) == 0x8000) 50 #define MUX_STATUS(status) ((status & 0xF000) == 0x8000)
51 #define MUX_BREAK(status) ((status & 0xF000) == 0x2000) 51 #define MUX_BREAK(status) ((status & 0xF000) == 0x2000)
52 52
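
For reference, the receive word read from IO_DATA_REG_OFFSET encodes status in its top nibble and a character in its low byte: a word of the form 0xFxxx means the fifo is empty (MUX_EOFIFO), 0x8xxx is a status word that the read loop skips (MUX_STATUS), 0x2xxx flags a break (MUX_BREAK), and anything else carries a received character in bits 7:0.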
53 #define MUX_NR 256 53 #define MUX_NR 256
54 static unsigned int port_cnt = 0; 54 static unsigned int port_cnt __read_mostly;
55 static struct uart_port mux_ports[MUX_NR]; 55 static struct uart_port mux_ports[MUX_NR];
56 56
57 static struct uart_driver mux_driver = { 57 static struct uart_driver mux_driver = {
58 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
59 .driver_name = "ttyB", 59 .driver_name = "ttyB",
60 .dev_name = "ttyB", 60 .dev_name = "ttyB",
61 .major = MUX_MAJOR, 61 .major = MUX_MAJOR,
62 .minor = 0, 62 .minor = 0,
63 .nr = MUX_NR, 63 .nr = MUX_NR,
64 }; 64 };
65 65
66 static struct timer_list mux_timer; 66 static struct timer_list mux_timer;
67 67
68 #define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET) 68 #define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
69 #define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET) 69 #define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
70 #define GET_MUX_PORTS(iodc_data) ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8 70 #define GET_MUX_PORTS(iodc_data) ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8
71 71
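
GET_MUX_PORTS() derives the port count from byte 4 of the IODC data: the high nibble, times eight, plus eight. For example, iodc_data[4] = 0x30 gives (3 * 8) + 8 = 32 ports, and a high nibble of zero yields the 8-port minimum.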
72 /** 72 /**
73 * mux_tx_empty - Check if the transmitter fifo is empty. 73 * mux_tx_empty - Check if the transmitter fifo is empty.
74 * @port: Ptr to the uart_port. 74 * @port: Ptr to the uart_port.
75 * 75 *
76 * This function tests if the transmitter fifo for the port 76 * This function tests if the transmitter fifo for the port
77 * described by 'port' is empty. If it is empty, this function 77 * described by 'port' is empty. If it is empty, this function
78 * should return TIOCSER_TEMT, otherwise return 0. 78 * should return TIOCSER_TEMT, otherwise return 0.
79 */ 79 */
80 static unsigned int mux_tx_empty(struct uart_port *port) 80 static unsigned int mux_tx_empty(struct uart_port *port)
81 { 81 {
82 return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT; 82 return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT;
83 } 83 }
84 84
85 /** 85 /**
86 * mux_set_mctrl - Set the current state of the modem control inputs. 86 * mux_set_mctrl - Set the current state of the modem control inputs.
87 * @ports: Ptr to the uart_port. 87 * @ports: Ptr to the uart_port.
88 * @mctrl: Modem control bits. 88 * @mctrl: Modem control bits.
89 * 89 *
90 * The Serial MUX does not support CTS, DCD or DSR, so this function 90 * The Serial MUX does not support CTS, DCD or DSR, so this function
91 * is a no-op. 91 * is a no-op.
92 */ 92 */
93 static void mux_set_mctrl(struct uart_port *port, unsigned int mctrl) 93 static void mux_set_mctrl(struct uart_port *port, unsigned int mctrl)
94 { 94 {
95 } 95 }
96 96
97 /** 97 /**
98 * mux_get_mctrl - Returns the current state of modem control inputs. 98 * mux_get_mctrl - Returns the current state of modem control inputs.
99 * @port: Ptr to the uart_port. 99 * @port: Ptr to the uart_port.
100 * 100 *
101 * The Serial MUX does not support CTS, DCD or DSR so these lines are 101 * The Serial MUX does not support CTS, DCD or DSR so these lines are
102 * treated as permanently active. 102 * treated as permanently active.
103 */ 103 */
104 static unsigned int mux_get_mctrl(struct uart_port *port) 104 static unsigned int mux_get_mctrl(struct uart_port *port)
105 { 105 {
106 return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; 106 return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
107 } 107 }
108 108
109 /** 109 /**
110 * mux_stop_tx - Stop transmitting characters. 110 * mux_stop_tx - Stop transmitting characters.
111 * @port: Ptr to the uart_port. 111 * @port: Ptr to the uart_port.
112 * 112 *
113 * The Serial MUX does not support this function. 113 * The Serial MUX does not support this function.
114 */ 114 */
115 static void mux_stop_tx(struct uart_port *port) 115 static void mux_stop_tx(struct uart_port *port)
116 { 116 {
117 } 117 }
118 118
119 /** 119 /**
120 * mux_start_tx - Start transmitting characters. 120 * mux_start_tx - Start transmitting characters.
121 * @port: Ptr to the uart_port. 121 * @port: Ptr to the uart_port.
122 * 122 *
123 * The Serial Mux does not support this function. 123 * The Serial Mux does not support this function.
124 */ 124 */
125 static void mux_start_tx(struct uart_port *port) 125 static void mux_start_tx(struct uart_port *port)
126 { 126 {
127 } 127 }
128 128
129 /** 129 /**
130 * mux_stop_rx - Stop receiving characters. 130 * mux_stop_rx - Stop receiving characters.
131 * @port: Ptr to the uart_port. 131 * @port: Ptr to the uart_port.
132 * 132 *
133 * The Serial Mux does not support this function. 133 * The Serial Mux does not support this function.
134 */ 134 */
135 static void mux_stop_rx(struct uart_port *port) 135 static void mux_stop_rx(struct uart_port *port)
136 { 136 {
137 } 137 }
138 138
139 /** 139 /**
140 * mux_enable_ms - Enable modem status interrupts. 140 * mux_enable_ms - Enable modem status interrupts.
141 * @port: Ptr to the uart_port. 141 * @port: Ptr to the uart_port.
142 * 142 *
143 * The Serial Mux does not support this function. 143 * The Serial Mux does not support this function.
144 */ 144 */
145 static void mux_enable_ms(struct uart_port *port) 145 static void mux_enable_ms(struct uart_port *port)
146 { 146 {
147 } 147 }
148 148
149 /** 149 /**
150 * mux_break_ctl - Control the transmission of a break signal. 150 * mux_break_ctl - Control the transmission of a break signal.
151 * @port: Ptr to the uart_port. 151 * @port: Ptr to the uart_port.
152 * @break_state: Raise/Lower the break signal. 152 * @break_state: Raise/Lower the break signal.
153 * 153 *
154 * The Serial Mux does not support this function. 154 * The Serial Mux does not support this function.
155 */ 155 */
156 static void mux_break_ctl(struct uart_port *port, int break_state) 156 static void mux_break_ctl(struct uart_port *port, int break_state)
157 { 157 {
158 } 158 }
159 159
160 /** 160 /**
161 * mux_write - Write chars to the mux fifo. 161 * mux_write - Write chars to the mux fifo.
162 * @port: Ptr to the uart_port. 162 * @port: Ptr to the uart_port.
163 * 163 *
164 * This function writes all the data from the uart buffer to 164 * This function writes all the data from the uart buffer to
165 * the mux fifo. 165 * the mux fifo.
166 */ 166 */
167 static void mux_write(struct uart_port *port) 167 static void mux_write(struct uart_port *port)
168 { 168 {
169 int count; 169 int count;
170 struct circ_buf *xmit = &port->info->xmit; 170 struct circ_buf *xmit = &port->info->xmit;
171 171
172 if(port->x_char) { 172 if(port->x_char) {
173 UART_PUT_CHAR(port, port->x_char); 173 UART_PUT_CHAR(port, port->x_char);
174 port->icount.tx++; 174 port->icount.tx++;
175 port->x_char = 0; 175 port->x_char = 0;
176 return; 176 return;
177 } 177 }
178 178
179 if(uart_circ_empty(xmit) || uart_tx_stopped(port)) { 179 if(uart_circ_empty(xmit) || uart_tx_stopped(port)) {
180 mux_stop_tx(port); 180 mux_stop_tx(port);
181 return; 181 return;
182 } 182 }
183 183
184 count = (port->fifosize) - UART_GET_FIFO_CNT(port); 184 count = (port->fifosize) - UART_GET_FIFO_CNT(port);
185 do { 185 do {
186 UART_PUT_CHAR(port, xmit->buf[xmit->tail]); 186 UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
187 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 187 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
188 port->icount.tx++; 188 port->icount.tx++;
189 if(uart_circ_empty(xmit)) 189 if(uart_circ_empty(xmit))
190 break; 190 break;
191 191
192 } while(--count > 0); 192 } while(--count > 0);
193 193
194 while(UART_GET_FIFO_CNT(port)) 194 while(UART_GET_FIFO_CNT(port))
195 udelay(1); 195 udelay(1);
196 196
197 if(uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 197 if(uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
198 uart_write_wakeup(port); 198 uart_write_wakeup(port);
199 199
200 if (uart_circ_empty(xmit)) 200 if (uart_circ_empty(xmit))
201 mux_stop_tx(port); 201 mux_stop_tx(port);
202 } 202 }
203 203
204 /** 204 /**
205 * mux_read - Read chars from the mux fifo. 205 * mux_read - Read chars from the mux fifo.
206 * @port: Ptr to the uart_port. 206 * @port: Ptr to the uart_port.
207 * 207 *
208 * This reads all available data from the mux's fifo and pushes 208 * This reads all available data from the mux's fifo and pushes
209 * the data to the tty layer. 209 * the data to the tty layer.
210 */ 210 */
211 static void mux_read(struct uart_port *port) 211 static void mux_read(struct uart_port *port)
212 { 212 {
213 int data; 213 int data;
214 struct tty_struct *tty = port->info->tty; 214 struct tty_struct *tty = port->info->tty;
215 __u32 start_count = port->icount.rx; 215 __u32 start_count = port->icount.rx;
216 216
217 while(1) { 217 while(1) {
218 data = __raw_readl(port->membase + IO_DATA_REG_OFFSET); 218 data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);
219 219
220 if (MUX_STATUS(data)) 220 if (MUX_STATUS(data))
221 continue; 221 continue;
222 222
223 if (MUX_EOFIFO(data)) 223 if (MUX_EOFIFO(data))
224 break; 224 break;
225 225
226 port->icount.rx++; 226 port->icount.rx++;
227 227
228 if (MUX_BREAK(data)) { 228 if (MUX_BREAK(data)) {
229 port->icount.brk++; 229 port->icount.brk++;
230 if(uart_handle_break(port)) 230 if(uart_handle_break(port))
231 continue; 231 continue;
232 } 232 }
233 233
234 if (uart_handle_sysrq_char(port, data & 0xffu, NULL)) 234 if (uart_handle_sysrq_char(port, data & 0xffu, NULL))
235 continue; 235 continue;
236 236
237 tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL); 237 tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
238 } 238 }
239 239
240 if (start_count != port->icount.rx) { 240 if (start_count != port->icount.rx) {
241 tty_flip_buffer_push(tty); 241 tty_flip_buffer_push(tty);
242 } 242 }
243 } 243 }
244 244
245 /** 245 /**
246 * mux_startup - Initialize the port. 246 * mux_startup - Initialize the port.
247 * @port: Ptr to the uart_port. 247 * @port: Ptr to the uart_port.
248 * 248 *
249 * Grab any resources needed for this port and start the 249 * Grab any resources needed for this port and start the
250 * mux timer. 250 * mux timer.
251 */ 251 */
252 static int mux_startup(struct uart_port *port) 252 static int mux_startup(struct uart_port *port)
253 { 253 {
254 mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY); 254 mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
255 return 0; 255 return 0;
256 } 256 }
257 257
258 /** 258 /**
259 * mux_shutdown - Disable the port. 259 * mux_shutdown - Disable the port.
260 * @port: Ptr to the uart_port. 260 * @port: Ptr to the uart_port.
261 * 261 *
262 * Release any resources needed for the port. 262 * Release any resources needed for the port.
263 */ 263 */
264 static void mux_shutdown(struct uart_port *port) 264 static void mux_shutdown(struct uart_port *port)
265 { 265 {
266 } 266 }
267 267
268 /** 268 /**
269 * mux_set_termios - Change port parameters. 269 * mux_set_termios - Change port parameters.
270 * @port: Ptr to the uart_port. 270 * @port: Ptr to the uart_port.
271 * @termios: new termios settings. 271 * @termios: new termios settings.
272 * @old: old termios settings. 272 * @old: old termios settings.
273 * 273 *
274 * The Serial Mux does not support this function. 274 * The Serial Mux does not support this function.
275 */ 275 */
276 static void 276 static void
277 mux_set_termios(struct uart_port *port, struct termios *termios, 277 mux_set_termios(struct uart_port *port, struct termios *termios,
278 struct termios *old) 278 struct termios *old)
279 { 279 {
280 } 280 }
281 281
282 /** 282 /**
283 * mux_type - Describe the port. 283 * mux_type - Describe the port.
284 * @port: Ptr to the uart_port. 284 * @port: Ptr to the uart_port.
285 * 285 *
286 * Return a pointer to a string constant describing the 286 * Return a pointer to a string constant describing the
287 * specified port. 287 * specified port.
288 */ 288 */
289 static const char *mux_type(struct uart_port *port) 289 static const char *mux_type(struct uart_port *port)
290 { 290 {
291 return "Mux"; 291 return "Mux";
292 } 292 }
293 293
294 /** 294 /**
295 * mux_release_port - Release memory and IO regions. 295 * mux_release_port - Release memory and IO regions.
296 * @port: Ptr to the uart_port. 296 * @port: Ptr to the uart_port.
297 * 297 *
298 * Release any memory and IO region resources currently in use by 298 * Release any memory and IO region resources currently in use by
299 * the port. 299 * the port.
300 */ 300 */
301 static void mux_release_port(struct uart_port *port) 301 static void mux_release_port(struct uart_port *port)
302 { 302 {
303 } 303 }
304 304
305 /** 305 /**
306 * mux_request_port - Request memory and IO regions. 306 * mux_request_port - Request memory and IO regions.
307 * @port: Ptr to the uart_port. 307 * @port: Ptr to the uart_port.
308 * 308 *
309 * Request any memory and IO region resources required by the port. 309 * Request any memory and IO region resources required by the port.
310 * If any fail, no resources should be registered when this function 310 * If any fail, no resources should be registered when this function
311 * returns, and it should return -EBUSY on failure. 311 * returns, and it should return -EBUSY on failure.
312 */ 312 */
313 static int mux_request_port(struct uart_port *port) 313 static int mux_request_port(struct uart_port *port)
314 { 314 {
315 return 0; 315 return 0;
316 } 316 }
317 317
318 /** 318 /**
319 * mux_config_port - Perform port autoconfiguration. 319 * mux_config_port - Perform port autoconfiguration.
320 * @port: Ptr to the uart_port. 320 * @port: Ptr to the uart_port.
321 * @type: Bitmask of required configurations. 321 * @type: Bitmask of required configurations.
322 * 322 *
323 * Perform any autoconfiguration steps for the port. This function is 323 * Perform any autoconfiguration steps for the port. This function is
324 * called if the UPF_BOOT_AUTOCONF flag is specified for the port. 324 * called if the UPF_BOOT_AUTOCONF flag is specified for the port.
325 * [Note: This is required for now because of a bug in the Serial core. 325 * [Note: This is required for now because of a bug in the Serial core.
326 * rmk has already submitted a patch to linus, should be available for 326 * rmk has already submitted a patch to linus, should be available for
327 * 2.5.47.] 327 * 2.5.47.]
328 */ 328 */
329 static void mux_config_port(struct uart_port *port, int type) 329 static void mux_config_port(struct uart_port *port, int type)
330 { 330 {
331 port->type = PORT_MUX; 331 port->type = PORT_MUX;
332 } 332 }
333 333
334 /** 334 /**
335 * mux_verify_port - Verify the port information. 335 * mux_verify_port - Verify the port information.
336 * @port: Ptr to the uart_port. 336 * @port: Ptr to the uart_port.
337 * @ser: Ptr to the serial information. 337 * @ser: Ptr to the serial information.
338 * 338 *
339 * Verify the new serial port information contained within 'ser' is 339 * Verify the new serial port information contained within 'ser' is
340 * suitable for this port type. 340 * suitable for this port type.
341 */ 341 */
342 static int mux_verify_port(struct uart_port *port, struct serial_struct *ser) 342 static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
343 { 343 {
344 if(port->membase == NULL) 344 if(port->membase == NULL)
345 return -EINVAL; 345 return -EINVAL;
346 346
347 return 0; 347 return 0;
348 } 348 }
349 349
350 /** 350 /**
351 * mux_poll - Mux poll function. 351 * mux_poll - Mux poll function.
352 * @unused: Unused variable 352 * @unused: Unused variable
353 * 353 *
354 * This function periodically polls the Serial MUX to check for new data. 354 * This function periodically polls the Serial MUX to check for new data.
355 */ 355 */
356 static void mux_poll(unsigned long unused) 356 static void mux_poll(unsigned long unused)
357 { 357 {
358 int i; 358 int i;
359 359
360 for(i = 0; i < port_cnt; ++i) { 360 for(i = 0; i < port_cnt; ++i) {
361 if(!mux_ports[i].info) 361 if(!mux_ports[i].info)
362 continue; 362 continue;
363 363
364 mux_read(&mux_ports[i]); 364 mux_read(&mux_ports[i]);
365 mux_write(&mux_ports[i]); 365 mux_write(&mux_ports[i]);
366 } 366 }
367 367
368 mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY); 368 mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
369 } 369 }
370 370
371 371
372 #ifdef CONFIG_SERIAL_MUX_CONSOLE 372 #ifdef CONFIG_SERIAL_MUX_CONSOLE
373 static void mux_console_write(struct console *co, const char *s, unsigned count) 373 static void mux_console_write(struct console *co, const char *s, unsigned count)
374 { 374 {
375 while(count--) 375 while(count--)
376 pdc_iodc_putc(*s++); 376 pdc_iodc_putc(*s++);
377 } 377 }
378 378
379 static int mux_console_setup(struct console *co, char *options) 379 static int mux_console_setup(struct console *co, char *options)
380 { 380 {
381 return 0; 381 return 0;
382 } 382 }
383 383
384 struct tty_driver *mux_console_device(struct console *co, int *index) 384 struct tty_driver *mux_console_device(struct console *co, int *index)
385 { 385 {
386 *index = co->index; 386 *index = co->index;
387 return mux_driver.tty_driver; 387 return mux_driver.tty_driver;
388 } 388 }
389 389
390 static struct console mux_console = { 390 static struct console mux_console = {
391 .name = "ttyB", 391 .name = "ttyB",
392 .write = mux_console_write, 392 .write = mux_console_write,
393 .device = mux_console_device, 393 .device = mux_console_device,
394 .setup = mux_console_setup, 394 .setup = mux_console_setup,
395 .flags = CON_ENABLED | CON_PRINTBUFFER, 395 .flags = CON_ENABLED | CON_PRINTBUFFER,
396 .index = 0, 396 .index = 0,
397 }; 397 };
398 398
399 #define MUX_CONSOLE &mux_console 399 #define MUX_CONSOLE &mux_console
400 #else 400 #else
401 #define MUX_CONSOLE NULL 401 #define MUX_CONSOLE NULL
402 #endif 402 #endif
403 403
404 static struct uart_ops mux_pops = { 404 static struct uart_ops mux_pops = {
405 .tx_empty = mux_tx_empty, 405 .tx_empty = mux_tx_empty,
406 .set_mctrl = mux_set_mctrl, 406 .set_mctrl = mux_set_mctrl,
407 .get_mctrl = mux_get_mctrl, 407 .get_mctrl = mux_get_mctrl,
408 .stop_tx = mux_stop_tx, 408 .stop_tx = mux_stop_tx,
409 .start_tx = mux_start_tx, 409 .start_tx = mux_start_tx,
410 .stop_rx = mux_stop_rx, 410 .stop_rx = mux_stop_rx,
411 .enable_ms = mux_enable_ms, 411 .enable_ms = mux_enable_ms,
412 .break_ctl = mux_break_ctl, 412 .break_ctl = mux_break_ctl,
413 .startup = mux_startup, 413 .startup = mux_startup,
414 .shutdown = mux_shutdown, 414 .shutdown = mux_shutdown,
415 .set_termios = mux_set_termios, 415 .set_termios = mux_set_termios,
416 .type = mux_type, 416 .type = mux_type,
417 .release_port = mux_release_port, 417 .release_port = mux_release_port,
418 .request_port = mux_request_port, 418 .request_port = mux_request_port,
419 .config_port = mux_config_port, 419 .config_port = mux_config_port,
420 .verify_port = mux_verify_port, 420 .verify_port = mux_verify_port,
421 }; 421 };
422 422
423 /** 423 /**
424 * mux_probe - Determine if the Serial Mux should claim this device. 424 * mux_probe - Determine if the Serial Mux should claim this device.
425 * @dev: The parisc device. 425 * @dev: The parisc device.
426 * 426 *
427 * Determine if the Serial Mux should claim this chip (return 0) 427 * Determine if the Serial Mux should claim this chip (return 0)
428 * or not (return 1). 428 * or not (return 1).
429 */ 429 */
430 static int __init mux_probe(struct parisc_device *dev) 430 static int __init mux_probe(struct parisc_device *dev)
431 { 431 {
432 int i, status, ports; 432 int i, status, ports;
433 u8 iodc_data[32]; 433 u8 iodc_data[32];
434 unsigned long bytecnt; 434 unsigned long bytecnt;
435 struct uart_port *port; 435 struct uart_port *port;
436 436
437 status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32); 437 status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32);
438 if(status != PDC_OK) { 438 if(status != PDC_OK) {
439 printk(KERN_ERR "Serial mux: Unable to read IODC.\n"); 439 printk(KERN_ERR "Serial mux: Unable to read IODC.\n");
440 return 1; 440 return 1;
441 } 441 }
442 442
443 ports = GET_MUX_PORTS(iodc_data); 443 ports = GET_MUX_PORTS(iodc_data);
444 printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.3\n", ports); 444 printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.3\n", ports);
445 445
446 if(!port_cnt) { 446 if(!port_cnt) {
447 mux_driver.cons = MUX_CONSOLE; 447 mux_driver.cons = MUX_CONSOLE;
448 448
449 status = uart_register_driver(&mux_driver); 449 status = uart_register_driver(&mux_driver);
450 if(status) { 450 if(status) {
451 printk(KERN_ERR "Serial mux: Unable to register driver.\n"); 451 printk(KERN_ERR "Serial mux: Unable to register driver.\n");
452 return 1; 452 return 1;
453 } 453 }
454 454
455 init_timer(&mux_timer); 455 init_timer(&mux_timer);
456 mux_timer.function = mux_poll; 456 mux_timer.function = mux_poll;
457 } 457 }
458 458
459 for(i = 0; i < ports; ++i, ++port_cnt) { 459 for(i = 0; i < ports; ++i, ++port_cnt) {
460 port = &mux_ports[port_cnt]; 460 port = &mux_ports[port_cnt];
461 port->iobase = 0; 461 port->iobase = 0;
462 port->mapbase = dev->hpa.start + MUX_OFFSET + 462 port->mapbase = dev->hpa.start + MUX_OFFSET +
463 (i * MUX_LINE_OFFSET); 463 (i * MUX_LINE_OFFSET);
464 port->membase = ioremap(port->mapbase, MUX_LINE_OFFSET); 464 port->membase = ioremap_nocache(port->mapbase, MUX_LINE_OFFSET);
465 port->iotype = UPIO_MEM; 465 port->iotype = UPIO_MEM;
466 port->type = PORT_MUX; 466 port->type = PORT_MUX;
467 port->irq = NO_IRQ; 467 port->irq = NO_IRQ;
468 port->uartclk = 0; 468 port->uartclk = 0;
469 port->fifosize = MUX_FIFO_SIZE; 469 port->fifosize = MUX_FIFO_SIZE;
470 port->ops = &mux_pops; 470 port->ops = &mux_pops;
471 port->flags = UPF_BOOT_AUTOCONF; 471 port->flags = UPF_BOOT_AUTOCONF;
472 port->line = port_cnt; 472 port->line = port_cnt;
473 473
474 /* The port->timeout needs to match what is present in 474 /* The port->timeout needs to match what is present in
475 * uart_wait_until_sent in serial_core.c. Otherwise 475 * uart_wait_until_sent in serial_core.c. Otherwise
476 * the time spent in msleep_interruptible will be very 476 * the time spent in msleep_interruptible will be very
477 * long, causing the appearance of a console hang. 477 * long, causing the appearance of a console hang.
478 */ 478 */
479 port->timeout = HZ / 50; 479 port->timeout = HZ / 50;
480 spin_lock_init(&port->lock); 480 spin_lock_init(&port->lock);
481 status = uart_add_one_port(&mux_driver, port); 481 status = uart_add_one_port(&mux_driver, port);
482 BUG_ON(status); 482 BUG_ON(status);
483 } 483 }
484 484
485 #ifdef CONFIG_SERIAL_MUX_CONSOLE 485 #ifdef CONFIG_SERIAL_MUX_CONSOLE
486 register_console(&mux_console); 486 register_console(&mux_console);
487 #endif 487 #endif
488 return 0; 488 return 0;
489 } 489 }
490 490
491 static struct parisc_device_id mux_tbl[] = { 491 static struct parisc_device_id mux_tbl[] = {
492 { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D }, 492 { HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D },
493 { 0, } 493 { 0, }
494 }; 494 };
495 495
496 MODULE_DEVICE_TABLE(parisc, mux_tbl); 496 MODULE_DEVICE_TABLE(parisc, mux_tbl);
497 497
498 static struct parisc_driver serial_mux_driver = { 498 static struct parisc_driver serial_mux_driver = {
499 .name = "serial_mux", 499 .name = "serial_mux",
500 .id_table = mux_tbl, 500 .id_table = mux_tbl,
501 .probe = mux_probe, 501 .probe = mux_probe,
502 }; 502 };
503 503
504 /** 504 /**
505 * mux_init - Serial MUX initialization procedure. 505 * mux_init - Serial MUX initialization procedure.
506 * 506 *
507 * Register the Serial MUX driver. 507 * Register the Serial MUX driver.
508 */ 508 */
509 static int __init mux_init(void) 509 static int __init mux_init(void)
510 { 510 {
511 return register_parisc_driver(&serial_mux_driver); 511 return register_parisc_driver(&serial_mux_driver);
512 } 512 }
513 513
514 /** 514 /**
515 * mux_exit - Serial MUX cleanup procedure. 515 * mux_exit - Serial MUX cleanup procedure.
516 * 516 *
517 * Unregister the Serial MUX driver from the tty layer. 517 * Unregister the Serial MUX driver from the tty layer.
518 */ 518 */
519 static void __exit mux_exit(void) 519 static void __exit mux_exit(void)
520 { 520 {
521 int i; 521 int i;
522 522
523 for (i = 0; i < port_cnt; i++) { 523 for (i = 0; i < port_cnt; i++) {
524 uart_remove_one_port(&mux_driver, &mux_ports[i]); 524 uart_remove_one_port(&mux_driver, &mux_ports[i]);
525 } 525 }
526 526
527 uart_unregister_driver(&mux_driver); 527 uart_unregister_driver(&mux_driver);
528 } 528 }
529 529
530 module_init(mux_init); 530 module_init(mux_init);
531 module_exit(mux_exit); 531 module_exit(mux_exit);
532 532
533 MODULE_AUTHOR("Ryan Bradetich"); 533 MODULE_AUTHOR("Ryan Bradetich");
534 MODULE_DESCRIPTION("Serial MUX driver"); 534 MODULE_DESCRIPTION("Serial MUX driver");
535 MODULE_LICENSE("GPL"); 535 MODULE_LICENSE("GPL");
536 MODULE_ALIAS_CHARDEV_MAJOR(MUX_MAJOR); 536 MODULE_ALIAS_CHARDEV_MAJOR(MUX_MAJOR);
537 537
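
Because the Mux exposes no usable interrupt line (mux_probe() sets port->irq to NO_IRQ), the driver services every port from a self-rearming kernel timer: mux_poll() runs mux_read() and mux_write() on each live port, then re-queues itself MUX_POLL_DELAY jiffies later. A minimal sketch of that idiom using the 2.6-era timer API the driver itself relies on; the names here are illustrative:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	#define POLL_DELAY (30 * HZ / 1000)	/* mirrors MUX_POLL_DELAY */

	static struct timer_list poll_timer;

	static void poll_fn(unsigned long unused)
	{
		/* service the hardware here, then rearm the timer */
		mod_timer(&poll_timer, jiffies + POLL_DELAY);
	}

	static void poll_start(void)
	{
		init_timer(&poll_timer);
		poll_timer.function = poll_fn;
		mod_timer(&poll_timer, jiffies + POLL_DELAY);
	}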