Commit de8d28b16f5614aeb12bb69c8f9a38578b8d3ada

Authored by David S. Miller
1 parent 765b5f3273

[SPARC64]: Convert sparc64 PCI layer to in-kernel device tree.

One thing this change pointed out was that we really should
pull the "get 'local-mac-address' property" logic into a helper
function all the network drivers can call.

Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 17 changed files with 182 additions and 134 deletions (inline diff view)

arch/sparc64/kernel/ebus.c
1 /* $Id: ebus.c,v 1.64 2001/11/08 04:41:33 davem Exp $ 1 /* $Id: ebus.c,v 1.64 2001/11/08 04:41:33 davem Exp $
2 * ebus.c: PCI to EBus bridge device. 2 * ebus.c: PCI to EBus bridge device.
3 * 3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) 4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com) 5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */ 6 */
7 7
8 #include <linux/config.h> 8 #include <linux/config.h>
9 #include <linux/module.h> 9 #include <linux/module.h>
10 #include <linux/kernel.h> 10 #include <linux/kernel.h>
11 #include <linux/types.h> 11 #include <linux/types.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/string.h> 14 #include <linux/string.h>
15 #include <linux/interrupt.h> 15 #include <linux/interrupt.h>
16 #include <linux/delay.h> 16 #include <linux/delay.h>
17 17
18 #include <asm/system.h> 18 #include <asm/system.h>
19 #include <asm/page.h> 19 #include <asm/page.h>
20 #include <asm/pbm.h> 20 #include <asm/pbm.h>
21 #include <asm/ebus.h> 21 #include <asm/ebus.h>
22 #include <asm/oplib.h> 22 #include <asm/oplib.h>
23 #include <asm/bpp.h> 23 #include <asm/bpp.h>
24 #include <asm/irq.h> 24 #include <asm/irq.h>
25 25
26 /* EBUS dma library. */ 26 /* EBUS dma library. */
27 27
28 #define EBDMA_CSR 0x00UL /* Control/Status */ 28 #define EBDMA_CSR 0x00UL /* Control/Status */
29 #define EBDMA_ADDR 0x04UL /* DMA Address */ 29 #define EBDMA_ADDR 0x04UL /* DMA Address */
30 #define EBDMA_COUNT 0x08UL /* DMA Count */ 30 #define EBDMA_COUNT 0x08UL /* DMA Count */
31 31
32 #define EBDMA_CSR_INT_PEND 0x00000001 32 #define EBDMA_CSR_INT_PEND 0x00000001
33 #define EBDMA_CSR_ERR_PEND 0x00000002 33 #define EBDMA_CSR_ERR_PEND 0x00000002
34 #define EBDMA_CSR_DRAIN 0x00000004 34 #define EBDMA_CSR_DRAIN 0x00000004
35 #define EBDMA_CSR_INT_EN 0x00000010 35 #define EBDMA_CSR_INT_EN 0x00000010
36 #define EBDMA_CSR_RESET 0x00000080 36 #define EBDMA_CSR_RESET 0x00000080
37 #define EBDMA_CSR_WRITE 0x00000100 37 #define EBDMA_CSR_WRITE 0x00000100
38 #define EBDMA_CSR_EN_DMA 0x00000200 38 #define EBDMA_CSR_EN_DMA 0x00000200
39 #define EBDMA_CSR_CYC_PEND 0x00000400 39 #define EBDMA_CSR_CYC_PEND 0x00000400
40 #define EBDMA_CSR_DIAG_RD_DONE 0x00000800 40 #define EBDMA_CSR_DIAG_RD_DONE 0x00000800
41 #define EBDMA_CSR_DIAG_WR_DONE 0x00001000 41 #define EBDMA_CSR_DIAG_WR_DONE 0x00001000
42 #define EBDMA_CSR_EN_CNT 0x00002000 42 #define EBDMA_CSR_EN_CNT 0x00002000
43 #define EBDMA_CSR_TC 0x00004000 43 #define EBDMA_CSR_TC 0x00004000
44 #define EBDMA_CSR_DIS_CSR_DRN 0x00010000 44 #define EBDMA_CSR_DIS_CSR_DRN 0x00010000
45 #define EBDMA_CSR_BURST_SZ_MASK 0x000c0000 45 #define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
46 #define EBDMA_CSR_BURST_SZ_1 0x00080000 46 #define EBDMA_CSR_BURST_SZ_1 0x00080000
47 #define EBDMA_CSR_BURST_SZ_4 0x00000000 47 #define EBDMA_CSR_BURST_SZ_4 0x00000000
48 #define EBDMA_CSR_BURST_SZ_8 0x00040000 48 #define EBDMA_CSR_BURST_SZ_8 0x00040000
49 #define EBDMA_CSR_BURST_SZ_16 0x000c0000 49 #define EBDMA_CSR_BURST_SZ_16 0x000c0000
50 #define EBDMA_CSR_DIAG_EN 0x00100000 50 #define EBDMA_CSR_DIAG_EN 0x00100000
51 #define EBDMA_CSR_DIS_ERR_PEND 0x00400000 51 #define EBDMA_CSR_DIS_ERR_PEND 0x00400000
52 #define EBDMA_CSR_TCI_DIS 0x00800000 52 #define EBDMA_CSR_TCI_DIS 0x00800000
53 #define EBDMA_CSR_EN_NEXT 0x01000000 53 #define EBDMA_CSR_EN_NEXT 0x01000000
54 #define EBDMA_CSR_DMA_ON 0x02000000 54 #define EBDMA_CSR_DMA_ON 0x02000000
55 #define EBDMA_CSR_A_LOADED 0x04000000 55 #define EBDMA_CSR_A_LOADED 0x04000000
56 #define EBDMA_CSR_NA_LOADED 0x08000000 56 #define EBDMA_CSR_NA_LOADED 0x08000000
57 #define EBDMA_CSR_DEV_ID_MASK 0xf0000000 57 #define EBDMA_CSR_DEV_ID_MASK 0xf0000000
58 58
59 #define EBUS_DMA_RESET_TIMEOUT 10000 59 #define EBUS_DMA_RESET_TIMEOUT 10000
60 60
61 static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain) 61 static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
62 { 62 {
63 int i; 63 int i;
64 u32 val = 0; 64 u32 val = 0;
65 65
66 writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR); 66 writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
67 udelay(1); 67 udelay(1);
68 68
69 if (no_drain) 69 if (no_drain)
70 return; 70 return;
71 71
72 for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) { 72 for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
73 val = readl(p->regs + EBDMA_CSR); 73 val = readl(p->regs + EBDMA_CSR);
74 74
75 if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND))) 75 if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
76 break; 76 break;
77 udelay(10); 77 udelay(10);
78 } 78 }
79 } 79 }
80 80
81 static irqreturn_t ebus_dma_irq(int irq, void *dev_id, struct pt_regs *regs) 81 static irqreturn_t ebus_dma_irq(int irq, void *dev_id, struct pt_regs *regs)
82 { 82 {
83 struct ebus_dma_info *p = dev_id; 83 struct ebus_dma_info *p = dev_id;
84 unsigned long flags; 84 unsigned long flags;
85 u32 csr = 0; 85 u32 csr = 0;
86 86
87 spin_lock_irqsave(&p->lock, flags); 87 spin_lock_irqsave(&p->lock, flags);
88 csr = readl(p->regs + EBDMA_CSR); 88 csr = readl(p->regs + EBDMA_CSR);
89 writel(csr, p->regs + EBDMA_CSR); 89 writel(csr, p->regs + EBDMA_CSR);
90 spin_unlock_irqrestore(&p->lock, flags); 90 spin_unlock_irqrestore(&p->lock, flags);
91 91
92 if (csr & EBDMA_CSR_ERR_PEND) { 92 if (csr & EBDMA_CSR_ERR_PEND) {
93 printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name); 93 printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
94 p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie); 94 p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
95 return IRQ_HANDLED; 95 return IRQ_HANDLED;
96 } else if (csr & EBDMA_CSR_INT_PEND) { 96 } else if (csr & EBDMA_CSR_INT_PEND) {
97 p->callback(p, 97 p->callback(p,
98 (csr & EBDMA_CSR_TC) ? 98 (csr & EBDMA_CSR_TC) ?
99 EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE, 99 EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
100 p->client_cookie); 100 p->client_cookie);
101 return IRQ_HANDLED; 101 return IRQ_HANDLED;
102 } 102 }
103 103
104 return IRQ_NONE; 104 return IRQ_NONE;
105 105
106 } 106 }
107 107
108 int ebus_dma_register(struct ebus_dma_info *p) 108 int ebus_dma_register(struct ebus_dma_info *p)
109 { 109 {
110 u32 csr; 110 u32 csr;
111 111
112 if (!p->regs) 112 if (!p->regs)
113 return -EINVAL; 113 return -EINVAL;
114 if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER | 114 if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
115 EBUS_DMA_FLAG_TCI_DISABLE)) 115 EBUS_DMA_FLAG_TCI_DISABLE))
116 return -EINVAL; 116 return -EINVAL;
117 if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback) 117 if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
118 return -EINVAL; 118 return -EINVAL;
119 if (!strlen(p->name)) 119 if (!strlen(p->name))
120 return -EINVAL; 120 return -EINVAL;
121 121
122 __ebus_dma_reset(p, 1); 122 __ebus_dma_reset(p, 1);
123 123
124 csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT; 124 csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
125 125
126 if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE) 126 if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
127 csr |= EBDMA_CSR_TCI_DIS; 127 csr |= EBDMA_CSR_TCI_DIS;
128 128
129 writel(csr, p->regs + EBDMA_CSR); 129 writel(csr, p->regs + EBDMA_CSR);
130 130
131 return 0; 131 return 0;
132 } 132 }
133 EXPORT_SYMBOL(ebus_dma_register); 133 EXPORT_SYMBOL(ebus_dma_register);
134 134
135 int ebus_dma_irq_enable(struct ebus_dma_info *p, int on) 135 int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
136 { 136 {
137 unsigned long flags; 137 unsigned long flags;
138 u32 csr; 138 u32 csr;
139 139
140 if (on) { 140 if (on) {
141 if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) { 141 if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
142 if (request_irq(p->irq, ebus_dma_irq, SA_SHIRQ, p->name, p)) 142 if (request_irq(p->irq, ebus_dma_irq, SA_SHIRQ, p->name, p))
143 return -EBUSY; 143 return -EBUSY;
144 } 144 }
145 145
146 spin_lock_irqsave(&p->lock, flags); 146 spin_lock_irqsave(&p->lock, flags);
147 csr = readl(p->regs + EBDMA_CSR); 147 csr = readl(p->regs + EBDMA_CSR);
148 csr |= EBDMA_CSR_INT_EN; 148 csr |= EBDMA_CSR_INT_EN;
149 writel(csr, p->regs + EBDMA_CSR); 149 writel(csr, p->regs + EBDMA_CSR);
150 spin_unlock_irqrestore(&p->lock, flags); 150 spin_unlock_irqrestore(&p->lock, flags);
151 } else { 151 } else {
152 spin_lock_irqsave(&p->lock, flags); 152 spin_lock_irqsave(&p->lock, flags);
153 csr = readl(p->regs + EBDMA_CSR); 153 csr = readl(p->regs + EBDMA_CSR);
154 csr &= ~EBDMA_CSR_INT_EN; 154 csr &= ~EBDMA_CSR_INT_EN;
155 writel(csr, p->regs + EBDMA_CSR); 155 writel(csr, p->regs + EBDMA_CSR);
156 spin_unlock_irqrestore(&p->lock, flags); 156 spin_unlock_irqrestore(&p->lock, flags);
157 157
158 if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) { 158 if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
159 free_irq(p->irq, p); 159 free_irq(p->irq, p);
160 } 160 }
161 } 161 }
162 162
163 return 0; 163 return 0;
164 } 164 }
165 EXPORT_SYMBOL(ebus_dma_irq_enable); 165 EXPORT_SYMBOL(ebus_dma_irq_enable);
166 166
167 void ebus_dma_unregister(struct ebus_dma_info *p) 167 void ebus_dma_unregister(struct ebus_dma_info *p)
168 { 168 {
169 unsigned long flags; 169 unsigned long flags;
170 u32 csr; 170 u32 csr;
171 int irq_on = 0; 171 int irq_on = 0;
172 172
173 spin_lock_irqsave(&p->lock, flags); 173 spin_lock_irqsave(&p->lock, flags);
174 csr = readl(p->regs + EBDMA_CSR); 174 csr = readl(p->regs + EBDMA_CSR);
175 if (csr & EBDMA_CSR_INT_EN) { 175 if (csr & EBDMA_CSR_INT_EN) {
176 csr &= ~EBDMA_CSR_INT_EN; 176 csr &= ~EBDMA_CSR_INT_EN;
177 writel(csr, p->regs + EBDMA_CSR); 177 writel(csr, p->regs + EBDMA_CSR);
178 irq_on = 1; 178 irq_on = 1;
179 } 179 }
180 spin_unlock_irqrestore(&p->lock, flags); 180 spin_unlock_irqrestore(&p->lock, flags);
181 181
182 if (irq_on) 182 if (irq_on)
183 free_irq(p->irq, p); 183 free_irq(p->irq, p);
184 } 184 }
185 EXPORT_SYMBOL(ebus_dma_unregister); 185 EXPORT_SYMBOL(ebus_dma_unregister);
186 186
187 int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len) 187 int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
188 { 188 {
189 unsigned long flags; 189 unsigned long flags;
190 u32 csr; 190 u32 csr;
191 int err; 191 int err;
192 192
193 if (len >= (1 << 24)) 193 if (len >= (1 << 24))
194 return -EINVAL; 194 return -EINVAL;
195 195
196 spin_lock_irqsave(&p->lock, flags); 196 spin_lock_irqsave(&p->lock, flags);
197 csr = readl(p->regs + EBDMA_CSR); 197 csr = readl(p->regs + EBDMA_CSR);
198 err = -EINVAL; 198 err = -EINVAL;
199 if (!(csr & EBDMA_CSR_EN_DMA)) 199 if (!(csr & EBDMA_CSR_EN_DMA))
200 goto out; 200 goto out;
201 err = -EBUSY; 201 err = -EBUSY;
202 if (csr & EBDMA_CSR_NA_LOADED) 202 if (csr & EBDMA_CSR_NA_LOADED)
203 goto out; 203 goto out;
204 204
205 writel(len, p->regs + EBDMA_COUNT); 205 writel(len, p->regs + EBDMA_COUNT);
206 writel(bus_addr, p->regs + EBDMA_ADDR); 206 writel(bus_addr, p->regs + EBDMA_ADDR);
207 err = 0; 207 err = 0;
208 208
209 out: 209 out:
210 spin_unlock_irqrestore(&p->lock, flags); 210 spin_unlock_irqrestore(&p->lock, flags);
211 211
212 return err; 212 return err;
213 } 213 }
214 EXPORT_SYMBOL(ebus_dma_request); 214 EXPORT_SYMBOL(ebus_dma_request);
215 215
216 void ebus_dma_prepare(struct ebus_dma_info *p, int write) 216 void ebus_dma_prepare(struct ebus_dma_info *p, int write)
217 { 217 {
218 unsigned long flags; 218 unsigned long flags;
219 u32 csr; 219 u32 csr;
220 220
221 spin_lock_irqsave(&p->lock, flags); 221 spin_lock_irqsave(&p->lock, flags);
222 __ebus_dma_reset(p, 0); 222 __ebus_dma_reset(p, 0);
223 223
224 csr = (EBDMA_CSR_INT_EN | 224 csr = (EBDMA_CSR_INT_EN |
225 EBDMA_CSR_EN_CNT | 225 EBDMA_CSR_EN_CNT |
226 EBDMA_CSR_BURST_SZ_16 | 226 EBDMA_CSR_BURST_SZ_16 |
227 EBDMA_CSR_EN_NEXT); 227 EBDMA_CSR_EN_NEXT);
228 228
229 if (write) 229 if (write)
230 csr |= EBDMA_CSR_WRITE; 230 csr |= EBDMA_CSR_WRITE;
231 if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE) 231 if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
232 csr |= EBDMA_CSR_TCI_DIS; 232 csr |= EBDMA_CSR_TCI_DIS;
233 233
234 writel(csr, p->regs + EBDMA_CSR); 234 writel(csr, p->regs + EBDMA_CSR);
235 235
236 spin_unlock_irqrestore(&p->lock, flags); 236 spin_unlock_irqrestore(&p->lock, flags);
237 } 237 }
238 EXPORT_SYMBOL(ebus_dma_prepare); 238 EXPORT_SYMBOL(ebus_dma_prepare);
239 239
240 unsigned int ebus_dma_residue(struct ebus_dma_info *p) 240 unsigned int ebus_dma_residue(struct ebus_dma_info *p)
241 { 241 {
242 return readl(p->regs + EBDMA_COUNT); 242 return readl(p->regs + EBDMA_COUNT);
243 } 243 }
244 EXPORT_SYMBOL(ebus_dma_residue); 244 EXPORT_SYMBOL(ebus_dma_residue);
245 245
246 unsigned int ebus_dma_addr(struct ebus_dma_info *p) 246 unsigned int ebus_dma_addr(struct ebus_dma_info *p)
247 { 247 {
248 return readl(p->regs + EBDMA_ADDR); 248 return readl(p->regs + EBDMA_ADDR);
249 } 249 }
250 EXPORT_SYMBOL(ebus_dma_addr); 250 EXPORT_SYMBOL(ebus_dma_addr);
251 251
252 void ebus_dma_enable(struct ebus_dma_info *p, int on) 252 void ebus_dma_enable(struct ebus_dma_info *p, int on)
253 { 253 {
254 unsigned long flags; 254 unsigned long flags;
255 u32 orig_csr, csr; 255 u32 orig_csr, csr;
256 256
257 spin_lock_irqsave(&p->lock, flags); 257 spin_lock_irqsave(&p->lock, flags);
258 orig_csr = csr = readl(p->regs + EBDMA_CSR); 258 orig_csr = csr = readl(p->regs + EBDMA_CSR);
259 if (on) 259 if (on)
260 csr |= EBDMA_CSR_EN_DMA; 260 csr |= EBDMA_CSR_EN_DMA;
261 else 261 else
262 csr &= ~EBDMA_CSR_EN_DMA; 262 csr &= ~EBDMA_CSR_EN_DMA;
263 if ((orig_csr & EBDMA_CSR_EN_DMA) != 263 if ((orig_csr & EBDMA_CSR_EN_DMA) !=
264 (csr & EBDMA_CSR_EN_DMA)) 264 (csr & EBDMA_CSR_EN_DMA))
265 writel(csr, p->regs + EBDMA_CSR); 265 writel(csr, p->regs + EBDMA_CSR);
266 spin_unlock_irqrestore(&p->lock, flags); 266 spin_unlock_irqrestore(&p->lock, flags);
267 } 267 }
268 EXPORT_SYMBOL(ebus_dma_enable); 268 EXPORT_SYMBOL(ebus_dma_enable);
269 269
270 struct linux_ebus *ebus_chain = NULL; 270 struct linux_ebus *ebus_chain = NULL;
271 271
272 #ifdef CONFIG_SUN_AUXIO 272 #ifdef CONFIG_SUN_AUXIO
273 extern void auxio_probe(void); 273 extern void auxio_probe(void);
274 #endif 274 #endif
275 275
276 static inline void *ebus_alloc(size_t size) 276 static inline void *ebus_alloc(size_t size)
277 { 277 {
278 void *mem; 278 void *mem;
279 279
280 mem = kzalloc(size, GFP_ATOMIC); 280 mem = kzalloc(size, GFP_ATOMIC);
281 if (!mem) 281 if (!mem)
282 panic("ebus_alloc: out of memory"); 282 panic("ebus_alloc: out of memory");
283 return mem; 283 return mem;
284 } 284 }
285 285
286 static void __init ebus_ranges_init(struct linux_ebus *ebus) 286 static void __init ebus_ranges_init(struct linux_ebus *ebus)
287 { 287 {
288 int success; 288 int success;
289 289
290 ebus->num_ebus_ranges = 0; 290 ebus->num_ebus_ranges = 0;
291 success = prom_getproperty(ebus->prom_node, "ranges", 291 success = prom_getproperty(ebus->prom_node, "ranges",
292 (char *)ebus->ebus_ranges, 292 (char *)ebus->ebus_ranges,
293 sizeof(ebus->ebus_ranges)); 293 sizeof(ebus->ebus_ranges));
294 if (success != -1) 294 if (success != -1)
295 ebus->num_ebus_ranges = (success/sizeof(struct linux_prom_ebus_ranges)); 295 ebus->num_ebus_ranges = (success/sizeof(struct linux_prom_ebus_ranges));
296 } 296 }
297 297
298 static void __init ebus_intmap_init(struct linux_ebus *ebus) 298 static void __init ebus_intmap_init(struct linux_ebus *ebus)
299 { 299 {
300 int success; 300 int success;
301 301
302 ebus->num_ebus_intmap = 0; 302 ebus->num_ebus_intmap = 0;
303 success = prom_getproperty(ebus->prom_node, "interrupt-map", 303 success = prom_getproperty(ebus->prom_node, "interrupt-map",
304 (char *)ebus->ebus_intmap, 304 (char *)ebus->ebus_intmap,
305 sizeof(ebus->ebus_intmap)); 305 sizeof(ebus->ebus_intmap));
306 if (success == -1) 306 if (success == -1)
307 return; 307 return;
308 308
309 ebus->num_ebus_intmap = (success/sizeof(struct linux_prom_ebus_intmap)); 309 ebus->num_ebus_intmap = (success/sizeof(struct linux_prom_ebus_intmap));
310 310
311 success = prom_getproperty(ebus->prom_node, "interrupt-map-mask", 311 success = prom_getproperty(ebus->prom_node, "interrupt-map-mask",
312 (char *)&ebus->ebus_intmask, 312 (char *)&ebus->ebus_intmask,
313 sizeof(ebus->ebus_intmask)); 313 sizeof(ebus->ebus_intmask));
314 if (success == -1) { 314 if (success == -1) {
315 prom_printf("%s: can't get interrupt-map-mask\n", __FUNCTION__); 315 prom_printf("%s: can't get interrupt-map-mask\n", __FUNCTION__);
316 prom_halt(); 316 prom_halt();
317 } 317 }
318 } 318 }
319 319
320 int __init ebus_intmap_match(struct linux_ebus *ebus, 320 int __init ebus_intmap_match(struct linux_ebus *ebus,
321 struct linux_prom_registers *reg, 321 struct linux_prom_registers *reg,
322 int *interrupt) 322 int *interrupt)
323 { 323 {
324 unsigned int hi, lo, irq; 324 unsigned int hi, lo, irq;
325 int i; 325 int i;
326 326
327 if (!ebus->num_ebus_intmap) 327 if (!ebus->num_ebus_intmap)
328 return 0; 328 return 0;
329 329
330 hi = reg->which_io & ebus->ebus_intmask.phys_hi; 330 hi = reg->which_io & ebus->ebus_intmask.phys_hi;
331 lo = reg->phys_addr & ebus->ebus_intmask.phys_lo; 331 lo = reg->phys_addr & ebus->ebus_intmask.phys_lo;
332 irq = *interrupt & ebus->ebus_intmask.interrupt; 332 irq = *interrupt & ebus->ebus_intmask.interrupt;
333 for (i = 0; i < ebus->num_ebus_intmap; i++) { 333 for (i = 0; i < ebus->num_ebus_intmap; i++) {
334 if ((ebus->ebus_intmap[i].phys_hi == hi) && 334 if ((ebus->ebus_intmap[i].phys_hi == hi) &&
335 (ebus->ebus_intmap[i].phys_lo == lo) && 335 (ebus->ebus_intmap[i].phys_lo == lo) &&
336 (ebus->ebus_intmap[i].interrupt == irq)) { 336 (ebus->ebus_intmap[i].interrupt == irq)) {
337 *interrupt = ebus->ebus_intmap[i].cinterrupt; 337 *interrupt = ebus->ebus_intmap[i].cinterrupt;
338 return 0; 338 return 0;
339 } 339 }
340 } 340 }
341 return -1; 341 return -1;
342 } 342 }
343 343
344 void __init fill_ebus_child(int node, struct linux_prom_registers *preg, 344 void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
345 struct linux_ebus_child *dev, int non_standard_regs) 345 struct linux_ebus_child *dev, int non_standard_regs)
346 { 346 {
347 int regs[PROMREG_MAX]; 347 int regs[PROMREG_MAX];
348 int irqs[PROMREG_MAX]; 348 int irqs[PROMREG_MAX];
349 int i, len; 349 int i, len;
350 350
351 dev->prom_node = node; 351 dev->prom_node = node;
352 prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name)); 352 prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
353 printk(" (%s)", dev->prom_name); 353 printk(" (%s)", dev->prom_name);
354 354
355 len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs)); 355 len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
356 dev->num_addrs = len / sizeof(regs[0]); 356 dev->num_addrs = len / sizeof(regs[0]);
357 357
358 if (non_standard_regs) { 358 if (non_standard_regs) {
359 /* This is to handle reg properties which are not 359 /* This is to handle reg properties which are not
360 * in the parent relative format. One example are 360 * in the parent relative format. One example are
361 * children of the i2c device on CompactPCI systems. 361 * children of the i2c device on CompactPCI systems.
362 * 362 *
363 * So, for such devices we just record the property 363 * So, for such devices we just record the property
364 * raw in the child resources. 364 * raw in the child resources.
365 */ 365 */
366 for (i = 0; i < dev->num_addrs; i++) 366 for (i = 0; i < dev->num_addrs; i++)
367 dev->resource[i].start = regs[i]; 367 dev->resource[i].start = regs[i];
368 } else { 368 } else {
369 for (i = 0; i < dev->num_addrs; i++) { 369 for (i = 0; i < dev->num_addrs; i++) {
370 int rnum = regs[i]; 370 int rnum = regs[i];
371 if (rnum >= dev->parent->num_addrs) { 371 if (rnum >= dev->parent->num_addrs) {
372 prom_printf("UGH: property for %s was %d, need < %d\n", 372 prom_printf("UGH: property for %s was %d, need < %d\n",
373 dev->prom_name, len, dev->parent->num_addrs); 373 dev->prom_name, len, dev->parent->num_addrs);
374 panic(__FUNCTION__); 374 panic(__FUNCTION__);
375 } 375 }
376 dev->resource[i].start = dev->parent->resource[i].start; 376 dev->resource[i].start = dev->parent->resource[i].start;
377 dev->resource[i].end = dev->parent->resource[i].end; 377 dev->resource[i].end = dev->parent->resource[i].end;
378 dev->resource[i].flags = IORESOURCE_MEM; 378 dev->resource[i].flags = IORESOURCE_MEM;
379 dev->resource[i].name = dev->prom_name; 379 dev->resource[i].name = dev->prom_name;
380 } 380 }
381 } 381 }
382 382
383 for (i = 0; i < PROMINTR_MAX; i++) 383 for (i = 0; i < PROMINTR_MAX; i++)
384 dev->irqs[i] = PCI_IRQ_NONE; 384 dev->irqs[i] = PCI_IRQ_NONE;
385 385
386 len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs)); 386 len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
387 if ((len == -1) || (len == 0)) { 387 if ((len == -1) || (len == 0)) {
388 dev->num_irqs = 0; 388 dev->num_irqs = 0;
389 /* 389 /*
390 * Oh, well, some PROMs don't export interrupts 390 * Oh, well, some PROMs don't export interrupts
391 * property to children of EBus devices... 391 * property to children of EBus devices...
392 * 392 *
393 * Be smart about PS/2 keyboard and mouse. 393 * Be smart about PS/2 keyboard and mouse.
394 */ 394 */
395 if (!strcmp(dev->parent->prom_name, "8042")) { 395 if (!strcmp(dev->parent->prom_name, "8042")) {
396 if (!strcmp(dev->prom_name, "kb_ps2")) { 396 if (!strcmp(dev->prom_name, "kb_ps2")) {
397 dev->num_irqs = 1; 397 dev->num_irqs = 1;
398 dev->irqs[0] = dev->parent->irqs[0]; 398 dev->irqs[0] = dev->parent->irqs[0];
399 } else { 399 } else {
400 dev->num_irqs = 1; 400 dev->num_irqs = 1;
401 dev->irqs[0] = dev->parent->irqs[1]; 401 dev->irqs[0] = dev->parent->irqs[1];
402 } 402 }
403 } 403 }
404 } else { 404 } else {
405 dev->num_irqs = len / sizeof(irqs[0]); 405 dev->num_irqs = len / sizeof(irqs[0]);
406 for (i = 0; i < dev->num_irqs; i++) { 406 for (i = 0; i < dev->num_irqs; i++) {
407 struct pci_pbm_info *pbm = dev->bus->parent; 407 struct pci_pbm_info *pbm = dev->bus->parent;
408 struct pci_controller_info *p = pbm->parent; 408 struct pci_controller_info *p = pbm->parent;
409 409
410 if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) { 410 if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
411 dev->irqs[i] = p->irq_build(pbm, 411 dev->irqs[i] = p->irq_build(pbm,
412 dev->bus->self, 412 dev->bus->self,
413 irqs[i]); 413 irqs[i]);
414 } else { 414 } else {
415 /* If we get a bogus interrupt property, just 415 /* If we get a bogus interrupt property, just
416 * record the raw value instead of punting. 416 * record the raw value instead of punting.
417 */ 417 */
418 dev->irqs[i] = irqs[i]; 418 dev->irqs[i] = irqs[i];
419 } 419 }
420 } 420 }
421 } 421 }
422 } 422 }
423 423
424 static int __init child_regs_nonstandard(struct linux_ebus_device *dev) 424 static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
425 { 425 {
426 if (!strcmp(dev->prom_name, "i2c") || 426 if (!strcmp(dev->prom_name, "i2c") ||
427 !strcmp(dev->prom_name, "SUNW,lombus")) 427 !strcmp(dev->prom_name, "SUNW,lombus"))
428 return 1; 428 return 1;
429 return 0; 429 return 0;
430 } 430 }
431 431
432 void __init fill_ebus_device(int node, struct linux_ebus_device *dev) 432 void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
433 { 433 {
434 struct linux_prom_registers regs[PROMREG_MAX]; 434 struct linux_prom_registers regs[PROMREG_MAX];
435 struct linux_ebus_child *child; 435 struct linux_ebus_child *child;
436 int irqs[PROMINTR_MAX]; 436 int irqs[PROMINTR_MAX];
437 int i, n, len; 437 int i, n, len;
438 438
439 dev->prom_node = node; 439 dev->prom_node = node;
440 prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name)); 440 prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
441 printk(" [%s", dev->prom_name); 441 printk(" [%s", dev->prom_name);
442 442
443 len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs)); 443 len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
444 if (len == -1) { 444 if (len == -1) {
445 dev->num_addrs = 0; 445 dev->num_addrs = 0;
446 goto probe_interrupts; 446 goto probe_interrupts;
447 } 447 }
448 448
449 if (len % sizeof(struct linux_prom_registers)) { 449 if (len % sizeof(struct linux_prom_registers)) {
450 prom_printf("UGH: proplen for %s was %d, need multiple of %d\n", 450 prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
451 dev->prom_name, len, 451 dev->prom_name, len,
452 (int)sizeof(struct linux_prom_registers)); 452 (int)sizeof(struct linux_prom_registers));
453 prom_halt(); 453 prom_halt();
454 } 454 }
455 dev->num_addrs = len / sizeof(struct linux_prom_registers); 455 dev->num_addrs = len / sizeof(struct linux_prom_registers);
456 456
457 for (i = 0; i < dev->num_addrs; i++) { 457 for (i = 0; i < dev->num_addrs; i++) {
458 /* XXX Learn how to interpret ebus ranges... -DaveM */ 458 /* XXX Learn how to interpret ebus ranges... -DaveM */
459 if (regs[i].which_io >= 0x10) 459 if (regs[i].which_io >= 0x10)
460 n = (regs[i].which_io - 0x10) >> 2; 460 n = (regs[i].which_io - 0x10) >> 2;
461 else 461 else
462 n = regs[i].which_io; 462 n = regs[i].which_io;
463 463
464 dev->resource[i].start = dev->bus->self->resource[n].start; 464 dev->resource[i].start = dev->bus->self->resource[n].start;
465 dev->resource[i].start += (unsigned long)regs[i].phys_addr; 465 dev->resource[i].start += (unsigned long)regs[i].phys_addr;
466 dev->resource[i].end = 466 dev->resource[i].end =
467 (dev->resource[i].start + (unsigned long)regs[i].reg_size - 1UL); 467 (dev->resource[i].start + (unsigned long)regs[i].reg_size - 1UL);
468 dev->resource[i].flags = IORESOURCE_MEM; 468 dev->resource[i].flags = IORESOURCE_MEM;
469 dev->resource[i].name = dev->prom_name; 469 dev->resource[i].name = dev->prom_name;
470 request_resource(&dev->bus->self->resource[n], 470 request_resource(&dev->bus->self->resource[n],
471 &dev->resource[i]); 471 &dev->resource[i]);
472 } 472 }
473 473
474 probe_interrupts: 474 probe_interrupts:
475 for (i = 0; i < PROMINTR_MAX; i++) 475 for (i = 0; i < PROMINTR_MAX; i++)
476 dev->irqs[i] = PCI_IRQ_NONE; 476 dev->irqs[i] = PCI_IRQ_NONE;
477 477
478 len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs)); 478 len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
479 if ((len == -1) || (len == 0)) { 479 if ((len == -1) || (len == 0)) {
480 dev->num_irqs = 0; 480 dev->num_irqs = 0;
481 } else { 481 } else {
482 dev->num_irqs = len / sizeof(irqs[0]); 482 dev->num_irqs = len / sizeof(irqs[0]);
483 for (i = 0; i < dev->num_irqs; i++) { 483 for (i = 0; i < dev->num_irqs; i++) {
484 struct pci_pbm_info *pbm = dev->bus->parent; 484 struct pci_pbm_info *pbm = dev->bus->parent;
485 struct pci_controller_info *p = pbm->parent; 485 struct pci_controller_info *p = pbm->parent;
486 486
487 if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) { 487 if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) {
488 dev->irqs[i] = p->irq_build(pbm, 488 dev->irqs[i] = p->irq_build(pbm,
489 dev->bus->self, 489 dev->bus->self,
490 irqs[i]); 490 irqs[i]);
491 } else { 491 } else {
492 /* If we get a bogus interrupt property, just 492 /* If we get a bogus interrupt property, just
493 * record the raw value instead of punting. 493 * record the raw value instead of punting.
494 */ 494 */
495 dev->irqs[i] = irqs[i]; 495 dev->irqs[i] = irqs[i];
496 } 496 }
497 } 497 }
498 } 498 }
499 499
500 if ((node = prom_getchild(node))) { 500 if ((node = prom_getchild(node))) {
501 printk(" ->"); 501 printk(" ->");
502 dev->children = ebus_alloc(sizeof(struct linux_ebus_child)); 502 dev->children = ebus_alloc(sizeof(struct linux_ebus_child));
503 503
504 child = dev->children; 504 child = dev->children;
505 child->next = NULL; 505 child->next = NULL;
506 child->parent = dev; 506 child->parent = dev;
507 child->bus = dev->bus; 507 child->bus = dev->bus;
508 fill_ebus_child(node, &regs[0], 508 fill_ebus_child(node, &regs[0],
509 child, child_regs_nonstandard(dev)); 509 child, child_regs_nonstandard(dev));
510 510
511 while ((node = prom_getsibling(node)) != 0) { 511 while ((node = prom_getsibling(node)) != 0) {
512 child->next = ebus_alloc(sizeof(struct linux_ebus_child)); 512 child->next = ebus_alloc(sizeof(struct linux_ebus_child));
513 513
514 child = child->next; 514 child = child->next;
515 child->next = NULL; 515 child->next = NULL;
516 child->parent = dev; 516 child->parent = dev;
517 child->bus = dev->bus; 517 child->bus = dev->bus;
518 fill_ebus_child(node, &regs[0], 518 fill_ebus_child(node, &regs[0],
519 child, child_regs_nonstandard(dev)); 519 child, child_regs_nonstandard(dev));
520 } 520 }
521 } 521 }
522 printk("]"); 522 printk("]");
523 } 523 }
524 524
525 static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p) 525 static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
526 { 526 {
527 struct pci_dev *pdev = start; 527 struct pci_dev *pdev = start;
528 528
529 while ((pdev = pci_get_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev))) 529 while ((pdev = pci_get_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev)))
530 if (pdev->device == PCI_DEVICE_ID_SUN_EBUS || 530 if (pdev->device == PCI_DEVICE_ID_SUN_EBUS ||
531 pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS) 531 pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS)
532 break; 532 break;
533 533
534 *is_rio_p = !!(pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS)); 534 *is_rio_p = !!(pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS));
535 535
536 return pdev; 536 return pdev;
537 } 537 }
538 538
539 void __init ebus_init(void) 539 void __init ebus_init(void)
540 { 540 {
541 struct pci_pbm_info *pbm; 541 struct pci_pbm_info *pbm;
542 struct linux_ebus_device *dev; 542 struct linux_ebus_device *dev;
543 struct linux_ebus *ebus; 543 struct linux_ebus *ebus;
544 struct pci_dev *pdev; 544 struct pci_dev *pdev;
545 struct pcidev_cookie *cookie; 545 struct pcidev_cookie *cookie;
546 int nd, ebusnd, is_rio; 546 int nd, ebusnd, is_rio;
547 int num_ebus = 0; 547 int num_ebus = 0;
548 548
549 pdev = find_next_ebus(NULL, &is_rio); 549 pdev = find_next_ebus(NULL, &is_rio);
550 if (!pdev) { 550 if (!pdev) {
551 printk("ebus: No EBus's found.\n"); 551 printk("ebus: No EBus's found.\n");
552 return; 552 return;
553 } 553 }
554 554
555 cookie = pdev->sysdata; 555 cookie = pdev->sysdata;
556 ebusnd = cookie->prom_node; 556 ebusnd = cookie->prom_node->node;
557 557
558 ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus)); 558 ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
559 ebus->next = NULL; 559 ebus->next = NULL;
560 ebus->is_rio = is_rio; 560 ebus->is_rio = is_rio;
561 561
562 while (ebusnd) { 562 while (ebusnd) {
563 /* SUNW,pci-qfe uses four empty ebuses on it. 563 /* SUNW,pci-qfe uses four empty ebuses on it.
564 I think we should not consider them here, 564 I think we should not consider them here,
565 as they have half of the properties this 565 as they have half of the properties this
566 code expects and once we do PCI hot-plug, 566 code expects and once we do PCI hot-plug,
567 we'd have to tweak with the ebus_chain 567 we'd have to tweak with the ebus_chain
568 in the runtime after initialization. -jj */ 568 in the runtime after initialization. -jj */
569 if (!prom_getchild (ebusnd)) { 569 if (!prom_getchild (ebusnd)) {
570 pdev = find_next_ebus(pdev, &is_rio); 570 pdev = find_next_ebus(pdev, &is_rio);
571 if (!pdev) { 571 if (!pdev) {
572 if (ebus == ebus_chain) { 572 if (ebus == ebus_chain) {
573 ebus_chain = NULL; 573 ebus_chain = NULL;
574 printk("ebus: No EBus's found.\n"); 574 printk("ebus: No EBus's found.\n");
575 return; 575 return;
576 } 576 }
577 break; 577 break;
578 } 578 }
579 ebus->is_rio = is_rio; 579 ebus->is_rio = is_rio;
580 cookie = pdev->sysdata; 580 cookie = pdev->sysdata;
581 ebusnd = cookie->prom_node; 581 ebusnd = cookie->prom_node->node;
582 continue; 582 continue;
583 } 583 }
584 printk("ebus%d:", num_ebus); 584 printk("ebus%d:", num_ebus);
585 585
586 prom_getstring(ebusnd, "name", ebus->prom_name, sizeof(ebus->prom_name)); 586 prom_getstring(ebusnd, "name", ebus->prom_name, sizeof(ebus->prom_name));
587 ebus->index = num_ebus; 587 ebus->index = num_ebus;
588 ebus->prom_node = ebusnd; 588 ebus->prom_node = ebusnd;
589 ebus->self = pdev; 589 ebus->self = pdev;
590 ebus->parent = pbm = cookie->pbm; 590 ebus->parent = pbm = cookie->pbm;
591 591
592 ebus_ranges_init(ebus); 592 ebus_ranges_init(ebus);
593 ebus_intmap_init(ebus); 593 ebus_intmap_init(ebus);
594 594
595 nd = prom_getchild(ebusnd); 595 nd = prom_getchild(ebusnd);
596 if (!nd) 596 if (!nd)
597 goto next_ebus; 597 goto next_ebus;
598 598
599 ebus->devices = ebus_alloc(sizeof(struct linux_ebus_device)); 599 ebus->devices = ebus_alloc(sizeof(struct linux_ebus_device));
600 600
601 dev = ebus->devices; 601 dev = ebus->devices;
602 dev->next = NULL; 602 dev->next = NULL;
603 dev->children = NULL; 603 dev->children = NULL;
604 dev->bus = ebus; 604 dev->bus = ebus;
605 fill_ebus_device(nd, dev); 605 fill_ebus_device(nd, dev);
606 606
607 while ((nd = prom_getsibling(nd)) != 0) { 607 while ((nd = prom_getsibling(nd)) != 0) {
608 dev->next = ebus_alloc(sizeof(struct linux_ebus_device)); 608 dev->next = ebus_alloc(sizeof(struct linux_ebus_device));
609 609
610 dev = dev->next; 610 dev = dev->next;
611 dev->next = NULL; 611 dev->next = NULL;
612 dev->children = NULL; 612 dev->children = NULL;
613 dev->bus = ebus; 613 dev->bus = ebus;
614 fill_ebus_device(nd, dev); 614 fill_ebus_device(nd, dev);
615 } 615 }
616 616
617 next_ebus: 617 next_ebus:
618 printk("\n"); 618 printk("\n");
619 619
620 pdev = find_next_ebus(pdev, &is_rio); 620 pdev = find_next_ebus(pdev, &is_rio);
621 if (!pdev) 621 if (!pdev)
622 break; 622 break;
623 623
624 cookie = pdev->sysdata; 624 cookie = pdev->sysdata;
625 ebusnd = cookie->prom_node; 625 ebusnd = cookie->prom_node->node;
626 626
627 ebus->next = ebus_alloc(sizeof(struct linux_ebus)); 627 ebus->next = ebus_alloc(sizeof(struct linux_ebus));
628 ebus = ebus->next; 628 ebus = ebus->next;
629 ebus->next = NULL; 629 ebus->next = NULL;
630 ebus->is_rio = is_rio; 630 ebus->is_rio = is_rio;
631 ++num_ebus; 631 ++num_ebus;
632 } 632 }
633 pci_dev_put(pdev); /* XXX for the case, when ebusnd is 0, is it OK? */ 633 pci_dev_put(pdev); /* XXX for the case, when ebusnd is 0, is it OK? */
634 634
635 #ifdef CONFIG_SUN_AUXIO 635 #ifdef CONFIG_SUN_AUXIO
636 auxio_probe(); 636 auxio_probe();
637 #endif 637 #endif
638 } 638 }
639 639
arch/sparc64/kernel/isa.c
1 #include <linux/kernel.h> 1 #include <linux/kernel.h>
2 #include <linux/init.h> 2 #include <linux/init.h>
3 #include <linux/pci.h> 3 #include <linux/pci.h>
4 #include <linux/slab.h> 4 #include <linux/slab.h>
5 #include <asm/oplib.h> 5 #include <asm/oplib.h>
6 #include <asm/isa.h> 6 #include <asm/isa.h>
7 7
8 struct sparc_isa_bridge *isa_chain; 8 struct sparc_isa_bridge *isa_chain;
9 9
10 static void __init fatal_err(const char *reason) 10 static void __init fatal_err(const char *reason)
11 { 11 {
12 prom_printf("ISA: fatal error, %s.\n", reason); 12 prom_printf("ISA: fatal error, %s.\n", reason);
13 } 13 }
14 14
15 static void __init report_dev(struct sparc_isa_device *isa_dev, int child) 15 static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
16 { 16 {
17 if (child) 17 if (child)
18 printk(" (%s)", isa_dev->prom_name); 18 printk(" (%s)", isa_dev->prom_name);
19 else 19 else
20 printk(" [%s", isa_dev->prom_name); 20 printk(" [%s", isa_dev->prom_name);
21 } 21 }
22 22
23 static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev, 23 static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev,
24 struct linux_prom_registers *pregs, 24 struct linux_prom_registers *pregs,
25 int pregs_size) 25 int pregs_size)
26 { 26 {
27 unsigned long base, len; 27 unsigned long base, len;
28 int prop_len; 28 int prop_len;
29 29
30 prop_len = prom_getproperty(isa_dev->prom_node, "reg", 30 prop_len = prom_getproperty(isa_dev->prom_node, "reg",
31 (char *) pregs, pregs_size); 31 (char *) pregs, pregs_size);
32 32
33 if (prop_len <= 0) 33 if (prop_len <= 0)
34 return; 34 return;
35 35
36 /* Only the first one is interesting. */ 36 /* Only the first one is interesting. */
37 len = pregs[0].reg_size; 37 len = pregs[0].reg_size;
38 base = (((unsigned long)pregs[0].which_io << 32) | 38 base = (((unsigned long)pregs[0].which_io << 32) |
39 (unsigned long)pregs[0].phys_addr); 39 (unsigned long)pregs[0].phys_addr);
40 base += isa_dev->bus->parent->io_space.start; 40 base += isa_dev->bus->parent->io_space.start;
41 41
42 isa_dev->resource.start = base; 42 isa_dev->resource.start = base;
43 isa_dev->resource.end = (base + len - 1UL); 43 isa_dev->resource.end = (base + len - 1UL);
44 isa_dev->resource.flags = IORESOURCE_IO; 44 isa_dev->resource.flags = IORESOURCE_IO;
45 isa_dev->resource.name = isa_dev->prom_name; 45 isa_dev->resource.name = isa_dev->prom_name;
46 46
47 request_resource(&isa_dev->bus->parent->io_space, 47 request_resource(&isa_dev->bus->parent->io_space,
48 &isa_dev->resource); 48 &isa_dev->resource);
49 } 49 }
50 50
51 /* I can't believe they didn't put a real INO in the isa device 51 /* I can't believe they didn't put a real INO in the isa device
52 * interrupts property. The whole point of the OBP properties 52 * interrupts property. The whole point of the OBP properties
53 * is to shield the kernel from IRQ routing details. 53 * is to shield the kernel from IRQ routing details.
54 * 54 *
55 * The P1275 standard for ISA devices seems to also have been 55 * The P1275 standard for ISA devices seems to also have been
56 * totally ignored. 56 * totally ignored.
57 * 57 *
58 * On later systems, an interrupt-map and interrupt-map-mask scheme 58 * On later systems, an interrupt-map and interrupt-map-mask scheme
59 * akin to EBUS is used. 59 * akin to EBUS is used.
60 */ 60 */
61 static struct { 61 static struct {
62 int obp_irq; 62 int obp_irq;
63 int pci_ino; 63 int pci_ino;
64 } grover_irq_table[] = { 64 } grover_irq_table[] = {
65 { 1, 0x00 }, /* dma, unknown ino at this point */ 65 { 1, 0x00 }, /* dma, unknown ino at this point */
66 { 2, 0x27 }, /* floppy */ 66 { 2, 0x27 }, /* floppy */
67 { 3, 0x22 }, /* parallel */ 67 { 3, 0x22 }, /* parallel */
68 { 4, 0x2b }, /* serial */ 68 { 4, 0x2b }, /* serial */
69 { 5, 0x25 }, /* acpi power management */ 69 { 5, 0x25 }, /* acpi power management */
70 70
71 { 0, 0x00 } /* end of table */ 71 { 0, 0x00 } /* end of table */
72 }; 72 };
73 73
74 static int __init isa_dev_get_irq_using_imap(struct sparc_isa_device *isa_dev, 74 static int __init isa_dev_get_irq_using_imap(struct sparc_isa_device *isa_dev,
75 struct sparc_isa_bridge *isa_br, 75 struct sparc_isa_bridge *isa_br,
76 int *interrupt, 76 int *interrupt,
77 struct linux_prom_registers *pregs) 77 struct linux_prom_registers *pregs)
78 { 78 {
79 unsigned int hi, lo, irq; 79 unsigned int hi, lo, irq;
80 int i; 80 int i;
81 81
82 hi = pregs->which_io & isa_br->isa_intmask.phys_hi; 82 hi = pregs->which_io & isa_br->isa_intmask.phys_hi;
83 lo = pregs->phys_addr & isa_br->isa_intmask.phys_lo; 83 lo = pregs->phys_addr & isa_br->isa_intmask.phys_lo;
84 irq = *interrupt & isa_br->isa_intmask.interrupt; 84 irq = *interrupt & isa_br->isa_intmask.interrupt;
85 for (i = 0; i < isa_br->num_isa_intmap; i++) { 85 for (i = 0; i < isa_br->num_isa_intmap; i++) {
86 if ((isa_br->isa_intmap[i].phys_hi == hi) && 86 if ((isa_br->isa_intmap[i].phys_hi == hi) &&
87 (isa_br->isa_intmap[i].phys_lo == lo) && 87 (isa_br->isa_intmap[i].phys_lo == lo) &&
88 (isa_br->isa_intmap[i].interrupt == irq)) { 88 (isa_br->isa_intmap[i].interrupt == irq)) {
89 *interrupt = isa_br->isa_intmap[i].cinterrupt; 89 *interrupt = isa_br->isa_intmap[i].cinterrupt;
90 return 0; 90 return 0;
91 } 91 }
92 } 92 }
93 return -1; 93 return -1;
94 } 94 }
95 95
96 static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev, 96 static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev,
97 struct linux_prom_registers *pregs) 97 struct linux_prom_registers *pregs)
98 { 98 {
99 int irq_prop; 99 int irq_prop;
100 100
101 irq_prop = prom_getintdefault(isa_dev->prom_node, 101 irq_prop = prom_getintdefault(isa_dev->prom_node,
102 "interrupts", -1); 102 "interrupts", -1);
103 if (irq_prop <= 0) { 103 if (irq_prop <= 0) {
104 goto no_irq; 104 goto no_irq;
105 } else { 105 } else {
106 struct pci_controller_info *pcic; 106 struct pci_controller_info *pcic;
107 struct pci_pbm_info *pbm; 107 struct pci_pbm_info *pbm;
108 int i; 108 int i;
109 109
110 if (isa_dev->bus->num_isa_intmap) { 110 if (isa_dev->bus->num_isa_intmap) {
111 if (!isa_dev_get_irq_using_imap(isa_dev, 111 if (!isa_dev_get_irq_using_imap(isa_dev,
112 isa_dev->bus, 112 isa_dev->bus,
113 &irq_prop, 113 &irq_prop,
114 pregs)) 114 pregs))
115 goto route_irq; 115 goto route_irq;
116 } 116 }
117 117
118 for (i = 0; grover_irq_table[i].obp_irq != 0; i++) { 118 for (i = 0; grover_irq_table[i].obp_irq != 0; i++) {
119 if (grover_irq_table[i].obp_irq == irq_prop) { 119 if (grover_irq_table[i].obp_irq == irq_prop) {
120 int ino = grover_irq_table[i].pci_ino; 120 int ino = grover_irq_table[i].pci_ino;
121 121
122 if (ino == 0) 122 if (ino == 0)
123 goto no_irq; 123 goto no_irq;
124 124
125 irq_prop = ino; 125 irq_prop = ino;
126 goto route_irq; 126 goto route_irq;
127 } 127 }
128 } 128 }
129 goto no_irq; 129 goto no_irq;
130 130
131 route_irq: 131 route_irq:
132 pbm = isa_dev->bus->parent; 132 pbm = isa_dev->bus->parent;
133 pcic = pbm->parent; 133 pcic = pbm->parent;
134 isa_dev->irq = pcic->irq_build(pbm, NULL, irq_prop); 134 isa_dev->irq = pcic->irq_build(pbm, NULL, irq_prop);
135 return; 135 return;
136 } 136 }
137 137
138 no_irq: 138 no_irq:
139 isa_dev->irq = PCI_IRQ_NONE; 139 isa_dev->irq = PCI_IRQ_NONE;
140 } 140 }
141 141
142 static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev) 142 static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
143 { 143 {
144 int node = prom_getchild(parent_isa_dev->prom_node); 144 int node = prom_getchild(parent_isa_dev->prom_node);
145 145
146 if (node == 0) 146 if (node == 0)
147 return; 147 return;
148 148
149 printk(" ->"); 149 printk(" ->");
150 while (node != 0) { 150 while (node != 0) {
151 struct linux_prom_registers regs[PROMREG_MAX]; 151 struct linux_prom_registers regs[PROMREG_MAX];
152 struct sparc_isa_device *isa_dev; 152 struct sparc_isa_device *isa_dev;
153 int prop_len; 153 int prop_len;
154 154
155 isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL); 155 isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
156 if (!isa_dev) { 156 if (!isa_dev) {
157 fatal_err("cannot allocate child isa_dev"); 157 fatal_err("cannot allocate child isa_dev");
158 prom_halt(); 158 prom_halt();
159 } 159 }
160 160
161 memset(isa_dev, 0, sizeof(*isa_dev)); 161 memset(isa_dev, 0, sizeof(*isa_dev));
162 162
163 /* Link it in to parent. */ 163 /* Link it in to parent. */
164 isa_dev->next = parent_isa_dev->child; 164 isa_dev->next = parent_isa_dev->child;
165 parent_isa_dev->child = isa_dev; 165 parent_isa_dev->child = isa_dev;
166 166
167 isa_dev->bus = parent_isa_dev->bus; 167 isa_dev->bus = parent_isa_dev->bus;
168 isa_dev->prom_node = node; 168 isa_dev->prom_node = node;
169 prop_len = prom_getproperty(node, "name", 169 prop_len = prom_getproperty(node, "name",
170 (char *) isa_dev->prom_name, 170 (char *) isa_dev->prom_name,
171 sizeof(isa_dev->prom_name)); 171 sizeof(isa_dev->prom_name));
172 if (prop_len <= 0) { 172 if (prop_len <= 0) {
173 fatal_err("cannot get child isa_dev OBP node name"); 173 fatal_err("cannot get child isa_dev OBP node name");
174 prom_halt(); 174 prom_halt();
175 } 175 }
176 176
177 prop_len = prom_getproperty(node, "compatible", 177 prop_len = prom_getproperty(node, "compatible",
178 (char *) isa_dev->compatible, 178 (char *) isa_dev->compatible,
179 sizeof(isa_dev->compatible)); 179 sizeof(isa_dev->compatible));
180 180
181 /* Not having this is OK. */ 181 /* Not having this is OK. */
182 if (prop_len <= 0) 182 if (prop_len <= 0)
183 isa_dev->compatible[0] = '\0'; 183 isa_dev->compatible[0] = '\0';
184 184
185 isa_dev_get_resource(isa_dev, regs, sizeof(regs)); 185 isa_dev_get_resource(isa_dev, regs, sizeof(regs));
186 isa_dev_get_irq(isa_dev, regs); 186 isa_dev_get_irq(isa_dev, regs);
187 187
188 report_dev(isa_dev, 1); 188 report_dev(isa_dev, 1);
189 189
190 node = prom_getsibling(node); 190 node = prom_getsibling(node);
191 } 191 }
192 } 192 }
193 193
194 static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br) 194 static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
195 { 195 {
196 int node = prom_getchild(isa_br->prom_node); 196 int node = prom_getchild(isa_br->prom_node);
197 197
198 while (node != 0) { 198 while (node != 0) {
199 struct linux_prom_registers regs[PROMREG_MAX]; 199 struct linux_prom_registers regs[PROMREG_MAX];
200 struct sparc_isa_device *isa_dev; 200 struct sparc_isa_device *isa_dev;
201 int prop_len; 201 int prop_len;
202 202
203 isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL); 203 isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
204 if (!isa_dev) { 204 if (!isa_dev) {
205 fatal_err("cannot allocate isa_dev"); 205 fatal_err("cannot allocate isa_dev");
206 prom_halt(); 206 prom_halt();
207 } 207 }
208 208
209 memset(isa_dev, 0, sizeof(*isa_dev)); 209 memset(isa_dev, 0, sizeof(*isa_dev));
210 210
211 /* Link it in. */ 211 /* Link it in. */
212 isa_dev->next = NULL; 212 isa_dev->next = NULL;
213 if (isa_br->devices == NULL) { 213 if (isa_br->devices == NULL) {
214 isa_br->devices = isa_dev; 214 isa_br->devices = isa_dev;
215 } else { 215 } else {
216 struct sparc_isa_device *tmp = isa_br->devices; 216 struct sparc_isa_device *tmp = isa_br->devices;
217 217
218 while (tmp->next) 218 while (tmp->next)
219 tmp = tmp->next; 219 tmp = tmp->next;
220 220
221 tmp->next = isa_dev; 221 tmp->next = isa_dev;
222 } 222 }
223 223
224 isa_dev->bus = isa_br; 224 isa_dev->bus = isa_br;
225 isa_dev->prom_node = node; 225 isa_dev->prom_node = node;
226 prop_len = prom_getproperty(node, "name", 226 prop_len = prom_getproperty(node, "name",
227 (char *) isa_dev->prom_name, 227 (char *) isa_dev->prom_name,
228 sizeof(isa_dev->prom_name)); 228 sizeof(isa_dev->prom_name));
229 if (prop_len <= 0) { 229 if (prop_len <= 0) {
230 fatal_err("cannot get isa_dev OBP node name"); 230 fatal_err("cannot get isa_dev OBP node name");
231 prom_halt(); 231 prom_halt();
232 } 232 }
233 233
234 prop_len = prom_getproperty(node, "compatible", 234 prop_len = prom_getproperty(node, "compatible",
235 (char *) isa_dev->compatible, 235 (char *) isa_dev->compatible,
236 sizeof(isa_dev->compatible)); 236 sizeof(isa_dev->compatible));
237 237
238 /* Not having this is OK. */ 238 /* Not having this is OK. */
239 if (prop_len <= 0) 239 if (prop_len <= 0)
240 isa_dev->compatible[0] = '\0'; 240 isa_dev->compatible[0] = '\0';
241 241
242 isa_dev_get_resource(isa_dev, regs, sizeof(regs)); 242 isa_dev_get_resource(isa_dev, regs, sizeof(regs));
243 isa_dev_get_irq(isa_dev, regs); 243 isa_dev_get_irq(isa_dev, regs);
244 244
245 report_dev(isa_dev, 0); 245 report_dev(isa_dev, 0);
246 246
247 isa_fill_children(isa_dev); 247 isa_fill_children(isa_dev);
248 248
249 printk("]"); 249 printk("]");
250 250
251 node = prom_getsibling(node); 251 node = prom_getsibling(node);
252 } 252 }
253 } 253 }
254 254
255 void __init isa_init(void) 255 void __init isa_init(void)
256 { 256 {
257 struct pci_dev *pdev; 257 struct pci_dev *pdev;
258 unsigned short vendor, device; 258 unsigned short vendor, device;
259 int index = 0; 259 int index = 0;
260 260
261 vendor = PCI_VENDOR_ID_AL; 261 vendor = PCI_VENDOR_ID_AL;
262 device = PCI_DEVICE_ID_AL_M1533; 262 device = PCI_DEVICE_ID_AL_M1533;
263 263
264 pdev = NULL; 264 pdev = NULL;
265 while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) { 265 while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
266 struct pcidev_cookie *pdev_cookie; 266 struct pcidev_cookie *pdev_cookie;
267 struct pci_pbm_info *pbm; 267 struct pci_pbm_info *pbm;
268 struct sparc_isa_bridge *isa_br; 268 struct sparc_isa_bridge *isa_br;
269 int prop_len; 269 int prop_len;
270 270
271 pdev_cookie = pdev->sysdata; 271 pdev_cookie = pdev->sysdata;
272 if (!pdev_cookie) { 272 if (!pdev_cookie) {
273 printk("ISA: Warning, ISA bridge ignored due to " 273 printk("ISA: Warning, ISA bridge ignored due to "
274 "lack of OBP data.\n"); 274 "lack of OBP data.\n");
275 continue; 275 continue;
276 } 276 }
277 pbm = pdev_cookie->pbm; 277 pbm = pdev_cookie->pbm;
278 278
279 isa_br = kmalloc(sizeof(*isa_br), GFP_KERNEL); 279 isa_br = kmalloc(sizeof(*isa_br), GFP_KERNEL);
280 if (!isa_br) { 280 if (!isa_br) {
281 fatal_err("cannot allocate sparc_isa_bridge"); 281 fatal_err("cannot allocate sparc_isa_bridge");
282 prom_halt(); 282 prom_halt();
283 } 283 }
284 284
285 memset(isa_br, 0, sizeof(*isa_br)); 285 memset(isa_br, 0, sizeof(*isa_br));
286 286
287 /* Link it in. */ 287 /* Link it in. */
288 isa_br->next = isa_chain; 288 isa_br->next = isa_chain;
289 isa_chain = isa_br; 289 isa_chain = isa_br;
290 290
291 isa_br->parent = pbm; 291 isa_br->parent = pbm;
292 isa_br->self = pdev; 292 isa_br->self = pdev;
293 isa_br->index = index++; 293 isa_br->index = index++;
294 isa_br->prom_node = pdev_cookie->prom_node; 294 isa_br->prom_node = pdev_cookie->prom_node->node;
295 strncpy(isa_br->prom_name, pdev_cookie->prom_name, 295 strncpy(isa_br->prom_name, pdev_cookie->prom_node->name,
296 sizeof(isa_br->prom_name)); 296 sizeof(isa_br->prom_name));
297 297
298 prop_len = prom_getproperty(isa_br->prom_node, 298 prop_len = prom_getproperty(isa_br->prom_node,
299 "ranges", 299 "ranges",
300 (char *) isa_br->isa_ranges, 300 (char *) isa_br->isa_ranges,
301 sizeof(isa_br->isa_ranges)); 301 sizeof(isa_br->isa_ranges));
302 if (prop_len <= 0) 302 if (prop_len <= 0)
303 isa_br->num_isa_ranges = 0; 303 isa_br->num_isa_ranges = 0;
304 else 304 else
305 isa_br->num_isa_ranges = 305 isa_br->num_isa_ranges =
306 (prop_len / sizeof(struct linux_prom_isa_ranges)); 306 (prop_len / sizeof(struct linux_prom_isa_ranges));
307 307
308 prop_len = prom_getproperty(isa_br->prom_node, 308 prop_len = prom_getproperty(isa_br->prom_node,
309 "interrupt-map", 309 "interrupt-map",
310 (char *) isa_br->isa_intmap, 310 (char *) isa_br->isa_intmap,
311 sizeof(isa_br->isa_intmap)); 311 sizeof(isa_br->isa_intmap));
312 if (prop_len <= 0) 312 if (prop_len <= 0)
313 isa_br->num_isa_intmap = 0; 313 isa_br->num_isa_intmap = 0;
314 else 314 else
315 isa_br->num_isa_intmap = 315 isa_br->num_isa_intmap =
316 (prop_len / sizeof(struct linux_prom_isa_intmap)); 316 (prop_len / sizeof(struct linux_prom_isa_intmap));
317 317
318 prop_len = prom_getproperty(isa_br->prom_node, 318 prop_len = prom_getproperty(isa_br->prom_node,
319 "interrupt-map-mask", 319 "interrupt-map-mask",
320 (char *) &(isa_br->isa_intmask), 320 (char *) &(isa_br->isa_intmask),
321 sizeof(isa_br->isa_intmask)); 321 sizeof(isa_br->isa_intmask));
322 322
323 printk("isa%d:", isa_br->index); 323 printk("isa%d:", isa_br->index);
324 324
325 isa_fill_devices(isa_br); 325 isa_fill_devices(isa_br);
326 326
327 printk("\n"); 327 printk("\n");
328 } 328 }
329 } 329 }
330 330
arch/sparc64/kernel/pci_common.c
1 /* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $ 1 /* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
2 * pci_common.c: PCI controller common support. 2 * pci_common.c: PCI controller common support.
3 * 3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com) 4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */ 5 */
6 6
7 #include <linux/string.h> 7 #include <linux/string.h>
8 #include <linux/slab.h> 8 #include <linux/slab.h>
9 #include <linux/init.h> 9 #include <linux/init.h>
10 10
11 #include <asm/pbm.h> 11 #include <asm/pbm.h>
12 #include <asm/prom.h>
12 13
14 #include "pci_impl.h"
15
13 /* Pass "pci=irq_verbose" on the kernel command line to enable this. */ 16 /* Pass "pci=irq_verbose" on the kernel command line to enable this. */
14 int pci_irq_verbose; 17 int pci_irq_verbose;
15 18
16 /* Fix self device of BUS and hook it into BUS->self. 19 /* Fix self device of BUS and hook it into BUS->self.
17 * The pci_scan_bus does not do this for the host bridge. 20 * The pci_scan_bus does not do this for the host bridge.
18 */ 21 */
19 void __init pci_fixup_host_bridge_self(struct pci_bus *pbus) 22 void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
20 { 23 {
21 struct pci_dev *pdev; 24 struct pci_dev *pdev;
22 25
23 list_for_each_entry(pdev, &pbus->devices, bus_list) { 26 list_for_each_entry(pdev, &pbus->devices, bus_list) {
24 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) { 27 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
25 pbus->self = pdev; 28 pbus->self = pdev;
26 return; 29 return;
27 } 30 }
28 } 31 }
29 32
30 prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n"); 33 prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
31 prom_halt(); 34 prom_halt();
32 } 35 }
33 36
34 /* Find the OBP PROM device tree node for a PCI device. 37 /* Find the OBP PROM device tree node for a PCI device. */
35 * Return zero if not found. 38 static struct device_node * __init
36 */ 39 find_device_prom_node(struct pci_pbm_info *pbm, struct pci_dev *pdev,
37 static int __init find_device_prom_node(struct pci_pbm_info *pbm, 40 struct device_node *bus_node,
38 struct pci_dev *pdev, 41 struct linux_prom_pci_registers **pregs,
39 int bus_prom_node, 42 int *nregs)
40 struct linux_prom_pci_registers *pregs,
41 int *nregs)
42 { 43 {
43 int node; 44 struct device_node *dp;
44 45
45 *nregs = 0; 46 *nregs = 0;
46 47
47 /* 48 /*
48 * Return the PBM's PROM node in case we are it's PCI device, 49 * Return the PBM's PROM node in case we are it's PCI device,
49 * as the PBM's reg property is different to standard PCI reg 50 * as the PBM's reg property is different to standard PCI reg
50 * properties. We would delete this device entry otherwise, 51 * properties. We would delete this device entry otherwise,
51 * which confuses XFree86's device probing... 52 * which confuses XFree86's device probing...
52 */ 53 */
53 if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) && 54 if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
54 (pdev->vendor == PCI_VENDOR_ID_SUN) && 55 (pdev->vendor == PCI_VENDOR_ID_SUN) &&
55 (pdev->device == PCI_DEVICE_ID_SUN_PBM || 56 (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
56 pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || 57 pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
57 pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || 58 pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
58 pdev->device == PCI_DEVICE_ID_SUN_SABRE || 59 pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
59 pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) 60 pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD))
60 return bus_prom_node; 61 return bus_node;
61 62
62 node = prom_getchild(bus_prom_node); 63 dp = bus_node->child;
63 while (node != 0) { 64 while (dp) {
64 int err = prom_getproperty(node, "reg", 65 struct linux_prom_pci_registers *regs;
65 (char *)pregs, 66 struct property *prop;
66 sizeof(*pregs) * PROMREG_MAX); 67 int len;
67 if (err == 0 || err == -1) 68
69 prop = of_find_property(dp, "reg", &len);
70 if (!prop)
68 goto do_next_sibling; 71 goto do_next_sibling;
69 if (((pregs[0].phys_hi >> 8) & 0xff) == pdev->devfn) { 72
70 *nregs = err / sizeof(*pregs); 73 regs = prop->value;
71 return node; 74 if (((regs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
75 *pregs = regs;
76 *nregs = len / sizeof(struct linux_prom_pci_registers);
77 return dp;
72 } 78 }
73 79
74 do_next_sibling: 80 do_next_sibling:
75 node = prom_getsibling(node); 81 dp = dp->sibling;
76 } 82 }
77 return 0; 83
84 return NULL;
78 } 85 }
79 86
80 /* Older versions of OBP on PCI systems encode 64-bit MEM 87 /* Older versions of OBP on PCI systems encode 64-bit MEM
81 * space assignments incorrectly, this fixes them up. We also 88 * space assignments incorrectly, this fixes them up. We also
82 * take the opportunity here to hide other kinds of bogus 89 * take the opportunity here to hide other kinds of bogus
83 * assignments. 90 * assignments.
84 */ 91 */
85 static void __init fixup_obp_assignments(struct pci_dev *pdev, 92 static void __init fixup_obp_assignments(struct pci_dev *pdev,
86 struct pcidev_cookie *pcp) 93 struct pcidev_cookie *pcp)
87 { 94 {
88 int i; 95 int i;
89 96
90 if (pdev->vendor == PCI_VENDOR_ID_AL && 97 if (pdev->vendor == PCI_VENDOR_ID_AL &&
91 (pdev->device == PCI_DEVICE_ID_AL_M7101 || 98 (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
92 pdev->device == PCI_DEVICE_ID_AL_M1533)) { 99 pdev->device == PCI_DEVICE_ID_AL_M1533)) {
93 int i; 100 int i;
94 101
95 /* Zap all of the normal resources, they are 102 /* Zap all of the normal resources, they are
96 * meaningless and generate bogus resource collision 103 * meaningless and generate bogus resource collision
97 * messages. This is OpenBoot's ill-fated attempt to 104 * messages. This is OpenBoot's ill-fated attempt to
98 * represent the implicit resources that these devices 105 * represent the implicit resources that these devices
99 * have. 106 * have.
100 */ 107 */
101 pcp->num_prom_assignments = 0; 108 pcp->num_prom_assignments = 0;
102 for (i = 0; i < 6; i++) { 109 for (i = 0; i < 6; i++) {
103 pdev->resource[i].start = 110 pdev->resource[i].start =
104 pdev->resource[i].end = 111 pdev->resource[i].end =
105 pdev->resource[i].flags = 0; 112 pdev->resource[i].flags = 0;
106 } 113 }
107 pdev->resource[PCI_ROM_RESOURCE].start = 114 pdev->resource[PCI_ROM_RESOURCE].start =
108 pdev->resource[PCI_ROM_RESOURCE].end = 115 pdev->resource[PCI_ROM_RESOURCE].end =
109 pdev->resource[PCI_ROM_RESOURCE].flags = 0; 116 pdev->resource[PCI_ROM_RESOURCE].flags = 0;
110 return; 117 return;
111 } 118 }
112 119
113 for (i = 0; i < pcp->num_prom_assignments; i++) { 120 for (i = 0; i < pcp->num_prom_assignments; i++) {
114 struct linux_prom_pci_registers *ap; 121 struct linux_prom_pci_registers *ap;
115 int space; 122 int space;
116 123
117 ap = &pcp->prom_assignments[i]; 124 ap = &pcp->prom_assignments[i];
118 space = ap->phys_hi >> 24; 125 space = ap->phys_hi >> 24;
119 if ((space & 0x3) == 2 && 126 if ((space & 0x3) == 2 &&
120 (space & 0x4) != 0) { 127 (space & 0x4) != 0) {
121 ap->phys_hi &= ~(0x7 << 24); 128 ap->phys_hi &= ~(0x7 << 24);
122 ap->phys_hi |= 0x3 << 24; 129 ap->phys_hi |= 0x3 << 24;
123 } 130 }
124 } 131 }
125 } 132 }
126 133
127 /* Fill in the PCI device cookie sysdata for the given 134 /* Fill in the PCI device cookie sysdata for the given
128 * PCI device. This cookie is the means by which one 135 * PCI device. This cookie is the means by which one
129 * can get to OBP and PCI controller specific information 136 * can get to OBP and PCI controller specific information
130 * for a PCI device. 137 * for a PCI device.
131 */ 138 */
132 static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm, 139 static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
133 struct pci_dev *pdev, 140 struct pci_dev *pdev,
134 int bus_prom_node) 141 struct device_node *bus_node)
135 { 142 {
136 struct linux_prom_pci_registers pregs[PROMREG_MAX]; 143 struct linux_prom_pci_registers *pregs = NULL;
137 struct pcidev_cookie *pcp; 144 struct pcidev_cookie *pcp;
138 int device_prom_node, nregs, err; 145 struct device_node *dp;
146 struct property *prop;
147 int nregs, len;
139 148
140 device_prom_node = find_device_prom_node(pbm, pdev, bus_prom_node, 149 dp = find_device_prom_node(pbm, pdev, bus_node,
141 pregs, &nregs); 150 &pregs, &nregs);
142 if (device_prom_node == 0) { 151 if (!dp) {
143 /* If it is not in the OBP device tree then 152 /* If it is not in the OBP device tree then
144 * there must be a damn good reason for it. 153 * there must be a damn good reason for it.
145 * 154 *
146 * So what we do is delete the device from the 155 * So what we do is delete the device from the
147 * PCI device tree completely. This scenario 156 * PCI device tree completely. This scenario
148 * is seen, for example, on CP1500 for the 157 * is seen, for example, on CP1500 for the
149 * second EBUS/HappyMeal pair if the external 158 * second EBUS/HappyMeal pair if the external
150 * connector for it is not present. 159 * connector for it is not present.
151 */ 160 */
152 pci_remove_bus_device(pdev); 161 pci_remove_bus_device(pdev);
153 return; 162 return;
154 } 163 }
155 164
156 pcp = kmalloc(sizeof(*pcp), GFP_ATOMIC); 165 pcp = kzalloc(sizeof(*pcp), GFP_ATOMIC);
157 if (pcp == NULL) { 166 if (pcp == NULL) {
158 prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n"); 167 prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
159 prom_halt(); 168 prom_halt();
160 } 169 }
161 pcp->pbm = pbm; 170 pcp->pbm = pbm;
162 pcp->prom_node = device_prom_node; 171 pcp->prom_node = dp;
163 memcpy(pcp->prom_regs, pregs, sizeof(pcp->prom_regs)); 172 memcpy(pcp->prom_regs, pregs,
173 nregs * sizeof(struct linux_prom_pci_registers));
164 pcp->num_prom_regs = nregs; 174 pcp->num_prom_regs = nregs;
165 err = prom_getproperty(device_prom_node, "name",
166 pcp->prom_name, sizeof(pcp->prom_name));
167 if (err > 0)
168 pcp->prom_name[err] = 0;
169 else
170 pcp->prom_name[0] = 0;
171 175
172 err = prom_getproperty(device_prom_node, 176 /* We can't have the pcidev_cookie assignments be just
173 "assigned-addresses", 177 * direct pointers into the property value, since they
174 (char *)pcp->prom_assignments, 178 * are potentially modified by the probing process.
175 sizeof(pcp->prom_assignments)); 179 */
176 if (err == 0 || err == -1) 180 prop = of_find_property(dp, "assigned-addresses", &len);
181 if (!prop) {
177 pcp->num_prom_assignments = 0; 182 pcp->num_prom_assignments = 0;
178 else 183 } else {
184 memcpy(pcp->prom_assignments, prop->value, len);
179 pcp->num_prom_assignments = 185 pcp->num_prom_assignments =
180 (err / sizeof(pcp->prom_assignments[0])); 186 (len / sizeof(pcp->prom_assignments[0]));
187 }
181 188
182 if (strcmp(pcp->prom_name, "ebus") == 0) { 189 if (strcmp(dp->name, "ebus") == 0) {
183 struct linux_prom_ebus_ranges erng[PROM_PCIRNG_MAX]; 190 struct linux_prom_ebus_ranges *erng;
184 int iter; 191 int iter;
185 192
186 /* EBUS is special... */ 193 /* EBUS is special... */
187 err = prom_getproperty(device_prom_node, "ranges", 194 prop = of_find_property(dp, "ranges", &len);
188 (char *)&erng[0], sizeof(erng)); 195 if (!prop) {
189 if (err == 0 || err == -1) {
190 prom_printf("EBUS: Fatal error, no range property\n"); 196 prom_printf("EBUS: Fatal error, no range property\n");
191 prom_halt(); 197 prom_halt();
192 } 198 }
193 err = (err / sizeof(erng[0])); 199 erng = prop->value;
194 for(iter = 0; iter < err; iter++) { 200 len = (len / sizeof(erng[0]));
201 for (iter = 0; iter < len; iter++) {
195 struct linux_prom_ebus_ranges *ep = &erng[iter]; 202 struct linux_prom_ebus_ranges *ep = &erng[iter];
196 struct linux_prom_pci_registers *ap; 203 struct linux_prom_pci_registers *ap;
197 204
198 ap = &pcp->prom_assignments[iter]; 205 ap = &pcp->prom_assignments[iter];
199 206
200 ap->phys_hi = ep->parent_phys_hi; 207 ap->phys_hi = ep->parent_phys_hi;
201 ap->phys_mid = ep->parent_phys_mid; 208 ap->phys_mid = ep->parent_phys_mid;
202 ap->phys_lo = ep->parent_phys_lo; 209 ap->phys_lo = ep->parent_phys_lo;
203 ap->size_hi = 0; 210 ap->size_hi = 0;
204 ap->size_lo = ep->size; 211 ap->size_lo = ep->size;
205 } 212 }
206 pcp->num_prom_assignments = err; 213 pcp->num_prom_assignments = len;
207 } 214 }
208 215
209 fixup_obp_assignments(pdev, pcp); 216 fixup_obp_assignments(pdev, pcp);
210 217
211 pdev->sysdata = pcp; 218 pdev->sysdata = pcp;
212 } 219 }
213 220
214 void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus, 221 void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
215 struct pci_pbm_info *pbm, 222 struct pci_pbm_info *pbm,
216 int prom_node) 223 struct device_node *dp)
217 { 224 {
218 struct pci_dev *pdev, *pdev_next; 225 struct pci_dev *pdev, *pdev_next;
219 struct pci_bus *this_pbus, *pbus_next; 226 struct pci_bus *this_pbus, *pbus_next;
220 227
221 /* This must be _safe because the cookie fillin 228 /* This must be _safe because the cookie fillin
222 routine can delete devices from the tree. */ 229 routine can delete devices from the tree. */
223 list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list) 230 list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
224 pdev_cookie_fillin(pbm, pdev, prom_node); 231 pdev_cookie_fillin(pbm, pdev, dp);
225 232
226 list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) { 233 list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
227 struct pcidev_cookie *pcp = this_pbus->self->sysdata; 234 struct pcidev_cookie *pcp = this_pbus->self->sysdata;
228 235
229 pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node); 236 pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
230 } 237 }
231 } 238 }
232 239
233 static void __init bad_assignment(struct pci_dev *pdev, 240 static void __init bad_assignment(struct pci_dev *pdev,
234 struct linux_prom_pci_registers *ap, 241 struct linux_prom_pci_registers *ap,
235 struct resource *res, 242 struct resource *res,
236 int do_prom_halt) 243 int do_prom_halt)
237 { 244 {
238 prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n", 245 prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
239 pdev->bus->number, pdev->devfn); 246 pdev->bus->number, pdev->devfn);
240 if (ap) 247 if (ap)
241 prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n", 248 prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
242 ap->phys_hi, ap->phys_mid, ap->phys_lo, 249 ap->phys_hi, ap->phys_mid, ap->phys_lo,
243 ap->size_hi, ap->size_lo); 250 ap->size_hi, ap->size_lo);
244 if (res) 251 if (res)
245 prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n", 252 prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
246 res->start, res->end, res->flags); 253 res->start, res->end, res->flags);
247 prom_printf("Please email this information to davem@redhat.com\n");
248 if (do_prom_halt) 254 if (do_prom_halt)
249 prom_halt(); 255 prom_halt();
250 } 256 }
251 257
252 static struct resource * 258 static struct resource *
253 __init get_root_resource(struct linux_prom_pci_registers *ap, 259 __init get_root_resource(struct linux_prom_pci_registers *ap,
254 struct pci_pbm_info *pbm) 260 struct pci_pbm_info *pbm)
255 { 261 {
256 int space = (ap->phys_hi >> 24) & 3; 262 int space = (ap->phys_hi >> 24) & 3;
257 263
258 switch (space) { 264 switch (space) {
259 case 0: 265 case 0:
260 /* Configuration space, silently ignore it. */ 266 /* Configuration space, silently ignore it. */
261 return NULL; 267 return NULL;
262 268
263 case 1: 269 case 1:
264 /* 16-bit IO space */ 270 /* 16-bit IO space */
265 return &pbm->io_space; 271 return &pbm->io_space;
266 272
267 case 2: 273 case 2:
268 /* 32-bit MEM space */ 274 /* 32-bit MEM space */
269 return &pbm->mem_space; 275 return &pbm->mem_space;
270 276
271 case 3: 277 case 3:
272 /* 64-bit MEM space, these are allocated out of 278 /* 64-bit MEM space, these are allocated out of
273 * the 32-bit mem_space range for the PBM, ie. 279 * the 32-bit mem_space range for the PBM, ie.
274 * we just zero out the upper 32-bits. 280 * we just zero out the upper 32-bits.
275 */ 281 */
276 return &pbm->mem_space; 282 return &pbm->mem_space;
277 283
278 default: 284 default:
279 printk("PCI: What is resource space %x? " 285 printk("PCI: What is resource space %x?\n", space);
280 "Tell davem@redhat.com about it!\n", space);
281 return NULL; 286 return NULL;
282 }; 287 };
283 } 288 }
284 289
285 static struct resource * 290 static struct resource *
286 __init get_device_resource(struct linux_prom_pci_registers *ap, 291 __init get_device_resource(struct linux_prom_pci_registers *ap,
287 struct pci_dev *pdev) 292 struct pci_dev *pdev)
288 { 293 {
289 struct resource *res; 294 struct resource *res;
290 int breg = (ap->phys_hi & 0xff); 295 int breg = (ap->phys_hi & 0xff);
291 296
292 switch (breg) { 297 switch (breg) {
293 case PCI_ROM_ADDRESS: 298 case PCI_ROM_ADDRESS:
294 /* Unfortunately I have seen several cases where 299 /* Unfortunately I have seen several cases where
295 * buggy FCODE uses a space value of '1' (I/O space) 300 * buggy FCODE uses a space value of '1' (I/O space)
296 * in the register property for the ROM address 301 * in the register property for the ROM address
297 * so disable this sanity check for now. 302 * so disable this sanity check for now.
298 */ 303 */
299 #if 0 304 #if 0
300 { 305 {
301 int space = (ap->phys_hi >> 24) & 3; 306 int space = (ap->phys_hi >> 24) & 3;
302 307
303 /* It had better be MEM space. */ 308 /* It had better be MEM space. */
304 if (space != 2) 309 if (space != 2)
305 bad_assignment(pdev, ap, NULL, 0); 310 bad_assignment(pdev, ap, NULL, 0);
306 } 311 }
307 #endif 312 #endif
308 res = &pdev->resource[PCI_ROM_RESOURCE]; 313 res = &pdev->resource[PCI_ROM_RESOURCE];
309 break; 314 break;
310 315
311 case PCI_BASE_ADDRESS_0: 316 case PCI_BASE_ADDRESS_0:
312 case PCI_BASE_ADDRESS_1: 317 case PCI_BASE_ADDRESS_1:
313 case PCI_BASE_ADDRESS_2: 318 case PCI_BASE_ADDRESS_2:
314 case PCI_BASE_ADDRESS_3: 319 case PCI_BASE_ADDRESS_3:
315 case PCI_BASE_ADDRESS_4: 320 case PCI_BASE_ADDRESS_4:
316 case PCI_BASE_ADDRESS_5: 321 case PCI_BASE_ADDRESS_5:
317 res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4]; 322 res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
318 break; 323 break;
319 324
320 default: 325 default:
321 bad_assignment(pdev, ap, NULL, 0); 326 bad_assignment(pdev, ap, NULL, 0);
322 res = NULL; 327 res = NULL;
323 break; 328 break;
324 }; 329 };
325 330
326 return res; 331 return res;
327 } 332 }
328 333
329 static int __init pdev_resource_collisions_expected(struct pci_dev *pdev) 334 static int __init pdev_resource_collisions_expected(struct pci_dev *pdev)
330 { 335 {
331 if (pdev->vendor != PCI_VENDOR_ID_SUN) 336 if (pdev->vendor != PCI_VENDOR_ID_SUN)
332 return 0; 337 return 0;
333 338
334 if (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS || 339 if (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS ||
335 pdev->device == PCI_DEVICE_ID_SUN_RIO_1394 || 340 pdev->device == PCI_DEVICE_ID_SUN_RIO_1394 ||
336 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB) 341 pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
337 return 1; 342 return 1;
338 343
339 return 0; 344 return 0;
340 } 345 }
341 346
342 static void __init pdev_record_assignments(struct pci_pbm_info *pbm, 347 static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
343 struct pci_dev *pdev) 348 struct pci_dev *pdev)
344 { 349 {
345 struct pcidev_cookie *pcp = pdev->sysdata; 350 struct pcidev_cookie *pcp = pdev->sysdata;
346 int i; 351 int i;
347 352
348 for (i = 0; i < pcp->num_prom_assignments; i++) { 353 for (i = 0; i < pcp->num_prom_assignments; i++) {
349 struct linux_prom_pci_registers *ap; 354 struct linux_prom_pci_registers *ap;
350 struct resource *root, *res; 355 struct resource *root, *res;
351 356
352 /* The format of this property is specified in 357 /* The format of this property is specified in
353 * the PCI Bus Binding to IEEE1275-1994. 358 * the PCI Bus Binding to IEEE1275-1994.
354 */ 359 */
355 ap = &pcp->prom_assignments[i]; 360 ap = &pcp->prom_assignments[i];
356 root = get_root_resource(ap, pbm); 361 root = get_root_resource(ap, pbm);
357 res = get_device_resource(ap, pdev); 362 res = get_device_resource(ap, pdev);
358 if (root == NULL || res == NULL || 363 if (root == NULL || res == NULL ||
359 res->flags == 0) 364 res->flags == 0)
360 continue; 365 continue;
361 366
362 /* Ok we know which resource this PROM assignment is 367 /* Ok we know which resource this PROM assignment is
363 * for, sanity check it. 368 * for, sanity check it.
364 */ 369 */
365 if ((res->start & 0xffffffffUL) != ap->phys_lo) 370 if ((res->start & 0xffffffffUL) != ap->phys_lo)
366 bad_assignment(pdev, ap, res, 1); 371 bad_assignment(pdev, ap, res, 1);
367 372
368 /* If it is a 64-bit MEM space assignment, verify that 373 /* If it is a 64-bit MEM space assignment, verify that
369 * the resource is too and that the upper 32-bits match. 374 * the resource is too and that the upper 32-bits match.
370 */ 375 */
371 if (((ap->phys_hi >> 24) & 3) == 3) { 376 if (((ap->phys_hi >> 24) & 3) == 3) {
372 if (((res->flags & IORESOURCE_MEM) == 0) || 377 if (((res->flags & IORESOURCE_MEM) == 0) ||
373 ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) 378 ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
374 != PCI_BASE_ADDRESS_MEM_TYPE_64)) 379 != PCI_BASE_ADDRESS_MEM_TYPE_64))
375 bad_assignment(pdev, ap, res, 1); 380 bad_assignment(pdev, ap, res, 1);
376 if ((res->start >> 32) != ap->phys_mid) 381 if ((res->start >> 32) != ap->phys_mid)
377 bad_assignment(pdev, ap, res, 1); 382 bad_assignment(pdev, ap, res, 1);
378 383
379 /* PBM cannot generate cpu initiated PIOs 384 /* PBM cannot generate cpu initiated PIOs
380 * to the full 64-bit space. Therefore the 385 * to the full 64-bit space. Therefore the
381 * upper 32-bits better be zero. If it is 386 * upper 32-bits better be zero. If it is
382 * not, just skip it and we will assign it 387 * not, just skip it and we will assign it
383 * properly ourselves. 388 * properly ourselves.
384 */ 389 */
385 if ((res->start >> 32) != 0UL) { 390 if ((res->start >> 32) != 0UL) {
386 printk(KERN_ERR "PCI: OBP assigns out of range MEM address " 391 printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
387 "%016lx for region %ld on device %s\n", 392 "%016lx for region %ld on device %s\n",
388 res->start, (res - &pdev->resource[0]), pci_name(pdev)); 393 res->start, (res - &pdev->resource[0]), pci_name(pdev));
389 continue; 394 continue;
390 } 395 }
391 } 396 }
392 397
393 /* Adjust the resource into the physical address space 398 /* Adjust the resource into the physical address space
394 * of this PBM. 399 * of this PBM.
395 */ 400 */
396 pbm->parent->resource_adjust(pdev, res, root); 401 pbm->parent->resource_adjust(pdev, res, root);
397 402
398 if (request_resource(root, res) < 0) { 403 if (request_resource(root, res) < 0) {
399 /* OK, there is some conflict. But this is fine 404 /* OK, there is some conflict. But this is fine
400 * since we'll reassign it in the fixup pass. 405 * since we'll reassign it in the fixup pass.
401 * 406 *
402 * We notify the user that OBP made an error if it 407 * We notify the user that OBP made an error if it
403 * is a case we don't expect. 408 * is a case we don't expect.
404 */ 409 */
405 if (!pdev_resource_collisions_expected(pdev)) { 410 if (!pdev_resource_collisions_expected(pdev)) {
406 printk(KERN_ERR "PCI: Address space collision on region %ld " 411 printk(KERN_ERR "PCI: Address space collision on region %ld "
407 "[%016lx:%016lx] of device %s\n", 412 "[%016lx:%016lx] of device %s\n",
408 (res - &pdev->resource[0]), 413 (res - &pdev->resource[0]),
409 res->start, res->end, 414 res->start, res->end,
410 pci_name(pdev)); 415 pci_name(pdev));
411 } 416 }
412 } 417 }
413 } 418 }
414 } 419 }
415 420
416 void __init pci_record_assignments(struct pci_pbm_info *pbm, 421 void __init pci_record_assignments(struct pci_pbm_info *pbm,
417 struct pci_bus *pbus) 422 struct pci_bus *pbus)
418 { 423 {
419 struct pci_dev *dev; 424 struct pci_dev *dev;
420 struct pci_bus *bus; 425 struct pci_bus *bus;
421 426
422 list_for_each_entry(dev, &pbus->devices, bus_list) 427 list_for_each_entry(dev, &pbus->devices, bus_list)
423 pdev_record_assignments(pbm, dev); 428 pdev_record_assignments(pbm, dev);
424 429
425 list_for_each_entry(bus, &pbus->children, node) 430 list_for_each_entry(bus, &pbus->children, node)
426 pci_record_assignments(pbm, bus); 431 pci_record_assignments(pbm, bus);
427 } 432 }
428 433
429 /* Return non-zero if PDEV has implicit I/O resources even 434 /* Return non-zero if PDEV has implicit I/O resources even
430 * though it may not have an I/O base address register 435 * though it may not have an I/O base address register
431 * active. 436 * active.
432 */ 437 */
433 static int __init has_implicit_io(struct pci_dev *pdev) 438 static int __init has_implicit_io(struct pci_dev *pdev)
434 { 439 {
435 int class = pdev->class >> 8; 440 int class = pdev->class >> 8;
436 441
437 if (class == PCI_CLASS_NOT_DEFINED || 442 if (class == PCI_CLASS_NOT_DEFINED ||
438 class == PCI_CLASS_NOT_DEFINED_VGA || 443 class == PCI_CLASS_NOT_DEFINED_VGA ||
439 class == PCI_CLASS_STORAGE_IDE || 444 class == PCI_CLASS_STORAGE_IDE ||
440 (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) 445 (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
441 return 1; 446 return 1;
442 447
443 return 0; 448 return 0;
444 } 449 }
445 450
446 static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm, 451 static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
447 struct pci_dev *pdev) 452 struct pci_dev *pdev)
448 { 453 {
449 u32 reg; 454 u32 reg;
450 u16 cmd; 455 u16 cmd;
451 int i, io_seen, mem_seen; 456 int i, io_seen, mem_seen;
452 457
453 io_seen = mem_seen = 0; 458 io_seen = mem_seen = 0;
454 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 459 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
455 struct resource *root, *res; 460 struct resource *root, *res;
456 unsigned long size, min, max, align; 461 unsigned long size, min, max, align;
457 462
458 res = &pdev->resource[i]; 463 res = &pdev->resource[i];
459 464
460 if (res->flags & IORESOURCE_IO) 465 if (res->flags & IORESOURCE_IO)
461 io_seen++; 466 io_seen++;
462 else if (res->flags & IORESOURCE_MEM) 467 else if (res->flags & IORESOURCE_MEM)
463 mem_seen++; 468 mem_seen++;
464 469
465 /* If it is already assigned or the resource does 470 /* If it is already assigned or the resource does
466 * not exist, there is nothing to do. 471 * not exist, there is nothing to do.
467 */ 472 */
468 if (res->parent != NULL || res->flags == 0UL) 473 if (res->parent != NULL || res->flags == 0UL)
469 continue; 474 continue;
470 475
471 /* Determine the root we allocate from. */ 476 /* Determine the root we allocate from. */
472 if (res->flags & IORESOURCE_IO) { 477 if (res->flags & IORESOURCE_IO) {
473 root = &pbm->io_space; 478 root = &pbm->io_space;
474 min = root->start + 0x400UL; 479 min = root->start + 0x400UL;
475 max = root->end; 480 max = root->end;
476 } else { 481 } else {
477 root = &pbm->mem_space; 482 root = &pbm->mem_space;
478 min = root->start; 483 min = root->start;
479 max = min + 0x80000000UL; 484 max = min + 0x80000000UL;
480 } 485 }
481 486
482 size = res->end - res->start; 487 size = res->end - res->start;
483 align = size + 1; 488 align = size + 1;
484 if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) { 489 if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
485 /* uh oh */ 490 /* uh oh */
486 prom_printf("PCI: Failed to allocate resource %d for %s\n", 491 prom_printf("PCI: Failed to allocate resource %d for %s\n",
487 i, pci_name(pdev)); 492 i, pci_name(pdev));
488 prom_halt(); 493 prom_halt();
489 } 494 }
490 495
491 /* Update PCI config space. */ 496 /* Update PCI config space. */
492 pbm->parent->base_address_update(pdev, i); 497 pbm->parent->base_address_update(pdev, i);
493 } 498 }
494 499
495 /* Special case, disable the ROM. Several devices 500 /* Special case, disable the ROM. Several devices
496 * act funny (ie. do not respond to memory space writes) 501 * act funny (ie. do not respond to memory space writes)
497 * when it is left enabled. A good example are Qlogic,ISP 502 * when it is left enabled. A good example are Qlogic,ISP
498 * adapters. 503 * adapters.
499 */ 504 */
500 pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg); 505 pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
501 reg &= ~PCI_ROM_ADDRESS_ENABLE; 506 reg &= ~PCI_ROM_ADDRESS_ENABLE;
502 pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg); 507 pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);
503 508
504 /* If we saw I/O or MEM resources, enable appropriate 509 /* If we saw I/O or MEM resources, enable appropriate
505 * bits in PCI command register. 510 * bits in PCI command register.
506 */ 511 */
507 if (io_seen || mem_seen) { 512 if (io_seen || mem_seen) {
508 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 513 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
509 if (io_seen || has_implicit_io(pdev)) 514 if (io_seen || has_implicit_io(pdev))
510 cmd |= PCI_COMMAND_IO; 515 cmd |= PCI_COMMAND_IO;
511 if (mem_seen) 516 if (mem_seen)
512 cmd |= PCI_COMMAND_MEMORY; 517 cmd |= PCI_COMMAND_MEMORY;
513 pci_write_config_word(pdev, PCI_COMMAND, cmd); 518 pci_write_config_word(pdev, PCI_COMMAND, cmd);
514 } 519 }
515 520
516 /* If this is a PCI bridge or an IDE controller, 521 /* If this is a PCI bridge or an IDE controller,
517 * enable bus mastering. In the former case also 522 * enable bus mastering. In the former case also
518 * set the cache line size correctly. 523 * set the cache line size correctly.
519 */ 524 */
520 if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) || 525 if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
521 (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) && 526 (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
522 ((pdev->class & 0x80) != 0))) { 527 ((pdev->class & 0x80) != 0))) {
523 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 528 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
524 cmd |= PCI_COMMAND_MASTER; 529 cmd |= PCI_COMMAND_MASTER;
525 pci_write_config_word(pdev, PCI_COMMAND, cmd); 530 pci_write_config_word(pdev, PCI_COMMAND, cmd);
526 531
527 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) 532 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
528 pci_write_config_byte(pdev, 533 pci_write_config_byte(pdev,
529 PCI_CACHE_LINE_SIZE, 534 PCI_CACHE_LINE_SIZE,
530 (64 / sizeof(u32))); 535 (64 / sizeof(u32)));
531 } 536 }
532 } 537 }
533 538
534 void __init pci_assign_unassigned(struct pci_pbm_info *pbm, 539 void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
535 struct pci_bus *pbus) 540 struct pci_bus *pbus)
536 { 541 {
537 struct pci_dev *dev; 542 struct pci_dev *dev;
538 struct pci_bus *bus; 543 struct pci_bus *bus;
539 544
540 list_for_each_entry(dev, &pbus->devices, bus_list) 545 list_for_each_entry(dev, &pbus->devices, bus_list)
541 pdev_assign_unassigned(pbm, dev); 546 pdev_assign_unassigned(pbm, dev);
542 547
543 list_for_each_entry(bus, &pbus->children, node) 548 list_for_each_entry(bus, &pbus->children, node)
544 pci_assign_unassigned(pbm, bus); 549 pci_assign_unassigned(pbm, bus);
545 } 550 }
546 551
547 static inline unsigned int pci_slot_swivel(struct pci_pbm_info *pbm, 552 static inline unsigned int pci_slot_swivel(struct pci_pbm_info *pbm,
548 struct pci_dev *toplevel_pdev, 553 struct pci_dev *toplevel_pdev,
549 struct pci_dev *pdev, 554 struct pci_dev *pdev,
550 unsigned int interrupt) 555 unsigned int interrupt)
551 { 556 {
552 unsigned int ret; 557 unsigned int ret;
553 558
554 if (unlikely(interrupt < 1 || interrupt > 4)) { 559 if (unlikely(interrupt < 1 || interrupt > 4)) {
555 printk("%s: Device %s interrupt value of %u is strange.\n", 560 printk("%s: Device %s interrupt value of %u is strange.\n",
556 pbm->name, pci_name(pdev), interrupt); 561 pbm->name, pci_name(pdev), interrupt);
557 return interrupt; 562 return interrupt;
558 } 563 }
559 564
560 ret = ((interrupt - 1 + (PCI_SLOT(pdev->devfn) & 3)) & 3) + 1; 565 ret = ((interrupt - 1 + (PCI_SLOT(pdev->devfn) & 3)) & 3) + 1;
561 566
562 if (pci_irq_verbose) 567 if (pci_irq_verbose)
563 printk("%s: %s IRQ Swivel %s [%x:%x] -> [%x]\n", 568 printk("%s: %s IRQ Swivel %s [%x:%x] -> [%x]\n",
564 pbm->name, pci_name(toplevel_pdev), pci_name(pdev), 569 pbm->name, pci_name(toplevel_pdev), pci_name(pdev),
565 interrupt, PCI_SLOT(pdev->devfn), ret); 570 interrupt, PCI_SLOT(pdev->devfn), ret);
566 571
567 return ret; 572 return ret;
568 } 573 }
569 574
570 static inline unsigned int pci_apply_intmap(struct pci_pbm_info *pbm, 575 static inline unsigned int pci_apply_intmap(struct pci_pbm_info *pbm,
571 struct pci_dev *toplevel_pdev, 576 struct pci_dev *toplevel_pdev,
572 struct pci_dev *pbus, 577 struct pci_dev *pbus,
573 struct pci_dev *pdev, 578 struct pci_dev *pdev,
574 unsigned int interrupt, 579 unsigned int interrupt,
575 unsigned int *cnode) 580 struct device_node **cnode)
576 { 581 {
577 struct linux_prom_pci_intmap imap[PROM_PCIIMAP_MAX]; 582 struct linux_prom_pci_intmap *imap;
578 struct linux_prom_pci_intmask imask; 583 struct linux_prom_pci_intmask *imask;
579 struct pcidev_cookie *pbus_pcp = pbus->sysdata; 584 struct pcidev_cookie *pbus_pcp = pbus->sysdata;
580 struct pcidev_cookie *pdev_pcp = pdev->sysdata; 585 struct pcidev_cookie *pdev_pcp = pdev->sysdata;
581 struct linux_prom_pci_registers *pregs = pdev_pcp->prom_regs; 586 struct linux_prom_pci_registers *pregs = pdev_pcp->prom_regs;
587 struct property *prop;
582 int plen, num_imap, i; 588 int plen, num_imap, i;
583 unsigned int hi, mid, lo, irq, orig_interrupt; 589 unsigned int hi, mid, lo, irq, orig_interrupt;
584 590
585 *cnode = pbus_pcp->prom_node; 591 *cnode = pbus_pcp->prom_node;
586 592
587 plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map", 593 prop = of_find_property(pbus_pcp->prom_node, "interrupt-map", &plen);
588 (char *) &imap[0], sizeof(imap)); 594 if (!prop ||
589 if (plen <= 0 ||
590 (plen % sizeof(struct linux_prom_pci_intmap)) != 0) { 595 (plen % sizeof(struct linux_prom_pci_intmap)) != 0) {
591 printk("%s: Device %s interrupt-map has bad len %d\n", 596 printk("%s: Device %s interrupt-map has bad len %d\n",
592 pbm->name, pci_name(pbus), plen); 597 pbm->name, pci_name(pbus), plen);
593 goto no_intmap; 598 goto no_intmap;
594 } 599 }
600 imap = prop->value;
595 num_imap = plen / sizeof(struct linux_prom_pci_intmap); 601 num_imap = plen / sizeof(struct linux_prom_pci_intmap);
596 602
597 plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map-mask", 603 prop = of_find_property(pbus_pcp->prom_node, "interrupt-map-mask", &plen);
598 (char *) &imask, sizeof(imask)); 604 if (!prop ||
599 if (plen <= 0 ||
600 (plen % sizeof(struct linux_prom_pci_intmask)) != 0) { 605 (plen % sizeof(struct linux_prom_pci_intmask)) != 0) {
601 printk("%s: Device %s interrupt-map-mask has bad len %d\n", 606 printk("%s: Device %s interrupt-map-mask has bad len %d\n",
602 pbm->name, pci_name(pbus), plen); 607 pbm->name, pci_name(pbus), plen);
603 goto no_intmap; 608 goto no_intmap;
604 } 609 }
610 imask = prop->value;
605 611
606 orig_interrupt = interrupt; 612 orig_interrupt = interrupt;
607 613
608 hi = pregs->phys_hi & imask.phys_hi; 614 hi = pregs->phys_hi & imask->phys_hi;
609 mid = pregs->phys_mid & imask.phys_mid; 615 mid = pregs->phys_mid & imask->phys_mid;
610 lo = pregs->phys_lo & imask.phys_lo; 616 lo = pregs->phys_lo & imask->phys_lo;
611 irq = interrupt & imask.interrupt; 617 irq = interrupt & imask->interrupt;
612 618
613 for (i = 0; i < num_imap; i++) { 619 for (i = 0; i < num_imap; i++) {
614 if (imap[i].phys_hi == hi && 620 if (imap[i].phys_hi == hi &&
615 imap[i].phys_mid == mid && 621 imap[i].phys_mid == mid &&
616 imap[i].phys_lo == lo && 622 imap[i].phys_lo == lo &&
617 imap[i].interrupt == irq) { 623 imap[i].interrupt == irq) {
618 *cnode = imap[i].cnode; 624 *cnode = of_find_node_by_phandle(imap[i].cnode);
619 interrupt = imap[i].cinterrupt; 625 interrupt = imap[i].cinterrupt;
620 } 626 }
621 } 627 }
622 628
623 if (pci_irq_verbose) 629 if (pci_irq_verbose)
624 printk("%s: %s MAP BUS %s DEV %s [%x] -> [%x]\n", 630 printk("%s: %s MAP BUS %s DEV %s [%x] -> [%x]\n",
625 pbm->name, pci_name(toplevel_pdev), 631 pbm->name, pci_name(toplevel_pdev),
626 pci_name(pbus), pci_name(pdev), 632 pci_name(pbus), pci_name(pdev),
627 orig_interrupt, interrupt); 633 orig_interrupt, interrupt);
628 634
629 no_intmap: 635 no_intmap:
630 return interrupt; 636 return interrupt;
631 } 637 }
632 638
633 /* For each PCI bus on the way to the root: 639 /* For each PCI bus on the way to the root:
634 * 1) If it has an interrupt-map property, apply it. 640 * 1) If it has an interrupt-map property, apply it.
635 * 2) Else, swivel the interrupt number based upon the PCI device number. 641 * 2) Else, swivel the interrupt number based upon the PCI device number.
636 * 642 *
637 * Return the "IRQ controller" node. If this is the PBM's device node, 643 * Return the "IRQ controller" node. If this is the PBM's device node,
638 * all interrupt translations are complete, else we should use that node's 644 * all interrupt translations are complete, else we should use that node's
639 * "reg" property to apply the PBM's "interrupt-{map,mask}" to the interrupt. 645 * "reg" property to apply the PBM's "interrupt-{map,mask}" to the interrupt.
640 */ 646 */
641 static unsigned int __init pci_intmap_match_to_root(struct pci_pbm_info *pbm, 647 static struct device_node * __init
642 struct pci_dev *pdev, 648 pci_intmap_match_to_root(struct pci_pbm_info *pbm,
643 unsigned int *interrupt) 649 struct pci_dev *pdev,
650 unsigned int *interrupt)
644 { 651 {
645 struct pci_dev *toplevel_pdev = pdev; 652 struct pci_dev *toplevel_pdev = pdev;
646 struct pcidev_cookie *toplevel_pcp = toplevel_pdev->sysdata; 653 struct pcidev_cookie *toplevel_pcp = toplevel_pdev->sysdata;
647 unsigned int cnode = toplevel_pcp->prom_node; 654 struct device_node *cnode = toplevel_pcp->prom_node;
648 655
649 while (pdev->bus->number != pbm->pci_first_busno) { 656 while (pdev->bus->number != pbm->pci_first_busno) {
650 struct pci_dev *pbus = pdev->bus->self; 657 struct pci_dev *pbus = pdev->bus->self;
651 struct pcidev_cookie *pcp = pbus->sysdata; 658 struct pcidev_cookie *pcp = pbus->sysdata;
652 int plen; 659 struct property *prop;
653 660
654 plen = prom_getproplen(pcp->prom_node, "interrupt-map"); 661 prop = of_find_property(pcp->prom_node, "interrupt-map", NULL);
655 if (plen <= 0) { 662 if (!prop) {
656 *interrupt = pci_slot_swivel(pbm, toplevel_pdev, 663 *interrupt = pci_slot_swivel(pbm, toplevel_pdev,
657 pdev, *interrupt); 664 pdev, *interrupt);
658 cnode = pcp->prom_node; 665 cnode = pcp->prom_node;
659 } else { 666 } else {
660 *interrupt = pci_apply_intmap(pbm, toplevel_pdev, 667 *interrupt = pci_apply_intmap(pbm, toplevel_pdev,
661 pbus, pdev, 668 pbus, pdev,
662 *interrupt, &cnode); 669 *interrupt, &cnode);
663 670
664 while (pcp->prom_node != cnode && 671 while (pcp->prom_node != cnode &&
665 pbus->bus->number != pbm->pci_first_busno) { 672 pbus->bus->number != pbm->pci_first_busno) {
666 pbus = pbus->bus->self; 673 pbus = pbus->bus->self;
667 pcp = pbus->sysdata; 674 pcp = pbus->sysdata;
668 } 675 }
669 } 676 }
670 pdev = pbus; 677 pdev = pbus;
671 678
672 if (cnode == pbm->prom_node->node) 679 if (cnode == pbm->prom_node)
673 break; 680 break;
674 } 681 }
675 682
676 return cnode; 683 return cnode;
677 } 684 }
678 685
679 static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) 686 static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
680 { 687 {
681 struct pcidev_cookie *dev_pcp = pdev->sysdata; 688 struct pcidev_cookie *dev_pcp = pdev->sysdata;
682 struct pci_pbm_info *pbm = dev_pcp->pbm; 689 struct pci_pbm_info *pbm = dev_pcp->pbm;
683 struct linux_prom_pci_registers reg[PROMREG_MAX]; 690 struct linux_prom_pci_registers *reg;
691 struct device_node *cnode;
692 struct property *prop;
684 unsigned int hi, mid, lo, irq; 693 unsigned int hi, mid, lo, irq;
685 int i, cnode, plen; 694 int i, plen;
686 695
687 cnode = pci_intmap_match_to_root(pbm, pdev, interrupt); 696 cnode = pci_intmap_match_to_root(pbm, pdev, interrupt);
688 if (cnode == pbm->prom_node->node) 697 if (cnode == pbm->prom_node)
689 goto success; 698 goto success;
690 699
691 plen = prom_getproperty(cnode, "reg", (char *) reg, sizeof(reg)); 700 prop = of_find_property(cnode, "reg", &plen);
692 if (plen <= 0 || 701 if (!prop ||
693 (plen % sizeof(struct linux_prom_pci_registers)) != 0) { 702 (plen % sizeof(struct linux_prom_pci_registers)) != 0) {
694 printk("%s: OBP node %x reg property has bad len %d\n", 703 printk("%s: OBP node %s reg property has bad len %d\n",
695 pbm->name, cnode, plen); 704 pbm->name, cnode->full_name, plen);
696 goto fail; 705 goto fail;
697 } 706 }
707 reg = prop->value;
698 708
699 hi = reg[0].phys_hi & pbm->pbm_intmask->phys_hi; 709 hi = reg[0].phys_hi & pbm->pbm_intmask->phys_hi;
700 mid = reg[0].phys_mid & pbm->pbm_intmask->phys_mid; 710 mid = reg[0].phys_mid & pbm->pbm_intmask->phys_mid;
701 lo = reg[0].phys_lo & pbm->pbm_intmask->phys_lo; 711 lo = reg[0].phys_lo & pbm->pbm_intmask->phys_lo;
702 irq = *interrupt & pbm->pbm_intmask->interrupt; 712 irq = *interrupt & pbm->pbm_intmask->interrupt;
703 713
704 for (i = 0; i < pbm->num_pbm_intmap; i++) { 714 for (i = 0; i < pbm->num_pbm_intmap; i++) {
705 struct linux_prom_pci_intmap *intmap; 715 struct linux_prom_pci_intmap *intmap;
706 716
707 intmap = &pbm->pbm_intmap[i]; 717 intmap = &pbm->pbm_intmap[i];
708 718
709 if (intmap->phys_hi == hi && 719 if (intmap->phys_hi == hi &&
710 intmap->phys_mid == mid && 720 intmap->phys_mid == mid &&
711 intmap->phys_lo == lo && 721 intmap->phys_lo == lo &&
712 intmap->interrupt == irq) { 722 intmap->interrupt == irq) {
713 *interrupt = intmap->cinterrupt; 723 *interrupt = intmap->cinterrupt;
714 goto success; 724 goto success;
715 } 725 }
716 } 726 }
717 727
718 fail: 728 fail:
719 return 0; 729 return 0;
720 730
721 success: 731 success:
722 if (pci_irq_verbose) 732 if (pci_irq_verbose)
723 printk("%s: Routing bus[%2x] slot[%2x] to INO[%02x]\n", 733 printk("%s: Routing bus[%2x] slot[%2x] to INO[%02x]\n",
724 pbm->name, 734 pbm->name,
725 pdev->bus->number, PCI_SLOT(pdev->devfn), 735 pdev->bus->number, PCI_SLOT(pdev->devfn),
726 *interrupt); 736 *interrupt);
727 return 1; 737 return 1;
728 } 738 }
729 739
730 static void __init pdev_fixup_irq(struct pci_dev *pdev) 740 static void __init pdev_fixup_irq(struct pci_dev *pdev)
731 { 741 {
732 struct pcidev_cookie *pcp = pdev->sysdata; 742 struct pcidev_cookie *pcp = pdev->sysdata;
733 struct pci_pbm_info *pbm = pcp->pbm; 743 struct pci_pbm_info *pbm = pcp->pbm;
734 struct pci_controller_info *p = pbm->parent; 744 struct pci_controller_info *p = pbm->parent;
735 unsigned int portid = pbm->portid; 745 unsigned int portid = pbm->portid;
736 unsigned int prom_irq; 746 unsigned int prom_irq;
737 int prom_node = pcp->prom_node; 747 struct device_node *dp = pcp->prom_node;
738 int err; 748 struct property *prop;
739 749
740 /* If this is an empty EBUS device, sometimes OBP fails to 750 /* If this is an empty EBUS device, sometimes OBP fails to
741 * give it a valid fully specified interrupts property. 751 * give it a valid fully specified interrupts property.
742 * The EBUS hooked up to SunHME on PCI I/O boards of 752 * The EBUS hooked up to SunHME on PCI I/O boards of
743 * Ex000 systems is one such case. 753 * Ex000 systems is one such case.
744 * 754 *
745 * The interrupt is not important so just ignore it. 755 * The interrupt is not important so just ignore it.
746 */ 756 */
747 if (pdev->vendor == PCI_VENDOR_ID_SUN && 757 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
748 pdev->device == PCI_DEVICE_ID_SUN_EBUS && 758 pdev->device == PCI_DEVICE_ID_SUN_EBUS &&
749 !prom_getchild(prom_node)) { 759 !dp->child) {
750 pdev->irq = 0; 760 pdev->irq = 0;
751 return; 761 return;
752 } 762 }
753 763
754 err = prom_getproperty(prom_node, "interrupts", 764 prop = of_find_property(dp, "interrupts", NULL);
755 (char *)&prom_irq, sizeof(prom_irq)); 765 if (!prop) {
756 if (err == 0 || err == -1) {
757 pdev->irq = 0; 766 pdev->irq = 0;
758 return; 767 return;
759 } 768 }
769 prom_irq = *(unsigned int *) prop->value;
760 770
761 if (tlb_type != hypervisor) { 771 if (tlb_type != hypervisor) {
762 /* Fully specified already? */ 772 /* Fully specified already? */
763 if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { 773 if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
764 pdev->irq = p->irq_build(pbm, pdev, prom_irq); 774 pdev->irq = p->irq_build(pbm, pdev, prom_irq);
765 goto have_irq; 775 goto have_irq;
766 } 776 }
767 777
768 /* An onboard device? (bit 5 set) */ 778 /* An onboard device? (bit 5 set) */
769 if ((prom_irq & PCI_IRQ_INO) & 0x20) { 779 if ((prom_irq & PCI_IRQ_INO) & 0x20) {
770 pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); 780 pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
771 goto have_irq; 781 goto have_irq;
772 } 782 }
773 } 783 }
774 784
775 /* Can we find a matching entry in the interrupt-map? */ 785 /* Can we find a matching entry in the interrupt-map? */
776 if (pci_intmap_match(pdev, &prom_irq)) { 786 if (pci_intmap_match(pdev, &prom_irq)) {
777 pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq); 787 pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
778 goto have_irq; 788 goto have_irq;
779 } 789 }
780 790
781 /* Ok, we have to do it the hard way. */ 791 /* Ok, we have to do it the hard way. */
782 { 792 {
783 unsigned int bus, slot, line; 793 unsigned int bus, slot, line;
784 794
785 bus = (pbm == &pbm->parent->pbm_B) ? (1 << 4) : 0; 795 bus = (pbm == &pbm->parent->pbm_B) ? (1 << 4) : 0;
786 796
787 /* If we have a legal interrupt property, use it as 797 /* If we have a legal interrupt property, use it as
788 * the IRQ line. 798 * the IRQ line.
789 */ 799 */
790 if (prom_irq > 0 && prom_irq < 5) { 800 if (prom_irq > 0 && prom_irq < 5) {
791 line = ((prom_irq - 1) & 3); 801 line = ((prom_irq - 1) & 3);
792 } else { 802 } else {
793 u8 pci_irq_line; 803 u8 pci_irq_line;
794 804
795 /* Else just directly consult PCI config space. */ 805 /* Else just directly consult PCI config space. */
796 pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pci_irq_line); 806 pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pci_irq_line);
797 line = ((pci_irq_line - 1) & 3); 807 line = ((pci_irq_line - 1) & 3);
798 } 808 }
799 809
800 /* Now figure out the slot. 810 /* Now figure out the slot.
801 * 811 *
802 * Basically, device number zero on the top-level bus is 812 * Basically, device number zero on the top-level bus is
803 * always the PCI host controller. Slot 0 is then device 1. 813 * always the PCI host controller. Slot 0 is then device 1.
804 * PBM A supports two external slots (0 and 1), and PBM B 814 * PBM A supports two external slots (0 and 1), and PBM B
805 * supports 4 external slots (0, 1, 2, and 3). On-board PCI 815 * supports 4 external slots (0, 1, 2, and 3). On-board PCI
806 * devices are wired to device numbers outside of these 816 * devices are wired to device numbers outside of these
807 * ranges. -DaveM 817 * ranges. -DaveM
808 */ 818 */
809 if (pdev->bus->number == pbm->pci_first_busno) { 819 if (pdev->bus->number == pbm->pci_first_busno) {
810 slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot; 820 slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot;
811 } else { 821 } else {
812 struct pci_dev *bus_dev; 822 struct pci_dev *bus_dev;
813 823
814 /* Underneath a bridge, use slot number of parent 824 /* Underneath a bridge, use slot number of parent
815 * bridge which is closest to the PBM. 825 * bridge which is closest to the PBM.
816 */ 826 */
817 bus_dev = pdev->bus->self; 827 bus_dev = pdev->bus->self;
818 while (bus_dev->bus && 828 while (bus_dev->bus &&
819 bus_dev->bus->number != pbm->pci_first_busno) 829 bus_dev->bus->number != pbm->pci_first_busno)
820 bus_dev = bus_dev->bus->self; 830 bus_dev = bus_dev->bus->self;
821 831
822 slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot; 832 slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot;
823 } 833 }
824 slot = slot << 2; 834 slot = slot << 2;
825 835
826 pdev->irq = p->irq_build(pbm, pdev, 836 pdev->irq = p->irq_build(pbm, pdev,
827 ((portid << 6) & PCI_IRQ_IGN) | 837 ((portid << 6) & PCI_IRQ_IGN) |
828 (bus | slot | line)); 838 (bus | slot | line));
829 } 839 }
830 840
831 have_irq: 841 have_irq:
832 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 842 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
833 pdev->irq & PCI_IRQ_INO); 843 pdev->irq & PCI_IRQ_INO);
834 } 844 }
835 845
836 void __init pci_fixup_irq(struct pci_pbm_info *pbm, 846 void __init pci_fixup_irq(struct pci_pbm_info *pbm,
837 struct pci_bus *pbus) 847 struct pci_bus *pbus)
838 { 848 {
839 struct pci_dev *dev; 849 struct pci_dev *dev;
840 struct pci_bus *bus; 850 struct pci_bus *bus;
841 851
842 list_for_each_entry(dev, &pbus->devices, bus_list) 852 list_for_each_entry(dev, &pbus->devices, bus_list)
843 pdev_fixup_irq(dev); 853 pdev_fixup_irq(dev);
844 854
845 list_for_each_entry(bus, &pbus->children, node) 855 list_for_each_entry(bus, &pbus->children, node)
846 pci_fixup_irq(pbm, bus); 856 pci_fixup_irq(pbm, bus);
847 } 857 }
848 858
849 static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz) 859 static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
850 { 860 {
851 u16 cmd; 861 u16 cmd;
852 u8 hdr_type, min_gnt, ltimer; 862 u8 hdr_type, min_gnt, ltimer;
853 863
854 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 864 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
855 cmd |= PCI_COMMAND_MASTER; 865 cmd |= PCI_COMMAND_MASTER;
856 pci_write_config_word(pdev, PCI_COMMAND, cmd); 866 pci_write_config_word(pdev, PCI_COMMAND, cmd);
857 867
858 /* Read it back, if the mastering bit did not 868 /* Read it back, if the mastering bit did not
859 * get set, the device does not support bus 869 * get set, the device does not support bus
860 * mastering so we have nothing to do here. 870 * mastering so we have nothing to do here.
861 */ 871 */
862 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 872 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
863 if ((cmd & PCI_COMMAND_MASTER) == 0) 873 if ((cmd & PCI_COMMAND_MASTER) == 0)
864 return; 874 return;
865 875
866 /* Set correct cache line size, 64-byte on all 876 /* Set correct cache line size, 64-byte on all
867 * Sparc64 PCI systems. Note that the value is 877 * Sparc64 PCI systems. Note that the value is
868 * measured in 32-bit words. 878 * measured in 32-bit words.
869 */ 879 */
870 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 880 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
871 64 / sizeof(u32)); 881 64 / sizeof(u32));
872 882
873 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type); 883 pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
874 hdr_type &= ~0x80; 884 hdr_type &= ~0x80;
875 if (hdr_type != PCI_HEADER_TYPE_NORMAL) 885 if (hdr_type != PCI_HEADER_TYPE_NORMAL)
876 return; 886 return;
877 887
878 /* If the latency timer is already programmed with a non-zero 888 /* If the latency timer is already programmed with a non-zero
879 * value, assume whoever set it (OBP or whoever) knows what 889 * value, assume whoever set it (OBP or whoever) knows what
880 * they are doing. 890 * they are doing.
881 */ 891 */
882 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer); 892 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
883 if (ltimer != 0) 893 if (ltimer != 0)
884 return; 894 return;
885 895
886 /* XXX Since I'm tipping off the min grant value to 896 /* XXX Since I'm tipping off the min grant value to
887 * XXX choose a suitable latency timer value, I also 897 * XXX choose a suitable latency timer value, I also
888 * XXX considered making use of the max latency value 898 * XXX considered making use of the max latency value
889 * XXX as well. Unfortunately I've seen too many bogusly 899 * XXX as well. Unfortunately I've seen too many bogusly
890 * XXX low settings for it to the point where it lacks 900 * XXX low settings for it to the point where it lacks
891 * XXX any usefulness. In one case, an ethernet card 901 * XXX any usefulness. In one case, an ethernet card
892 * XXX claimed a min grant of 10 and a max latency of 5. 902 * XXX claimed a min grant of 10 and a max latency of 5.
893 * XXX Now, if I had two such cards on the same bus I 903 * XXX Now, if I had two such cards on the same bus I
894 * XXX could not set the desired burst period (calculated 904 * XXX could not set the desired burst period (calculated
895 * XXX from min grant) without violating the max latency 905 * XXX from min grant) without violating the max latency
896 * XXX bound. Duh... 906 * XXX bound. Duh...
897 * XXX 907 * XXX
898 * XXX I blame dumb PC bios implementors for stuff like 908 * XXX I blame dumb PC bios implementors for stuff like
899 * XXX this, most of them don't even try to do something 909 * XXX this, most of them don't even try to do something
900 * XXX sensible with latency timer values and just set some 910 * XXX sensible with latency timer values and just set some
901 * XXX default value (usually 32) into every device. 911 * XXX default value (usually 32) into every device.
902 */ 912 */
903 913
904 pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt); 914 pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);
905 915
906 if (min_gnt == 0) { 916 if (min_gnt == 0) {
907 /* If no min_gnt setting then use a default 917 /* If no min_gnt setting then use a default
908 * value. 918 * value.
909 */ 919 */
910 if (is_66mhz) 920 if (is_66mhz)
911 ltimer = 16; 921 ltimer = 16;
912 else 922 else
913 ltimer = 32; 923 ltimer = 32;
914 } else { 924 } else {
915 int shift_factor; 925 int shift_factor;
916 926
917 if (is_66mhz) 927 if (is_66mhz)
918 shift_factor = 2; 928 shift_factor = 2;
919 else 929 else
920 shift_factor = 3; 930 shift_factor = 3;
921 931
922 /* Use a default value when the min_gnt value 932 /* Use a default value when the min_gnt value
923 * is erroneously high. 933 * is erroneously high.
924 */ 934 */
925 if (((unsigned int) min_gnt << shift_factor) > 512 || 935 if (((unsigned int) min_gnt << shift_factor) > 512 ||
926 ((min_gnt << shift_factor) & 0xff) == 0) { 936 ((min_gnt << shift_factor) & 0xff) == 0) {
927 ltimer = 8 << shift_factor; 937 ltimer = 8 << shift_factor;
928 } else { 938 } else {
929 ltimer = min_gnt << shift_factor; 939 ltimer = min_gnt << shift_factor;
930 } 940 }
931 } 941 }
932 942
933 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer); 943 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
934 } 944 }
935 945
936 void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, 946 void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
937 struct pci_bus *pbus) 947 struct pci_bus *pbus)
938 { 948 {
939 struct pci_dev *pdev; 949 struct pci_dev *pdev;
940 int all_are_66mhz; 950 int all_are_66mhz;
941 u16 status; 951 u16 status;
942 952
943 if (pbm->is_66mhz_capable == 0) { 953 if (pbm->is_66mhz_capable == 0) {
944 all_are_66mhz = 0; 954 all_are_66mhz = 0;
945 goto out; 955 goto out;
946 } 956 }
947 957
948 all_are_66mhz = 1; 958 all_are_66mhz = 1;
949 list_for_each_entry(pdev, &pbus->devices, bus_list) { 959 list_for_each_entry(pdev, &pbus->devices, bus_list) {
950 pci_read_config_word(pdev, PCI_STATUS, &status); 960 pci_read_config_word(pdev, PCI_STATUS, &status);
951 if (!(status & PCI_STATUS_66MHZ)) { 961 if (!(status & PCI_STATUS_66MHZ)) {
952 all_are_66mhz = 0; 962 all_are_66mhz = 0;
953 break; 963 break;
954 } 964 }
955 } 965 }
956 out: 966 out:
957 pbm->all_devs_66mhz = all_are_66mhz; 967 pbm->all_devs_66mhz = all_are_66mhz;
958 968
959 printk("PCI%d(PBM%c): Bus running at %dMHz\n", 969 printk("PCI%d(PBM%c): Bus running at %dMHz\n",
960 pbm->parent->index, 970 pbm->parent->index,
961 (pbm == &pbm->parent->pbm_A) ? 'A' : 'B', 971 (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
962 (all_are_66mhz ? 66 : 33)); 972 (all_are_66mhz ? 66 : 33));
963 } 973 }
964 974
965 void pci_setup_busmastering(struct pci_pbm_info *pbm, 975 void pci_setup_busmastering(struct pci_pbm_info *pbm,
966 struct pci_bus *pbus) 976 struct pci_bus *pbus)
967 { 977 {
968 struct pci_dev *dev; 978 struct pci_dev *dev;
969 struct pci_bus *bus; 979 struct pci_bus *bus;
970 int is_66mhz; 980 int is_66mhz;
971 981
972 is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz; 982 is_66mhz = pbm->is_66mhz_capable && pbm->all_devs_66mhz;
973 983
974 list_for_each_entry(dev, &pbus->devices, bus_list) 984 list_for_each_entry(dev, &pbus->devices, bus_list)
975 pdev_setup_busmastering(dev, is_66mhz); 985 pdev_setup_busmastering(dev, is_66mhz);
976 986
977 list_for_each_entry(bus, &pbus->children, node) 987 list_for_each_entry(bus, &pbus->children, node)
978 pci_setup_busmastering(pbm, bus); 988 pci_setup_busmastering(pbm, bus);
979 } 989 }
980 990
981 void pci_register_legacy_regions(struct resource *io_res, 991 void pci_register_legacy_regions(struct resource *io_res,
982 struct resource *mem_res) 992 struct resource *mem_res)
983 { 993 {
984 struct resource *p; 994 struct resource *p;
985 995
986 /* VGA Video RAM. */ 996 /* VGA Video RAM. */
987 p = kzalloc(sizeof(*p), GFP_KERNEL); 997 p = kzalloc(sizeof(*p), GFP_KERNEL);
988 if (!p) 998 if (!p)
989 return; 999 return;
990 1000
991 p->name = "Video RAM area"; 1001 p->name = "Video RAM area";
992 p->start = mem_res->start + 0xa0000UL; 1002 p->start = mem_res->start + 0xa0000UL;
993 p->end = p->start + 0x1ffffUL; 1003 p->end = p->start + 0x1ffffUL;
994 p->flags = IORESOURCE_BUSY; 1004 p->flags = IORESOURCE_BUSY;
995 request_resource(mem_res, p); 1005 request_resource(mem_res, p);
996 1006
997 p = kzalloc(sizeof(*p), GFP_KERNEL); 1007 p = kzalloc(sizeof(*p), GFP_KERNEL);
998 if (!p) 1008 if (!p)
999 return; 1009 return;
1000 1010
1001 p->name = "System ROM"; 1011 p->name = "System ROM";
1002 p->start = mem_res->start + 0xf0000UL; 1012 p->start = mem_res->start + 0xf0000UL;
1003 p->end = p->start + 0xffffUL; 1013 p->end = p->start + 0xffffUL;
1004 p->flags = IORESOURCE_BUSY; 1014 p->flags = IORESOURCE_BUSY;
1005 request_resource(mem_res, p); 1015 request_resource(mem_res, p);
1006 1016
1007 p = kzalloc(sizeof(*p), GFP_KERNEL); 1017 p = kzalloc(sizeof(*p), GFP_KERNEL);
1008 if (!p) 1018 if (!p)
1009 return; 1019 return;
1010 1020
1011 p->name = "Video ROM"; 1021 p->name = "Video ROM";
1012 p->start = mem_res->start + 0xc0000UL; 1022 p->start = mem_res->start + 0xc0000UL;
1013 p->end = p->start + 0x7fffUL; 1023 p->end = p->start + 0x7fffUL;
1014 p->flags = IORESOURCE_BUSY; 1024 p->flags = IORESOURCE_BUSY;
1015 request_resource(mem_res, p); 1025 request_resource(mem_res, p);
1016 } 1026 }
1017 1027
1018 /* Generic helper routines for PCI error reporting. */ 1028 /* Generic helper routines for PCI error reporting. */
1019 void pci_scan_for_target_abort(struct pci_controller_info *p, 1029 void pci_scan_for_target_abort(struct pci_controller_info *p,
1020 struct pci_pbm_info *pbm, 1030 struct pci_pbm_info *pbm,
1021 struct pci_bus *pbus) 1031 struct pci_bus *pbus)
1022 { 1032 {
1023 struct pci_dev *pdev; 1033 struct pci_dev *pdev;
1024 struct pci_bus *bus; 1034 struct pci_bus *bus;
1025 1035
1026 list_for_each_entry(pdev, &pbus->devices, bus_list) { 1036 list_for_each_entry(pdev, &pbus->devices, bus_list) {
1027 u16 status, error_bits; 1037 u16 status, error_bits;
1028 1038
1029 pci_read_config_word(pdev, PCI_STATUS, &status); 1039 pci_read_config_word(pdev, PCI_STATUS, &status);
1030 error_bits = 1040 error_bits =
1031 (status & (PCI_STATUS_SIG_TARGET_ABORT | 1041 (status & (PCI_STATUS_SIG_TARGET_ABORT |
1032 PCI_STATUS_REC_TARGET_ABORT)); 1042 PCI_STATUS_REC_TARGET_ABORT));
1033 if (error_bits) { 1043 if (error_bits) {
1034 pci_write_config_word(pdev, PCI_STATUS, error_bits); 1044 pci_write_config_word(pdev, PCI_STATUS, error_bits);
1035 printk("PCI%d(PBM%c): Device [%s] saw Target Abort [%016x]\n", 1045 printk("PCI%d(PBM%c): Device [%s] saw Target Abort [%016x]\n",
1036 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'), 1046 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
1037 pci_name(pdev), status); 1047 pci_name(pdev), status);
1038 } 1048 }
1039 } 1049 }
1040 1050
1041 list_for_each_entry(bus, &pbus->children, node) 1051 list_for_each_entry(bus, &pbus->children, node)
1042 pci_scan_for_target_abort(p, pbm, bus); 1052 pci_scan_for_target_abort(p, pbm, bus);
1043 } 1053 }
1044 1054
1045 void pci_scan_for_master_abort(struct pci_controller_info *p, 1055 void pci_scan_for_master_abort(struct pci_controller_info *p,
1046 struct pci_pbm_info *pbm, 1056 struct pci_pbm_info *pbm,
1047 struct pci_bus *pbus) 1057 struct pci_bus *pbus)
1048 { 1058 {
1049 struct pci_dev *pdev; 1059 struct pci_dev *pdev;
1050 struct pci_bus *bus; 1060 struct pci_bus *bus;
1051 1061
1052 list_for_each_entry(pdev, &pbus->devices, bus_list) { 1062 list_for_each_entry(pdev, &pbus->devices, bus_list) {
1053 u16 status, error_bits; 1063 u16 status, error_bits;
1054 1064
1055 pci_read_config_word(pdev, PCI_STATUS, &status); 1065 pci_read_config_word(pdev, PCI_STATUS, &status);
1056 error_bits = 1066 error_bits =
1057 (status & (PCI_STATUS_REC_MASTER_ABORT)); 1067 (status & (PCI_STATUS_REC_MASTER_ABORT));
1058 if (error_bits) { 1068 if (error_bits) {
1059 pci_write_config_word(pdev, PCI_STATUS, error_bits); 1069 pci_write_config_word(pdev, PCI_STATUS, error_bits);
1060 printk("PCI%d(PBM%c): Device [%s] received Master Abort [%016x]\n", 1070 printk("PCI%d(PBM%c): Device [%s] received Master Abort [%016x]\n",
1061 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'), 1071 p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
1062 pci_name(pdev), status); 1072 pci_name(pdev), status);
1063 } 1073 }
1064 } 1074 }
1065 1075
1066 list_for_each_entry(bus, &pbus->children, node) 1076 list_for_each_entry(bus, &pbus->children, node)
1067 pci_scan_for_master_abort(p, pbm, bus); 1077 pci_scan_for_master_abort(p, pbm, bus);
1068 } 1078 }
1069 1079
1070 void pci_scan_for_parity_error(struct pci_controller_info *p, 1080 void pci_scan_for_parity_error(struct pci_controller_info *p,
1071 struct pci_pbm_info *pbm, 1081 struct pci_pbm_info *pbm,
1072 struct pci_bus *pbus) 1082 struct pci_bus *pbus)
1073 { 1083 {
1074 struct pci_dev *pdev; 1084 struct pci_dev *pdev;
1075 struct pci_bus *bus; 1085 struct pci_bus *bus;
1076 1086
1077 list_for_each_entry(pdev, &pbus->devices, bus_list) { 1087 list_for_each_entry(pdev, &pbus->devices, bus_list) {
1078 u16 status, error_bits; 1088 u16 status, error_bits;
1079 1089
1080 pci_read_config_word(pdev, PCI_STATUS, &status); 1090 pci_read_config_word(pdev, PCI_STATUS, &status);
1081 error_bits = 1091 error_bits =
arch/sparc64/kernel/pci_impl.h
1 /* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $ 1 /* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $
2 * pci_impl.h: Helper definitions for PCI controller support. 2 * pci_impl.h: Helper definitions for PCI controller support.
3 * 3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com) 4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */ 5 */
6 6
7 #ifndef PCI_IMPL_H 7 #ifndef PCI_IMPL_H
8 #define PCI_IMPL_H 8 #define PCI_IMPL_H
9 9
10 #include <linux/types.h> 10 #include <linux/types.h>
11 #include <linux/spinlock.h> 11 #include <linux/spinlock.h>
12 #include <asm/io.h> 12 #include <asm/io.h>
13 #include <asm/prom.h>
13 14
14 extern struct pci_controller_info *pci_controller_root; 15 extern struct pci_controller_info *pci_controller_root;
15 16
16 extern int pci_num_controllers; 17 extern int pci_num_controllers;
17 18
18 /* PCI bus scanning and fixup support. */ 19 /* PCI bus scanning and fixup support. */
19 extern void pci_fixup_host_bridge_self(struct pci_bus *pbus); 20 extern void pci_fixup_host_bridge_self(struct pci_bus *pbus);
20 extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus, 21 extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus,
21 struct pci_pbm_info *pbm, 22 struct pci_pbm_info *pbm,
22 int prom_node); 23 struct device_node *prom_node);
23 extern void pci_record_assignments(struct pci_pbm_info *pbm, 24 extern void pci_record_assignments(struct pci_pbm_info *pbm,
24 struct pci_bus *pbus); 25 struct pci_bus *pbus);
25 extern void pci_assign_unassigned(struct pci_pbm_info *pbm, 26 extern void pci_assign_unassigned(struct pci_pbm_info *pbm,
26 struct pci_bus *pbus); 27 struct pci_bus *pbus);
27 extern void pci_fixup_irq(struct pci_pbm_info *pbm, 28 extern void pci_fixup_irq(struct pci_pbm_info *pbm,
28 struct pci_bus *pbus); 29 struct pci_bus *pbus);
29 extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm, 30 extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
30 struct pci_bus *pbus); 31 struct pci_bus *pbus);
31 extern void pci_setup_busmastering(struct pci_pbm_info *pbm, 32 extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
32 struct pci_bus *pbus); 33 struct pci_bus *pbus);
33 extern void pci_register_legacy_regions(struct resource *io_res, 34 extern void pci_register_legacy_regions(struct resource *io_res,
34 struct resource *mem_res); 35 struct resource *mem_res);
35 36
36 /* Error reporting support. */ 37 /* Error reporting support. */
37 extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *); 38 extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
38 extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *); 39 extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
39 extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *); 40 extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
40 41
41 /* Configuration space access. */ 42 /* Configuration space access. */
42 extern void pci_config_read8(u8 *addr, u8 *ret); 43 extern void pci_config_read8(u8 *addr, u8 *ret);
43 extern void pci_config_read16(u16 *addr, u16 *ret); 44 extern void pci_config_read16(u16 *addr, u16 *ret);
44 extern void pci_config_read32(u32 *addr, u32 *ret); 45 extern void pci_config_read32(u32 *addr, u32 *ret);
45 extern void pci_config_write8(u8 *addr, u8 val); 46 extern void pci_config_write8(u8 *addr, u8 val);
46 extern void pci_config_write16(u16 *addr, u16 val); 47 extern void pci_config_write16(u16 *addr, u16 val);
47 extern void pci_config_write32(u32 *addr, u32 val); 48 extern void pci_config_write32(u32 *addr, u32 val);
48 49
49 #endif /* !(PCI_IMPL_H) */ 50 #endif /* !(PCI_IMPL_H) */
50 51
arch/sparc64/kernel/pci_psycho.c
1 /* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $ 1 /* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
2 * pci_psycho.c: PSYCHO/U2P specific PCI controller support. 2 * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
3 * 3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu) 4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) 5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) 6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */ 7 */
8 8
9 #include <linux/kernel.h> 9 #include <linux/kernel.h>
10 #include <linux/types.h> 10 #include <linux/types.h>
11 #include <linux/pci.h> 11 #include <linux/pci.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 15
16 #include <asm/pbm.h> 16 #include <asm/pbm.h>
17 #include <asm/iommu.h> 17 #include <asm/iommu.h>
18 #include <asm/irq.h> 18 #include <asm/irq.h>
19 #include <asm/starfire.h> 19 #include <asm/starfire.h>
20 #include <asm/prom.h> 20 #include <asm/prom.h>
21 21
22 #include "pci_impl.h" 22 #include "pci_impl.h"
23 #include "iommu_common.h" 23 #include "iommu_common.h"
24 24
25 /* All PSYCHO registers are 64-bits. The following accessor 25 /* All PSYCHO registers are 64-bits. The following accessor
26 * routines are how they are accessed. The REG parameter 26 * routines are how they are accessed. The REG parameter
27 * is a physical address. 27 * is a physical address.
28 */ 28 */
29 #define psycho_read(__reg) \ 29 #define psycho_read(__reg) \
30 ({ u64 __ret; \ 30 ({ u64 __ret; \
31 __asm__ __volatile__("ldxa [%1] %2, %0" \ 31 __asm__ __volatile__("ldxa [%1] %2, %0" \
32 : "=r" (__ret) \ 32 : "=r" (__ret) \
33 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ 33 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
34 : "memory"); \ 34 : "memory"); \
35 __ret; \ 35 __ret; \
36 }) 36 })
37 #define psycho_write(__reg, __val) \ 37 #define psycho_write(__reg, __val) \
38 __asm__ __volatile__("stxa %0, [%1] %2" \ 38 __asm__ __volatile__("stxa %0, [%1] %2" \
39 : /* no outputs */ \ 39 : /* no outputs */ \
40 : "r" (__val), "r" (__reg), \ 40 : "r" (__val), "r" (__reg), \
41 "i" (ASI_PHYS_BYPASS_EC_E) \ 41 "i" (ASI_PHYS_BYPASS_EC_E) \
42 : "memory") 42 : "memory")
43 43
44 /* Misc. PSYCHO PCI controller register offsets and definitions. */ 44 /* Misc. PSYCHO PCI controller register offsets and definitions. */
45 #define PSYCHO_CONTROL 0x0010UL 45 #define PSYCHO_CONTROL 0x0010UL
46 #define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/ 46 #define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/
47 #define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */ 47 #define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */
48 #define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */ 48 #define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */
49 #define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */ 49 #define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */
50 #define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */ 50 #define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */
51 #define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */ 51 #define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */
52 #define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */ 52 #define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */
53 #define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */ 53 #define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */
54 #define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */ 54 #define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */
55 #define PSYCHO_PCIA_CTRL 0x2000UL 55 #define PSYCHO_PCIA_CTRL 0x2000UL
56 #define PSYCHO_PCIB_CTRL 0x4000UL 56 #define PSYCHO_PCIB_CTRL 0x4000UL
57 #define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */ 57 #define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */
58 #define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */ 58 #define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */
59 #define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */ 59 #define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */
60 #define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */ 60 #define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */
61 #define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */ 61 #define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */
62 #define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */ 62 #define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */
63 #define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */ 63 #define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */
64 #define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */ 64 #define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */
65 #define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */ 65 #define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */
66 #define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */ 66 #define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */
67 #define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */ 67 #define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */
68 #define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */ 68 #define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */
69 69
70 /* U2P Programmer's Manual, page 13-55, configuration space 70 /* U2P Programmer's Manual, page 13-55, configuration space
71 * address format: 71 * address format:
72 * 72 *
73 * 32 24 23 16 15 11 10 8 7 2 1 0 73 * 32 24 23 16 15 11 10 8 7 2 1 0
74 * --------------------------------------------------------- 74 * ---------------------------------------------------------
75 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 | 75 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
76 * --------------------------------------------------------- 76 * ---------------------------------------------------------
77 */ 77 */
78 #define PSYCHO_CONFIG_BASE(PBM) \ 78 #define PSYCHO_CONFIG_BASE(PBM) \
79 ((PBM)->config_space | (1UL << 24)) 79 ((PBM)->config_space | (1UL << 24))
80 #define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \ 80 #define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \
81 (((unsigned long)(BUS) << 16) | \ 81 (((unsigned long)(BUS) << 16) | \
82 ((unsigned long)(DEVFN) << 8) | \ 82 ((unsigned long)(DEVFN) << 8) | \
83 ((unsigned long)(REG))) 83 ((unsigned long)(REG)))
84 84
85 static void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm, 85 static void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm,
86 unsigned char bus, 86 unsigned char bus,
87 unsigned int devfn, 87 unsigned int devfn,
88 int where) 88 int where)
89 { 89 {
90 if (!pbm) 90 if (!pbm)
91 return NULL; 91 return NULL;
92 return (void *) 92 return (void *)
93 (PSYCHO_CONFIG_BASE(pbm) | 93 (PSYCHO_CONFIG_BASE(pbm) |
94 PSYCHO_CONFIG_ENCODE(bus, devfn, where)); 94 PSYCHO_CONFIG_ENCODE(bus, devfn, where));
95 } 95 }
96 96
97 static int psycho_out_of_range(struct pci_pbm_info *pbm, 97 static int psycho_out_of_range(struct pci_pbm_info *pbm,
98 unsigned char bus, 98 unsigned char bus,
99 unsigned char devfn) 99 unsigned char devfn)
100 { 100 {
101 return ((pbm->parent == 0) || 101 return ((pbm->parent == 0) ||
102 ((pbm == &pbm->parent->pbm_B) && 102 ((pbm == &pbm->parent->pbm_B) &&
103 (bus == pbm->pci_first_busno) && 103 (bus == pbm->pci_first_busno) &&
104 PCI_SLOT(devfn) > 8) || 104 PCI_SLOT(devfn) > 8) ||
105 ((pbm == &pbm->parent->pbm_A) && 105 ((pbm == &pbm->parent->pbm_A) &&
106 (bus == pbm->pci_first_busno) && 106 (bus == pbm->pci_first_busno) &&
107 PCI_SLOT(devfn) > 8)); 107 PCI_SLOT(devfn) > 8));
108 } 108 }
109 109
110 /* PSYCHO PCI configuration space accessors. */ 110 /* PSYCHO PCI configuration space accessors. */
111 111
112 static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, 112 static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
113 int where, int size, u32 *value) 113 int where, int size, u32 *value)
114 { 114 {
115 struct pci_pbm_info *pbm = bus_dev->sysdata; 115 struct pci_pbm_info *pbm = bus_dev->sysdata;
116 unsigned char bus = bus_dev->number; 116 unsigned char bus = bus_dev->number;
117 u32 *addr; 117 u32 *addr;
118 u16 tmp16; 118 u16 tmp16;
119 u8 tmp8; 119 u8 tmp8;
120 120
121 switch (size) { 121 switch (size) {
122 case 1: 122 case 1:
123 *value = 0xff; 123 *value = 0xff;
124 break; 124 break;
125 case 2: 125 case 2:
126 *value = 0xffff; 126 *value = 0xffff;
127 break; 127 break;
128 case 4: 128 case 4:
129 *value = 0xffffffff; 129 *value = 0xffffffff;
130 break; 130 break;
131 } 131 }
132 132
133 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where); 133 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
134 if (!addr) 134 if (!addr)
135 return PCIBIOS_SUCCESSFUL; 135 return PCIBIOS_SUCCESSFUL;
136 136
137 if (psycho_out_of_range(pbm, bus, devfn)) 137 if (psycho_out_of_range(pbm, bus, devfn))
138 return PCIBIOS_SUCCESSFUL; 138 return PCIBIOS_SUCCESSFUL;
139 switch (size) { 139 switch (size) {
140 case 1: 140 case 1:
141 pci_config_read8((u8 *)addr, &tmp8); 141 pci_config_read8((u8 *)addr, &tmp8);
142 *value = (u32) tmp8; 142 *value = (u32) tmp8;
143 break; 143 break;
144 144
145 case 2: 145 case 2:
146 if (where & 0x01) { 146 if (where & 0x01) {
147 printk("pci_read_config_word: misaligned reg [%x]\n", 147 printk("pci_read_config_word: misaligned reg [%x]\n",
148 where); 148 where);
149 return PCIBIOS_SUCCESSFUL; 149 return PCIBIOS_SUCCESSFUL;
150 } 150 }
151 pci_config_read16((u16 *)addr, &tmp16); 151 pci_config_read16((u16 *)addr, &tmp16);
152 *value = (u32) tmp16; 152 *value = (u32) tmp16;
153 break; 153 break;
154 154
155 case 4: 155 case 4:
156 if (where & 0x03) { 156 if (where & 0x03) {
157 printk("pci_read_config_dword: misaligned reg [%x]\n", 157 printk("pci_read_config_dword: misaligned reg [%x]\n",
158 where); 158 where);
159 return PCIBIOS_SUCCESSFUL; 159 return PCIBIOS_SUCCESSFUL;
160 } 160 }
161 pci_config_read32(addr, value); 161 pci_config_read32(addr, value);
162 break; 162 break;
163 } 163 }
164 return PCIBIOS_SUCCESSFUL; 164 return PCIBIOS_SUCCESSFUL;
165 } 165 }
166 166
167 static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, 167 static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
168 int where, int size, u32 value) 168 int where, int size, u32 value)
169 { 169 {
170 struct pci_pbm_info *pbm = bus_dev->sysdata; 170 struct pci_pbm_info *pbm = bus_dev->sysdata;
171 unsigned char bus = bus_dev->number; 171 unsigned char bus = bus_dev->number;
172 u32 *addr; 172 u32 *addr;
173 173
174 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where); 174 addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
175 if (!addr) 175 if (!addr)
176 return PCIBIOS_SUCCESSFUL; 176 return PCIBIOS_SUCCESSFUL;
177 177
178 if (psycho_out_of_range(pbm, bus, devfn)) 178 if (psycho_out_of_range(pbm, bus, devfn))
179 return PCIBIOS_SUCCESSFUL; 179 return PCIBIOS_SUCCESSFUL;
180 180
181 switch (size) { 181 switch (size) {
182 case 1: 182 case 1:
183 pci_config_write8((u8 *)addr, value); 183 pci_config_write8((u8 *)addr, value);
184 break; 184 break;
185 185
186 case 2: 186 case 2:
187 if (where & 0x01) { 187 if (where & 0x01) {
188 printk("pci_write_config_word: misaligned reg [%x]\n", 188 printk("pci_write_config_word: misaligned reg [%x]\n",
189 where); 189 where);
190 return PCIBIOS_SUCCESSFUL; 190 return PCIBIOS_SUCCESSFUL;
191 } 191 }
192 pci_config_write16((u16 *)addr, value); 192 pci_config_write16((u16 *)addr, value);
193 break; 193 break;
194 194
195 case 4: 195 case 4:
196 if (where & 0x03) { 196 if (where & 0x03) {
197 printk("pci_write_config_dword: misaligned reg [%x]\n", 197 printk("pci_write_config_dword: misaligned reg [%x]\n",
198 where); 198 where);
199 return PCIBIOS_SUCCESSFUL; 199 return PCIBIOS_SUCCESSFUL;
200 } 200 }
201 pci_config_write32(addr, value); 201 pci_config_write32(addr, value);
202 } 202 }
203 return PCIBIOS_SUCCESSFUL; 203 return PCIBIOS_SUCCESSFUL;
204 } 204 }
205 205
/* Config space accessors handed to the generic PCI layer for
 * buses behind a PSYCHO controller.
 */
static struct pci_ops psycho_ops = {
	.read =		psycho_read_pci_cfg,
	.write =	psycho_write_pci_cfg,
};
210 210
211 /* PSYCHO interrupt mapping support. */ 211 /* PSYCHO interrupt mapping support. */
#define PSYCHO_IMAP_A_SLOT0	0x0c00UL
#define PSYCHO_IMAP_B_SLOT0	0x0c20UL
/* Compute the IMAP register offset for a PCI slot interrupt.
 * Bit 4 of the INO selects PBM A vs B, bits 3:2 select the slot;
 * each slot's IMAP register is 8 bytes wide.
 */
static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
{
	unsigned long base, slot;

	base = (ino & 0x10) ? PSYCHO_IMAP_B_SLOT0 : PSYCHO_IMAP_A_SLOT0;
	slot = (ino >> 2) & 0x03;

	return base + (slot * 8);
}
224 224
225 #define PSYCHO_IMAP_SCSI 0x1000UL 225 #define PSYCHO_IMAP_SCSI 0x1000UL
226 #define PSYCHO_IMAP_ETH 0x1008UL 226 #define PSYCHO_IMAP_ETH 0x1008UL
227 #define PSYCHO_IMAP_BPP 0x1010UL 227 #define PSYCHO_IMAP_BPP 0x1010UL
228 #define PSYCHO_IMAP_AU_REC 0x1018UL 228 #define PSYCHO_IMAP_AU_REC 0x1018UL
229 #define PSYCHO_IMAP_AU_PLAY 0x1020UL 229 #define PSYCHO_IMAP_AU_PLAY 0x1020UL
230 #define PSYCHO_IMAP_PFAIL 0x1028UL 230 #define PSYCHO_IMAP_PFAIL 0x1028UL
231 #define PSYCHO_IMAP_KMS 0x1030UL 231 #define PSYCHO_IMAP_KMS 0x1030UL
232 #define PSYCHO_IMAP_FLPY 0x1038UL 232 #define PSYCHO_IMAP_FLPY 0x1038UL
233 #define PSYCHO_IMAP_SHW 0x1040UL 233 #define PSYCHO_IMAP_SHW 0x1040UL
234 #define PSYCHO_IMAP_KBD 0x1048UL 234 #define PSYCHO_IMAP_KBD 0x1048UL
235 #define PSYCHO_IMAP_MS 0x1050UL 235 #define PSYCHO_IMAP_MS 0x1050UL
236 #define PSYCHO_IMAP_SER 0x1058UL 236 #define PSYCHO_IMAP_SER 0x1058UL
237 #define PSYCHO_IMAP_TIM0 0x1060UL 237 #define PSYCHO_IMAP_TIM0 0x1060UL
238 #define PSYCHO_IMAP_TIM1 0x1068UL 238 #define PSYCHO_IMAP_TIM1 0x1068UL
239 #define PSYCHO_IMAP_UE 0x1070UL 239 #define PSYCHO_IMAP_UE 0x1070UL
240 #define PSYCHO_IMAP_CE 0x1078UL 240 #define PSYCHO_IMAP_CE 0x1078UL
241 #define PSYCHO_IMAP_A_ERR 0x1080UL 241 #define PSYCHO_IMAP_A_ERR 0x1080UL
242 #define PSYCHO_IMAP_B_ERR 0x1088UL 242 #define PSYCHO_IMAP_B_ERR 0x1088UL
243 #define PSYCHO_IMAP_PMGMT 0x1090UL 243 #define PSYCHO_IMAP_PMGMT 0x1090UL
244 #define PSYCHO_IMAP_GFX 0x1098UL 244 #define PSYCHO_IMAP_GFX 0x1098UL
245 #define PSYCHO_IMAP_EUPA 0x10a0UL 245 #define PSYCHO_IMAP_EUPA 0x10a0UL
246 246
/* IMAP register offsets for the fixed on-board devices, indexed by
 * INO minus PSYCHO_ONBOARD_IRQ_BASE.  The inline hex comments give
 * the INO value each entry services.
 */
static unsigned long __onboard_imap_off[] = {
/*0x20*/	PSYCHO_IMAP_SCSI,
/*0x21*/	PSYCHO_IMAP_ETH,
/*0x22*/	PSYCHO_IMAP_BPP,
/*0x23*/	PSYCHO_IMAP_AU_REC,
/*0x24*/	PSYCHO_IMAP_AU_PLAY,
/*0x25*/	PSYCHO_IMAP_PFAIL,
/*0x26*/	PSYCHO_IMAP_KMS,
/*0x27*/	PSYCHO_IMAP_FLPY,
/*0x28*/	PSYCHO_IMAP_SHW,
/*0x29*/	PSYCHO_IMAP_KBD,
/*0x2a*/	PSYCHO_IMAP_MS,
/*0x2b*/	PSYCHO_IMAP_SER,
/*0x2c*/	PSYCHO_IMAP_TIM0,
/*0x2d*/	PSYCHO_IMAP_TIM1,
/*0x2e*/	PSYCHO_IMAP_UE,
/*0x2f*/	PSYCHO_IMAP_CE,
/*0x30*/	PSYCHO_IMAP_A_ERR,
/*0x31*/	PSYCHO_IMAP_B_ERR,
/*0x32*/	PSYCHO_IMAP_PMGMT
};
#define PSYCHO_ONBOARD_IRQ_BASE		0x20
#define PSYCHO_ONBOARD_IRQ_LAST		0x32
#define psycho_onboard_imap_offset(__ino) \
	__onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
272 272
273 #define PSYCHO_ICLR_A_SLOT0 0x1400UL 273 #define PSYCHO_ICLR_A_SLOT0 0x1400UL
274 #define PSYCHO_ICLR_SCSI 0x1800UL 274 #define PSYCHO_ICLR_SCSI 0x1800UL
275 275
276 #define psycho_iclr_offset(ino) \ 276 #define psycho_iclr_offset(ino) \
277 ((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \ 277 ((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
278 (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3))) 278 (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
279 279
/* Translate a PSYCHO interrupt number into a system IRQ.
 *
 * The INO selects either a PCI slot IMAP register (INOs below the
 * onboard range) or one of the fixed onboard-device IMAP registers.
 * INOs above the known onboard range are fatal: we report via the
 * PROM and halt.  The corresponding ICLR register is located with
 * psycho_iclr_offset(), and build_irq() assembles the final bucket.
 */
static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
				     struct pci_dev *pdev,
				     unsigned int ino)
{
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;

	ino &= PCI_IRQ_INO;
	if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = psycho_pcislot_imap_offset(ino);
	} else {
		/* Onboard device */
		if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
			prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
			prom_halt();
		}
		imap_off = psycho_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket.  The +4 skips to the low 32-bit
	 * half of the 64-bit register -- presumably so 32-bit accesses
	 * hit the active bits; verify against the register layout.
	 */
	imap = pbm->controller_regs + imap_off;
	imap += 4;

	iclr_off = psycho_iclr_offset(ino);
	iclr = pbm->controller_regs + iclr_off;
	iclr += 4;

	/* PCI slot interrupts share an IMAP register per slot; the low
	 * two INO bits distinguish INTA..INTD within the slot.
	 */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	return build_irq(inofixup, iclr, imap);
}
314 314
315 /* PSYCHO error handling support. */ 315 /* PSYCHO error handling support. */
/* Which kind of top-level error is being serviced: uncorrectable ECC,
 * correctable ECC, or a PCI bus error.  Passed into the shared
 * IOMMU/streaming-cache checking helpers below.
 */
enum psycho_error_type {
	UE_ERR, CE_ERR, PCI_ERR
};
319 319
320 /* Helper function of IOMMU error checking, which checks out 320 /* Helper function of IOMMU error checking, which checks out
321 * the state of the streaming buffers. The IOMMU lock is 321 * the state of the streaming buffers. The IOMMU lock is
322 * held when this is called. 322 * held when this is called.
323 * 323 *
324 * For the PCI error case we know which PBM (and thus which 324 * For the PCI error case we know which PBM (and thus which
325 * streaming buffer) caused the error, but for the uncorrectable 325 * streaming buffer) caused the error, but for the uncorrectable
326 * error case we do not. So we always check both streaming caches. 326 * error case we do not. So we always check both streaming caches.
327 */ 327 */
328 #define PSYCHO_STRBUF_CONTROL_A 0x2800UL 328 #define PSYCHO_STRBUF_CONTROL_A 0x2800UL
329 #define PSYCHO_STRBUF_CONTROL_B 0x4800UL 329 #define PSYCHO_STRBUF_CONTROL_B 0x4800UL
330 #define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ 330 #define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
331 #define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ 331 #define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
332 #define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ 332 #define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
333 #define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ 333 #define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
334 #define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ 334 #define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
335 #define PSYCHO_STRBUF_FLUSH_A 0x2808UL 335 #define PSYCHO_STRBUF_FLUSH_A 0x2808UL
336 #define PSYCHO_STRBUF_FLUSH_B 0x4808UL 336 #define PSYCHO_STRBUF_FLUSH_B 0x4808UL
337 #define PSYCHO_STRBUF_FSYNC_A 0x2810UL 337 #define PSYCHO_STRBUF_FSYNC_A 0x2810UL
338 #define PSYCHO_STRBUF_FSYNC_B 0x4810UL 338 #define PSYCHO_STRBUF_FSYNC_B 0x4810UL
339 #define PSYCHO_STC_DATA_A 0xb000UL 339 #define PSYCHO_STC_DATA_A 0xb000UL
340 #define PSYCHO_STC_DATA_B 0xc000UL 340 #define PSYCHO_STC_DATA_B 0xc000UL
341 #define PSYCHO_STC_ERR_A 0xb400UL 341 #define PSYCHO_STC_ERR_A 0xb400UL
342 #define PSYCHO_STC_ERR_B 0xc400UL 342 #define PSYCHO_STC_ERR_B 0xc400UL
343 #define PSYCHO_STCERR_WRITE 0x0000000000000002UL /* Write Error */ 343 #define PSYCHO_STCERR_WRITE 0x0000000000000002UL /* Write Error */
344 #define PSYCHO_STCERR_READ 0x0000000000000001UL /* Read Error */ 344 #define PSYCHO_STCERR_READ 0x0000000000000001UL /* Read Error */
345 #define PSYCHO_STC_TAG_A 0xb800UL 345 #define PSYCHO_STC_TAG_A 0xb800UL
346 #define PSYCHO_STC_TAG_B 0xc800UL 346 #define PSYCHO_STC_TAG_B 0xc800UL
347 #define PSYCHO_STCTAG_PPN 0x0fffffff00000000UL /* Physical Page Number */ 347 #define PSYCHO_STCTAG_PPN 0x0fffffff00000000UL /* Physical Page Number */
348 #define PSYCHO_STCTAG_VPN 0x00000000ffffe000UL /* Virtual Page Number */ 348 #define PSYCHO_STCTAG_VPN 0x00000000ffffe000UL /* Virtual Page Number */
349 #define PSYCHO_STCTAG_VALID 0x0000000000000002UL /* Valid */ 349 #define PSYCHO_STCTAG_VALID 0x0000000000000002UL /* Valid */
350 #define PSYCHO_STCTAG_WRITE 0x0000000000000001UL /* Writable */ 350 #define PSYCHO_STCTAG_WRITE 0x0000000000000001UL /* Writable */
351 #define PSYCHO_STC_LINE_A 0xb900UL 351 #define PSYCHO_STC_LINE_A 0xb900UL
352 #define PSYCHO_STC_LINE_B 0xc900UL 352 #define PSYCHO_STC_LINE_B 0xc900UL
353 #define PSYCHO_STCLINE_LINDX 0x0000000001e00000UL /* LRU Index */ 353 #define PSYCHO_STCLINE_LINDX 0x0000000001e00000UL /* LRU Index */
354 #define PSYCHO_STCLINE_SPTR 0x00000000001f8000UL /* Dirty Data Start Pointer */ 354 #define PSYCHO_STCLINE_SPTR 0x00000000001f8000UL /* Dirty Data Start Pointer */
355 #define PSYCHO_STCLINE_LADDR 0x0000000000007f00UL /* Line Address */ 355 #define PSYCHO_STCLINE_LADDR 0x0000000000007f00UL /* Line Address */
356 #define PSYCHO_STCLINE_EPTR 0x00000000000000fcUL /* Dirty Data End Pointer */ 356 #define PSYCHO_STCLINE_EPTR 0x00000000000000fcUL /* Dirty Data End Pointer */
357 #define PSYCHO_STCLINE_VALID 0x0000000000000002UL /* Valid */ 357 #define PSYCHO_STCLINE_VALID 0x0000000000000002UL /* Valid */
358 #define PSYCHO_STCLINE_FOFN 0x0000000000000001UL /* Fetch Outstanding / Flush Necessary */ 358 #define PSYCHO_STCLINE_FOFN 0x0000000000000001UL /* Fetch Outstanding / Flush Necessary */
359 359
360 static DEFINE_SPINLOCK(stc_buf_lock); 360 static DEFINE_SPINLOCK(stc_buf_lock);
361 static unsigned long stc_error_buf[128]; 361 static unsigned long stc_error_buf[128];
362 static unsigned long stc_tag_buf[16]; 362 static unsigned long stc_tag_buf[16];
363 static unsigned long stc_line_buf[16]; 363 static unsigned long stc_line_buf[16];
364 364
/* Dump the error, tag and line state of one PBM's streaming cache.
 *
 * Called with the IOMMU lock held.  The STC registers of both PBMs
 * live off PBM A's controller register base; is_pbm_a only selects
 * which bank of register offsets to use.  The captured state is
 * staged in the global stc_*_buf arrays (serialized by stc_buf_lock)
 * and then printed for any line whose error bytes are non-zero.
 */
static void __psycho_check_one_stc(struct pci_controller_info *p,
				   struct pci_pbm_info *pbm,
				   int is_pbm_a)
{
	struct pci_strbuf *strbuf = &pbm->stc;
	unsigned long regbase = p->pbm_A.controller_regs;
	unsigned long err_base, tag_base, line_base;
	u64 control;
	int i;

	if (is_pbm_a) {
		err_base = regbase + PSYCHO_STC_ERR_A;
		tag_base = regbase + PSYCHO_STC_TAG_A;
		line_base = regbase + PSYCHO_STC_LINE_A;
	} else {
		err_base = regbase + PSYCHO_STC_ERR_B;
		tag_base = regbase + PSYCHO_STC_TAG_B;
		line_base = regbase + PSYCHO_STC_LINE_B;
	}

	spin_lock(&stc_buf_lock);

	/* This is __REALLY__ dangerous.  When we put the
	 * streaming buffer into diagnostic mode to probe
	 * its tags and error status, we _must_ clear all
	 * of the line tag valid bits before re-enabling
	 * the streaming buffer.  If any dirty data lives
	 * in the STC when we do this, we will end up
	 * invalidating it before it has a chance to reach
	 * main memory.
	 */
	control = psycho_read(strbuf->strbuf_control);
	psycho_write(strbuf->strbuf_control,
		     (control | PSYCHO_STRBUF_CTRL_DENAB));
	/* Capture and clear the 128 per-byte error entries. */
	for (i = 0; i < 128; i++) {
		unsigned long val;

		val = psycho_read(err_base + (i * 8UL));
		psycho_write(err_base + (i * 8UL), 0UL);
		stc_error_buf[i] = val;
	}
	/* Capture and clear the 16 tag/line register pairs. */
	for (i = 0; i < 16; i++) {
		stc_tag_buf[i] = psycho_read(tag_base + (i * 8UL));
		stc_line_buf[i] = psycho_read(line_base + (i * 8UL));
		psycho_write(tag_base + (i * 8UL), 0UL);
		psycho_write(line_base + (i * 8UL), 0UL);
	}

	/* OK, state is logged, exit diagnostic mode. */
	psycho_write(strbuf->strbuf_control, control);

	/* Eight error bytes per cache line; report tag and line state
	 * for each line that shows at least one error.
	 */
	for (i = 0; i < 16; i++) {
		int j, saw_error, first, last;

		saw_error = 0;
		first = i * 8;
		last = first + 8;
		for (j = first; j < last; j++) {
			unsigned long errval = stc_error_buf[j];
			if (errval != 0) {
				saw_error++;
				printk("PSYCHO%d(PBM%c): STC_ERR(%d)[wr(%d)rd(%d)]\n",
				       p->index,
				       (is_pbm_a ? 'A' : 'B'),
				       j,
				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
			}
		}
		if (saw_error != 0) {
			unsigned long tagval = stc_tag_buf[i];
			unsigned long lineval = stc_line_buf[i];
			printk("PSYCHO%d(PBM%c): STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)W(%d)]\n",
			       p->index,
			       (is_pbm_a ? 'A' : 'B'),
			       i,
			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
			       (tagval & PSYCHO_STCTAG_VPN),
			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
			printk("PSYCHO%d(PBM%c): STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
			       "V(%d)FOFN(%d)]\n",
			       p->index,
			       (is_pbm_a ? 'A' : 'B'),
			       i,
			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
			       ((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
		}
	}

	spin_unlock(&stc_buf_lock);
}
461 461
462 static void __psycho_check_stc_error(struct pci_controller_info *p, 462 static void __psycho_check_stc_error(struct pci_controller_info *p,
463 unsigned long afsr, 463 unsigned long afsr,
464 unsigned long afar, 464 unsigned long afar,
465 enum psycho_error_type type) 465 enum psycho_error_type type)
466 { 466 {
467 struct pci_pbm_info *pbm; 467 struct pci_pbm_info *pbm;
468 468
469 pbm = &p->pbm_A; 469 pbm = &p->pbm_A;
470 if (pbm->stc.strbuf_enabled) 470 if (pbm->stc.strbuf_enabled)
471 __psycho_check_one_stc(p, pbm, 1); 471 __psycho_check_one_stc(p, pbm, 1);
472 472
473 pbm = &p->pbm_B; 473 pbm = &p->pbm_B;
474 if (pbm->stc.strbuf_enabled) 474 if (pbm->stc.strbuf_enabled)
475 __psycho_check_one_stc(p, pbm, 0); 475 __psycho_check_one_stc(p, pbm, 0);
476 } 476 }
477 477
478 /* When an Uncorrectable Error or a PCI Error happens, we 478 /* When an Uncorrectable Error or a PCI Error happens, we
479 * interrogate the IOMMU state to see if it is the cause. 479 * interrogate the IOMMU state to see if it is the cause.
480 */ 480 */
481 #define PSYCHO_IOMMU_CONTROL 0x0200UL 481 #define PSYCHO_IOMMU_CONTROL 0x0200UL
482 #define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ 482 #define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
483 #define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ 483 #define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
484 #define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ 484 #define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
485 #define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ 485 #define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
486 #define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ 486 #define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
487 #define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ 487 #define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
488 #define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ 488 #define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
489 #define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ 489 #define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
490 #define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ 490 #define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
491 #define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ 491 #define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
492 #define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ 492 #define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
493 #define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ 493 #define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
494 #define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ 494 #define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
495 #define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ 495 #define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
496 #define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ 496 #define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
497 #define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ 497 #define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
498 #define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ 498 #define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
499 #define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ 499 #define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
500 #define PSYCHO_IOMMU_TSBBASE 0x0208UL 500 #define PSYCHO_IOMMU_TSBBASE 0x0208UL
501 #define PSYCHO_IOMMU_FLUSH 0x0210UL 501 #define PSYCHO_IOMMU_FLUSH 0x0210UL
502 #define PSYCHO_IOMMU_TAG 0xa580UL 502 #define PSYCHO_IOMMU_TAG 0xa580UL
503 #define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL) 503 #define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
504 #define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL) 504 #define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL)
505 #define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL) 505 #define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
506 #define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL) 506 #define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
507 #define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL) 507 #define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
508 #define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffUL 508 #define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffUL
509 #define PSYCHO_IOMMU_DATA 0xa600UL 509 #define PSYCHO_IOMMU_DATA 0xa600UL
510 #define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL) 510 #define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
511 #define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL) 511 #define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
512 #define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL 512 #define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
/* Interrogate the IOMMU after an uncorrectable or PCI error to see
 * whether a translation error was the cause.
 *
 * Runs with the IOMMU lock held and IRQs disabled.  If the control
 * register flags a translation error, the error bit is cleared, the
 * error type is decoded and printed, and the first 16 TLB entries are
 * captured (in diagnostic mode), cleared, and dumped for any entry
 * with error status.  Finishes by checking both streaming caches.
 */
static void psycho_check_iommu_error(struct pci_controller_info *p,
				     unsigned long afsr,
				     unsigned long afar,
				     enum psycho_error_type type)
{
	struct pci_iommu *iommu = p->pbm_A.iommu;
	unsigned long iommu_tag[16];
	unsigned long iommu_data[16];
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = psycho_read(iommu->iommu_control);
	if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
		char *type_string;

		/* Clear the error encountered bit. */
		control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
		psycho_write(iommu->iommu_control, control);

		switch((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_string = "Protection Error";
			break;
		case 1:
			type_string = "Invalid Error";
			break;
		case 2:
			type_string = "TimeOut Error";
			break;
		case 3:
		default:
			type_string = "ECC Error";
			break;
		};
		printk("PSYCHO%d: IOMMU Error, type[%s]\n",
		       p->index, type_string);

		/* Put the IOMMU into diagnostic mode and probe
		 * its TLB for entries with error status.
		 *
		 * It is very possible for another DVMA to occur
		 * while we do this probe, and corrupt the system
		 * further.  But we are so screwed at this point
		 * that we are likely to crash hard anyways, so
		 * get as much diagnostic information to the
		 * console as we can.
		 */
		psycho_write(iommu->iommu_control,
			     control | PSYCHO_IOMMU_CTRL_DENAB);
		for (i = 0; i < 16; i++) {
			unsigned long base = p->pbm_A.controller_regs;

			iommu_tag[i] =
				psycho_read(base + PSYCHO_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				psycho_read(base + PSYCHO_IOMMU_DATA + (i * 8UL));

			/* Now clear out the entry. */
			psycho_write(base + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
			psycho_write(base + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
		}

		/* Leave diagnostic mode. */
		psycho_write(iommu->iommu_control, control);

		/* Report every captured entry whose tag has the error
		 * bit set, decoding the per-entry error status the same
		 * way as the control-register status above.
		 */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & PSYCHO_IOMMU_TAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) {
			case 0:
				type_string = "Protection Error";
				break;
			case 1:
				type_string = "Invalid Error";
				break;
			case 2:
				type_string = "TimeOut Error";
				break;
			case 3:
			default:
				type_string = "ECC Error";
				break;
			};
			printk("PSYCHO%d: IOMMU TAG(%d)[error(%s) wr(%d) str(%d) sz(%dK) vpg(%08lx)]\n",
			       p->index, i, type_string,
			       ((tag & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
			       ((tag & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
			       ((tag & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
			       (tag & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
			printk("PSYCHO%d: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
			       p->index, i,
			       ((data & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
			       ((data & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
			       (data & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
		}
	}
	__psycho_check_stc_error(p, afsr, afar, type);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
619 619
620 /* Uncorrectable Errors. Cause of the error and the address are 620 /* Uncorrectable Errors. Cause of the error and the address are
621 * recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors 621 * recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors
622 * relating to UPA interface transactions. 622 * relating to UPA interface transactions.
623 */ 623 */
624 #define PSYCHO_UE_AFSR 0x0030UL 624 #define PSYCHO_UE_AFSR 0x0030UL
625 #define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */ 625 #define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
626 #define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */ 626 #define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
627 #define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */ 627 #define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
628 #define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ 628 #define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
629 #define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */ 629 #define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
630 #define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/ 630 #define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
631 #define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */ 631 #define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
632 #define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */ 632 #define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
633 #define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */ 633 #define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */
634 #define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */ 634 #define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
635 #define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */ 635 #define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
636 #define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */ 636 #define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
637 #define PSYCHO_UE_AFAR 0x0038UL 637 #define PSYCHO_UE_AFAR 0x0038UL
638 638
/* Interrupt handler for PSYCHO Uncorrectable (UE) ECC errors.
 *
 * The hardware latches the cause into UE_AFSR and the faulting
 * address into UE_AFAR (both live in the shared controller register
 * block reachable via PBM-A).  We decode and log the primary and
 * secondary causes, clear the status bits to re-arm detection, and
 * interrogate the IOMMU since a UE may stem from a bad translation.
 *
 * Returns IRQ_NONE when no UE cause is recorded (the line is shared),
 * IRQ_HANDLED otherwise.
 */
static irqreturn_t psycho_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch uncorrectable error status.  AFAR is sampled before
	 * AFSR, consistent with the other PSYCHO error handlers.
	 */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear the primary/secondary error status bits by writing the
	 * asserted bits back to the AFSR.
	 */
	error_bits = afsr &
		(PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR |
		 PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;	/* Not our error; IRQ is shared. */
	psycho_write(afsr_reg, error_bits);

	/* Log the error.  Only one primary cause bit should be set;
	 * report PIO before DMA read before DMA write.
	 */
	printk("PSYCHO%d: Uncorrectable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & PSYCHO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & PSYCHO_UEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & PSYCHO_UEAFSR_PDWR) ?
		   "DMA Write" : "???")))));
	printk("PSYCHO%d: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n",
	       p->index,
	       (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL,
	       (afsr & PSYCHO_UEAFSR_MID) >> 24UL,
	       ((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0));
	printk("PSYCHO%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("PSYCHO%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & PSYCHO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & PSYCHO_UEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & PSYCHO_UEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	psycho_check_iommu_error(p, afsr, afar, UE_ERR);

	return IRQ_HANDLED;
}
698 698
699 /* Correctable Errors. */ 699 /* Correctable Errors. */
700 #define PSYCHO_CE_AFSR 0x0040UL 700 #define PSYCHO_CE_AFSR 0x0040UL
701 #define PSYCHO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */ 701 #define PSYCHO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
702 #define PSYCHO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */ 702 #define PSYCHO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
703 #define PSYCHO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */ 703 #define PSYCHO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
704 #define PSYCHO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ 704 #define PSYCHO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
705 #define PSYCHO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */ 705 #define PSYCHO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
706 #define PSYCHO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/ 706 #define PSYCHO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
707 #define PSYCHO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */ 707 #define PSYCHO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
708 #define PSYCHO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */ 708 #define PSYCHO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
709 #define PSYCHO_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */ 709 #define PSYCHO_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
710 #define PSYCHO_CEAFSR_DOFF 0x00000000e0000000UL /* Double Offset */ 710 #define PSYCHO_CEAFSR_DOFF 0x00000000e0000000UL /* Double Offset */
711 #define PSYCHO_CEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */ 711 #define PSYCHO_CEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
712 #define PSYCHO_CEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */ 712 #define PSYCHO_CEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
713 #define PSYCHO_CEAFSR_RESV2 0x00000000007fffffUL /* Reserved */ 713 #define PSYCHO_CEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
714 #define PSYCHO_CE_AFAR 0x0040UL 714 #define PSYCHO_CE_AFAR 0x0040UL
715 715
/* Interrupt handler for PSYCHO Correctable (CE) ECC errors.
 *
 * Mirrors psycho_ue_intr(): latch CE_AFSR/CE_AFAR, decode and log
 * the primary and secondary causes, and clear the status bits by
 * writing them back.  Correctable errors need no IOMMU interrogation
 * or bus scan; we only log.
 *
 * Returns IRQ_NONE when no CE cause is recorded (shared line),
 * IRQ_HANDLED otherwise.
 */
static irqreturn_t psycho_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR |
		 PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;	/* Not our error; IRQ is shared. */
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d: Correctable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & PSYCHO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & PSYCHO_CEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & PSYCHO_CEAFSR_PDWR) ?
		   "DMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("PSYCHO%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
	       "UPA_MID[%02lx] was_block(%d)\n",
	       p->index,
	       (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL,
	       (afsr & PSYCHO_CEAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_CEAFSR_DOFF) >> 29UL,
	       (afsr & PSYCHO_CEAFSR_MID) >> 24UL,
	       ((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0));
	printk("PSYCHO%d: CE AFAR [%016lx]\n", p->index, afar);
	printk("PSYCHO%d: CE Secondary errors [", p->index);
	reported = 0;
	if (afsr & PSYCHO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & PSYCHO_CEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & PSYCHO_CEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
778 778
779 /* PCI Errors. They are signalled by the PCI bus module since they 779 /* PCI Errors. They are signalled by the PCI bus module since they
780 * are associated with a specific bus segment. 780 * are associated with a specific bus segment.
781 */ 781 */
782 #define PSYCHO_PCI_AFSR_A 0x2010UL 782 #define PSYCHO_PCI_AFSR_A 0x2010UL
783 #define PSYCHO_PCI_AFSR_B 0x4010UL 783 #define PSYCHO_PCI_AFSR_B 0x4010UL
784 #define PSYCHO_PCIAFSR_PMA 0x8000000000000000UL /* Primary Master Abort Error */ 784 #define PSYCHO_PCIAFSR_PMA 0x8000000000000000UL /* Primary Master Abort Error */
785 #define PSYCHO_PCIAFSR_PTA 0x4000000000000000UL /* Primary Target Abort Error */ 785 #define PSYCHO_PCIAFSR_PTA 0x4000000000000000UL /* Primary Target Abort Error */
786 #define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */ 786 #define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
787 #define PSYCHO_PCIAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */ 787 #define PSYCHO_PCIAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
788 #define PSYCHO_PCIAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort Error */ 788 #define PSYCHO_PCIAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort Error */
789 #define PSYCHO_PCIAFSR_STA 0x0400000000000000UL /* Secondary Target Abort Error */ 789 #define PSYCHO_PCIAFSR_STA 0x0400000000000000UL /* Secondary Target Abort Error */
790 #define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */ 790 #define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
791 #define PSYCHO_PCIAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */ 791 #define PSYCHO_PCIAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
792 #define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000UL /* Reserved */ 792 #define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000UL /* Reserved */
793 #define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */ 793 #define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
794 #define PSYCHO_PCIAFSR_BLK 0x0000000080000000UL /* Trans was block operation */ 794 #define PSYCHO_PCIAFSR_BLK 0x0000000080000000UL /* Trans was block operation */
795 #define PSYCHO_PCIAFSR_RESV2 0x0000000040000000UL /* Reserved */ 795 #define PSYCHO_PCIAFSR_RESV2 0x0000000040000000UL /* Reserved */
796 #define PSYCHO_PCIAFSR_MID 0x000000003e000000UL /* MID causing the error */ 796 #define PSYCHO_PCIAFSR_MID 0x000000003e000000UL /* MID causing the error */
797 #define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffUL /* Reserved */ 797 #define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffUL /* Reserved */
798 #define PSYCHO_PCI_AFAR_A 0x2018UL 798 #define PSYCHO_PCI_AFAR_A 0x2018UL
799 #define PSYCHO_PCI_AFAR_B 0x4018UL 799 #define PSYCHO_PCI_AFAR_B 0x4018UL
800 800
/* Fallback path for a PCI error interrupt whose AFSR recorded no
 * primary/secondary cause.  Two other sources remain:
 *
 *   1) The PBM control register may have latched a streaming byte
 *      hole error or an SERR assertion.
 *   2) The host bridge's PCI_STATUS config register may have logged
 *      a parity/abort/system error.
 *
 * Both are checked, logged, and cleared (CSR by write-back, status
 * register by writing all-ones).  Returns IRQ_HANDLED if either
 * source had something latched, IRQ_NONE otherwise.
 */
static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm_a)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	/* Each PBM has its own control register in the shared
	 * controller register block.
	 */
	if (is_pbm_a) {
		csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
	} else {
		csr_reg = pbm->controller_regs + PSYCHO_PCIB_CTRL;
	}
	csr = psycho_read(csr_reg);
	csr_error_bits =
		csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors. */
		psycho_write(csr_reg, csr);

		/* Log 'em. */
		if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
			printk("%s: PCI streaming byte hole error asserted.\n",
			       pbm->name);
		if (csr_error_bits & PSYCHO_PCICTRL_SERR)
			printk("%s: PCI SERR signal asserted.\n", pbm->name);
		ret = IRQ_HANDLED;
	}
	/* Check the bridge's own config-space status word too. */
	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		/* PCI_STATUS error bits are write-one-to-clear. */
		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
840 840
/* Interrupt handler for PSYCHO PCI bus errors.  dev_id is the
 * struct pci_pbm_info of the bus segment (PBM A or B) that raised
 * the interrupt.
 *
 * Decode and clear the per-PBM PCI AFSR/AFAR pair, log primary and
 * secondary causes, and then scan the PBM's bus for devices that
 * logged the corresponding error in their config space.  Target
 * aborts additionally trigger an IOMMU error check, since they are
 * often the visible symptom of a bad DVMA translation.
 */
static irqreturn_t psycho_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_pbm_info *pbm = dev_id;
	struct pci_controller_info *p = pbm->parent;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int is_pbm_a, reported;

	/* Both PBMs' AFSR/AFAR pairs live in the shared controller
	 * register block (hence pbm_A.controller_regs for either),
	 * at PBM-specific offsets.
	 */
	is_pbm_a = (pbm == &pbm->parent->pbm_A);
	if (is_pbm_a) {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_A;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_A;
	} else {
		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_B;
		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_B;
	}

	/* Latch error status. */
	afar = psycho_read(afar_reg);
	afsr = psycho_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
		 PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
		 PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
		 PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
	if (!error_bits)
		/* AFSR empty; check CSR and bridge status instead. */
		return psycho_pcierr_intr_other(pbm, is_pbm_a);
	psycho_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PSYCHO%d(PBM%c): PCI Error, primary error type[%s]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (((error_bits & PSYCHO_PCIAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & PSYCHO_PCIAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk("PSYCHO%d(PBM%c): bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
	       p->index, (is_pbm_a ? 'A' : 'B'),
	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
	printk("PSYCHO%d(PBM%c): PCI AFAR [%016lx]\n",
	       p->index, (is_pbm_a ? 'A' : 'B'), afar);
	printk("PSYCHO%d(PBM%c): PCI Secondary errors [",
	       p->index, (is_pbm_a ? 'A' : 'B'));
	reported = 0;
	if (afsr & PSYCHO_PCIAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & PSYCHO_PCIAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & PSYCHO_PCIAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan PBM's PCI bus for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
		psycho_check_iommu_error(p, afsr, afar, PCI_ERR);
		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
	}
	if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);

	/* For excessive retries, PSYCHO/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);

	return IRQ_HANDLED;
}
941 941
942 /* XXX What about PowerFail/PowerManagement??? -DaveM */ 942 /* XXX What about PowerFail/PowerManagement??? -DaveM */
943 #define PSYCHO_ECC_CTRL 0x0020 943 #define PSYCHO_ECC_CTRL 0x0020
944 #define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */ 944 #define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
945 #define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */ 945 #define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
946 #define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */ 946 #define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */
947 #define PSYCHO_UE_INO 0x2e 947 #define PSYCHO_UE_INO 0x2e
948 #define PSYCHO_CE_INO 0x2f 948 #define PSYCHO_CE_INO 0x2f
949 #define PSYCHO_PCIERR_A_INO 0x30 949 #define PSYCHO_PCIERR_A_INO 0x30
950 #define PSYCHO_PCIERR_B_INO 0x31 950 #define PSYCHO_PCIERR_B_INO 0x31
/* Boot-time registration of the PSYCHO error interrupt handlers.
 *
 * Wires up four interrupts: UE and CE ECC errors (one each per
 * controller, delivered with the pci_controller_info as dev_id)
 * and a PCI error interrupt per PBM (delivered with the respective
 * pci_pbm_info as dev_id).  Then enables ECC checking/interrupts in
 * the controller and error reporting in each PBM's control register.
 *
 * Any registration failure is fatal at this stage: we report via
 * the PROM and halt the machine.
 */
static void psycho_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
	unsigned long base = p->pbm_A.controller_regs;
	unsigned int irq, portid = pbm->portid;
	u64 tmp;

	/* Build IRQs and register handlers.  The full INO is the
	 * controller's UPA port id in the upper bits plus the
	 * fixed per-source INO.
	 */
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_UE_INO);
	if (request_irq(irq, psycho_ue_intr,
			SA_SHIRQ, "PSYCHO UE", p) < 0) {
		prom_printf("PSYCHO%d: Cannot register UE interrupt.\n",
			    p->index);
		prom_halt();
	}

	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_CE_INO);
	if (request_irq(irq, psycho_ce_intr,
			SA_SHIRQ, "PSYCHO CE", p) < 0) {
		prom_printf("PSYCHO%d: Cannot register CE interrupt.\n",
			    p->index);
		prom_halt();
	}

	pbm = &p->pbm_A;
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO);
	if (request_irq(irq, psycho_pcierr_intr,
			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_A) < 0) {
		prom_printf("PSYCHO%d(PBMA): Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	pbm = &p->pbm_B;
	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO);
	if (request_irq(irq, psycho_pcierr_intr,
			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_B) < 0) {
		prom_printf("PSYCHO%d(PBMB): Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* Enable UE and CE interrupts for controller. */
	psycho_write(base + PSYCHO_ECC_CTRL,
		     (PSYCHO_ECCCTRL_EE |
		      PSYCHO_ECCCTRL_UE |
		      PSYCHO_ECCCTRL_CE));

	/* Enable PCI Error interrupts and clear error
	 * bits for each PBM.  SBH (streaming byte hole) interrupt
	 * generation is left disabled; the condition is still
	 * logged via the error handler's CSR check.
	 */
	tmp = psycho_read(base + PSYCHO_PCIA_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	psycho_write(base + PSYCHO_PCIA_CTRL, tmp);

	tmp = psycho_read(base + PSYCHO_PCIB_CTRL);
	tmp |= (PSYCHO_PCICTRL_SERR |
		PSYCHO_PCICTRL_SBH_ERR |
		PSYCHO_PCICTRL_EEN);
	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
	psycho_write(base + PSYCHO_PCIB_CTRL, tmp);
}
1016 1016
1017 /* PSYCHO boot time probing and initialization. */ 1017 /* PSYCHO boot time probing and initialization. */
1018 static void psycho_resource_adjust(struct pci_dev *pdev, 1018 static void psycho_resource_adjust(struct pci_dev *pdev,
1019 struct resource *res, 1019 struct resource *res,
1020 struct resource *root) 1020 struct resource *root)
1021 { 1021 {
1022 res->start += root->start; 1022 res->start += root->start;
1023 res->end += root->start; 1023 res->end += root->start;
1024 } 1024 }
1025 1025
1026 static void psycho_base_address_update(struct pci_dev *pdev, int resource) 1026 static void psycho_base_address_update(struct pci_dev *pdev, int resource)
1027 { 1027 {
1028 struct pcidev_cookie *pcp = pdev->sysdata; 1028 struct pcidev_cookie *pcp = pdev->sysdata;
1029 struct pci_pbm_info *pbm = pcp->pbm; 1029 struct pci_pbm_info *pbm = pcp->pbm;
1030 struct resource *res, *root; 1030 struct resource *res, *root;
1031 u32 reg; 1031 u32 reg;
1032 int where, size, is_64bit; 1032 int where, size, is_64bit;
1033 1033
1034 res = &pdev->resource[resource]; 1034 res = &pdev->resource[resource];
1035 if (resource < 6) { 1035 if (resource < 6) {
1036 where = PCI_BASE_ADDRESS_0 + (resource * 4); 1036 where = PCI_BASE_ADDRESS_0 + (resource * 4);
1037 } else if (resource == PCI_ROM_RESOURCE) { 1037 } else if (resource == PCI_ROM_RESOURCE) {
1038 where = pdev->rom_base_reg; 1038 where = pdev->rom_base_reg;
1039 } else { 1039 } else {
1040 /* Somebody might have asked allocation of a non-standard resource */ 1040 /* Somebody might have asked allocation of a non-standard resource */
1041 return; 1041 return;
1042 } 1042 }
1043 1043
1044 is_64bit = 0; 1044 is_64bit = 0;
1045 if (res->flags & IORESOURCE_IO) 1045 if (res->flags & IORESOURCE_IO)
1046 root = &pbm->io_space; 1046 root = &pbm->io_space;
1047 else { 1047 else {
1048 root = &pbm->mem_space; 1048 root = &pbm->mem_space;
1049 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) 1049 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
1050 == PCI_BASE_ADDRESS_MEM_TYPE_64) 1050 == PCI_BASE_ADDRESS_MEM_TYPE_64)
1051 is_64bit = 1; 1051 is_64bit = 1;
1052 } 1052 }
1053 1053
1054 size = res->end - res->start; 1054 size = res->end - res->start;
1055 pci_read_config_dword(pdev, where, &reg); 1055 pci_read_config_dword(pdev, where, &reg);
1056 reg = ((reg & size) | 1056 reg = ((reg & size) |
1057 (((u32)(res->start - root->start)) & ~size)); 1057 (((u32)(res->start - root->start)) & ~size));
1058 if (resource == PCI_ROM_RESOURCE) { 1058 if (resource == PCI_ROM_RESOURCE) {
1059 reg |= PCI_ROM_ADDRESS_ENABLE; 1059 reg |= PCI_ROM_ADDRESS_ENABLE;
1060 res->flags |= IORESOURCE_ROM_ENABLE; 1060 res->flags |= IORESOURCE_ROM_ENABLE;
1061 } 1061 }
1062 pci_write_config_dword(pdev, where, reg); 1062 pci_write_config_dword(pdev, where, reg);
1063 1063
1064 /* This knows that the upper 32-bits of the address 1064 /* This knows that the upper 32-bits of the address
1065 * must be zero. Our PCI common layer enforces this. 1065 * must be zero. Our PCI common layer enforces this.
1066 */ 1066 */
1067 if (is_64bit) 1067 if (is_64bit)
1068 pci_write_config_dword(pdev, where + 4, 0); 1068 pci_write_config_dword(pdev, where + 4, 0);
1069 } 1069 }
1070 1070
1071 static void pbm_config_busmastering(struct pci_pbm_info *pbm) 1071 static void pbm_config_busmastering(struct pci_pbm_info *pbm)
1072 { 1072 {
1073 u8 *addr; 1073 u8 *addr;
1074 1074
1075 /* Set cache-line size to 64 bytes, this is actually 1075 /* Set cache-line size to 64 bytes, this is actually
1076 * a nop but I do it for completeness. 1076 * a nop but I do it for completeness.
1077 */ 1077 */
1078 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno, 1078 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1079 0, PCI_CACHE_LINE_SIZE); 1079 0, PCI_CACHE_LINE_SIZE);
1080 pci_config_write8(addr, 64 / sizeof(u32)); 1080 pci_config_write8(addr, 64 / sizeof(u32));
1081 1081
1082 /* Set PBM latency timer to 64 PCI clocks. */ 1082 /* Set PBM latency timer to 64 PCI clocks. */
1083 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno, 1083 addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1084 0, PCI_LATENCY_TIMER); 1084 0, PCI_LATENCY_TIMER);
1085 pci_config_write8(addr, 64); 1085 pci_config_write8(addr, 64);
1086 } 1086 }
1087 1087
1088 static void pbm_scan_bus(struct pci_controller_info *p, 1088 static void pbm_scan_bus(struct pci_controller_info *p,
1089 struct pci_pbm_info *pbm) 1089 struct pci_pbm_info *pbm)
1090 { 1090 {
1091 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); 1091 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
1092 1092
1093 if (!cookie) { 1093 if (!cookie) {
1094 prom_printf("PSYCHO: Critical allocation failure.\n"); 1094 prom_printf("PSYCHO: Critical allocation failure.\n");
1095 prom_halt(); 1095 prom_halt();
1096 } 1096 }
1097 1097
1098 /* All we care about is the PBM. */ 1098 /* All we care about is the PBM. */
1099 cookie->pbm = pbm; 1099 cookie->pbm = pbm;
1100 1100
1101 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, 1101 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
1102 p->pci_ops, 1102 p->pci_ops,
1103 pbm); 1103 pbm);
1104 pci_fixup_host_bridge_self(pbm->pci_bus); 1104 pci_fixup_host_bridge_self(pbm->pci_bus);
1105 pbm->pci_bus->self->sysdata = cookie; 1105 pbm->pci_bus->self->sysdata = cookie;
1106 1106
1107 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node->node); 1107 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
1108 pci_record_assignments(pbm, pbm->pci_bus); 1108 pci_record_assignments(pbm, pbm->pci_bus);
1109 pci_assign_unassigned(pbm, pbm->pci_bus); 1109 pci_assign_unassigned(pbm, pbm->pci_bus);
1110 pci_fixup_irq(pbm, pbm->pci_bus); 1110 pci_fixup_irq(pbm, pbm->pci_bus);
1111 pci_determine_66mhz_disposition(pbm, pbm->pci_bus); 1111 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
1112 pci_setup_busmastering(pbm, pbm->pci_bus); 1112 pci_setup_busmastering(pbm, pbm->pci_bus);
1113 } 1113 }
1114 1114
1115 static void psycho_scan_bus(struct pci_controller_info *p) 1115 static void psycho_scan_bus(struct pci_controller_info *p)
1116 { 1116 {
1117 pbm_config_busmastering(&p->pbm_B); 1117 pbm_config_busmastering(&p->pbm_B);
1118 p->pbm_B.is_66mhz_capable = 0; 1118 p->pbm_B.is_66mhz_capable = 0;
1119 pbm_config_busmastering(&p->pbm_A); 1119 pbm_config_busmastering(&p->pbm_A);
1120 p->pbm_A.is_66mhz_capable = 1; 1120 p->pbm_A.is_66mhz_capable = 1;
1121 pbm_scan_bus(p, &p->pbm_B); 1121 pbm_scan_bus(p, &p->pbm_B);
1122 pbm_scan_bus(p, &p->pbm_A); 1122 pbm_scan_bus(p, &p->pbm_A);
1123 1123
1124 /* After the PCI bus scan is complete, we can register 1124 /* After the PCI bus scan is complete, we can register
1125 * the error interrupt handlers. 1125 * the error interrupt handlers.
1126 */ 1126 */
1127 psycho_register_error_handlers(p); 1127 psycho_register_error_handlers(p);
1128 } 1128 }
1129 1129
1130 static void psycho_iommu_init(struct pci_controller_info *p) 1130 static void psycho_iommu_init(struct pci_controller_info *p)
1131 { 1131 {
1132 struct pci_iommu *iommu = p->pbm_A.iommu; 1132 struct pci_iommu *iommu = p->pbm_A.iommu;
1133 unsigned long i; 1133 unsigned long i;
1134 u64 control; 1134 u64 control;
1135 1135
1136 /* Register addresses. */ 1136 /* Register addresses. */
1137 iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL; 1137 iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
1138 iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE; 1138 iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
1139 iommu->iommu_flush = p->pbm_A.controller_regs + PSYCHO_IOMMU_FLUSH; 1139 iommu->iommu_flush = p->pbm_A.controller_regs + PSYCHO_IOMMU_FLUSH;
1140 /* PSYCHO's IOMMU lacks ctx flushing. */ 1140 /* PSYCHO's IOMMU lacks ctx flushing. */
1141 iommu->iommu_ctxflush = 0; 1141 iommu->iommu_ctxflush = 0;
1142 1142
1143 /* We use the main control register of PSYCHO as the write 1143 /* We use the main control register of PSYCHO as the write
1144 * completion register. 1144 * completion register.
1145 */ 1145 */
1146 iommu->write_complete_reg = p->pbm_A.controller_regs + PSYCHO_CONTROL; 1146 iommu->write_complete_reg = p->pbm_A.controller_regs + PSYCHO_CONTROL;
1147 1147
1148 /* 1148 /*
1149 * Invalidate TLB Entries. 1149 * Invalidate TLB Entries.
1150 */ 1150 */
1151 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL); 1151 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
1152 control |= PSYCHO_IOMMU_CTRL_DENAB; 1152 control |= PSYCHO_IOMMU_CTRL_DENAB;
1153 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control); 1153 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
1154 for(i = 0; i < 16; i++) { 1154 for(i = 0; i < 16; i++) {
1155 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TAG + (i * 8UL), 0); 1155 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
1156 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_DATA + (i * 8UL), 0); 1156 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
1157 } 1157 }
1158 1158
1159 /* Leave diag mode enabled for full-flushing done 1159 /* Leave diag mode enabled for full-flushing done
1160 * in pci_iommu.c 1160 * in pci_iommu.c
1161 */ 1161 */
1162 pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff); 1162 pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
1163 1163
1164 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, 1164 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
1165 __pa(iommu->page_table)); 1165 __pa(iommu->page_table));
1166 1166
1167 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL); 1167 control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
1168 control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ); 1168 control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
1169 control |= (PSYCHO_IOMMU_TSBSZ_128K | PSYCHO_IOMMU_CTRL_ENAB); 1169 control |= (PSYCHO_IOMMU_TSBSZ_128K | PSYCHO_IOMMU_CTRL_ENAB);
1170 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control); 1170 psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
1171 1171
1172 /* If necessary, hook us up for starfire IRQ translations. */ 1172 /* If necessary, hook us up for starfire IRQ translations. */
1173 if (this_is_starfire) 1173 if (this_is_starfire)
1174 p->starfire_cookie = starfire_hookup(p->pbm_A.portid); 1174 p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
1175 else 1175 else
1176 p->starfire_cookie = NULL; 1176 p->starfire_cookie = NULL;
1177 } 1177 }
1178 1178
1179 #define PSYCHO_IRQ_RETRY 0x1a00UL 1179 #define PSYCHO_IRQ_RETRY 0x1a00UL
1180 #define PSYCHO_PCIA_DIAG 0x2020UL 1180 #define PSYCHO_PCIA_DIAG 0x2020UL
1181 #define PSYCHO_PCIB_DIAG 0x4020UL 1181 #define PSYCHO_PCIB_DIAG 0x4020UL
1182 #define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */ 1182 #define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */
1183 #define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */ 1183 #define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */
1184 #define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */ 1184 #define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */
1185 #define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */ 1185 #define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */
1186 #define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */ 1186 #define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */
1187 #define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */ 1187 #define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */
1188 #define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */ 1188 #define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */
1189 #define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */ 1189 #define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */
1190 1190
1191 static void psycho_controller_hwinit(struct pci_controller_info *p) 1191 static void psycho_controller_hwinit(struct pci_controller_info *p)
1192 { 1192 {
1193 u64 tmp; 1193 u64 tmp;
1194 1194
1195 psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 5); 1195 psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 5);
1196 1196
1197 /* Enable arbiter for all PCI slots. */ 1197 /* Enable arbiter for all PCI slots. */
1198 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL); 1198 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
1199 tmp |= PSYCHO_PCICTRL_AEN; 1199 tmp |= PSYCHO_PCICTRL_AEN;
1200 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL, tmp); 1200 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL, tmp);
1201 1201
1202 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL); 1202 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL);
1203 tmp |= PSYCHO_PCICTRL_AEN; 1203 tmp |= PSYCHO_PCICTRL_AEN;
1204 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL, tmp); 1204 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL, tmp);
1205 1205
1206 /* Disable DMA write / PIO read synchronization on 1206 /* Disable DMA write / PIO read synchronization on
1207 * both PCI bus segments. 1207 * both PCI bus segments.
1208 * [ U2P Erratum 1243770, STP2223BGA data sheet ] 1208 * [ U2P Erratum 1243770, STP2223BGA data sheet ]
1209 */ 1209 */
1210 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG); 1210 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG);
1211 tmp |= PSYCHO_PCIDIAG_DDWSYNC; 1211 tmp |= PSYCHO_PCIDIAG_DDWSYNC;
1212 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG, tmp); 1212 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG, tmp);
1213 1213
1214 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG); 1214 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG);
1215 tmp |= PSYCHO_PCIDIAG_DDWSYNC; 1215 tmp |= PSYCHO_PCIDIAG_DDWSYNC;
1216 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp); 1216 psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
1217 } 1217 }
1218 1218
1219 static void pbm_register_toplevel_resources(struct pci_controller_info *p, 1219 static void pbm_register_toplevel_resources(struct pci_controller_info *p,
1220 struct pci_pbm_info *pbm) 1220 struct pci_pbm_info *pbm)
1221 { 1221 {
1222 char *name = pbm->name; 1222 char *name = pbm->name;
1223 1223
1224 sprintf(name, "PSYCHO%d PBM%c", 1224 sprintf(name, "PSYCHO%d PBM%c",
1225 p->index, 1225 p->index,
1226 (pbm == &p->pbm_A ? 'A' : 'B')); 1226 (pbm == &p->pbm_A ? 'A' : 'B'));
1227 pbm->io_space.name = pbm->mem_space.name = name; 1227 pbm->io_space.name = pbm->mem_space.name = name;
1228 1228
1229 request_resource(&ioport_resource, &pbm->io_space); 1229 request_resource(&ioport_resource, &pbm->io_space);
1230 request_resource(&iomem_resource, &pbm->mem_space); 1230 request_resource(&iomem_resource, &pbm->mem_space);
1231 pci_register_legacy_regions(&pbm->io_space, 1231 pci_register_legacy_regions(&pbm->io_space,
1232 &pbm->mem_space); 1232 &pbm->mem_space);
1233 } 1233 }
1234 1234
1235 static void psycho_pbm_strbuf_init(struct pci_controller_info *p, 1235 static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
1236 struct pci_pbm_info *pbm, 1236 struct pci_pbm_info *pbm,
1237 int is_pbm_a) 1237 int is_pbm_a)
1238 { 1238 {
1239 unsigned long base = pbm->controller_regs; 1239 unsigned long base = pbm->controller_regs;
1240 u64 control; 1240 u64 control;
1241 1241
1242 if (is_pbm_a) { 1242 if (is_pbm_a) {
1243 pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A; 1243 pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A;
1244 pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A; 1244 pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A;
1245 pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_A; 1245 pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_A;
1246 } else { 1246 } else {
1247 pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_B; 1247 pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_B;
1248 pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B; 1248 pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B;
1249 pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B; 1249 pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B;
1250 } 1250 }
1251 /* PSYCHO's streaming buffer lacks ctx flushing. */ 1251 /* PSYCHO's streaming buffer lacks ctx flushing. */
1252 pbm->stc.strbuf_ctxflush = 0; 1252 pbm->stc.strbuf_ctxflush = 0;
1253 pbm->stc.strbuf_ctxmatch_base = 0; 1253 pbm->stc.strbuf_ctxmatch_base = 0;
1254 1254
1255 pbm->stc.strbuf_flushflag = (volatile unsigned long *) 1255 pbm->stc.strbuf_flushflag = (volatile unsigned long *)
1256 ((((unsigned long)&pbm->stc.__flushflag_buf[0]) 1256 ((((unsigned long)&pbm->stc.__flushflag_buf[0])
1257 + 63UL) 1257 + 63UL)
1258 & ~63UL); 1258 & ~63UL);
1259 pbm->stc.strbuf_flushflag_pa = (unsigned long) 1259 pbm->stc.strbuf_flushflag_pa = (unsigned long)
1260 __pa(pbm->stc.strbuf_flushflag); 1260 __pa(pbm->stc.strbuf_flushflag);
1261 1261
1262 /* Enable the streaming buffer. We have to be careful 1262 /* Enable the streaming buffer. We have to be careful
1263 * just in case OBP left it with LRU locking enabled. 1263 * just in case OBP left it with LRU locking enabled.
1264 * 1264 *
1265 * It is possible to control if PBM will be rerun on 1265 * It is possible to control if PBM will be rerun on
1266 * line misses. Currently I just retain whatever setting 1266 * line misses. Currently I just retain whatever setting
1267 * OBP left us with. All checks so far show it having 1267 * OBP left us with. All checks so far show it having
1268 * a value of zero. 1268 * a value of zero.
1269 */ 1269 */
1270 #undef PSYCHO_STRBUF_RERUN_ENABLE 1270 #undef PSYCHO_STRBUF_RERUN_ENABLE
1271 #undef PSYCHO_STRBUF_RERUN_DISABLE 1271 #undef PSYCHO_STRBUF_RERUN_DISABLE
1272 control = psycho_read(pbm->stc.strbuf_control); 1272 control = psycho_read(pbm->stc.strbuf_control);
1273 control |= PSYCHO_STRBUF_CTRL_ENAB; 1273 control |= PSYCHO_STRBUF_CTRL_ENAB;
1274 control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR); 1274 control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
1275 #ifdef PSYCHO_STRBUF_RERUN_ENABLE 1275 #ifdef PSYCHO_STRBUF_RERUN_ENABLE
1276 control &= ~(PSYCHO_STRBUF_CTRL_RRDIS); 1276 control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
1277 #else 1277 #else
1278 #ifdef PSYCHO_STRBUF_RERUN_DISABLE 1278 #ifdef PSYCHO_STRBUF_RERUN_DISABLE
1279 control |= PSYCHO_STRBUF_CTRL_RRDIS; 1279 control |= PSYCHO_STRBUF_CTRL_RRDIS;
1280 #endif 1280 #endif
1281 #endif 1281 #endif
1282 psycho_write(pbm->stc.strbuf_control, control); 1282 psycho_write(pbm->stc.strbuf_control, control);
1283 1283
1284 pbm->stc.strbuf_enabled = 1; 1284 pbm->stc.strbuf_enabled = 1;
1285 } 1285 }
1286 1286
1287 #define PSYCHO_IOSPACE_A 0x002000000UL 1287 #define PSYCHO_IOSPACE_A 0x002000000UL
1288 #define PSYCHO_IOSPACE_B 0x002010000UL 1288 #define PSYCHO_IOSPACE_B 0x002010000UL
1289 #define PSYCHO_IOSPACE_SIZE 0x00000ffffUL 1289 #define PSYCHO_IOSPACE_SIZE 0x00000ffffUL
1290 #define PSYCHO_MEMSPACE_A 0x100000000UL 1290 #define PSYCHO_MEMSPACE_A 0x100000000UL
1291 #define PSYCHO_MEMSPACE_B 0x180000000UL 1291 #define PSYCHO_MEMSPACE_B 0x180000000UL
1292 #define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL 1292 #define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL
1293 1293
1294 static void psycho_pbm_init(struct pci_controller_info *p, 1294 static void psycho_pbm_init(struct pci_controller_info *p,
1295 struct device_node *dp, int is_pbm_a) 1295 struct device_node *dp, int is_pbm_a)
1296 { 1296 {
1297 unsigned int *busrange; 1297 unsigned int *busrange;
1298 struct property *prop; 1298 struct property *prop;
1299 struct pci_pbm_info *pbm; 1299 struct pci_pbm_info *pbm;
1300 int len; 1300 int len;
1301 1301
1302 if (is_pbm_a) { 1302 if (is_pbm_a) {
1303 pbm = &p->pbm_A; 1303 pbm = &p->pbm_A;
1304 pbm->pci_first_slot = 1; 1304 pbm->pci_first_slot = 1;
1305 pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A; 1305 pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
1306 pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A; 1306 pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
1307 } else { 1307 } else {
1308 pbm = &p->pbm_B; 1308 pbm = &p->pbm_B;
1309 pbm->pci_first_slot = 2; 1309 pbm->pci_first_slot = 2;
1310 pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B; 1310 pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
1311 pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B; 1311 pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
1312 } 1312 }
1313 1313
1314 pbm->chip_type = PBM_CHIP_TYPE_PSYCHO; 1314 pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
1315 pbm->chip_version = 0; 1315 pbm->chip_version = 0;
1316 prop = of_find_property(dp, "version#", NULL); 1316 prop = of_find_property(dp, "version#", NULL);
1317 if (prop) 1317 if (prop)
1318 pbm->chip_version = *(int *) prop->value; 1318 pbm->chip_version = *(int *) prop->value;
1319 pbm->chip_revision = 0; 1319 pbm->chip_revision = 0;
1320 prop = of_find_property(dp, "module-revision#", NULL); 1320 prop = of_find_property(dp, "module-revision#", NULL);
1321 if (prop) 1321 if (prop)
1322 pbm->chip_revision = *(int *) prop->value; 1322 pbm->chip_revision = *(int *) prop->value;
1323 1323
1324 pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE; 1324 pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
1325 pbm->io_space.flags = IORESOURCE_IO; 1325 pbm->io_space.flags = IORESOURCE_IO;
1326 pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE; 1326 pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
1327 pbm->mem_space.flags = IORESOURCE_MEM; 1327 pbm->mem_space.flags = IORESOURCE_MEM;
1328 pbm_register_toplevel_resources(p, pbm); 1328 pbm_register_toplevel_resources(p, pbm);
1329 1329
1330 pbm->parent = p; 1330 pbm->parent = p;
1331 pbm->prom_node = dp; 1331 pbm->prom_node = dp;
1332 pbm->name = dp->full_name; 1332 pbm->name = dp->full_name;
1333 1333
1334 printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n", 1334 printk("%s: PSYCHO PCI Bus Module ver[%x:%x]\n",
1335 pbm->name, 1335 pbm->name,
1336 pbm->chip_version, pbm->chip_revision); 1336 pbm->chip_version, pbm->chip_revision);
1337 1337
1338 prop = of_find_property(dp, "ranges", &len); 1338 prop = of_find_property(dp, "ranges", &len);
1339 if (prop) { 1339 if (prop) {
1340 pbm->pbm_ranges = prop->value; 1340 pbm->pbm_ranges = prop->value;
1341 pbm->num_pbm_ranges = 1341 pbm->num_pbm_ranges =
1342 (len / sizeof(struct linux_prom_pci_ranges)); 1342 (len / sizeof(struct linux_prom_pci_ranges));
1343 } else { 1343 } else {
1344 pbm->num_pbm_ranges = 0; 1344 pbm->num_pbm_ranges = 0;
1345 } 1345 }
1346 1346
1347 prop = of_find_property(dp, "interrupt-map", &len); 1347 prop = of_find_property(dp, "interrupt-map", &len);
1348 if (prop) { 1348 if (prop) {
1349 pbm->pbm_intmap = prop->value; 1349 pbm->pbm_intmap = prop->value;
1350 pbm->num_pbm_intmap = 1350 pbm->num_pbm_intmap =
1351 (len / sizeof(struct linux_prom_pci_intmap)); 1351 (len / sizeof(struct linux_prom_pci_intmap));
1352 1352
1353 prop = of_find_property(dp, "interrupt-map-mask", NULL); 1353 prop = of_find_property(dp, "interrupt-map-mask", NULL);
1354 pbm->pbm_intmask = prop->value; 1354 pbm->pbm_intmask = prop->value;
1355 } else { 1355 } else {
1356 pbm->num_pbm_intmap = 0; 1356 pbm->num_pbm_intmap = 0;
1357 } 1357 }
1358 1358
1359 prop = of_find_property(dp, "bus-range", NULL); 1359 prop = of_find_property(dp, "bus-range", NULL);
1360 busrange = prop->value; 1360 busrange = prop->value;
1361 pbm->pci_first_busno = busrange[0]; 1361 pbm->pci_first_busno = busrange[0];
1362 pbm->pci_last_busno = busrange[1]; 1362 pbm->pci_last_busno = busrange[1];
1363 1363
1364 psycho_pbm_strbuf_init(p, pbm, is_pbm_a); 1364 psycho_pbm_strbuf_init(p, pbm, is_pbm_a);
1365 } 1365 }
1366 1366
1367 #define PSYCHO_CONFIGSPACE 0x001000000UL 1367 #define PSYCHO_CONFIGSPACE 0x001000000UL
1368 1368
1369 void psycho_init(struct device_node *dp, char *model_name) 1369 void psycho_init(struct device_node *dp, char *model_name)
1370 { 1370 {
1371 struct linux_prom64_registers *pr_regs; 1371 struct linux_prom64_registers *pr_regs;
1372 struct pci_controller_info *p; 1372 struct pci_controller_info *p;
1373 struct pci_iommu *iommu; 1373 struct pci_iommu *iommu;
1374 struct property *prop; 1374 struct property *prop;
1375 u32 upa_portid; 1375 u32 upa_portid;
1376 int is_pbm_a; 1376 int is_pbm_a;
1377 1377
1378 upa_portid = 0xff; 1378 upa_portid = 0xff;
1379 prop = of_find_property(dp, "upa-portid", NULL); 1379 prop = of_find_property(dp, "upa-portid", NULL);
1380 if (prop) 1380 if (prop)
1381 upa_portid = *(u32 *) prop->value; 1381 upa_portid = *(u32 *) prop->value;
1382 1382
1383 for(p = pci_controller_root; p; p = p->next) { 1383 for(p = pci_controller_root; p; p = p->next) {
1384 if (p->pbm_A.portid == upa_portid) { 1384 if (p->pbm_A.portid == upa_portid) {
1385 is_pbm_a = (p->pbm_A.prom_node == NULL); 1385 is_pbm_a = (p->pbm_A.prom_node == NULL);
1386 psycho_pbm_init(p, dp, is_pbm_a); 1386 psycho_pbm_init(p, dp, is_pbm_a);
1387 return; 1387 return;
1388 } 1388 }
1389 } 1389 }
1390 1390
1391 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 1391 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1392 if (!p) { 1392 if (!p) {
1393 prom_printf("PSYCHO: Fatal memory allocation error.\n"); 1393 prom_printf("PSYCHO: Fatal memory allocation error.\n");
1394 prom_halt(); 1394 prom_halt();
1395 } 1395 }
1396 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1396 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1397 if (!iommu) { 1397 if (!iommu) {
1398 prom_printf("PSYCHO: Fatal memory allocation error.\n"); 1398 prom_printf("PSYCHO: Fatal memory allocation error.\n");
1399 prom_halt(); 1399 prom_halt();
1400 } 1400 }
1401 p->pbm_A.iommu = p->pbm_B.iommu = iommu; 1401 p->pbm_A.iommu = p->pbm_B.iommu = iommu;
1402 1402
1403 p->next = pci_controller_root; 1403 p->next = pci_controller_root;
1404 pci_controller_root = p; 1404 pci_controller_root = p;
1405 1405
1406 p->pbm_A.portid = upa_portid; 1406 p->pbm_A.portid = upa_portid;
1407 p->pbm_B.portid = upa_portid; 1407 p->pbm_B.portid = upa_portid;
1408 p->index = pci_num_controllers++; 1408 p->index = pci_num_controllers++;
1409 p->pbms_same_domain = 0; 1409 p->pbms_same_domain = 0;
1410 p->scan_bus = psycho_scan_bus; 1410 p->scan_bus = psycho_scan_bus;
1411 p->irq_build = psycho_irq_build; 1411 p->irq_build = psycho_irq_build;
1412 p->base_address_update = psycho_base_address_update; 1412 p->base_address_update = psycho_base_address_update;
1413 p->resource_adjust = psycho_resource_adjust; 1413 p->resource_adjust = psycho_resource_adjust;
1414 p->pci_ops = &psycho_ops; 1414 p->pci_ops = &psycho_ops;
1415 1415
1416 prop = of_find_property(dp, "reg", NULL); 1416 prop = of_find_property(dp, "reg", NULL);
1417 pr_regs = prop->value; 1417 pr_regs = prop->value;
1418 1418
1419 p->pbm_A.controller_regs = pr_regs[2].phys_addr; 1419 p->pbm_A.controller_regs = pr_regs[2].phys_addr;
1420 p->pbm_B.controller_regs = pr_regs[2].phys_addr; 1420 p->pbm_B.controller_regs = pr_regs[2].phys_addr;
1421 1421
1422 p->pbm_A.config_space = p->pbm_B.config_space = 1422 p->pbm_A.config_space = p->pbm_B.config_space =
1423 (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE); 1423 (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
1424 1424
1425 /* 1425 /*
1426 * Psycho's PCI MEM space is mapped to a 2GB aligned area, so 1426 * Psycho's PCI MEM space is mapped to a 2GB aligned area, so
1427 * we need to adjust our MEM space mask. 1427 * we need to adjust our MEM space mask.
1428 */ 1428 */
1429 pci_memspace_mask = 0x7fffffffUL; 1429 pci_memspace_mask = 0x7fffffffUL;
1430 1430
1431 psycho_controller_hwinit(p); 1431 psycho_controller_hwinit(p);
1432 1432
1433 psycho_iommu_init(p); 1433 psycho_iommu_init(p);
1434 1434
1435 is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000); 1435 is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
1436 psycho_pbm_init(p, dp, is_pbm_a); 1436 psycho_pbm_init(p, dp, is_pbm_a);
1437 } 1437 }
1438 1438
arch/sparc64/kernel/pci_sabre.c
1 /* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $ 1 /* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
2 * pci_sabre.c: Sabre specific PCI controller support. 2 * pci_sabre.c: Sabre specific PCI controller support.
3 * 3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu) 4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) 5 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) 6 * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
7 */ 7 */
8 8
9 #include <linux/kernel.h> 9 #include <linux/kernel.h>
10 #include <linux/types.h> 10 #include <linux/types.h>
11 #include <linux/pci.h> 11 #include <linux/pci.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 15
16 #include <asm/apb.h> 16 #include <asm/apb.h>
17 #include <asm/pbm.h> 17 #include <asm/pbm.h>
18 #include <asm/iommu.h> 18 #include <asm/iommu.h>
19 #include <asm/irq.h> 19 #include <asm/irq.h>
20 #include <asm/smp.h> 20 #include <asm/smp.h>
21 #include <asm/oplib.h> 21 #include <asm/oplib.h>
22 #include <asm/prom.h> 22 #include <asm/prom.h>
23 23
24 #include "pci_impl.h" 24 #include "pci_impl.h"
25 #include "iommu_common.h" 25 #include "iommu_common.h"
26 26
27 /* All SABRE registers are 64-bits. The following accessor 27 /* All SABRE registers are 64-bits. The following accessor
28 * routines are how they are accessed. The REG parameter 28 * routines are how they are accessed. The REG parameter
29 * is a physical address. 29 * is a physical address.
30 */ 30 */
31 #define sabre_read(__reg) \ 31 #define sabre_read(__reg) \
32 ({ u64 __ret; \ 32 ({ u64 __ret; \
33 __asm__ __volatile__("ldxa [%1] %2, %0" \ 33 __asm__ __volatile__("ldxa [%1] %2, %0" \
34 : "=r" (__ret) \ 34 : "=r" (__ret) \
35 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ 35 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
36 : "memory"); \ 36 : "memory"); \
37 __ret; \ 37 __ret; \
38 }) 38 })
39 #define sabre_write(__reg, __val) \ 39 #define sabre_write(__reg, __val) \
40 __asm__ __volatile__("stxa %0, [%1] %2" \ 40 __asm__ __volatile__("stxa %0, [%1] %2" \
41 : /* no outputs */ \ 41 : /* no outputs */ \
42 : "r" (__val), "r" (__reg), \ 42 : "r" (__val), "r" (__reg), \
43 "i" (ASI_PHYS_BYPASS_EC_E) \ 43 "i" (ASI_PHYS_BYPASS_EC_E) \
44 : "memory") 44 : "memory")
45 45
46 /* SABRE PCI controller register offsets and definitions. */ 46 /* SABRE PCI controller register offsets and definitions. */
47 #define SABRE_UE_AFSR 0x0030UL 47 #define SABRE_UE_AFSR 0x0030UL
48 #define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ 48 #define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
49 #define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ 49 #define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
50 #define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ 50 #define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
51 #define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ 51 #define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
52 #define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */ 52 #define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */
53 #define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */ 53 #define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */
54 #define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ 54 #define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
55 #define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */ 55 #define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */
56 #define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */ 56 #define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */
57 #define SABRE_UECE_AFAR 0x0038UL 57 #define SABRE_UECE_AFAR 0x0038UL
58 #define SABRE_CE_AFSR 0x0040UL 58 #define SABRE_CE_AFSR 0x0040UL
59 #define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ 59 #define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
60 #define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ 60 #define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
61 #define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ 61 #define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
62 #define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ 62 #define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
63 #define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */ 63 #define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */
64 #define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ 64 #define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
65 #define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */ 65 #define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */
66 #define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */ 66 #define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */
67 #define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */ 67 #define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */
68 #define SABRE_IOMMU_CONTROL 0x0200UL 68 #define SABRE_IOMMU_CONTROL 0x0200UL
69 #define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */ 69 #define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */
70 #define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */ 70 #define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */
71 #define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */ 71 #define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */
72 #define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */ 72 #define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */
73 #define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ 73 #define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
74 #define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000 74 #define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000
75 #define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000 75 #define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000
76 #define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000 76 #define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000
77 #define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000 77 #define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000
78 #define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000 78 #define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000
79 #define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000 79 #define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000
80 #define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000 80 #define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000
81 #define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000 81 #define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
82 #define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */ 82 #define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */
83 #define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ 83 #define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
84 #define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ 84 #define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
85 #define SABRE_IOMMU_TSBBASE 0x0208UL 85 #define SABRE_IOMMU_TSBBASE 0x0208UL
86 #define SABRE_IOMMU_FLUSH 0x0210UL 86 #define SABRE_IOMMU_FLUSH 0x0210UL
87 #define SABRE_IMAP_A_SLOT0 0x0c00UL 87 #define SABRE_IMAP_A_SLOT0 0x0c00UL
88 #define SABRE_IMAP_B_SLOT0 0x0c20UL 88 #define SABRE_IMAP_B_SLOT0 0x0c20UL
89 #define SABRE_IMAP_SCSI 0x1000UL 89 #define SABRE_IMAP_SCSI 0x1000UL
90 #define SABRE_IMAP_ETH 0x1008UL 90 #define SABRE_IMAP_ETH 0x1008UL
91 #define SABRE_IMAP_BPP 0x1010UL 91 #define SABRE_IMAP_BPP 0x1010UL
92 #define SABRE_IMAP_AU_REC 0x1018UL 92 #define SABRE_IMAP_AU_REC 0x1018UL
93 #define SABRE_IMAP_AU_PLAY 0x1020UL 93 #define SABRE_IMAP_AU_PLAY 0x1020UL
94 #define SABRE_IMAP_PFAIL 0x1028UL 94 #define SABRE_IMAP_PFAIL 0x1028UL
95 #define SABRE_IMAP_KMS 0x1030UL 95 #define SABRE_IMAP_KMS 0x1030UL
96 #define SABRE_IMAP_FLPY 0x1038UL 96 #define SABRE_IMAP_FLPY 0x1038UL
97 #define SABRE_IMAP_SHW 0x1040UL 97 #define SABRE_IMAP_SHW 0x1040UL
98 #define SABRE_IMAP_KBD 0x1048UL 98 #define SABRE_IMAP_KBD 0x1048UL
99 #define SABRE_IMAP_MS 0x1050UL 99 #define SABRE_IMAP_MS 0x1050UL
100 #define SABRE_IMAP_SER 0x1058UL 100 #define SABRE_IMAP_SER 0x1058UL
101 #define SABRE_IMAP_UE 0x1070UL 101 #define SABRE_IMAP_UE 0x1070UL
102 #define SABRE_IMAP_CE 0x1078UL 102 #define SABRE_IMAP_CE 0x1078UL
103 #define SABRE_IMAP_PCIERR 0x1080UL 103 #define SABRE_IMAP_PCIERR 0x1080UL
104 #define SABRE_IMAP_GFX 0x1098UL 104 #define SABRE_IMAP_GFX 0x1098UL
105 #define SABRE_IMAP_EUPA 0x10a0UL 105 #define SABRE_IMAP_EUPA 0x10a0UL
106 #define SABRE_ICLR_A_SLOT0 0x1400UL 106 #define SABRE_ICLR_A_SLOT0 0x1400UL
107 #define SABRE_ICLR_B_SLOT0 0x1480UL 107 #define SABRE_ICLR_B_SLOT0 0x1480UL
108 #define SABRE_ICLR_SCSI 0x1800UL 108 #define SABRE_ICLR_SCSI 0x1800UL
109 #define SABRE_ICLR_ETH 0x1808UL 109 #define SABRE_ICLR_ETH 0x1808UL
110 #define SABRE_ICLR_BPP 0x1810UL 110 #define SABRE_ICLR_BPP 0x1810UL
111 #define SABRE_ICLR_AU_REC 0x1818UL 111 #define SABRE_ICLR_AU_REC 0x1818UL
112 #define SABRE_ICLR_AU_PLAY 0x1820UL 112 #define SABRE_ICLR_AU_PLAY 0x1820UL
113 #define SABRE_ICLR_PFAIL 0x1828UL 113 #define SABRE_ICLR_PFAIL 0x1828UL
114 #define SABRE_ICLR_KMS 0x1830UL 114 #define SABRE_ICLR_KMS 0x1830UL
115 #define SABRE_ICLR_FLPY 0x1838UL 115 #define SABRE_ICLR_FLPY 0x1838UL
116 #define SABRE_ICLR_SHW 0x1840UL 116 #define SABRE_ICLR_SHW 0x1840UL
117 #define SABRE_ICLR_KBD 0x1848UL 117 #define SABRE_ICLR_KBD 0x1848UL
118 #define SABRE_ICLR_MS 0x1850UL 118 #define SABRE_ICLR_MS 0x1850UL
119 #define SABRE_ICLR_SER 0x1858UL 119 #define SABRE_ICLR_SER 0x1858UL
120 #define SABRE_ICLR_UE 0x1870UL 120 #define SABRE_ICLR_UE 0x1870UL
121 #define SABRE_ICLR_CE 0x1878UL 121 #define SABRE_ICLR_CE 0x1878UL
122 #define SABRE_ICLR_PCIERR 0x1880UL 122 #define SABRE_ICLR_PCIERR 0x1880UL
123 #define SABRE_WRSYNC 0x1c20UL 123 #define SABRE_WRSYNC 0x1c20UL
124 #define SABRE_PCICTRL 0x2000UL 124 #define SABRE_PCICTRL 0x2000UL
125 #define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */ 125 #define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */
126 #define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */ 126 #define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */
127 #define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */ 127 #define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */
128 #define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */ 128 #define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */
129 #define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */ 129 #define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */
130 #define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */ 130 #define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */
131 #define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */ 131 #define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */
132 #define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */ 132 #define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */
133 #define SABRE_PIOAFSR 0x2010UL 133 #define SABRE_PIOAFSR 0x2010UL
134 #define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */ 134 #define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */
135 #define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */ 135 #define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */
136 #define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */ 136 #define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
137 #define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */ 137 #define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
138 #define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */ 138 #define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */
139 #define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */ 139 #define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */
140 #define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */ 140 #define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
141 #define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */ 141 #define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
142 #define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */ 142 #define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */
143 #define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */ 143 #define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */
144 #define SABRE_PIOAFAR 0x2018UL 144 #define SABRE_PIOAFAR 0x2018UL
145 #define SABRE_PCIDIAG 0x2020UL 145 #define SABRE_PCIDIAG 0x2020UL
146 #define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */ 146 #define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */
147 #define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */ 147 #define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */
148 #define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */ 148 #define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */
149 #define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */ 149 #define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */
150 #define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */ 150 #define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */
151 #define SABRE_PCITASR 0x2028UL 151 #define SABRE_PCITASR 0x2028UL
152 #define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */ 152 #define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */
153 #define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */ 153 #define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */
154 #define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */ 154 #define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */
155 #define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */ 155 #define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */
156 #define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */ 156 #define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */
157 #define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */ 157 #define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */
158 #define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */ 158 #define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */
159 #define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */ 159 #define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */
160 #define SABRE_PIOBUF_DIAG 0x5000UL 160 #define SABRE_PIOBUF_DIAG 0x5000UL
161 #define SABRE_DMABUF_DIAGLO 0x5100UL 161 #define SABRE_DMABUF_DIAGLO 0x5100UL
162 #define SABRE_DMABUF_DIAGHI 0x51c0UL 162 #define SABRE_DMABUF_DIAGHI 0x51c0UL
163 #define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */ 163 #define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */
164 #define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */ 164 #define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */
165 #define SABRE_IOMMU_VADIAG 0xa400UL 165 #define SABRE_IOMMU_VADIAG 0xa400UL
166 #define SABRE_IOMMU_TCDIAG 0xa408UL 166 #define SABRE_IOMMU_TCDIAG 0xa408UL
167 #define SABRE_IOMMU_TAG 0xa580UL 167 #define SABRE_IOMMU_TAG 0xa580UL
168 #define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */ 168 #define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */
169 #define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */ 169 #define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */
170 #define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */ 170 #define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */
171 #define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */ 171 #define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */
172 #define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */ 172 #define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */
173 #define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */ 173 #define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */
174 #define SABRE_IOMMU_DATA 0xa600UL 174 #define SABRE_IOMMU_DATA 0xa600UL
175 #define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */ 175 #define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */
176 #define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */ 176 #define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */
177 #define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */ 177 #define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */
178 #define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */ 178 #define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */
179 #define SABRE_PCI_IRQSTATE 0xa800UL 179 #define SABRE_PCI_IRQSTATE 0xa800UL
180 #define SABRE_OBIO_IRQSTATE 0xa808UL 180 #define SABRE_OBIO_IRQSTATE 0xa808UL
181 #define SABRE_FFBCFG 0xf000UL 181 #define SABRE_FFBCFG 0xf000UL
182 #define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */ 182 #define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */
183 #define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */ 183 #define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */
184 #define SABRE_MCCTRL0 0xf010UL 184 #define SABRE_MCCTRL0 0xf010UL
185 #define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */ 185 #define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */
186 #define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */ 186 #define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */
187 #define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */ 187 #define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */
188 #define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */ 188 #define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */
189 #define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */ 189 #define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */
190 #define SABRE_MCCTRL1 0xf018UL 190 #define SABRE_MCCTRL1 0xf018UL
191 #define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */ 191 #define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */
192 #define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */ 192 #define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */
193 #define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */ 193 #define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */
194 #define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */ 194 #define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */
195 #define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */ 195 #define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */
196 #define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */ 196 #define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */
197 #define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */ 197 #define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */
198 #define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */ 198 #define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */
199 #define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */ 199 #define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */
200 #define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */ 200 #define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */
201 #define SABRE_RESETCTRL 0xf020UL 201 #define SABRE_RESETCTRL 0xf020UL
202 202
203 #define SABRE_CONFIGSPACE 0x001000000UL 203 #define SABRE_CONFIGSPACE 0x001000000UL
204 #define SABRE_IOSPACE 0x002000000UL 204 #define SABRE_IOSPACE 0x002000000UL
205 #define SABRE_IOSPACE_SIZE 0x000ffffffUL 205 #define SABRE_IOSPACE_SIZE 0x000ffffffUL
206 #define SABRE_MEMSPACE 0x100000000UL 206 #define SABRE_MEMSPACE 0x100000000UL
207 #define SABRE_MEMSPACE_SIZE 0x07fffffffUL 207 #define SABRE_MEMSPACE_SIZE 0x07fffffffUL
208 208
209 /* UltraSparc-IIi Programmer's Manual, page 325, PCI 209 /* UltraSparc-IIi Programmer's Manual, page 325, PCI
210 * configuration space address format: 210 * configuration space address format:
211 * 211 *
212 * 32 24 23 16 15 11 10 8 7 2 1 0 212 * 32 24 23 16 15 11 10 8 7 2 1 0
213 * --------------------------------------------------------- 213 * ---------------------------------------------------------
214 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 | 214 * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
215 * --------------------------------------------------------- 215 * ---------------------------------------------------------
216 */ 216 */
217 #define SABRE_CONFIG_BASE(PBM) \ 217 #define SABRE_CONFIG_BASE(PBM) \
218 ((PBM)->config_space | (1UL << 24)) 218 ((PBM)->config_space | (1UL << 24))
219 #define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \ 219 #define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
220 (((unsigned long)(BUS) << 16) | \ 220 (((unsigned long)(BUS) << 16) | \
221 ((unsigned long)(DEVFN) << 8) | \ 221 ((unsigned long)(DEVFN) << 8) | \
222 ((unsigned long)(REG))) 222 ((unsigned long)(REG)))
223 223
/* Non-zero when the controller decodes full config space; when set,
 * the devfn out-of-range checks below are bypassed entirely.
 * NOTE(review): assigned outside this chunk — presumably during
 * controller probe; confirm against the init path.
 */
static int hummingbird_p;
/* Root PCI bus of this controller.  NOTE(review): set elsewhere in
 * the file; not visible in this chunk.
 */
static struct pci_bus *sabre_root_bus;
226 226
227 static void *sabre_pci_config_mkaddr(struct pci_pbm_info *pbm, 227 static void *sabre_pci_config_mkaddr(struct pci_pbm_info *pbm,
228 unsigned char bus, 228 unsigned char bus,
229 unsigned int devfn, 229 unsigned int devfn,
230 int where) 230 int where)
231 { 231 {
232 if (!pbm) 232 if (!pbm)
233 return NULL; 233 return NULL;
234 return (void *) 234 return (void *)
235 (SABRE_CONFIG_BASE(pbm) | 235 (SABRE_CONFIG_BASE(pbm) |
236 SABRE_CONFIG_ENCODE(bus, devfn, where)); 236 SABRE_CONFIG_ENCODE(bus, devfn, where));
237 } 237 }
238 238
239 static int sabre_out_of_range(unsigned char devfn) 239 static int sabre_out_of_range(unsigned char devfn)
240 { 240 {
241 if (hummingbird_p) 241 if (hummingbird_p)
242 return 0; 242 return 0;
243 243
244 return (((PCI_SLOT(devfn) == 0) && (PCI_FUNC(devfn) > 0)) || 244 return (((PCI_SLOT(devfn) == 0) && (PCI_FUNC(devfn) > 0)) ||
245 ((PCI_SLOT(devfn) == 1) && (PCI_FUNC(devfn) > 1)) || 245 ((PCI_SLOT(devfn) == 1) && (PCI_FUNC(devfn) > 1)) ||
246 (PCI_SLOT(devfn) > 1)); 246 (PCI_SLOT(devfn) > 1));
247 } 247 }
248 248
249 static int __sabre_out_of_range(struct pci_pbm_info *pbm, 249 static int __sabre_out_of_range(struct pci_pbm_info *pbm,
250 unsigned char bus, 250 unsigned char bus,
251 unsigned char devfn) 251 unsigned char devfn)
252 { 252 {
253 if (hummingbird_p) 253 if (hummingbird_p)
254 return 0; 254 return 0;
255 255
256 return ((pbm->parent == 0) || 256 return ((pbm->parent == 0) ||
257 ((pbm == &pbm->parent->pbm_B) && 257 ((pbm == &pbm->parent->pbm_B) &&
258 (bus == pbm->pci_first_busno) && 258 (bus == pbm->pci_first_busno) &&
259 PCI_SLOT(devfn) > 8) || 259 PCI_SLOT(devfn) > 8) ||
260 ((pbm == &pbm->parent->pbm_A) && 260 ((pbm == &pbm->parent->pbm_A) &&
261 (bus == pbm->pci_first_busno) && 261 (bus == pbm->pci_first_busno) &&
262 PCI_SLOT(devfn) > 8)); 262 PCI_SLOT(devfn) > 8));
263 } 263 }
264 264
/* Low-level config space read: compose the config address, bail out
 * (leaving the all-ones default in *value) for unreachable devices,
 * then issue a single naturally aligned load of the requested size.
 */
static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;
	u16 tmp16;
	u8 tmp8;

	/* Preload the "device not present" pattern so the early
	 * returns below report all-ones, as PCI reads of absent
	 * devices do.
	 */
	switch (size) {
	case 1:
		*value = 0xff;
		break;
	case 2:
		*value = 0xffff;
		break;
	case 4:
		*value = 0xffffffff;
		break;
	}

	addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (__sabre_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;

	switch (size) {
	case 1:
		pci_config_read8((u8 *) addr, &tmp8);
		*value = tmp8;
		break;

	case 2:
		/* 16-bit accesses must be naturally aligned. */
		if (where & 0x01) {
			printk("pci_read_config_word: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read16((u16 *) addr, &tmp16);
		*value = tmp16;
		break;

	case 4:
		/* 32-bit accesses must be naturally aligned. */
		if (where & 0x03) {
			printk("pci_read_config_dword: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read32(addr, value);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
321 321
/* pci_ops read entry point.  Accesses to the controller's own config
 * header (bus 0, slot 0, function 0) carry natural-size restrictions,
 * so sub-word and super-word reads of registers 0-7 are synthesized
 * from naturally sized accesses.
 */
static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *value)
{
	/* Nonexistent devfns on bus 0 read back as all-ones. */
	if (!bus->number && sabre_out_of_range(devfn)) {
		switch (size) {
		case 1:
			*value = 0xff;
			break;
		case 2:
			*value = 0xffff;
			break;
		case 4:
			*value = 0xffffffff;
			break;
		}
		return PCIBIOS_SUCCESSFUL;
	}

	if (bus->number || PCI_SLOT(devfn))
		return __sabre_read_pci_cfg(bus, devfn, where, size, value);

	/* When accessing PCI config space of the PCI controller itself (bus
	 * 0, device slot 0, function 0) there are restrictions.  Each
	 * register must be accessed as its natural size.  Thus, for example
	 * the Vendor ID must be accessed as a 16-bit quantity.
	 */

	switch (size) {
	case 1:
		if (where < 8) {
			/* Registers 0-7 are 16-bit: read the containing
			 * word and pick out the requested byte.
			 */
			u32 tmp32;
			u16 tmp16;

			__sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
			tmp16 = (u16) tmp32;
			if (where & 1)
				*value = tmp16 >> 8;
			else
				*value = tmp16 & 0xff;
		} else
			return __sabre_read_pci_cfg(bus, devfn, where, 1, value);
		break;

	case 2:
		if (where < 8)
			return __sabre_read_pci_cfg(bus, devfn, where, 2, value);
		else {
			/* Beyond register 8 a word is assembled from two
			 * byte reads.
			 */
			u32 tmp32;
			u8 tmp8;

			__sabre_read_pci_cfg(bus, devfn, where, 1, &tmp32);
			tmp8 = (u8) tmp32;
			*value = tmp8;
			__sabre_read_pci_cfg(bus, devfn, where + 1, 1, &tmp32);
			tmp8 = (u8) tmp32;
			*value |= tmp8 << 8;
		}
		break;

	case 4: {
		/* 32-bit reads are split into two 16-bit reads which
		 * recurse here so each half gets the handling above.
		 */
		u32 tmp32;
		u16 tmp16;

		sabre_read_pci_cfg(bus, devfn, where, 2, &tmp32);
		tmp16 = (u16) tmp32;
		*value = tmp16;
		sabre_read_pci_cfg(bus, devfn, where + 2, 2, &tmp32);
		tmp16 = (u16) tmp32;
		*value |= tmp16 << 16;
		break;
	}
	}
	return PCIBIOS_SUCCESSFUL;
}
396 396
/* Low-level config space write.  Mirrors __sabre_read_pci_cfg():
 * compose the config address, silently drop accesses to unreachable
 * devices, then issue one naturally aligned store of the requested
 * size.
 */
static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				 int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;

	addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (__sabre_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;

	switch (size) {
	case 1:
		pci_config_write8((u8 *) addr, value);
		break;

	case 2:
		/* 16-bit accesses must be naturally aligned. */
		if (where & 0x01) {
			printk("pci_write_config_word: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_write16((u16 *) addr, value);
		break;

	case 4:
		/* 32-bit accesses must be naturally aligned. */
		if (where & 0x03) {
			printk("pci_write_config_dword: misaligned reg [%x]\n",
			       where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_write32(addr, value);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
437 437
438 static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn, 438 static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
439 int where, int size, u32 value) 439 int where, int size, u32 value)
440 { 440 {
441 if (bus->number) 441 if (bus->number)
442 return __sabre_write_pci_cfg(bus, devfn, where, size, value); 442 return __sabre_write_pci_cfg(bus, devfn, where, size, value);
443 443
444 if (sabre_out_of_range(devfn)) 444 if (sabre_out_of_range(devfn))
445 return PCIBIOS_SUCCESSFUL; 445 return PCIBIOS_SUCCESSFUL;
446 446
447 switch (size) { 447 switch (size) {
448 case 1: 448 case 1:
449 if (where < 8) { 449 if (where < 8) {
450 u32 tmp32; 450 u32 tmp32;
451 u16 tmp16; 451 u16 tmp16;
452 452
453 __sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32); 453 __sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
454 tmp16 = (u16) tmp32; 454 tmp16 = (u16) tmp32;
455 if (where & 1) { 455 if (where & 1) {
456 value &= 0x00ff; 456 value &= 0x00ff;
457 value |= tmp16 << 8; 457 value |= tmp16 << 8;
458 } else { 458 } else {
459 value &= 0xff00; 459 value &= 0xff00;
460 value |= tmp16; 460 value |= tmp16;
461 } 461 }
462 tmp32 = (u32) tmp16; 462 tmp32 = (u32) tmp16;
463 return __sabre_write_pci_cfg(bus, devfn, where & ~1, 2, tmp32); 463 return __sabre_write_pci_cfg(bus, devfn, where & ~1, 2, tmp32);
464 } else 464 } else
465 return __sabre_write_pci_cfg(bus, devfn, where, 1, value); 465 return __sabre_write_pci_cfg(bus, devfn, where, 1, value);
466 break; 466 break;
467 case 2: 467 case 2:
468 if (where < 8) 468 if (where < 8)
469 return __sabre_write_pci_cfg(bus, devfn, where, 2, value); 469 return __sabre_write_pci_cfg(bus, devfn, where, 2, value);
470 else { 470 else {
471 __sabre_write_pci_cfg(bus, devfn, where, 1, value & 0xff); 471 __sabre_write_pci_cfg(bus, devfn, where, 1, value & 0xff);
472 __sabre_write_pci_cfg(bus, devfn, where + 1, 1, value >> 8); 472 __sabre_write_pci_cfg(bus, devfn, where + 1, 1, value >> 8);
473 } 473 }
474 break; 474 break;
475 case 4: 475 case 4:
476 sabre_write_pci_cfg(bus, devfn, where, 2, value & 0xffff); 476 sabre_write_pci_cfg(bus, devfn, where, 2, value & 0xffff);
477 sabre_write_pci_cfg(bus, devfn, where + 2, 2, value >> 16); 477 sabre_write_pci_cfg(bus, devfn, where + 2, 2, value >> 16);
478 break; 478 break;
479 } 479 }
480 return PCIBIOS_SUCCESSFUL; 480 return PCIBIOS_SUCCESSFUL;
481 } 481 }
482 482
/* Config-space accessors handed to the generic PCI layer. */
static struct pci_ops sabre_ops = {
	.read =		sabre_read_pci_cfg,
	.write =	sabre_write_pci_cfg,
};
487 487
488 static unsigned long sabre_pcislot_imap_offset(unsigned long ino) 488 static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
489 { 489 {
490 unsigned int bus = (ino & 0x10) >> 4; 490 unsigned int bus = (ino & 0x10) >> 4;
491 unsigned int slot = (ino & 0x0c) >> 2; 491 unsigned int slot = (ino & 0x0c) >> 2;
492 492
493 if (bus == 0) 493 if (bus == 0)
494 return SABRE_IMAP_A_SLOT0 + (slot * 8); 494 return SABRE_IMAP_A_SLOT0 + (slot * 8);
495 else 495 else
496 return SABRE_IMAP_B_SLOT0 + (slot * 8); 496 return SABRE_IMAP_B_SLOT0 + (slot * 8);
497 } 497 }
498 498
/* IMAP register offsets for onboard (non-PCI-slot) interrupt sources,
 * indexed by (INO - SABRE_ONBOARD_IRQ_BASE).  Zero entries are
 * reserved INOs with no IMAP register.
 */
static unsigned long __onboard_imap_off[] = {
/*0x20*/	SABRE_IMAP_SCSI,
/*0x21*/	SABRE_IMAP_ETH,
/*0x22*/	SABRE_IMAP_BPP,
/*0x23*/	SABRE_IMAP_AU_REC,
/*0x24*/	SABRE_IMAP_AU_PLAY,
/*0x25*/	SABRE_IMAP_PFAIL,
/*0x26*/	SABRE_IMAP_KMS,
/*0x27*/	SABRE_IMAP_FLPY,
/*0x28*/	SABRE_IMAP_SHW,
/*0x29*/	SABRE_IMAP_KBD,
/*0x2a*/	SABRE_IMAP_MS,
/*0x2b*/	SABRE_IMAP_SER,
/*0x2c*/	0 /* reserved */,
/*0x2d*/	0 /* reserved */,
/*0x2e*/	SABRE_IMAP_UE,
/*0x2f*/	SABRE_IMAP_CE,
/*0x30*/	SABRE_IMAP_PCIERR,
};
#define SABRE_ONBOARD_IRQ_BASE		0x20
#define SABRE_ONBOARD_IRQ_LAST		0x30
#define sabre_onboard_imap_offset(__ino) \
	__onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
522 522
/* Compute the ICLR register offset for an INO.  Onboard-device INOs
 * (bit 5 set) index from SABRE_ICLR_SCSI, PCI slot INOs from
 * SABRE_ICLR_A_SLOT0; ICLR registers are 8 bytes apart.
 *
 * Fix: the first use of the argument was unparenthesized (ino & 0x20),
 * which mis-expands for non-primary expression arguments; every macro
 * parameter use is now (ino).
 */
#define sabre_iclr_offset(ino) \
	((((ino) & 0x20)) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
			(SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
526 526
527 /* When a device lives behind a bridge deeper in the PCI bus topology 527 /* When a device lives behind a bridge deeper in the PCI bus topology
528 * than APB, a special sequence must run to make sure all pending DMA 528 * than APB, a special sequence must run to make sure all pending DMA
529 * transfers at the time of IRQ delivery are visible in the coherency 529 * transfers at the time of IRQ delivery are visible in the coherency
530 * domain by the cpu. This sequence is to perform a read on the far 530 * domain by the cpu. This sequence is to perform a read on the far
531 * side of the non-APB bridge, then perform a read of Sabre's DMA 531 * side of the non-APB bridge, then perform a read of Sabre's DMA
532 * write-sync register. 532 * write-sync register.
533 */ 533 */
534 static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2) 534 static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
535 { 535 {
536 struct pci_dev *pdev = _arg1; 536 struct pci_dev *pdev = _arg1;
537 unsigned long sync_reg = (unsigned long) _arg2; 537 unsigned long sync_reg = (unsigned long) _arg2;
538 u16 _unused; 538 u16 _unused;
539 539
540 pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused); 540 pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
541 sabre_read(sync_reg); 541 sabre_read(sync_reg);
542 } 542 }
543 543
/* Translate a SABRE interrupt number (INO) into a virtual IRQ.
 *
 * @pbm:  PBM the interrupt belongs to; supplies the controller
 *        register base used to locate the IMAP/ICLR registers.
 * @pdev: PCI device the IRQ is for, or NULL for controller-internal
 *        interrupts (UE/CE/PCIERR).
 * @ino:  raw interrupt number; only the PCI_IRQ_INO bits are used.
 *
 * Returns the virt_irq cookie produced by build_irq().
 */
static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
				    struct pci_dev *pdev,
				    unsigned int ino)
{
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;
	int virt_irq;

	ino &= PCI_IRQ_INO;
	if (ino < SABRE_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = sabre_pcislot_imap_offset(ino);
	} else {
		/* onboard device */
		if (ino > SABRE_ONBOARD_IRQ_LAST) {
			prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
			prom_halt();
		}
		imap_off = sabre_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket.  The +4 addresses the low half of
	 * the register -- presumably the 32-bit word build_irq() wants
	 * within the 64-bit IMAP/ICLR register; TODO confirm.
	 */
	imap = pbm->controller_regs + imap_off;
	imap += 4;

	iclr_off = sabre_iclr_offset(ino);
	iclr = pbm->controller_regs + iclr_off;
	iclr += 4;

	/* PCI slot INOs (bit 5 clear) carry the interrupt line in the
	 * low two bits; onboard sources need no fixup.
	 */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	virt_irq = build_irq(inofixup, iclr, imap);

	if (pdev) {
		struct pcidev_cookie *pcp = pdev->sysdata;

		/* Devices behind a PCI-PCI bridge below APB need the
		 * DMA write-sync sequence run before the handler; pass
		 * the device and the SABRE_WRSYNC register address.
		 */
		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
			struct pci_controller_info *p = pcp->pbm->parent;

			irq_install_pre_handler(virt_irq,
						sabre_wsync_handler,
						pdev,
						(void *)
						p->pbm_A.controller_regs +
						SABRE_WRSYNC);
		}
	}
	return virt_irq;
}
595 595
596 /* SABRE error handling support. */ 596 /* SABRE error handling support. */
597 static void sabre_check_iommu_error(struct pci_controller_info *p, 597 static void sabre_check_iommu_error(struct pci_controller_info *p,
598 unsigned long afsr, 598 unsigned long afsr,
599 unsigned long afar) 599 unsigned long afar)
600 { 600 {
601 struct pci_iommu *iommu = p->pbm_A.iommu; 601 struct pci_iommu *iommu = p->pbm_A.iommu;
602 unsigned long iommu_tag[16]; 602 unsigned long iommu_tag[16];
603 unsigned long iommu_data[16]; 603 unsigned long iommu_data[16];
604 unsigned long flags; 604 unsigned long flags;
605 u64 control; 605 u64 control;
606 int i; 606 int i;
607 607
608 spin_lock_irqsave(&iommu->lock, flags); 608 spin_lock_irqsave(&iommu->lock, flags);
609 control = sabre_read(iommu->iommu_control); 609 control = sabre_read(iommu->iommu_control);
610 if (control & SABRE_IOMMUCTRL_ERR) { 610 if (control & SABRE_IOMMUCTRL_ERR) {
611 char *type_string; 611 char *type_string;
612 612
613 /* Clear the error encountered bit. 613 /* Clear the error encountered bit.
614 * NOTE: On Sabre this is write 1 to clear, 614 * NOTE: On Sabre this is write 1 to clear,
615 * which is different from Psycho. 615 * which is different from Psycho.
616 */ 616 */
617 sabre_write(iommu->iommu_control, control); 617 sabre_write(iommu->iommu_control, control);
618 switch((control & SABRE_IOMMUCTRL_ERRSTS) >> 25UL) { 618 switch((control & SABRE_IOMMUCTRL_ERRSTS) >> 25UL) {
619 case 1: 619 case 1:
620 type_string = "Invalid Error"; 620 type_string = "Invalid Error";
621 break; 621 break;
622 case 3: 622 case 3:
623 type_string = "ECC Error"; 623 type_string = "ECC Error";
624 break; 624 break;
625 default: 625 default:
626 type_string = "Unknown"; 626 type_string = "Unknown";
627 break; 627 break;
628 }; 628 };
629 printk("SABRE%d: IOMMU Error, type[%s]\n", 629 printk("SABRE%d: IOMMU Error, type[%s]\n",
630 p->index, type_string); 630 p->index, type_string);
631 631
632 /* Enter diagnostic mode and probe for error'd 632 /* Enter diagnostic mode and probe for error'd
633 * entries in the IOTLB. 633 * entries in the IOTLB.
634 */ 634 */
635 control &= ~(SABRE_IOMMUCTRL_ERRSTS | SABRE_IOMMUCTRL_ERR); 635 control &= ~(SABRE_IOMMUCTRL_ERRSTS | SABRE_IOMMUCTRL_ERR);
636 sabre_write(iommu->iommu_control, 636 sabre_write(iommu->iommu_control,
637 (control | SABRE_IOMMUCTRL_DENAB)); 637 (control | SABRE_IOMMUCTRL_DENAB));
638 for (i = 0; i < 16; i++) { 638 for (i = 0; i < 16; i++) {
639 unsigned long base = p->pbm_A.controller_regs; 639 unsigned long base = p->pbm_A.controller_regs;
640 640
641 iommu_tag[i] = 641 iommu_tag[i] =
642 sabre_read(base + SABRE_IOMMU_TAG + (i * 8UL)); 642 sabre_read(base + SABRE_IOMMU_TAG + (i * 8UL));
643 iommu_data[i] = 643 iommu_data[i] =
644 sabre_read(base + SABRE_IOMMU_DATA + (i * 8UL)); 644 sabre_read(base + SABRE_IOMMU_DATA + (i * 8UL));
645 sabre_write(base + SABRE_IOMMU_TAG + (i * 8UL), 0); 645 sabre_write(base + SABRE_IOMMU_TAG + (i * 8UL), 0);
646 sabre_write(base + SABRE_IOMMU_DATA + (i * 8UL), 0); 646 sabre_write(base + SABRE_IOMMU_DATA + (i * 8UL), 0);
647 } 647 }
648 sabre_write(iommu->iommu_control, control); 648 sabre_write(iommu->iommu_control, control);
649 649
650 for (i = 0; i < 16; i++) { 650 for (i = 0; i < 16; i++) {
651 unsigned long tag, data; 651 unsigned long tag, data;
652 652
653 tag = iommu_tag[i]; 653 tag = iommu_tag[i];
654 if (!(tag & SABRE_IOMMUTAG_ERR)) 654 if (!(tag & SABRE_IOMMUTAG_ERR))
655 continue; 655 continue;
656 656
657 data = iommu_data[i]; 657 data = iommu_data[i];
658 switch((tag & SABRE_IOMMUTAG_ERRSTS) >> 23UL) { 658 switch((tag & SABRE_IOMMUTAG_ERRSTS) >> 23UL) {
659 case 1: 659 case 1:
660 type_string = "Invalid Error"; 660 type_string = "Invalid Error";
661 break; 661 break;
662 case 3: 662 case 3:
663 type_string = "ECC Error"; 663 type_string = "ECC Error";
664 break; 664 break;
665 default: 665 default:
666 type_string = "Unknown"; 666 type_string = "Unknown";
667 break; 667 break;
668 }; 668 };
669 printk("SABRE%d: IOMMU TAG(%d)[RAW(%016lx)error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n", 669 printk("SABRE%d: IOMMU TAG(%d)[RAW(%016lx)error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n",
670 p->index, i, tag, type_string, 670 p->index, i, tag, type_string,
671 ((tag & SABRE_IOMMUTAG_WRITE) ? 1 : 0), 671 ((tag & SABRE_IOMMUTAG_WRITE) ? 1 : 0),
672 ((tag & SABRE_IOMMUTAG_SIZE) ? 64 : 8), 672 ((tag & SABRE_IOMMUTAG_SIZE) ? 64 : 8),
673 ((tag & SABRE_IOMMUTAG_VPN) << IOMMU_PAGE_SHIFT)); 673 ((tag & SABRE_IOMMUTAG_VPN) << IOMMU_PAGE_SHIFT));
674 printk("SABRE%d: IOMMU DATA(%d)[RAW(%016lx)valid(%d)used(%d)cache(%d)ppg(%016lx)\n", 674 printk("SABRE%d: IOMMU DATA(%d)[RAW(%016lx)valid(%d)used(%d)cache(%d)ppg(%016lx)\n",
675 p->index, i, data, 675 p->index, i, data,
676 ((data & SABRE_IOMMUDATA_VALID) ? 1 : 0), 676 ((data & SABRE_IOMMUDATA_VALID) ? 1 : 0),
677 ((data & SABRE_IOMMUDATA_USED) ? 1 : 0), 677 ((data & SABRE_IOMMUDATA_USED) ? 1 : 0),
678 ((data & SABRE_IOMMUDATA_CACHE) ? 1 : 0), 678 ((data & SABRE_IOMMUDATA_CACHE) ? 1 : 0),
679 ((data & SABRE_IOMMUDATA_PPN) << IOMMU_PAGE_SHIFT)); 679 ((data & SABRE_IOMMUDATA_PPN) << IOMMU_PAGE_SHIFT));
680 } 680 }
681 } 681 }
682 spin_unlock_irqrestore(&iommu->lock, flags); 682 spin_unlock_irqrestore(&iommu->lock, flags);
683 } 683 }
684 684
/* Interrupt handler for SABRE uncorrectable (UE) errors.
 *
 * Latches AFAR/AFSR, clears the primary/secondary status bits by
 * writing them back (write-1-to-clear), logs a report, and asks the
 * IOMMU checker whether a translation error is behind it.
 * Returns IRQ_NONE when no UE status bits were set (shared IRQ line).
 */
static irqreturn_t sabre_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_UE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch uncorrectable error status.  AFAR is read before AFSR
	 * -- presumably so the address matches the latched status;
	 * TODO confirm against chip documentation.
	 */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
		 SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
		 SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
	if (!error_bits)
		return IRQ_NONE;
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: Uncorrectable Error, primary error type[%s%s]\n",
	       p->index,
	       ((error_bits & SABRE_UEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_UEAFSR_PDWR) ?
		 "DMA Write" : "???")),
	       ((error_bits & SABRE_UEAFSR_PDTE) ?
		":Translation Error" : ""));
	printk("SABRE%d: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_UEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_UEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
	printk("SABRE%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_UEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_UEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (afsr & SABRE_UEAFSR_SDTE) {
		reported++;
		printk("(Translation Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	sabre_check_iommu_error(p, afsr, afar);

	return IRQ_HANDLED;
}
744 744
/* Interrupt handler for SABRE correctable (CE) errors.
 *
 * Same structure as sabre_ue_intr: latch AFAR/AFSR, write the set
 * status bits back to clear them, then log.  Returns IRQ_NONE when no
 * CE status bits were set (the IRQ line is shared).
 */
static irqreturn_t sabre_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_CE_AFSR;
	unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Latch error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
		 SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
	if (!error_bits)
		return IRQ_NONE;
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: Correctable Error, primary error type[%s]\n",
	       p->index,
	       ((error_bits & SABRE_CEAFSR_PDRD) ?
		"DMA Read" :
		((error_bits & SABRE_CEAFSR_PDWR) ?
		 "DMA Write" : "???")));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SABRE%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
	       "was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_CEAFSR_ESYND) >> 48UL,
	       (afsr & SABRE_CEAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_CEAFSR_OFF) >> 29UL,
	       ((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
	printk("SABRE%d: CE AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: CE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_CEAFSR_SDRD) {
		reported++;
		printk("(DMA Read)");
	}
	if (afsr & SABRE_CEAFSR_SDWR) {
		reported++;
		printk("(DMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
800 800
/* Fallback path of the PCI error interrupt: no PIO AFSR bits were set,
 * so look for an SERR indication in SABRE_PCICTRL and for error bits in
 * the root bridge's PCI_STATUS register.  Returns IRQ_HANDLED only if
 * something was actually found and cleared, else IRQ_NONE.
 */
static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	csr_reg = p->pbm_A.controller_regs + SABRE_PCICTRL;
	csr = sabre_read(csr_reg);
	csr_error_bits =
		csr & SABRE_PCICTRL_SERR;
	if (csr_error_bits) {
		/* Clear the errors. */
		sabre_write(csr_reg, csr);

		/* Log 'em. */
		if (csr_error_bits & SABRE_PCICTRL_SERR)
			printk("SABRE%d: PCI SERR signal asserted.\n",
			       p->index);
		ret = IRQ_HANDLED;
	}
	/* sabre_root_bus->self is the host bridge device itself. */
	pci_read_config_word(sabre_root_bus->self,
			     PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
		       p->index, stat);
		/* Writing all-ones clears the RW1C status bits. */
		pci_write_config_word(sabre_root_bus->self,
				      PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
836 836
/* Interrupt handler for SABRE PCI bus errors (PIO AFSR/AFAR).
 *
 * Latches the PIO AFSR/AFAR pair, clears the set status bits by
 * writing them back, logs a report, and then scans both PBMs' device
 * trees for devices whose config-space status matches the error type.
 * When nothing is latched in the PIO AFSR, defers to
 * sabre_pcierr_intr_other() for SERR/PCI_STATUS style errors.
 */
static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = p->pbm_A.controller_regs + SABRE_PIOAFSR;
	afar_reg = p->pbm_A.controller_regs + SABRE_PIOAFAR;

	/* Latch error status. */
	afar = sabre_read(afar_reg);
	afsr = sabre_read(afsr_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_PTA |
		 SABRE_PIOAFSR_PRTRY | SABRE_PIOAFSR_PPERR |
		 SABRE_PIOAFSR_SMA | SABRE_PIOAFSR_STA |
		 SABRE_PIOAFSR_SRTRY | SABRE_PIOAFSR_SPERR);
	if (!error_bits)
		return sabre_pcierr_intr_other(p);
	sabre_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("SABRE%d: PCI Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & SABRE_PIOAFSR_PMA) ?
		 "Master Abort" :
		 ((error_bits & SABRE_PIOAFSR_PTA) ?
		  "Target Abort" :
		  ((error_bits & SABRE_PIOAFSR_PRTRY) ?
		   "Excessive Retries" :
		   ((error_bits & SABRE_PIOAFSR_PPERR) ?
		    "Parity Error" : "???"))))));
	printk("SABRE%d: bytemask[%04lx] was_block(%d)\n",
	       p->index,
	       (afsr & SABRE_PIOAFSR_BMSK) >> 32UL,
	       (afsr & SABRE_PIOAFSR_BLK) ? 1 : 0);
	printk("SABRE%d: PCI AFAR [%016lx]\n", p->index, afar);
	printk("SABRE%d: PCI Secondary errors [", p->index);
	reported = 0;
	if (afsr & SABRE_PIOAFSR_SMA) {
		reported++;
		printk("(Master Abort)");
	}
	if (afsr & SABRE_PIOAFSR_STA) {
		reported++;
		printk("(Target Abort)");
	}
	if (afsr & SABRE_PIOAFSR_SRTRY) {
		reported++;
		printk("(Excessive Retries)");
	}
	if (afsr & SABRE_PIOAFSR_SPERR) {
		reported++;
		printk("(Parity Error)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* For the error types shown, scan both PCI buses for devices
	 * which have logged that error type.
	 */

	/* If we see a Target Abort, this could be the result of an
	 * IOMMU translation error of some sort.  It is extremely
	 * useful to log this information as usually it indicates
	 * a bug in the IOMMU support code or a PCI device driver.
	 */
	if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
		sabre_check_iommu_error(p, afsr, afar);
		pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
	}
	if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) {
		pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
	}
	/* For excessive retries, SABRE/PBM will abort the device
	 * and there is no way to specifically check for excessive
	 * retries in the config space status registers.  So what
	 * we hope is that we'll catch it via the master/target
	 * abort events.
	 */

	if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) {
		pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
		pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
	}

	return IRQ_HANDLED;
}
931 931
/* XXX What about PowerFail/PowerManagement??? -DaveM */
#define SABRE_UE_INO		0x2e
#define SABRE_CE_INO		0x2f
#define SABRE_PCIERR_INO	0x30

/* Register the UE/CE/PCIERR interrupt handlers for controller @p.
 *
 * Each AFSR's error bits are cleared BEFORE request_irq() so stale
 * latched status cannot fire a spurious interrupt; error reporting is
 * only enabled in SABRE_PCICTRL (ERREN) after all three handlers are
 * in place.  Any registration failure halts at the PROM.
 */
static void sabre_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
	unsigned long base = pbm->controller_regs;
	unsigned long irq, portid = pbm->portid;
	u64 tmp;

	/* We clear the error bits in the appropriate AFSR before
	 * registering the handler so that we don't get spurious
	 * interrupts.
	 */
	sabre_write(base + SABRE_UE_AFSR,
		    (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
		     SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
		     SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE));
	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_UE_INO);
	if (request_irq(irq, sabre_ue_intr,
			SA_SHIRQ, "SABRE UE", p) < 0) {
		prom_printf("SABRE%d: Cannot register UE interrupt.\n",
			    p->index);
		prom_halt();
	}

	sabre_write(base + SABRE_CE_AFSR,
		    (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
		     SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR));
	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_CE_INO);
	if (request_irq(irq, sabre_ce_intr,
			SA_SHIRQ, "SABRE CE", p) < 0) {
		prom_printf("SABRE%d: Cannot register CE interrupt.\n",
			    p->index);
		prom_halt();
	}

	irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_PCIERR_INO);
	if (request_irq(irq, sabre_pcierr_intr,
			SA_SHIRQ, "SABRE PCIERR", p) < 0) {
		prom_printf("SABRE%d: Cannot register PciERR interrupt.\n",
			    p->index);
		prom_halt();
	}

	/* All handlers installed; now enable error reporting. */
	tmp = sabre_read(base + SABRE_PCICTRL);
	tmp |= SABRE_PCICTRL_ERREN;
	sabre_write(base + SABRE_PCICTRL, tmp);
}
982 982
983 static void sabre_resource_adjust(struct pci_dev *pdev, 983 static void sabre_resource_adjust(struct pci_dev *pdev,
984 struct resource *res, 984 struct resource *res,
985 struct resource *root) 985 struct resource *root)
986 { 986 {
987 struct pci_pbm_info *pbm = pdev->bus->sysdata; 987 struct pci_pbm_info *pbm = pdev->bus->sysdata;
988 unsigned long base; 988 unsigned long base;
989 989
990 if (res->flags & IORESOURCE_IO) 990 if (res->flags & IORESOURCE_IO)
991 base = pbm->controller_regs + SABRE_IOSPACE; 991 base = pbm->controller_regs + SABRE_IOSPACE;
992 else 992 else
993 base = pbm->controller_regs + SABRE_MEMSPACE; 993 base = pbm->controller_regs + SABRE_MEMSPACE;
994 994
995 res->start += base; 995 res->start += base;
996 res->end += base; 996 res->end += base;
997 } 997 }
998 998
/* Write a device resource back into its BAR (or expansion ROM base
 * register), converting the physical address back to a bus-relative
 * value by subtracting the controller's I/O or memory space base.
 *
 * @pdev:     device whose BAR is updated.
 * @resource: index into pdev->resource[]; 0-5 are standard BARs,
 *            PCI_ROM_RESOURCE is the expansion ROM; anything else is
 *            silently ignored.
 */
static void sabre_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res;
	unsigned long base;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		base = pbm->controller_regs + SABRE_IOSPACE;
	else {
		base = pbm->controller_regs + SABRE_MEMSPACE;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	/* Keep the BAR's low type/size bits (reg & size) and merge in
	 * the new bus-relative address.
	 */
	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - base)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
1044 1044
1045 static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) 1045 static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
1046 { 1046 {
1047 struct pci_dev *pdev; 1047 struct pci_dev *pdev;
1048 1048
1049 list_for_each_entry(pdev, &sabre_bus->devices, bus_list) { 1049 list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
1050 1050
1051 if (pdev->vendor == PCI_VENDOR_ID_SUN && 1051 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
1052 pdev->device == PCI_DEVICE_ID_SUN_SIMBA) { 1052 pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
1053 u32 word32; 1053 u32 word32;
1054 u16 word16; 1054 u16 word16;
1055 1055
1056 sabre_read_pci_cfg(pdev->bus, pdev->devfn, 1056 sabre_read_pci_cfg(pdev->bus, pdev->devfn,
1057 PCI_COMMAND, 2, &word32); 1057 PCI_COMMAND, 2, &word32);
1058 word16 = (u16) word32; 1058 word16 = (u16) word32;
1059 word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | 1059 word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
1060 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | 1060 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
1061 PCI_COMMAND_IO; 1061 PCI_COMMAND_IO;
1062 word32 = (u32) word16; 1062 word32 = (u32) word16;
1063 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 1063 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1064 PCI_COMMAND, 2, word32); 1064 PCI_COMMAND, 2, word32);
1065 1065
1066 /* Status register bits are "write 1 to clear". */ 1066 /* Status register bits are "write 1 to clear". */
1067 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 1067 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1068 PCI_STATUS, 2, 0xffff); 1068 PCI_STATUS, 2, 0xffff);
1069 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 1069 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1070 PCI_SEC_STATUS, 2, 0xffff); 1070 PCI_SEC_STATUS, 2, 0xffff);
1071 1071
1072 /* Use a primary/seconday latency timer value 1072 /* Use a primary/seconday latency timer value
1073 * of 64. 1073 * of 64.
1074 */ 1074 */
1075 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 1075 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1076 PCI_LATENCY_TIMER, 1, 64); 1076 PCI_LATENCY_TIMER, 1, 64);
1077 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 1077 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1078 PCI_SEC_LATENCY_TIMER, 1, 64); 1078 PCI_SEC_LATENCY_TIMER, 1, 64);
1079 1079
1080 /* Enable reporting/forwarding of master aborts, 1080 /* Enable reporting/forwarding of master aborts,
1081 * parity, and SERR. 1081 * parity, and SERR.
1082 */ 1082 */
1083 sabre_write_pci_cfg(pdev->bus, pdev->devfn, 1083 sabre_write_pci_cfg(pdev->bus, pdev->devfn,
1084 PCI_BRIDGE_CONTROL, 1, 1084 PCI_BRIDGE_CONTROL, 1,
1085 (PCI_BRIDGE_CTL_PARITY | 1085 (PCI_BRIDGE_CTL_PARITY |
1086 PCI_BRIDGE_CTL_SERR | 1086 PCI_BRIDGE_CTL_SERR |
1087 PCI_BRIDGE_CTL_MASTER_ABORT)); 1087 PCI_BRIDGE_CTL_MASTER_ABORT));
1088 } 1088 }
1089 } 1089 }
1090 } 1090 }
1091 1091
1092 static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) 1092 static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
1093 { 1093 {
1094 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); 1094 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
1095 1095
1096 if (!cookie) { 1096 if (!cookie) {
1097 prom_printf("SABRE: Critical allocation failure.\n"); 1097 prom_printf("SABRE: Critical allocation failure.\n");
1098 prom_halt(); 1098 prom_halt();
1099 } 1099 }
1100 1100
1101 /* All we care about is the PBM. */ 1101 /* All we care about is the PBM. */
1102 cookie->pbm = pbm; 1102 cookie->pbm = pbm;
1103 1103
1104 return cookie; 1104 return cookie;
1105 } 1105 }
1106 1106
1107 static void sabre_scan_bus(struct pci_controller_info *p) 1107 static void sabre_scan_bus(struct pci_controller_info *p)
1108 { 1108 {
1109 static int once; 1109 static int once;
1110 struct pci_bus *sabre_bus, *pbus; 1110 struct pci_bus *sabre_bus, *pbus;
1111 struct pci_pbm_info *pbm; 1111 struct pci_pbm_info *pbm;
1112 struct pcidev_cookie *cookie; 1112 struct pcidev_cookie *cookie;
1113 int sabres_scanned; 1113 int sabres_scanned;
1114 1114
1115 /* The APB bridge speaks to the Sabre host PCI bridge 1115 /* The APB bridge speaks to the Sabre host PCI bridge
1116 * at 66Mhz, but the front side of APB runs at 33Mhz 1116 * at 66Mhz, but the front side of APB runs at 33Mhz
1117 * for both segments. 1117 * for both segments.
1118 */ 1118 */
1119 p->pbm_A.is_66mhz_capable = 0; 1119 p->pbm_A.is_66mhz_capable = 0;
1120 p->pbm_B.is_66mhz_capable = 0; 1120 p->pbm_B.is_66mhz_capable = 0;
1121 1121
1122 /* This driver has not been verified to handle 1122 /* This driver has not been verified to handle
1123 * multiple SABREs yet, so trap this. 1123 * multiple SABREs yet, so trap this.
1124 * 1124 *
1125 * Also note that the SABRE host bridge is hardwired 1125 * Also note that the SABRE host bridge is hardwired
1126 * to live at bus 0. 1126 * to live at bus 0.
1127 */ 1127 */
1128 if (once != 0) { 1128 if (once != 0) {
1129 prom_printf("SABRE: Multiple controllers unsupported.\n"); 1129 prom_printf("SABRE: Multiple controllers unsupported.\n");
1130 prom_halt(); 1130 prom_halt();
1131 } 1131 }
1132 once++; 1132 once++;
1133 1133
1134 cookie = alloc_bridge_cookie(&p->pbm_A); 1134 cookie = alloc_bridge_cookie(&p->pbm_A);
1135 1135
1136 sabre_bus = pci_scan_bus(p->pci_first_busno, 1136 sabre_bus = pci_scan_bus(p->pci_first_busno,
1137 p->pci_ops, 1137 p->pci_ops,
1138 &p->pbm_A); 1138 &p->pbm_A);
1139 pci_fixup_host_bridge_self(sabre_bus); 1139 pci_fixup_host_bridge_self(sabre_bus);
1140 sabre_bus->self->sysdata = cookie; 1140 sabre_bus->self->sysdata = cookie;
1141 1141
1142 sabre_root_bus = sabre_bus; 1142 sabre_root_bus = sabre_bus;
1143 1143
1144 apb_init(p, sabre_bus); 1144 apb_init(p, sabre_bus);
1145 1145
1146 sabres_scanned = 0; 1146 sabres_scanned = 0;
1147 1147
1148 list_for_each_entry(pbus, &sabre_bus->children, node) { 1148 list_for_each_entry(pbus, &sabre_bus->children, node) {
1149 1149
1150 if (pbus->number == p->pbm_A.pci_first_busno) { 1150 if (pbus->number == p->pbm_A.pci_first_busno) {
1151 pbm = &p->pbm_A; 1151 pbm = &p->pbm_A;
1152 } else if (pbus->number == p->pbm_B.pci_first_busno) { 1152 } else if (pbus->number == p->pbm_B.pci_first_busno) {
1153 pbm = &p->pbm_B; 1153 pbm = &p->pbm_B;
1154 } else 1154 } else
1155 continue; 1155 continue;
1156 1156
1157 cookie = alloc_bridge_cookie(pbm); 1157 cookie = alloc_bridge_cookie(pbm);
1158 pbus->self->sysdata = cookie; 1158 pbus->self->sysdata = cookie;
1159 1159
1160 sabres_scanned++; 1160 sabres_scanned++;
1161 1161
1162 pbus->sysdata = pbm; 1162 pbus->sysdata = pbm;
1163 pbm->pci_bus = pbus; 1163 pbm->pci_bus = pbus;
1164 pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node->node); 1164 pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
1165 pci_record_assignments(pbm, pbus); 1165 pci_record_assignments(pbm, pbus);
1166 pci_assign_unassigned(pbm, pbus); 1166 pci_assign_unassigned(pbm, pbus);
1167 pci_fixup_irq(pbm, pbus); 1167 pci_fixup_irq(pbm, pbus);
1168 pci_determine_66mhz_disposition(pbm, pbus); 1168 pci_determine_66mhz_disposition(pbm, pbus);
1169 pci_setup_busmastering(pbm, pbus); 1169 pci_setup_busmastering(pbm, pbus);
1170 } 1170 }
1171 1171
1172 if (!sabres_scanned) { 1172 if (!sabres_scanned) {
1173 /* Hummingbird, no APBs. */ 1173 /* Hummingbird, no APBs. */
1174 pbm = &p->pbm_A; 1174 pbm = &p->pbm_A;
1175 sabre_bus->sysdata = pbm; 1175 sabre_bus->sysdata = pbm;
1176 pbm->pci_bus = sabre_bus; 1176 pbm->pci_bus = sabre_bus;
1177 pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node->node); 1177 pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
1178 pci_record_assignments(pbm, sabre_bus); 1178 pci_record_assignments(pbm, sabre_bus);
1179 pci_assign_unassigned(pbm, sabre_bus); 1179 pci_assign_unassigned(pbm, sabre_bus);
1180 pci_fixup_irq(pbm, sabre_bus); 1180 pci_fixup_irq(pbm, sabre_bus);
1181 pci_determine_66mhz_disposition(pbm, sabre_bus); 1181 pci_determine_66mhz_disposition(pbm, sabre_bus);
1182 pci_setup_busmastering(pbm, sabre_bus); 1182 pci_setup_busmastering(pbm, sabre_bus);
1183 } 1183 }
1184 1184
1185 sabre_register_error_handlers(p); 1185 sabre_register_error_handlers(p);
1186 } 1186 }
1187 1187
1188 static void sabre_iommu_init(struct pci_controller_info *p, 1188 static void sabre_iommu_init(struct pci_controller_info *p,
1189 int tsbsize, unsigned long dvma_offset, 1189 int tsbsize, unsigned long dvma_offset,
1190 u32 dma_mask) 1190 u32 dma_mask)
1191 { 1191 {
1192 struct pci_iommu *iommu = p->pbm_A.iommu; 1192 struct pci_iommu *iommu = p->pbm_A.iommu;
1193 unsigned long i; 1193 unsigned long i;
1194 u64 control; 1194 u64 control;
1195 1195
1196 /* Register addresses. */ 1196 /* Register addresses. */
1197 iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL; 1197 iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
1198 iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE; 1198 iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
1199 iommu->iommu_flush = p->pbm_A.controller_regs + SABRE_IOMMU_FLUSH; 1199 iommu->iommu_flush = p->pbm_A.controller_regs + SABRE_IOMMU_FLUSH;
1200 iommu->write_complete_reg = p->pbm_A.controller_regs + SABRE_WRSYNC; 1200 iommu->write_complete_reg = p->pbm_A.controller_regs + SABRE_WRSYNC;
1201 /* Sabre's IOMMU lacks ctx flushing. */ 1201 /* Sabre's IOMMU lacks ctx flushing. */
1202 iommu->iommu_ctxflush = 0; 1202 iommu->iommu_ctxflush = 0;
1203 1203
1204 /* Invalidate TLB Entries. */ 1204 /* Invalidate TLB Entries. */
1205 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL); 1205 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
1206 control |= SABRE_IOMMUCTRL_DENAB; 1206 control |= SABRE_IOMMUCTRL_DENAB;
1207 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); 1207 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
1208 1208
1209 for(i = 0; i < 16; i++) { 1209 for(i = 0; i < 16; i++) {
1210 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TAG + (i * 8UL), 0); 1210 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TAG + (i * 8UL), 0);
1211 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_DATA + (i * 8UL), 0); 1211 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_DATA + (i * 8UL), 0);
1212 } 1212 }
1213 1213
1214 /* Leave diag mode enabled for full-flushing done 1214 /* Leave diag mode enabled for full-flushing done
1215 * in pci_iommu.c 1215 * in pci_iommu.c
1216 */ 1216 */
1217 pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask); 1217 pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
1218 1218
1219 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, 1219 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
1220 __pa(iommu->page_table)); 1220 __pa(iommu->page_table));
1221 1221
1222 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL); 1222 control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
1223 control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ); 1223 control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
1224 control |= SABRE_IOMMUCTRL_ENAB; 1224 control |= SABRE_IOMMUCTRL_ENAB;
1225 switch(tsbsize) { 1225 switch(tsbsize) {
1226 case 64: 1226 case 64:
1227 control |= SABRE_IOMMU_TSBSZ_64K; 1227 control |= SABRE_IOMMU_TSBSZ_64K;
1228 break; 1228 break;
1229 case 128: 1229 case 128:
1230 control |= SABRE_IOMMU_TSBSZ_128K; 1230 control |= SABRE_IOMMU_TSBSZ_128K;
1231 break; 1231 break;
1232 default: 1232 default:
1233 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize); 1233 prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
1234 prom_halt(); 1234 prom_halt();
1235 break; 1235 break;
1236 } 1236 }
1237 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); 1237 sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
1238 } 1238 }
1239 1239
1240 static void pbm_register_toplevel_resources(struct pci_controller_info *p, 1240 static void pbm_register_toplevel_resources(struct pci_controller_info *p,
1241 struct pci_pbm_info *pbm) 1241 struct pci_pbm_info *pbm)
1242 { 1242 {
1243 char *name = pbm->name; 1243 char *name = pbm->name;
1244 unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE; 1244 unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
1245 unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE; 1245 unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
1246 unsigned int devfn; 1246 unsigned int devfn;
1247 unsigned long first, last, i; 1247 unsigned long first, last, i;
1248 u8 *addr, map; 1248 u8 *addr, map;
1249 1249
1250 sprintf(name, "SABRE%d PBM%c", 1250 sprintf(name, "SABRE%d PBM%c",
1251 p->index, 1251 p->index,
1252 (pbm == &p->pbm_A ? 'A' : 'B')); 1252 (pbm == &p->pbm_A ? 'A' : 'B'));
1253 pbm->io_space.name = pbm->mem_space.name = name; 1253 pbm->io_space.name = pbm->mem_space.name = name;
1254 1254
1255 devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1); 1255 devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
1256 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP); 1256 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
1257 map = 0; 1257 map = 0;
1258 pci_config_read8(addr, &map); 1258 pci_config_read8(addr, &map);
1259 1259
1260 first = 8; 1260 first = 8;
1261 last = 0; 1261 last = 0;
1262 for (i = 0; i < 8; i++) { 1262 for (i = 0; i < 8; i++) {
1263 if ((map & (1 << i)) != 0) { 1263 if ((map & (1 << i)) != 0) {
1264 if (first > i) 1264 if (first > i)
1265 first = i; 1265 first = i;
1266 if (last < i) 1266 if (last < i)
1267 last = i; 1267 last = i;
1268 } 1268 }
1269 } 1269 }
1270 pbm->io_space.start = ibase + (first << 21UL); 1270 pbm->io_space.start = ibase + (first << 21UL);
1271 pbm->io_space.end = ibase + (last << 21UL) + ((1 << 21UL) - 1); 1271 pbm->io_space.end = ibase + (last << 21UL) + ((1 << 21UL) - 1);
1272 pbm->io_space.flags = IORESOURCE_IO; 1272 pbm->io_space.flags = IORESOURCE_IO;
1273 1273
1274 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP); 1274 addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
1275 map = 0; 1275 map = 0;
1276 pci_config_read8(addr, &map); 1276 pci_config_read8(addr, &map);
1277 1277
1278 first = 8; 1278 first = 8;
1279 last = 0; 1279 last = 0;
1280 for (i = 0; i < 8; i++) { 1280 for (i = 0; i < 8; i++) {
1281 if ((map & (1 << i)) != 0) { 1281 if ((map & (1 << i)) != 0) {
1282 if (first > i) 1282 if (first > i)
1283 first = i; 1283 first = i;
1284 if (last < i) 1284 if (last < i)
1285 last = i; 1285 last = i;
1286 } 1286 }
1287 } 1287 }
1288 pbm->mem_space.start = mbase + (first << 29UL); 1288 pbm->mem_space.start = mbase + (first << 29UL);
1289 pbm->mem_space.end = mbase + (last << 29UL) + ((1 << 29UL) - 1); 1289 pbm->mem_space.end = mbase + (last << 29UL) + ((1 << 29UL) - 1);
1290 pbm->mem_space.flags = IORESOURCE_MEM; 1290 pbm->mem_space.flags = IORESOURCE_MEM;
1291 1291
1292 if (request_resource(&ioport_resource, &pbm->io_space) < 0) { 1292 if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
1293 prom_printf("Cannot register PBM-%c's IO space.\n", 1293 prom_printf("Cannot register PBM-%c's IO space.\n",
1294 (pbm == &p->pbm_A ? 'A' : 'B')); 1294 (pbm == &p->pbm_A ? 'A' : 'B'));
1295 prom_halt(); 1295 prom_halt();
1296 } 1296 }
1297 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) { 1297 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
1298 prom_printf("Cannot register PBM-%c's MEM space.\n", 1298 prom_printf("Cannot register PBM-%c's MEM space.\n",
1299 (pbm == &p->pbm_A ? 'A' : 'B')); 1299 (pbm == &p->pbm_A ? 'A' : 'B'));
1300 prom_halt(); 1300 prom_halt();
1301 } 1301 }
1302 1302
1303 /* Register legacy regions if this PBM covers that area. */ 1303 /* Register legacy regions if this PBM covers that area. */
1304 if (pbm->io_space.start == ibase && 1304 if (pbm->io_space.start == ibase &&
1305 pbm->mem_space.start == mbase) 1305 pbm->mem_space.start == mbase)
1306 pci_register_legacy_regions(&pbm->io_space, 1306 pci_register_legacy_regions(&pbm->io_space,
1307 &pbm->mem_space); 1307 &pbm->mem_space);
1308 } 1308 }
1309 1309
1310 static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 dma_begin) 1310 static void sabre_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 dma_begin)
1311 { 1311 {
1312 struct pci_pbm_info *pbm; 1312 struct pci_pbm_info *pbm;
1313 struct device_node *node; 1313 struct device_node *node;
1314 struct property *prop; 1314 struct property *prop;
1315 u32 *busrange; 1315 u32 *busrange;
1316 int len, simbas_found; 1316 int len, simbas_found;
1317 1317
1318 simbas_found = 0; 1318 simbas_found = 0;
1319 node = dp->child; 1319 node = dp->child;
1320 while (node != NULL) { 1320 while (node != NULL) {
1321 if (strcmp(node->name, "pci")) 1321 if (strcmp(node->name, "pci"))
1322 goto next_pci; 1322 goto next_pci;
1323 1323
1324 prop = of_find_property(node, "model", NULL); 1324 prop = of_find_property(node, "model", NULL);
1325 if (!prop || strncmp(prop->value, "SUNW,simba", prop->length)) 1325 if (!prop || strncmp(prop->value, "SUNW,simba", prop->length))
1326 goto next_pci; 1326 goto next_pci;
1327 1327
1328 simbas_found++; 1328 simbas_found++;
1329 1329
1330 prop = of_find_property(node, "bus-range", NULL); 1330 prop = of_find_property(node, "bus-range", NULL);
1331 busrange = prop->value; 1331 busrange = prop->value;
1332 if (busrange[0] == 1) 1332 if (busrange[0] == 1)
1333 pbm = &p->pbm_B; 1333 pbm = &p->pbm_B;
1334 else 1334 else
1335 pbm = &p->pbm_A; 1335 pbm = &p->pbm_A;
1336 1336
1337 pbm->name = node->full_name; 1337 pbm->name = node->full_name;
1338 printk("%s: SABRE PCI Bus Module\n", pbm->name); 1338 printk("%s: SABRE PCI Bus Module\n", pbm->name);
1339 1339
1340 pbm->chip_type = PBM_CHIP_TYPE_SABRE; 1340 pbm->chip_type = PBM_CHIP_TYPE_SABRE;
1341 pbm->parent = p; 1341 pbm->parent = p;
1342 pbm->prom_node = node; 1342 pbm->prom_node = node;
1343 pbm->pci_first_slot = 1; 1343 pbm->pci_first_slot = 1;
1344 pbm->pci_first_busno = busrange[0]; 1344 pbm->pci_first_busno = busrange[0];
1345 pbm->pci_last_busno = busrange[1]; 1345 pbm->pci_last_busno = busrange[1];
1346 1346
1347 prop = of_find_property(node, "ranges", &len); 1347 prop = of_find_property(node, "ranges", &len);
1348 if (prop) { 1348 if (prop) {
1349 pbm->pbm_ranges = prop->value; 1349 pbm->pbm_ranges = prop->value;
1350 pbm->num_pbm_ranges = 1350 pbm->num_pbm_ranges =
1351 (len / sizeof(struct linux_prom_pci_ranges)); 1351 (len / sizeof(struct linux_prom_pci_ranges));
1352 } else { 1352 } else {
1353 pbm->num_pbm_ranges = 0; 1353 pbm->num_pbm_ranges = 0;
1354 } 1354 }
1355 1355
1356 prop = of_find_property(node, "interrupt-map", &len); 1356 prop = of_find_property(node, "interrupt-map", &len);
1357 if (prop) { 1357 if (prop) {
1358 pbm->pbm_intmap = prop->value; 1358 pbm->pbm_intmap = prop->value;
1359 pbm->num_pbm_intmap = 1359 pbm->num_pbm_intmap =
1360 (len / sizeof(struct linux_prom_pci_intmap)); 1360 (len / sizeof(struct linux_prom_pci_intmap));
1361 1361
1362 prop = of_find_property(node, "interrupt-map-mask", 1362 prop = of_find_property(node, "interrupt-map-mask",
1363 NULL); 1363 NULL);
1364 pbm->pbm_intmask = prop->value; 1364 pbm->pbm_intmask = prop->value;
1365 } else { 1365 } else {
1366 pbm->num_pbm_intmap = 0; 1366 pbm->num_pbm_intmap = 0;
1367 } 1367 }
1368 1368
1369 pbm_register_toplevel_resources(p, pbm); 1369 pbm_register_toplevel_resources(p, pbm);
1370 1370
1371 next_pci: 1371 next_pci:
1372 node = node->sibling; 1372 node = node->sibling;
1373 } 1373 }
1374 if (simbas_found == 0) { 1374 if (simbas_found == 0) {
1375 /* No APBs underneath, probably this is a hummingbird 1375 /* No APBs underneath, probably this is a hummingbird
1376 * system. 1376 * system.
1377 */ 1377 */
1378 pbm = &p->pbm_A; 1378 pbm = &p->pbm_A;
1379 pbm->parent = p; 1379 pbm->parent = p;
1380 pbm->prom_node = dp; 1380 pbm->prom_node = dp;
1381 pbm->pci_first_busno = p->pci_first_busno; 1381 pbm->pci_first_busno = p->pci_first_busno;
1382 pbm->pci_last_busno = p->pci_last_busno; 1382 pbm->pci_last_busno = p->pci_last_busno;
1383 1383
1384 prop = of_find_property(dp, "ranges", &len); 1384 prop = of_find_property(dp, "ranges", &len);
1385 if (prop) { 1385 if (prop) {
1386 pbm->pbm_ranges = prop->value; 1386 pbm->pbm_ranges = prop->value;
1387 pbm->num_pbm_ranges = 1387 pbm->num_pbm_ranges =
1388 (len / sizeof(struct linux_prom_pci_ranges)); 1388 (len / sizeof(struct linux_prom_pci_ranges));
1389 } else { 1389 } else {
1390 pbm->num_pbm_ranges = 0; 1390 pbm->num_pbm_ranges = 0;
1391 } 1391 }
1392 1392
1393 prop = of_find_property(dp, "interrupt-map", &len); 1393 prop = of_find_property(dp, "interrupt-map", &len);
1394 if (prop) { 1394 if (prop) {
1395 pbm->pbm_intmap = prop->value; 1395 pbm->pbm_intmap = prop->value;
1396 pbm->num_pbm_intmap = 1396 pbm->num_pbm_intmap =
1397 (len / sizeof(struct linux_prom_pci_intmap)); 1397 (len / sizeof(struct linux_prom_pci_intmap));
1398 1398
1399 prop = of_find_property(dp, "interrupt-map-mask", 1399 prop = of_find_property(dp, "interrupt-map-mask",
1400 NULL); 1400 NULL);
1401 pbm->pbm_intmask = prop->value; 1401 pbm->pbm_intmask = prop->value;
1402 } else { 1402 } else {
1403 pbm->num_pbm_intmap = 0; 1403 pbm->num_pbm_intmap = 0;
1404 } 1404 }
1405 1405
1406 pbm->name = dp->full_name; 1406 pbm->name = dp->full_name;
1407 printk("%s: SABRE PCI Bus Module\n", pbm->name); 1407 printk("%s: SABRE PCI Bus Module\n", pbm->name);
1408 1408
1409 pbm->io_space.name = pbm->mem_space.name = pbm->name; 1409 pbm->io_space.name = pbm->mem_space.name = pbm->name;
1410 1410
1411 /* Hack up top-level resources. */ 1411 /* Hack up top-level resources. */
1412 pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE; 1412 pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
1413 pbm->io_space.end = pbm->io_space.start + (1UL << 24) - 1UL; 1413 pbm->io_space.end = pbm->io_space.start + (1UL << 24) - 1UL;
1414 pbm->io_space.flags = IORESOURCE_IO; 1414 pbm->io_space.flags = IORESOURCE_IO;
1415 1415
1416 pbm->mem_space.start = p->pbm_A.controller_regs + SABRE_MEMSPACE; 1416 pbm->mem_space.start = p->pbm_A.controller_regs + SABRE_MEMSPACE;
1417 pbm->mem_space.end = pbm->mem_space.start + (unsigned long)dma_begin - 1UL; 1417 pbm->mem_space.end = pbm->mem_space.start + (unsigned long)dma_begin - 1UL;
1418 pbm->mem_space.flags = IORESOURCE_MEM; 1418 pbm->mem_space.flags = IORESOURCE_MEM;
1419 1419
1420 if (request_resource(&ioport_resource, &pbm->io_space) < 0) { 1420 if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
1421 prom_printf("Cannot register Hummingbird's IO space.\n"); 1421 prom_printf("Cannot register Hummingbird's IO space.\n");
1422 prom_halt(); 1422 prom_halt();
1423 } 1423 }
1424 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) { 1424 if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
1425 prom_printf("Cannot register Hummingbird's MEM space.\n"); 1425 prom_printf("Cannot register Hummingbird's MEM space.\n");
1426 prom_halt(); 1426 prom_halt();
1427 } 1427 }
1428 1428
1429 pci_register_legacy_regions(&pbm->io_space, 1429 pci_register_legacy_regions(&pbm->io_space,
1430 &pbm->mem_space); 1430 &pbm->mem_space);
1431 } 1431 }
1432 } 1432 }
1433 1433
1434 void sabre_init(struct device_node *dp, char *model_name) 1434 void sabre_init(struct device_node *dp, char *model_name)
1435 { 1435 {
1436 struct linux_prom64_registers *pr_regs; 1436 struct linux_prom64_registers *pr_regs;
1437 struct pci_controller_info *p; 1437 struct pci_controller_info *p;
1438 struct pci_iommu *iommu; 1438 struct pci_iommu *iommu;
1439 struct property *prop; 1439 struct property *prop;
1440 int tsbsize; 1440 int tsbsize;
1441 u32 *busrange; 1441 u32 *busrange;
1442 u32 *vdma; 1442 u32 *vdma;
1443 u32 upa_portid, dma_mask; 1443 u32 upa_portid, dma_mask;
1444 u64 clear_irq; 1444 u64 clear_irq;
1445 1445
1446 hummingbird_p = 0; 1446 hummingbird_p = 0;
1447 if (!strcmp(model_name, "pci108e,a001")) 1447 if (!strcmp(model_name, "pci108e,a001"))
1448 hummingbird_p = 1; 1448 hummingbird_p = 1;
1449 else if (!strcmp(model_name, "SUNW,sabre")) { 1449 else if (!strcmp(model_name, "SUNW,sabre")) {
1450 prop = of_find_property(dp, "compatible", NULL); 1450 prop = of_find_property(dp, "compatible", NULL);
1451 if (prop) { 1451 if (prop) {
1452 const char *compat = prop->value; 1452 const char *compat = prop->value;
1453 1453
1454 if (!strcmp(compat, "pci108e,a001")) 1454 if (!strcmp(compat, "pci108e,a001"))
1455 hummingbird_p = 1; 1455 hummingbird_p = 1;
1456 } 1456 }
1457 if (!hummingbird_p) { 1457 if (!hummingbird_p) {
1458 struct device_node *dp; 1458 struct device_node *dp;
1459 1459
1460 /* Of course, Sun has to encode things a thousand 1460 /* Of course, Sun has to encode things a thousand
1461 * different ways, inconsistently. 1461 * different ways, inconsistently.
1462 */ 1462 */
1463 cpu_find_by_instance(0, &dp, NULL); 1463 cpu_find_by_instance(0, &dp, NULL);
1464 if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) 1464 if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe"))
1465 hummingbird_p = 1; 1465 hummingbird_p = 1;
1466 } 1466 }
1467 } 1467 }
1468 1468
1469 p = kzalloc(sizeof(*p), GFP_ATOMIC); 1469 p = kzalloc(sizeof(*p), GFP_ATOMIC);
1470 if (!p) { 1470 if (!p) {
1471 prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); 1471 prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
1472 prom_halt(); 1472 prom_halt();
1473 } 1473 }
1474 1474
1475 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); 1475 iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
1476 if (!iommu) { 1476 if (!iommu) {
1477 prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); 1477 prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
1478 prom_halt(); 1478 prom_halt();
1479 } 1479 }
1480 p->pbm_A.iommu = p->pbm_B.iommu = iommu; 1480 p->pbm_A.iommu = p->pbm_B.iommu = iommu;
1481 1481
1482 upa_portid = 0xff; 1482 upa_portid = 0xff;
1483 prop = of_find_property(dp, "upa-portid", NULL); 1483 prop = of_find_property(dp, "upa-portid", NULL);
1484 if (prop) 1484 if (prop)
1485 upa_portid = *(u32 *) prop->value; 1485 upa_portid = *(u32 *) prop->value;
1486 1486
1487 p->next = pci_controller_root; 1487 p->next = pci_controller_root;
1488 pci_controller_root = p; 1488 pci_controller_root = p;
1489 1489
1490 p->pbm_A.portid = upa_portid; 1490 p->pbm_A.portid = upa_portid;
1491 p->pbm_B.portid = upa_portid; 1491 p->pbm_B.portid = upa_portid;
1492 p->index = pci_num_controllers++; 1492 p->index = pci_num_controllers++;
1493 p->pbms_same_domain = 1; 1493 p->pbms_same_domain = 1;
1494 p->scan_bus = sabre_scan_bus; 1494 p->scan_bus = sabre_scan_bus;
1495 p->irq_build = sabre_irq_build; 1495 p->irq_build = sabre_irq_build;
1496 p->base_address_update = sabre_base_address_update; 1496 p->base_address_update = sabre_base_address_update;
1497 p->resource_adjust = sabre_resource_adjust; 1497 p->resource_adjust = sabre_resource_adjust;
1498 p->pci_ops = &sabre_ops; 1498 p->pci_ops = &sabre_ops;
1499 1499
1500 /* 1500 /*
1501 * Map in SABRE register set and report the presence of this SABRE. 1501 * Map in SABRE register set and report the presence of this SABRE.
1502 */ 1502 */
1503 1503
1504 prop = of_find_property(dp, "reg", NULL); 1504 prop = of_find_property(dp, "reg", NULL);
1505 pr_regs = prop->value; 1505 pr_regs = prop->value;
1506 1506
1507 /* 1507 /*
1508 * First REG in property is base of entire SABRE register space. 1508 * First REG in property is base of entire SABRE register space.
1509 */ 1509 */
1510 p->pbm_A.controller_regs = pr_regs[0].phys_addr; 1510 p->pbm_A.controller_regs = pr_regs[0].phys_addr;
1511 p->pbm_B.controller_regs = pr_regs[0].phys_addr; 1511 p->pbm_B.controller_regs = pr_regs[0].phys_addr;
1512 1512
1513 /* Clear interrupts */ 1513 /* Clear interrupts */
1514 1514
1515 /* PCI first */ 1515 /* PCI first */
1516 for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8) 1516 for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
1517 sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL); 1517 sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
1518 1518
1519 /* Then OBIO */ 1519 /* Then OBIO */
1520 for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8) 1520 for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
1521 sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL); 1521 sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
1522 1522
1523 /* Error interrupts are enabled later after the bus scan. */ 1523 /* Error interrupts are enabled later after the bus scan. */
1524 sabre_write(p->pbm_A.controller_regs + SABRE_PCICTRL, 1524 sabre_write(p->pbm_A.controller_regs + SABRE_PCICTRL,
1525 (SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR | 1525 (SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR |
1526 SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN)); 1526 SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
1527 1527
1528 /* Now map in PCI config space for entire SABRE. */ 1528 /* Now map in PCI config space for entire SABRE. */
1529 p->pbm_A.config_space = p->pbm_B.config_space = 1529 p->pbm_A.config_space = p->pbm_B.config_space =
1530 (p->pbm_A.controller_regs + SABRE_CONFIGSPACE); 1530 (p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
1531 1531
1532 prop = of_find_property(dp, "virtual-dma", NULL); 1532 prop = of_find_property(dp, "virtual-dma", NULL);
1533 vdma = prop->value; 1533 vdma = prop->value;
1534 1534
1535 dma_mask = vdma[0]; 1535 dma_mask = vdma[0];
1536 switch(vdma[1]) { 1536 switch(vdma[1]) {
1537 case 0x20000000: 1537 case 0x20000000:
1538 dma_mask |= 0x1fffffff; 1538 dma_mask |= 0x1fffffff;
1539 tsbsize = 64; 1539 tsbsize = 64;
1540 break; 1540 break;
1541 case 0x40000000: 1541 case 0x40000000:
1542 dma_mask |= 0x3fffffff; 1542 dma_mask |= 0x3fffffff;
1543 tsbsize = 128; 1543 tsbsize = 128;
1544 break; 1544 break;
1545 1545
1546 case 0x80000000: 1546 case 0x80000000:
1547 dma_mask |= 0x7fffffff; 1547 dma_mask |= 0x7fffffff;
1548 tsbsize = 128; 1548 tsbsize = 128;
1549 break; 1549 break;
1550 default: 1550 default:
1551 prom_printf("SABRE: strange virtual-dma size.\n"); 1551 prom_printf("SABRE: strange virtual-dma size.\n");
1552 prom_halt(); 1552 prom_halt();
1553 } 1553 }
1554 1554
1555 sabre_iommu_init(p, tsbsize, vdma[0], dma_mask); 1555 sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);
1556 1556
1557 prop = of_find_property(dp, "bus-range", NULL); 1557 prop = of_find_property(dp, "bus-range", NULL);
1558 busrange = prop->value; 1558 busrange = prop->value;
1559 p->pci_first_busno = busrange[0]; 1559 p->pci_first_busno = busrange[0];
1560 p->pci_last_busno = busrange[1]; 1560 p->pci_last_busno = busrange[1];
1561 1561
1562 /* 1562 /*
1563 * Look for APB underneath. 1563 * Look for APB underneath.
1564 */ 1564 */
1565 sabre_pbm_init(p, dp, vdma[0]); 1565 sabre_pbm_init(p, dp, vdma[0]);
1566 } 1566 }
1567 1567
arch/sparc64/kernel/pci_schizo.c
1 /* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $ 1 /* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $
2 * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support. 2 * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
3 * 3 *
4 * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5 */ 5 */
6 6
7 #include <linux/kernel.h> 7 #include <linux/kernel.h>
8 #include <linux/types.h> 8 #include <linux/types.h>
9 #include <linux/pci.h> 9 #include <linux/pci.h>
10 #include <linux/init.h> 10 #include <linux/init.h>
11 #include <linux/slab.h> 11 #include <linux/slab.h>
12 #include <linux/interrupt.h> 12 #include <linux/interrupt.h>
13 13
14 #include <asm/pbm.h> 14 #include <asm/pbm.h>
15 #include <asm/iommu.h> 15 #include <asm/iommu.h>
16 #include <asm/irq.h> 16 #include <asm/irq.h>
17 #include <asm/upa.h> 17 #include <asm/upa.h>
18 #include <asm/pstate.h> 18 #include <asm/pstate.h>
19 #include <asm/prom.h> 19 #include <asm/prom.h>
20 20
21 #include "pci_impl.h" 21 #include "pci_impl.h"
22 #include "iommu_common.h" 22 #include "iommu_common.h"
23 23
24 /* All SCHIZO registers are 64-bits. The following accessor 24 /* All SCHIZO registers are 64-bits. The following accessor
25 * routines are how they are accessed. The REG parameter 25 * routines are how they are accessed. The REG parameter
26 * is a physical address. 26 * is a physical address.
27 */ 27 */
28 #define schizo_read(__reg) \ 28 #define schizo_read(__reg) \
29 ({ u64 __ret; \ 29 ({ u64 __ret; \
30 __asm__ __volatile__("ldxa [%1] %2, %0" \ 30 __asm__ __volatile__("ldxa [%1] %2, %0" \
31 : "=r" (__ret) \ 31 : "=r" (__ret) \
32 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \ 32 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
33 : "memory"); \ 33 : "memory"); \
34 __ret; \ 34 __ret; \
35 }) 35 })
36 #define schizo_write(__reg, __val) \ 36 #define schizo_write(__reg, __val) \
37 __asm__ __volatile__("stxa %0, [%1] %2" \ 37 __asm__ __volatile__("stxa %0, [%1] %2" \
38 : /* no outputs */ \ 38 : /* no outputs */ \
39 : "r" (__val), "r" (__reg), \ 39 : "r" (__val), "r" (__reg), \
40 "i" (ASI_PHYS_BYPASS_EC_E) \ 40 "i" (ASI_PHYS_BYPASS_EC_E) \
41 : "memory") 41 : "memory")
42 42
43 /* This is a convention that at least Excalibur and Merlin 43 /* This is a convention that at least Excalibur and Merlin
44 * follow. I suppose the SCHIZO used in Starcat and friends 44 * follow. I suppose the SCHIZO used in Starcat and friends
45 * will do similar. 45 * will do similar.
46 * 46 *
47 * The only way I could see this changing is if the newlink 47 * The only way I could see this changing is if the newlink
48 * block requires more space in Schizo's address space than 48 * block requires more space in Schizo's address space than
49 * they predicted, thus requiring an address space reorg when 49 * they predicted, thus requiring an address space reorg when
50 * the newer Schizo is taped out. 50 * the newer Schizo is taped out.
51 */ 51 */
52 52
53 /* Streaming buffer control register. */ 53 /* Streaming buffer control register. */
54 #define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ 54 #define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
55 #define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ 55 #define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
56 #define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ 56 #define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
57 #define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ 57 #define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
58 #define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ 58 #define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
59 59
60 /* IOMMU control register. */ 60 /* IOMMU control register. */
61 #define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ 61 #define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
62 #define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ 62 #define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
63 #define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ 63 #define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
64 #define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ 64 #define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
65 #define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ 65 #define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
66 #define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ 66 #define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
67 #define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ 67 #define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
68 #define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ 68 #define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
69 #define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ 69 #define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
70 #define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ 70 #define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
71 #define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ 71 #define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
72 #define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ 72 #define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
73 #define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ 73 #define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
74 #define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ 74 #define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
75 #define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ 75 #define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
76 #define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ 76 #define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
77 #define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ 77 #define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
78 #define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ 78 #define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
79 79
80 /* Schizo config space address format is nearly identical to 80 /* Schizo config space address format is nearly identical to
81 * that of PSYCHO: 81 * that of PSYCHO:
82 * 82 *
83 * 32 24 23 16 15 11 10 8 7 2 1 0 83 * 32 24 23 16 15 11 10 8 7 2 1 0
84 * --------------------------------------------------------- 84 * ---------------------------------------------------------
85 * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 | 85 * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
86 * --------------------------------------------------------- 86 * ---------------------------------------------------------
87 */ 87 */
88 #define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space) 88 #define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space)
89 #define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \ 89 #define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \
90 (((unsigned long)(BUS) << 16) | \ 90 (((unsigned long)(BUS) << 16) | \
91 ((unsigned long)(DEVFN) << 8) | \ 91 ((unsigned long)(DEVFN) << 8) | \
92 ((unsigned long)(REG))) 92 ((unsigned long)(REG)))
93 93
94 static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm, 94 static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
95 unsigned char bus, 95 unsigned char bus,
96 unsigned int devfn, 96 unsigned int devfn,
97 int where) 97 int where)
98 { 98 {
99 if (!pbm) 99 if (!pbm)
100 return NULL; 100 return NULL;
101 bus -= pbm->pci_first_busno; 101 bus -= pbm->pci_first_busno;
102 return (void *) 102 return (void *)
103 (SCHIZO_CONFIG_BASE(pbm) | 103 (SCHIZO_CONFIG_BASE(pbm) |
104 SCHIZO_CONFIG_ENCODE(bus, devfn, where)); 104 SCHIZO_CONFIG_ENCODE(bus, devfn, where));
105 } 105 }
106 106
107 /* Just make sure the bus number is in range. */ 107 /* Just make sure the bus number is in range. */
108 static int schizo_out_of_range(struct pci_pbm_info *pbm, 108 static int schizo_out_of_range(struct pci_pbm_info *pbm,
109 unsigned char bus, 109 unsigned char bus,
110 unsigned char devfn) 110 unsigned char devfn)
111 { 111 {
112 if (bus < pbm->pci_first_busno || 112 if (bus < pbm->pci_first_busno ||
113 bus > pbm->pci_last_busno) 113 bus > pbm->pci_last_busno)
114 return 1; 114 return 1;
115 return 0; 115 return 0;
116 } 116 }
117 117
118 /* SCHIZO PCI configuration space accessors. */ 118 /* SCHIZO PCI configuration space accessors. */
119 119
120 static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, 120 static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
121 int where, int size, u32 *value) 121 int where, int size, u32 *value)
122 { 122 {
123 struct pci_pbm_info *pbm = bus_dev->sysdata; 123 struct pci_pbm_info *pbm = bus_dev->sysdata;
124 unsigned char bus = bus_dev->number; 124 unsigned char bus = bus_dev->number;
125 u32 *addr; 125 u32 *addr;
126 u16 tmp16; 126 u16 tmp16;
127 u8 tmp8; 127 u8 tmp8;
128 128
129 switch (size) { 129 switch (size) {
130 case 1: 130 case 1:
131 *value = 0xff; 131 *value = 0xff;
132 break; 132 break;
133 case 2: 133 case 2:
134 *value = 0xffff; 134 *value = 0xffff;
135 break; 135 break;
136 case 4: 136 case 4:
137 *value = 0xffffffff; 137 *value = 0xffffffff;
138 break; 138 break;
139 } 139 }
140 140
141 addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where); 141 addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
142 if (!addr) 142 if (!addr)
143 return PCIBIOS_SUCCESSFUL; 143 return PCIBIOS_SUCCESSFUL;
144 144
145 if (schizo_out_of_range(pbm, bus, devfn)) 145 if (schizo_out_of_range(pbm, bus, devfn))
146 return PCIBIOS_SUCCESSFUL; 146 return PCIBIOS_SUCCESSFUL;
147 switch (size) { 147 switch (size) {
148 case 1: 148 case 1:
149 pci_config_read8((u8 *)addr, &tmp8); 149 pci_config_read8((u8 *)addr, &tmp8);
150 *value = tmp8; 150 *value = tmp8;
151 break; 151 break;
152 152
153 case 2: 153 case 2:
154 if (where & 0x01) { 154 if (where & 0x01) {
155 printk("pci_read_config_word: misaligned reg [%x]\n", 155 printk("pci_read_config_word: misaligned reg [%x]\n",
156 where); 156 where);
157 return PCIBIOS_SUCCESSFUL; 157 return PCIBIOS_SUCCESSFUL;
158 } 158 }
159 pci_config_read16((u16 *)addr, &tmp16); 159 pci_config_read16((u16 *)addr, &tmp16);
160 *value = tmp16; 160 *value = tmp16;
161 break; 161 break;
162 162
163 case 4: 163 case 4:
164 if (where & 0x03) { 164 if (where & 0x03) {
165 printk("pci_read_config_dword: misaligned reg [%x]\n", 165 printk("pci_read_config_dword: misaligned reg [%x]\n",
166 where); 166 where);
167 return PCIBIOS_SUCCESSFUL; 167 return PCIBIOS_SUCCESSFUL;
168 } 168 }
169 pci_config_read32(addr, value); 169 pci_config_read32(addr, value);
170 break; 170 break;
171 } 171 }
172 return PCIBIOS_SUCCESSFUL; 172 return PCIBIOS_SUCCESSFUL;
173 } 173 }
174 174
175 static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, 175 static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
176 int where, int size, u32 value) 176 int where, int size, u32 value)
177 { 177 {
178 struct pci_pbm_info *pbm = bus_dev->sysdata; 178 struct pci_pbm_info *pbm = bus_dev->sysdata;
179 unsigned char bus = bus_dev->number; 179 unsigned char bus = bus_dev->number;
180 u32 *addr; 180 u32 *addr;
181 181
182 addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where); 182 addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
183 if (!addr) 183 if (!addr)
184 return PCIBIOS_SUCCESSFUL; 184 return PCIBIOS_SUCCESSFUL;
185 185
186 if (schizo_out_of_range(pbm, bus, devfn)) 186 if (schizo_out_of_range(pbm, bus, devfn))
187 return PCIBIOS_SUCCESSFUL; 187 return PCIBIOS_SUCCESSFUL;
188 188
189 switch (size) { 189 switch (size) {
190 case 1: 190 case 1:
191 pci_config_write8((u8 *)addr, value); 191 pci_config_write8((u8 *)addr, value);
192 break; 192 break;
193 193
194 case 2: 194 case 2:
195 if (where & 0x01) { 195 if (where & 0x01) {
196 printk("pci_write_config_word: misaligned reg [%x]\n", 196 printk("pci_write_config_word: misaligned reg [%x]\n",
197 where); 197 where);
198 return PCIBIOS_SUCCESSFUL; 198 return PCIBIOS_SUCCESSFUL;
199 } 199 }
200 pci_config_write16((u16 *)addr, value); 200 pci_config_write16((u16 *)addr, value);
201 break; 201 break;
202 202
203 case 4: 203 case 4:
204 if (where & 0x03) { 204 if (where & 0x03) {
205 printk("pci_write_config_dword: misaligned reg [%x]\n", 205 printk("pci_write_config_dword: misaligned reg [%x]\n",
206 where); 206 where);
207 return PCIBIOS_SUCCESSFUL; 207 return PCIBIOS_SUCCESSFUL;
208 } 208 }
209 209
210 pci_config_write32(addr, value); 210 pci_config_write32(addr, value);
211 } 211 }
212 return PCIBIOS_SUCCESSFUL; 212 return PCIBIOS_SUCCESSFUL;
213 } 213 }
214 214
215 static struct pci_ops schizo_ops = { 215 static struct pci_ops schizo_ops = {
216 .read = schizo_read_pci_cfg, 216 .read = schizo_read_pci_cfg,
217 .write = schizo_write_pci_cfg, 217 .write = schizo_write_pci_cfg,
218 }; 218 };
219 219
220 /* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the 220 /* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
221 * imap/iclr registers are per-PBM. 221 * imap/iclr registers are per-PBM.
222 */ 222 */
223 #define SCHIZO_IMAP_BASE 0x1000UL 223 #define SCHIZO_IMAP_BASE 0x1000UL
224 #define SCHIZO_ICLR_BASE 0x1400UL 224 #define SCHIZO_ICLR_BASE 0x1400UL
225 225
226 static unsigned long schizo_imap_offset(unsigned long ino) 226 static unsigned long schizo_imap_offset(unsigned long ino)
227 { 227 {
228 return SCHIZO_IMAP_BASE + (ino * 8UL); 228 return SCHIZO_IMAP_BASE + (ino * 8UL);
229 } 229 }
230 230
231 static unsigned long schizo_iclr_offset(unsigned long ino) 231 static unsigned long schizo_iclr_offset(unsigned long ino)
232 { 232 {
233 return SCHIZO_ICLR_BASE + (ino * 8UL); 233 return SCHIZO_ICLR_BASE + (ino * 8UL);
234 } 234 }
235 235
236 static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2) 236 static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
237 { 237 {
238 unsigned long sync_reg = (unsigned long) _arg2; 238 unsigned long sync_reg = (unsigned long) _arg2;
239 u64 mask = 1UL << (ino & IMAP_INO); 239 u64 mask = 1UL << (ino & IMAP_INO);
240 u64 val; 240 u64 val;
241 int limit; 241 int limit;
242 242
243 schizo_write(sync_reg, mask); 243 schizo_write(sync_reg, mask);
244 244
245 limit = 100000; 245 limit = 100000;
246 val = 0; 246 val = 0;
247 while (--limit) { 247 while (--limit) {
248 val = schizo_read(sync_reg); 248 val = schizo_read(sync_reg);
249 if (!(val & mask)) 249 if (!(val & mask))
250 break; 250 break;
251 } 251 }
252 if (limit <= 0) { 252 if (limit <= 0) {
253 printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n", 253 printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
254 val, mask); 254 val, mask);
255 } 255 }
256 256
257 if (_arg1) { 257 if (_arg1) {
258 static unsigned char cacheline[64] 258 static unsigned char cacheline[64]
259 __attribute__ ((aligned (64))); 259 __attribute__ ((aligned (64)));
260 260
261 __asm__ __volatile__("rd %%fprs, %0\n\t" 261 __asm__ __volatile__("rd %%fprs, %0\n\t"
262 "or %0, %4, %1\n\t" 262 "or %0, %4, %1\n\t"
263 "wr %1, 0x0, %%fprs\n\t" 263 "wr %1, 0x0, %%fprs\n\t"
264 "stda %%f0, [%5] %6\n\t" 264 "stda %%f0, [%5] %6\n\t"
265 "wr %0, 0x0, %%fprs\n\t" 265 "wr %0, 0x0, %%fprs\n\t"
266 "membar #Sync" 266 "membar #Sync"
267 : "=&r" (mask), "=&r" (val) 267 : "=&r" (mask), "=&r" (val)
268 : "0" (mask), "1" (val), 268 : "0" (mask), "1" (val),
269 "i" (FPRS_FEF), "r" (&cacheline[0]), 269 "i" (FPRS_FEF), "r" (&cacheline[0]),
270 "i" (ASI_BLK_COMMIT_P)); 270 "i" (ASI_BLK_COMMIT_P));
271 } 271 }
272 } 272 }
273 273
274 static unsigned long schizo_ino_to_iclr(struct pci_pbm_info *pbm, 274 static unsigned long schizo_ino_to_iclr(struct pci_pbm_info *pbm,
275 unsigned int ino) 275 unsigned int ino)
276 { 276 {
277 ino &= PCI_IRQ_INO; 277 ino &= PCI_IRQ_INO;
278 return pbm->pbm_regs + schizo_iclr_offset(ino) + 4; 278 return pbm->pbm_regs + schizo_iclr_offset(ino) + 4;
279 } 279 }
280 280
281 static unsigned long schizo_ino_to_imap(struct pci_pbm_info *pbm, 281 static unsigned long schizo_ino_to_imap(struct pci_pbm_info *pbm,
282 unsigned int ino) 282 unsigned int ino)
283 { 283 {
284 ino &= PCI_IRQ_INO; 284 ino &= PCI_IRQ_INO;
285 return pbm->pbm_regs + schizo_imap_offset(ino) + 4; 285 return pbm->pbm_regs + schizo_imap_offset(ino) + 4;
286 } 286 }
287 287
288 static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, 288 static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
289 struct pci_dev *pdev, 289 struct pci_dev *pdev,
290 unsigned int ino) 290 unsigned int ino)
291 { 291 {
292 unsigned long imap, iclr; 292 unsigned long imap, iclr;
293 int ign_fixup; 293 int ign_fixup;
294 int virt_irq; 294 int virt_irq;
295 295
296 ino &= PCI_IRQ_INO; 296 ino &= PCI_IRQ_INO;
297 297
298 /* Now build the IRQ bucket. */ 298 /* Now build the IRQ bucket. */
299 imap = schizo_ino_to_imap(pbm, ino); 299 imap = schizo_ino_to_imap(pbm, ino);
300 iclr = schizo_ino_to_iclr(pbm, ino); 300 iclr = schizo_ino_to_iclr(pbm, ino);
301 301
302 /* On Schizo, no inofixup occurs. This is because each 302 /* On Schizo, no inofixup occurs. This is because each
303 * INO has it's own IMAP register. On Psycho and Sabre 303 * INO has it's own IMAP register. On Psycho and Sabre
304 * there is only one IMAP register for each PCI slot even 304 * there is only one IMAP register for each PCI slot even
305 * though four different INOs can be generated by each 305 * though four different INOs can be generated by each
306 * PCI slot. 306 * PCI slot.
307 * 307 *
308 * But, for JBUS variants (essentially, Tomatillo), we have 308 * But, for JBUS variants (essentially, Tomatillo), we have
309 * to fixup the lowest bit of the interrupt group number. 309 * to fixup the lowest bit of the interrupt group number.
310 */ 310 */
311 ign_fixup = 0; 311 ign_fixup = 0;
312 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { 312 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
313 if (pbm->portid & 1) 313 if (pbm->portid & 1)
314 ign_fixup = (1 << 6); 314 ign_fixup = (1 << 6);
315 } 315 }
316 316
317 virt_irq = build_irq(ign_fixup, iclr, imap); 317 virt_irq = build_irq(ign_fixup, iclr, imap);
318 318
319 if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { 319 if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
320 irq_install_pre_handler(virt_irq, 320 irq_install_pre_handler(virt_irq,
321 tomatillo_wsync_handler, 321 tomatillo_wsync_handler,
322 ((pbm->chip_version <= 4) ? 322 ((pbm->chip_version <= 4) ?
323 (void *) 1 : (void *) 0), 323 (void *) 1 : (void *) 0),
324 (void *) pbm->sync_reg); 324 (void *) pbm->sync_reg);
325 } 325 }
326 326
327 return virt_irq; 327 return virt_irq;
328 } 328 }
329 329
330 /* SCHIZO error handling support. */ 330 /* SCHIZO error handling support. */
331 enum schizo_error_type { 331 enum schizo_error_type {
332 UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR 332 UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
333 }; 333 };
334 334
335 static DEFINE_SPINLOCK(stc_buf_lock); 335 static DEFINE_SPINLOCK(stc_buf_lock);
336 static unsigned long stc_error_buf[128]; 336 static unsigned long stc_error_buf[128];
337 static unsigned long stc_tag_buf[16]; 337 static unsigned long stc_tag_buf[16];
338 static unsigned long stc_line_buf[16]; 338 static unsigned long stc_line_buf[16];
339 339
340 #define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */ 340 #define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */
341 #define SCHIZO_CE_INO 0x31 /* Correctable ECC error */ 341 #define SCHIZO_CE_INO 0x31 /* Correctable ECC error */
342 #define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */ 342 #define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */
343 #define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */ 343 #define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */
344 #define SCHIZO_SERR_INO 0x34 /* Safari interface error */ 344 #define SCHIZO_SERR_INO 0x34 /* Safari interface error */
345 345
346 struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino) 346 struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
347 { 347 {
348 ino &= IMAP_INO; 348 ino &= IMAP_INO;
349 if (p->pbm_A.ino_bitmap & (1UL << ino)) 349 if (p->pbm_A.ino_bitmap & (1UL << ino))
350 return &p->pbm_A; 350 return &p->pbm_A;
351 if (p->pbm_B.ino_bitmap & (1UL << ino)) 351 if (p->pbm_B.ino_bitmap & (1UL << ino))
352 return &p->pbm_B; 352 return &p->pbm_B;
353 353
354 printk("PCI%d: No ino_bitmap entry for ino[%x], bitmaps " 354 printk("PCI%d: No ino_bitmap entry for ino[%x], bitmaps "
355 "PBM_A[%016lx] PBM_B[%016lx]", 355 "PBM_A[%016lx] PBM_B[%016lx]",
356 p->index, ino, 356 p->index, ino,
357 p->pbm_A.ino_bitmap, 357 p->pbm_A.ino_bitmap,
358 p->pbm_B.ino_bitmap); 358 p->pbm_B.ino_bitmap);
359 printk("PCI%d: Using PBM_A, report this problem immediately.\n", 359 printk("PCI%d: Using PBM_A, report this problem immediately.\n",
360 p->index); 360 p->index);
361 361
362 return &p->pbm_A; 362 return &p->pbm_A;
363 } 363 }
364 364
365 static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq) 365 static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
366 { 366 {
367 struct pci_pbm_info *pbm; 367 struct pci_pbm_info *pbm;
368 unsigned long iclr; 368 unsigned long iclr;
369 369
370 /* Do not clear the interrupt for the other PCI bus. 370 /* Do not clear the interrupt for the other PCI bus.
371 * 371 *
372 * This "ACK both PBM IRQs" only needs to be performed 372 * This "ACK both PBM IRQs" only needs to be performed
373 * for chip-wide error interrupts. 373 * for chip-wide error interrupts.
374 */ 374 */
375 if ((irq & IMAP_INO) == SCHIZO_PCIERR_A_INO || 375 if ((irq & IMAP_INO) == SCHIZO_PCIERR_A_INO ||
376 (irq & IMAP_INO) == SCHIZO_PCIERR_B_INO) 376 (irq & IMAP_INO) == SCHIZO_PCIERR_B_INO)
377 return; 377 return;
378 378
379 pbm = pbm_for_ino(p, irq); 379 pbm = pbm_for_ino(p, irq);
380 if (pbm == &p->pbm_A) 380 if (pbm == &p->pbm_A)
381 pbm = &p->pbm_B; 381 pbm = &p->pbm_B;
382 else 382 else
383 pbm = &p->pbm_A; 383 pbm = &p->pbm_A;
384 384
385 schizo_irq_build(pbm, NULL, 385 schizo_irq_build(pbm, NULL,
386 (pbm->portid << 6) | (irq & IMAP_INO)); 386 (pbm->portid << 6) | (irq & IMAP_INO));
387 387
388 iclr = schizo_ino_to_iclr(pbm, 388 iclr = schizo_ino_to_iclr(pbm,
389 (pbm->portid << 6) | (irq & IMAP_INO)); 389 (pbm->portid << 6) | (irq & IMAP_INO));
390 upa_writel(ICLR_IDLE, iclr); 390 upa_writel(ICLR_IDLE, iclr);
391 } 391 }
392 392
393 #define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */ 393 #define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */
394 #define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */ 394 #define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */
395 #define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */ 395 #define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */
396 396
397 #define SCHIZO_STCERR_WRITE 0x2UL 397 #define SCHIZO_STCERR_WRITE 0x2UL
398 #define SCHIZO_STCERR_READ 0x1UL 398 #define SCHIZO_STCERR_READ 0x1UL
399 399
400 #define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL 400 #define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL
401 #define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL 401 #define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL
402 #define SCHIZO_STCTAG_VALID 0x8000000000000000UL 402 #define SCHIZO_STCTAG_VALID 0x8000000000000000UL
403 #define SCHIZO_STCTAG_READ 0x4000000000000000UL 403 #define SCHIZO_STCTAG_READ 0x4000000000000000UL
404 404
405 #define SCHIZO_STCLINE_LINDX 0x0000000007800000UL 405 #define SCHIZO_STCLINE_LINDX 0x0000000007800000UL
406 #define SCHIZO_STCLINE_SPTR 0x000000000007e000UL 406 #define SCHIZO_STCLINE_SPTR 0x000000000007e000UL
407 #define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL 407 #define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL
408 #define SCHIZO_STCLINE_EPTR 0x000000000000003fUL 408 #define SCHIZO_STCLINE_EPTR 0x000000000000003fUL
409 #define SCHIZO_STCLINE_VALID 0x0000000000600000UL 409 #define SCHIZO_STCLINE_VALID 0x0000000000600000UL
410 #define SCHIZO_STCLINE_FOFN 0x0000000000180000UL 410 #define SCHIZO_STCLINE_FOFN 0x0000000000180000UL
411 411
412 static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, 412 static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
413 enum schizo_error_type type) 413 enum schizo_error_type type)
414 { 414 {
415 struct pci_strbuf *strbuf = &pbm->stc; 415 struct pci_strbuf *strbuf = &pbm->stc;
416 unsigned long regbase = pbm->pbm_regs; 416 unsigned long regbase = pbm->pbm_regs;
417 unsigned long err_base, tag_base, line_base; 417 unsigned long err_base, tag_base, line_base;
418 u64 control; 418 u64 control;
419 int i; 419 int i;
420 420
421 err_base = regbase + SCHIZO_STC_ERR; 421 err_base = regbase + SCHIZO_STC_ERR;
422 tag_base = regbase + SCHIZO_STC_TAG; 422 tag_base = regbase + SCHIZO_STC_TAG;
423 line_base = regbase + SCHIZO_STC_LINE; 423 line_base = regbase + SCHIZO_STC_LINE;
424 424
425 spin_lock(&stc_buf_lock); 425 spin_lock(&stc_buf_lock);
426 426
427 /* This is __REALLY__ dangerous. When we put the 427 /* This is __REALLY__ dangerous. When we put the
428 * streaming buffer into diagnostic mode to probe 428 * streaming buffer into diagnostic mode to probe
429 * it's tags and error status, we _must_ clear all 429 * it's tags and error status, we _must_ clear all
430 * of the line tag valid bits before re-enabling 430 * of the line tag valid bits before re-enabling
431 * the streaming buffer. If any dirty data lives 431 * the streaming buffer. If any dirty data lives
432 * in the STC when we do this, we will end up 432 * in the STC when we do this, we will end up
433 * invalidating it before it has a chance to reach 433 * invalidating it before it has a chance to reach
434 * main memory. 434 * main memory.
435 */ 435 */
436 control = schizo_read(strbuf->strbuf_control); 436 control = schizo_read(strbuf->strbuf_control);
437 schizo_write(strbuf->strbuf_control, 437 schizo_write(strbuf->strbuf_control,
438 (control | SCHIZO_STRBUF_CTRL_DENAB)); 438 (control | SCHIZO_STRBUF_CTRL_DENAB));
439 for (i = 0; i < 128; i++) { 439 for (i = 0; i < 128; i++) {
440 unsigned long val; 440 unsigned long val;
441 441
442 val = schizo_read(err_base + (i * 8UL)); 442 val = schizo_read(err_base + (i * 8UL));
443 schizo_write(err_base + (i * 8UL), 0UL); 443 schizo_write(err_base + (i * 8UL), 0UL);
444 stc_error_buf[i] = val; 444 stc_error_buf[i] = val;
445 } 445 }
446 for (i = 0; i < 16; i++) { 446 for (i = 0; i < 16; i++) {
447 stc_tag_buf[i] = schizo_read(tag_base + (i * 8UL)); 447 stc_tag_buf[i] = schizo_read(tag_base + (i * 8UL));
448 stc_line_buf[i] = schizo_read(line_base + (i * 8UL)); 448 stc_line_buf[i] = schizo_read(line_base + (i * 8UL));
449 schizo_write(tag_base + (i * 8UL), 0UL); 449 schizo_write(tag_base + (i * 8UL), 0UL);
450 schizo_write(line_base + (i * 8UL), 0UL); 450 schizo_write(line_base + (i * 8UL), 0UL);
451 } 451 }
452 452
453 /* OK, state is logged, exit diagnostic mode. */ 453 /* OK, state is logged, exit diagnostic mode. */
454 schizo_write(strbuf->strbuf_control, control); 454 schizo_write(strbuf->strbuf_control, control);
455 455
456 for (i = 0; i < 16; i++) { 456 for (i = 0; i < 16; i++) {
457 int j, saw_error, first, last; 457 int j, saw_error, first, last;
458 458
459 saw_error = 0; 459 saw_error = 0;
460 first = i * 8; 460 first = i * 8;
461 last = first + 8; 461 last = first + 8;
462 for (j = first; j < last; j++) { 462 for (j = first; j < last; j++) {
463 unsigned long errval = stc_error_buf[j]; 463 unsigned long errval = stc_error_buf[j];
464 if (errval != 0) { 464 if (errval != 0) {
465 saw_error++; 465 saw_error++;
466 printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n", 466 printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
467 pbm->name, 467 pbm->name,
468 j, 468 j,
469 (errval & SCHIZO_STCERR_WRITE) ? 1 : 0, 469 (errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
470 (errval & SCHIZO_STCERR_READ) ? 1 : 0); 470 (errval & SCHIZO_STCERR_READ) ? 1 : 0);
471 } 471 }
472 } 472 }
473 if (saw_error != 0) { 473 if (saw_error != 0) {
474 unsigned long tagval = stc_tag_buf[i]; 474 unsigned long tagval = stc_tag_buf[i];
475 unsigned long lineval = stc_line_buf[i]; 475 unsigned long lineval = stc_line_buf[i];
476 printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n", 476 printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
477 pbm->name, 477 pbm->name,
478 i, 478 i,
479 ((tagval & SCHIZO_STCTAG_PPN) >> 19UL), 479 ((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
480 (tagval & SCHIZO_STCTAG_VPN), 480 (tagval & SCHIZO_STCTAG_VPN),
481 ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0), 481 ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
482 ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0)); 482 ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));
483 483
484 /* XXX Should spit out per-bank error information... -DaveM */ 484 /* XXX Should spit out per-bank error information... -DaveM */
485 printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)" 485 printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
486 "V(%d)FOFN(%d)]\n", 486 "V(%d)FOFN(%d)]\n",
487 pbm->name, 487 pbm->name,
488 i, 488 i,
489 ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL), 489 ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
490 ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL), 490 ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
491 ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL), 491 ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
492 ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL), 492 ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
493 ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0), 493 ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
494 ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0)); 494 ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
495 } 495 }
496 } 496 }
497 497
498 spin_unlock(&stc_buf_lock); 498 spin_unlock(&stc_buf_lock);
499 } 499 }
500 500
501 /* IOMMU is per-PBM in Schizo, so interrogate both for anonymous 501 /* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
502 * controller level errors. 502 * controller level errors.
503 */ 503 */
504 504
505 #define SCHIZO_IOMMU_TAG 0xa580UL 505 #define SCHIZO_IOMMU_TAG 0xa580UL
506 #define SCHIZO_IOMMU_DATA 0xa600UL 506 #define SCHIZO_IOMMU_DATA 0xa600UL
507 507
508 #define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL 508 #define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL
509 #define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL 509 #define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL
510 #define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL 510 #define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL
511 #define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL 511 #define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL
512 #define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL 512 #define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL
513 #define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL 513 #define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL
514 #define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL 514 #define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL
515 515
516 #define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL 516 #define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL
517 #define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL 517 #define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL
518 #define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL 518 #define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL
519 519
/* Check one PBM's IOMMU for translation errors after a fatal bus
 * error has been signalled (IOMMU is per-PBM on Schizo).
 *
 * If the translation-error bit is latched in the IOMMU control
 * register, the 16-entry IOMMU TLB is snapshotted in diagnostic mode
 * and every entry carrying error status is decoded to the console.
 * We are already in a severe error state here, so the goal is maximum
 * diagnostic output, not safe recovery.  Finally the streaming cache
 * is checked too, if this PBM has one enabled.
 */
static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
					 enum schizo_error_type type)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long iommu_tag[16];	/* TLB tag snapshot */
	unsigned long iommu_data[16];	/* TLB data snapshot */
	unsigned long flags;
	u64 control;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	control = schizo_read(iommu->iommu_control);
	if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
		unsigned long base;
		char *type_string;

		/* Clear the error encountered bit. */
		control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
		schizo_write(iommu->iommu_control, control);

		/* Decode the 2-bit translation error status field. */
		switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
		case 0:
			type_string = "Protection Error";
			break;
		case 1:
			type_string = "Invalid Error";
			break;
		case 2:
			type_string = "TimeOut Error";
			break;
		case 3:
		default:
			type_string = "ECC Error";
			break;
		};
		printk("%s: IOMMU Error, type[%s]\n",
		       pbm->name, type_string);

		/* Put the IOMMU into diagnostic mode and probe
		 * its TLB for entries with error status.
		 *
		 * It is very possible for another DVMA to occur
		 * while we do this probe, and corrupt the system
		 * further.  But we are so screwed at this point
		 * that we are likely to crash hard anyways, so
		 * get as much diagnostic information to the
		 * console as we can.
		 */
		schizo_write(iommu->iommu_control,
			     control | SCHIZO_IOMMU_CTRL_DENAB);

		base = pbm->pbm_regs;

		/* Snapshot all 16 TLB tag/data pairs, clearing each
		 * entry behind us as we go.
		 */
		for (i = 0; i < 16; i++) {
			iommu_tag[i] =
				schizo_read(base + SCHIZO_IOMMU_TAG + (i * 8UL));
			iommu_data[i] =
				schizo_read(base + SCHIZO_IOMMU_DATA + (i * 8UL));

			/* Now clear out the entry. */
			schizo_write(base + SCHIZO_IOMMU_TAG + (i * 8UL), 0);
			schizo_write(base + SCHIZO_IOMMU_DATA + (i * 8UL), 0);
		}

		/* Leave diagnostic mode. */
		schizo_write(iommu->iommu_control, control);

		/* Report only those snapshotted entries with error
		 * status set in the tag.
		 */
		for (i = 0; i < 16; i++) {
			unsigned long tag, data;

			tag = iommu_tag[i];
			if (!(tag & SCHIZO_IOMMU_TAG_ERR))
				continue;

			data = iommu_data[i];
			switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
			case 0:
				type_string = "Protection Error";
				break;
			case 1:
				type_string = "Invalid Error";
				break;
			case 2:
				type_string = "TimeOut Error";
				break;
			case 3:
			default:
				type_string = "ECC Error";
				break;
			};
			printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
			       "sz(%dK) vpg(%08lx)]\n",
			       pbm->name, i, type_string,
			       (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
			       ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
			       ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
			       (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
			printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
			       pbm->name, i,
			       ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
			       ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
			       (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
		}
	}
	if (pbm->stc.strbuf_enabled)
		__schizo_check_stc_error_pbm(pbm, type);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
629 629
630 static void schizo_check_iommu_error(struct pci_controller_info *p, 630 static void schizo_check_iommu_error(struct pci_controller_info *p,
631 enum schizo_error_type type) 631 enum schizo_error_type type)
632 { 632 {
633 schizo_check_iommu_error_pbm(&p->pbm_A, type); 633 schizo_check_iommu_error_pbm(&p->pbm_A, type);
634 schizo_check_iommu_error_pbm(&p->pbm_B, type); 634 schizo_check_iommu_error_pbm(&p->pbm_B, type);
635 } 635 }
636 636
637 /* Uncorrectable ECC error status gathering. */ 637 /* Uncorrectable ECC error status gathering. */
638 #define SCHIZO_UE_AFSR 0x10030UL 638 #define SCHIZO_UE_AFSR 0x10030UL
639 #define SCHIZO_UE_AFAR 0x10038UL 639 #define SCHIZO_UE_AFAR 0x10038UL
640 640
641 #define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */ 641 #define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */
642 #define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */ 642 #define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */
643 #define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */ 643 #define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */
644 #define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */ 644 #define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */
645 #define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */ 645 #define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */
646 #define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */ 646 #define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */
647 #define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */ 647 #define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */
648 #define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */ 648 #define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */
649 #define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */ 649 #define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */
650 #define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */ 650 #define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */
651 #define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */ 651 #define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */
652 #define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */ 652 #define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */
653 #define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */ 653 #define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */
654 #define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */ 654 #define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */
655 655
/* Uncorrectable ECC error interrupt handler.
 *
 * The UE AFSR/AFAR pair lives at a controller-wide offset reached
 * via pbm_B's register base (the macros above mark these registers
 * "Safari", i.e. shared, not per-PBM).  The AFAR is latched first,
 * then the AFSR is re-read until the hardware is done updating it
 * (ERRPNDG clear) or we give up after 1000 tries.  Primary/secondary
 * status bits are cleared by writing them back, the error is decoded
 * to the console, and both PBMs' IOMMUs are interrogated for related
 * translation errors.  Returns IRQ_NONE if no UE status was pending.
 */
static irqreturn_t schizo_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pci_controller_info *p = dev_id;
	unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFSR;
	unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFAR;
	unsigned long afsr, afar, error_bits;
	int reported, limit;

	/* Latch uncorrectable error status. */
	afar = schizo_read(afar_reg);

	/* If either of the error pending bits are set in the
	 * AFSR, the error status is being actively updated by
	 * the hardware and we must re-read to get a clean value.
	 */
	limit = 1000;
	do {
		afsr = schizo_read(afsr_reg);
	} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);

	/* Clear the primary/secondary error status bits. */
	error_bits = afsr &
		(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
		 SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
	if (!error_bits)
		return IRQ_NONE;
	schizo_write(afsr_reg, error_bits);

	/* Log the error. */
	printk("PCI%d: Uncorrectable Error, primary error type[%s]\n",
	       p->index,
	       (((error_bits & SCHIZO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SCHIZO_UEAFSR_PDRD) ?
		  "DMA Read" :
		  ((error_bits & SCHIZO_UEAFSR_PDWR) ?
		   "DMA Write" : "???")))));
	printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
	       p->index,
	       (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
	       (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
	       (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
	printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
	       p->index,
	       (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
	       (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
	       (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
	       (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
	printk("PCI%d: UE AFAR [%016lx]\n", p->index, afar);
	printk("PCI%d: UE Secondary errors [", p->index);
	reported = 0;
	if (afsr & SCHIZO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SCHIZO_UEAFSR_SDMA) {
		reported++;
		printk("(DMA)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* Interrogate IOMMU for error status. */
	schizo_check_iommu_error(p, UE_ERR);

	schizo_clear_other_err_intr(p, irq);

	return IRQ_HANDLED;
}
727 727
728 #define SCHIZO_CE_AFSR 0x10040UL 728 #define SCHIZO_CE_AFSR 0x10040UL
729 #define SCHIZO_CE_AFAR 0x10048UL 729 #define SCHIZO_CE_AFAR 0x10048UL
730 730
731 #define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL 731 #define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL
732 #define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL 732 #define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL
733 #define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL 733 #define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL
734 #define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL 734 #define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL
735 #define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL 735 #define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL
736 #define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL 736 #define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL
737 #define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL 737 #define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL
738 #define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL 738 #define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL
739 #define SCHIZO_CEAFSR_AID 0x000000001f000000UL 739 #define SCHIZO_CEAFSR_AID 0x000000001f000000UL
740 #define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL 740 #define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL
741 #define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL 741 #define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL
742 #define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL 742 #define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL
743 #define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL 743 #define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL
744 #define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL 744 #define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL
745 745
746 static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs) 746 static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
747 { 747 {
748 struct pci_controller_info *p = dev_id; 748 struct pci_controller_info *p = dev_id;
749 unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFSR; 749 unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFSR;
750 unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFAR; 750 unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFAR;
751 unsigned long afsr, afar, error_bits; 751 unsigned long afsr, afar, error_bits;
752 int reported, limit; 752 int reported, limit;
753 753
754 /* Latch error status. */ 754 /* Latch error status. */
755 afar = schizo_read(afar_reg); 755 afar = schizo_read(afar_reg);
756 756
757 /* If either of the error pending bits are set in the 757 /* If either of the error pending bits are set in the
758 * AFSR, the error status is being actively updated by 758 * AFSR, the error status is being actively updated by
759 * the hardware and we must re-read to get a clean value. 759 * the hardware and we must re-read to get a clean value.
760 */ 760 */
761 limit = 1000; 761 limit = 1000;
762 do { 762 do {
763 afsr = schizo_read(afsr_reg); 763 afsr = schizo_read(afsr_reg);
764 } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); 764 } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
765 765
766 /* Clear primary/secondary error status bits. */ 766 /* Clear primary/secondary error status bits. */
767 error_bits = afsr & 767 error_bits = afsr &
768 (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR | 768 (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
769 SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA); 769 SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
770 if (!error_bits) 770 if (!error_bits)
771 return IRQ_NONE; 771 return IRQ_NONE;
772 schizo_write(afsr_reg, error_bits); 772 schizo_write(afsr_reg, error_bits);
773 773
774 /* Log the error. */ 774 /* Log the error. */
775 printk("PCI%d: Correctable Error, primary error type[%s]\n", 775 printk("PCI%d: Correctable Error, primary error type[%s]\n",
776 p->index, 776 p->index,
777 (((error_bits & SCHIZO_CEAFSR_PPIO) ? 777 (((error_bits & SCHIZO_CEAFSR_PPIO) ?
778 "PIO" : 778 "PIO" :
779 ((error_bits & SCHIZO_CEAFSR_PDRD) ? 779 ((error_bits & SCHIZO_CEAFSR_PDRD) ?
780 "DMA Read" : 780 "DMA Read" :
781 ((error_bits & SCHIZO_CEAFSR_PDWR) ? 781 ((error_bits & SCHIZO_CEAFSR_PDWR) ?
782 "DMA Write" : "???"))))); 782 "DMA Write" : "???")))));
783 783
784 /* XXX Use syndrome and afar to print out module string just like 784 /* XXX Use syndrome and afar to print out module string just like
785 * XXX UDB CE trap handler does... -DaveM 785 * XXX UDB CE trap handler does... -DaveM
786 */ 786 */
787 printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", 787 printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
788 p->index, 788 p->index,
789 (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, 789 (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
790 (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, 790 (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
791 (afsr & SCHIZO_UEAFSR_AID) >> 24UL); 791 (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
792 printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", 792 printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
793 p->index, 793 p->index,
794 (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, 794 (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
795 (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0, 795 (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
796 (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, 796 (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
797 (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, 797 (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
798 (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); 798 (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
799 printk("PCI%d: CE AFAR [%016lx]\n", p->index, afar); 799 printk("PCI%d: CE AFAR [%016lx]\n", p->index, afar);
800 printk("PCI%d: CE Secondary errors [", p->index); 800 printk("PCI%d: CE Secondary errors [", p->index);
801 reported = 0; 801 reported = 0;
802 if (afsr & SCHIZO_CEAFSR_SPIO) { 802 if (afsr & SCHIZO_CEAFSR_SPIO) {
803 reported++; 803 reported++;
804 printk("(PIO)"); 804 printk("(PIO)");
805 } 805 }
806 if (afsr & SCHIZO_CEAFSR_SDMA) { 806 if (afsr & SCHIZO_CEAFSR_SDMA) {
807 reported++; 807 reported++;
808 printk("(DMA)"); 808 printk("(DMA)");
809 } 809 }
810 if (!reported) 810 if (!reported)
811 printk("(none)"); 811 printk("(none)");
812 printk("]\n"); 812 printk("]\n");
813 813
814 schizo_clear_other_err_intr(p, irq); 814 schizo_clear_other_err_intr(p, irq);
815 815
816 return IRQ_HANDLED; 816 return IRQ_HANDLED;
817 } 817 }
818 818
819 #define SCHIZO_PCI_AFSR 0x2010UL 819 #define SCHIZO_PCI_AFSR 0x2010UL
820 #define SCHIZO_PCI_AFAR 0x2018UL 820 #define SCHIZO_PCI_AFAR 0x2018UL
821 821
822 #define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */ 822 #define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */
823 #define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */ 823 #define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */
824 #define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */ 824 #define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */
825 #define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */ 825 #define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */
826 #define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */ 826 #define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */
827 #define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */ 827 #define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */
828 #define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */ 828 #define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */
829 #define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */ 829 #define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */
830 #define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */ 830 #define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */
831 #define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */ 831 #define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */
832 #define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */ 832 #define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */
833 #define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */ 833 #define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */
834 #define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */ 834 #define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */
835 #define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */ 835 #define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */
836 #define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */ 836 #define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */
837 #define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */ 837 #define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */
838 #define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */ 838 #define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */
839 839
840 #define SCHIZO_PCI_CTRL (0x2000UL) 840 #define SCHIZO_PCI_CTRL (0x2000UL)
841 #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ 841 #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */
842 #define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */ 842 #define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */
843 #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ 843 #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */
844 #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ 844 #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */
845 #define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ 845 #define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */
846 #define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */ 846 #define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */
847 #define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */ 847 #define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */
848 #define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */ 848 #define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */
849 #define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */ 849 #define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */
850 #define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */ 850 #define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */
851 #define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */ 851 #define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */
852 #define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */ 852 #define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */
853 #define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */ 853 #define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */
854 #define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */ 854 #define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */
855 #define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */ 855 #define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */
856 #define SCHIZO_PCICTRL_PTO_SHIFT 24UL 856 #define SCHIZO_PCICTRL_PTO_SHIFT 24UL
857 #define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */ 857 #define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */
858 #define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */ 858 #define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */
859 #define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */ 859 #define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */
860 #define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */ 860 #define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */
861 #define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */ 861 #define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */
862 #define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */ 862 #define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */
863 #define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */ 863 #define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */
864 #define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */ 864 #define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */
865 #define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */ 865 #define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */
866 #define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */ 866 #define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */
867 #define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */ 867 #define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */
868 868
/* Fallback PBM error decode, called by schizo_pcierr_intr() when the
 * PCI AFSR showed no primary/secondary error bits.  Checks the PBM's
 * PCI control CSR for latched bus-level errors and the bridge's
 * PCI_STATUS config register for standard PCI error bits, logging and
 * clearing whatever is found.  Returns IRQ_HANDLED if either source
 * had an error pending, IRQ_NONE otherwise.
 */
static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
{
	unsigned long csr_reg, csr, csr_error_bits;
	irqreturn_t ret = IRQ_NONE;
	u16 stat;

	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
	csr = schizo_read(csr_reg);
	csr_error_bits =
		csr & (SCHIZO_PCICTRL_BUS_UNUS |
		       SCHIZO_PCICTRL_TTO_ERR |
		       SCHIZO_PCICTRL_RTRY_ERR |
		       SCHIZO_PCICTRL_DTO_ERR |
		       SCHIZO_PCICTRL_SBH_ERR |
		       SCHIZO_PCICTRL_SERR);
	if (csr_error_bits) {
		/* Clear the errors.  (Writing the CSR value back clears
		 * the latched error bits -- presumably write-one-to-clear
		 * semantics; confirm against the Schizo manual.)
		 */
		schizo_write(csr_reg, csr);

		/* Log 'em. */
		if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
			printk("%s: Bus unusable error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
			printk("%s: PCI TRDY# timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
			printk("%s: PCI excessive retry error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
			printk("%s: PCI discard timeout error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
			printk("%s: PCI streaming byte hole error asserted.\n",
			       pbm->name);
		if (csr_error_bits & SCHIZO_PCICTRL_SERR)
			printk("%s: PCI SERR signal asserted.\n",
			       pbm->name);
		ret = IRQ_HANDLED;
	}
	/* Also check the standard PCI status word of the bus's bridge
	 * device; writing all-ones clears the RW1C error bits.
	 */
	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
	if (stat & (PCI_STATUS_PARITY |
		    PCI_STATUS_SIG_TARGET_ABORT |
		    PCI_STATUS_REC_TARGET_ABORT |
		    PCI_STATUS_REC_MASTER_ABORT |
		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
		       pbm->name, stat);
		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
		ret = IRQ_HANDLED;
	}
	return ret;
}
922 922
923 static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs) 923 static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
924 { 924 {
925 struct pci_pbm_info *pbm = dev_id; 925 struct pci_pbm_info *pbm = dev_id;
926 struct pci_controller_info *p = pbm->parent; 926 struct pci_controller_info *p = pbm->parent;
927 unsigned long afsr_reg, afar_reg, base; 927 unsigned long afsr_reg, afar_reg, base;
928 unsigned long afsr, afar, error_bits; 928 unsigned long afsr, afar, error_bits;
929 int reported; 929 int reported;
930 930
931 base = pbm->pbm_regs; 931 base = pbm->pbm_regs;
932 932
933 afsr_reg = base + SCHIZO_PCI_AFSR; 933 afsr_reg = base + SCHIZO_PCI_AFSR;
934 afar_reg = base + SCHIZO_PCI_AFAR; 934 afar_reg = base + SCHIZO_PCI_AFAR;
935 935
936 /* Latch error status. */ 936 /* Latch error status. */
937 afar = schizo_read(afar_reg); 937 afar = schizo_read(afar_reg);
938 afsr = schizo_read(afsr_reg); 938 afsr = schizo_read(afsr_reg);
939 939
940 /* Clear primary/secondary error status bits. */ 940 /* Clear primary/secondary error status bits. */
941 error_bits = afsr & 941 error_bits = afsr &
942 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | 942 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
943 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | 943 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
944 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | 944 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
945 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | 945 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
946 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | 946 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
947 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS); 947 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
948 if (!error_bits) 948 if (!error_bits)
949 return schizo_pcierr_intr_other(pbm); 949 return schizo_pcierr_intr_other(pbm);
950 schizo_write(afsr_reg, error_bits); 950 schizo_write(afsr_reg, error_bits);
951 951
952 /* Log the error. */ 952 /* Log the error. */
953 printk("%s: PCI Error, primary error type[%s]\n", 953 printk("%s: PCI Error, primary error type[%s]\n",
954 pbm->name, 954 pbm->name,
955 (((error_bits & SCHIZO_PCIAFSR_PMA) ? 955 (((error_bits & SCHIZO_PCIAFSR_PMA) ?
956 "Master Abort" : 956 "Master Abort" :
957 ((error_bits & SCHIZO_PCIAFSR_PTA) ? 957 ((error_bits & SCHIZO_PCIAFSR_PTA) ?
958 "Target Abort" : 958 "Target Abort" :
959 ((error_bits & SCHIZO_PCIAFSR_PRTRY) ? 959 ((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
960 "Excessive Retries" : 960 "Excessive Retries" :
961 ((error_bits & SCHIZO_PCIAFSR_PPERR) ? 961 ((error_bits & SCHIZO_PCIAFSR_PPERR) ?
962 "Parity Error" : 962 "Parity Error" :
963 ((error_bits & SCHIZO_PCIAFSR_PTTO) ? 963 ((error_bits & SCHIZO_PCIAFSR_PTTO) ?
964 "Timeout" : 964 "Timeout" :
965 ((error_bits & SCHIZO_PCIAFSR_PUNUS) ? 965 ((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
966 "Bus Unusable" : "???")))))))); 966 "Bus Unusable" : "???"))))))));
967 printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n", 967 printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
968 pbm->name, 968 pbm->name,
969 (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL, 969 (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
970 (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0, 970 (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
971 ((afsr & SCHIZO_PCIAFSR_CFG) ? 971 ((afsr & SCHIZO_PCIAFSR_CFG) ?
972 "Config" : 972 "Config" :
973 ((afsr & SCHIZO_PCIAFSR_MEM) ? 973 ((afsr & SCHIZO_PCIAFSR_MEM) ?
974 "Memory" : 974 "Memory" :
975 ((afsr & SCHIZO_PCIAFSR_IO) ? 975 ((afsr & SCHIZO_PCIAFSR_IO) ?
976 "I/O" : "???")))); 976 "I/O" : "???"))));
977 printk("%s: PCI AFAR [%016lx]\n", 977 printk("%s: PCI AFAR [%016lx]\n",
978 pbm->name, afar); 978 pbm->name, afar);
979 printk("%s: PCI Secondary errors [", 979 printk("%s: PCI Secondary errors [",
980 pbm->name); 980 pbm->name);
981 reported = 0; 981 reported = 0;
982 if (afsr & SCHIZO_PCIAFSR_SMA) { 982 if (afsr & SCHIZO_PCIAFSR_SMA) {
983 reported++; 983 reported++;
984 printk("(Master Abort)"); 984 printk("(Master Abort)");
985 } 985 }
986 if (afsr & SCHIZO_PCIAFSR_STA) { 986 if (afsr & SCHIZO_PCIAFSR_STA) {
987 reported++; 987 reported++;
988 printk("(Target Abort)"); 988 printk("(Target Abort)");
989 } 989 }
990 if (afsr & SCHIZO_PCIAFSR_SRTRY) { 990 if (afsr & SCHIZO_PCIAFSR_SRTRY) {
991 reported++; 991 reported++;
992 printk("(Excessive Retries)"); 992 printk("(Excessive Retries)");
993 } 993 }
994 if (afsr & SCHIZO_PCIAFSR_SPERR) { 994 if (afsr & SCHIZO_PCIAFSR_SPERR) {
995 reported++; 995 reported++;
996 printk("(Parity Error)"); 996 printk("(Parity Error)");
997 } 997 }
998 if (afsr & SCHIZO_PCIAFSR_STTO) { 998 if (afsr & SCHIZO_PCIAFSR_STTO) {
999 reported++; 999 reported++;
1000 printk("(Timeout)"); 1000 printk("(Timeout)");
1001 } 1001 }
1002 if (afsr & SCHIZO_PCIAFSR_SUNUS) { 1002 if (afsr & SCHIZO_PCIAFSR_SUNUS) {
1003 reported++; 1003 reported++;
1004 printk("(Bus Unusable)"); 1004 printk("(Bus Unusable)");
1005 } 1005 }
1006 if (!reported) 1006 if (!reported)
1007 printk("(none)"); 1007 printk("(none)");
1008 printk("]\n"); 1008 printk("]\n");
1009 1009
1010 /* For the error types shown, scan PBM's PCI bus for devices 1010 /* For the error types shown, scan PBM's PCI bus for devices
1011 * which have logged that error type. 1011 * which have logged that error type.
1012 */ 1012 */
1013 1013
1014 /* If we see a Target Abort, this could be the result of an 1014 /* If we see a Target Abort, this could be the result of an
1015 * IOMMU translation error of some sort. It is extremely 1015 * IOMMU translation error of some sort. It is extremely
1016 * useful to log this information as usually it indicates 1016 * useful to log this information as usually it indicates
1017 * a bug in the IOMMU support code or a PCI device driver. 1017 * a bug in the IOMMU support code or a PCI device driver.
1018 */ 1018 */
1019 if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) { 1019 if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
1020 schizo_check_iommu_error(p, PCI_ERR); 1020 schizo_check_iommu_error(p, PCI_ERR);
1021 pci_scan_for_target_abort(p, pbm, pbm->pci_bus); 1021 pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
1022 } 1022 }
1023 if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA)) 1023 if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
1024 pci_scan_for_master_abort(p, pbm, pbm->pci_bus); 1024 pci_scan_for_master_abort(p, pbm, pbm->pci_bus);
1025 1025
1026 /* For excessive retries, PSYCHO/PBM will abort the device 1026 /* For excessive retries, PSYCHO/PBM will abort the device
1027 * and there is no way to specifically check for excessive 1027 * and there is no way to specifically check for excessive
1028 * retries in the config space status registers. So what 1028 * retries in the config space status registers. So what
1029 * we hope is that we'll catch it via the master/target 1029 * we hope is that we'll catch it via the master/target
1030 * abort events. 1030 * abort events.
1031 */ 1031 */
1032 1032
1033 if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR)) 1033 if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
1034 pci_scan_for_parity_error(p, pbm, pbm->pci_bus); 1034 pci_scan_for_parity_error(p, pbm, pbm->pci_bus);
1035 1035
1036 schizo_clear_other_err_intr(p, irq); 1036 schizo_clear_other_err_intr(p, irq);
1037 1037
1038 return IRQ_HANDLED; 1038 return IRQ_HANDLED;
1039 } 1039 }
1040 1040
/* Safari/JBUS error log register and its error bits.  The trailing
 * comment on each bit records which chip family defines it; note that
 * several bit positions carry different meanings on Safari (Schizo)
 * versus JBUS (Tomatillo), e.g. BUS_ERROR_CPU1PS vs BUS_ERROR_WDATA_PERR.
 */
#define SCHIZO_SAFARI_ERRLOG	0x10018UL

#define SAFARI_ERRLOG_ERROUT	0x8000000000000000UL

#define BUS_ERROR_BADCMD	0x4000000000000000UL /* Schizo/Tomatillo */
#define BUS_ERROR_SSMDIS	0x2000000000000000UL /* Safari */
#define BUS_ERROR_BADMA		0x1000000000000000UL /* Safari */
#define BUS_ERROR_BADMB		0x0800000000000000UL /* Safari */
#define BUS_ERROR_BADMC		0x0400000000000000UL /* Safari */
#define BUS_ERROR_SNOOP_GR	0x0000000000200000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_PCI	0x0000000000100000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RD	0x0000000000080000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDS	0x0000000000020000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDSA	0x0000000000010000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_OWN	0x0000000000008000UL /* Tomatillo */
#define BUS_ERROR_SNOOP_RDO	0x0000000000004000UL /* Tomatillo */
#define BUS_ERROR_CPU1PS	0x0000000000002000UL /* Safari */
#define BUS_ERROR_WDATA_PERR	0x0000000000002000UL /* Tomatillo */
#define BUS_ERROR_CPU1PB	0x0000000000001000UL /* Safari */
#define BUS_ERROR_CTRL_PERR	0x0000000000001000UL /* Tomatillo */
#define BUS_ERROR_CPU0PS	0x0000000000000800UL /* Safari */
#define BUS_ERROR_SNOOP_ERR	0x0000000000000800UL /* Tomatillo */
#define BUS_ERROR_CPU0PB	0x0000000000000400UL /* Safari */
#define BUS_ERROR_JBUS_ILL_B	0x0000000000000400UL /* Tomatillo */
#define BUS_ERROR_CIQTO		0x0000000000000200UL /* Safari */
#define BUS_ERROR_LPQTO		0x0000000000000100UL /* Safari */
#define BUS_ERROR_JBUS_ILL_C	0x0000000000000100UL /* Tomatillo */
#define BUS_ERROR_SFPQTO	0x0000000000000080UL /* Safari */
#define BUS_ERROR_UFPQTO	0x0000000000000040UL /* Safari */
#define BUS_ERROR_RD_PERR	0x0000000000000040UL /* Tomatillo */
#define BUS_ERROR_APERR		0x0000000000000020UL /* Safari/Tomatillo */
#define BUS_ERROR_UNMAP		0x0000000000000010UL /* Safari/Tomatillo */
#define BUS_ERROR_BUSERR	0x0000000000000004UL /* Safari/Tomatillo */
#define BUS_ERROR_TIMEOUT	0x0000000000000002UL /* Safari/Tomatillo */
#define BUS_ERROR_ILL		0x0000000000000001UL /* Safari */
1076 1076
1077 /* We only expect UNMAP errors here. The rest of the Safari errors 1077 /* We only expect UNMAP errors here. The rest of the Safari errors
1078 * are marked fatal and thus cause a system reset. 1078 * are marked fatal and thus cause a system reset.
1079 */ 1079 */
1080 static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs) 1080 static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs)
1081 { 1081 {
1082 struct pci_controller_info *p = dev_id; 1082 struct pci_controller_info *p = dev_id;
1083 u64 errlog; 1083 u64 errlog;
1084 1084
1085 errlog = schizo_read(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG); 1085 errlog = schizo_read(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG);
1086 schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG, 1086 schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG,
1087 errlog & ~(SAFARI_ERRLOG_ERROUT)); 1087 errlog & ~(SAFARI_ERRLOG_ERROUT));
1088 1088
1089 if (!(errlog & BUS_ERROR_UNMAP)) { 1089 if (!(errlog & BUS_ERROR_UNMAP)) {
1090 printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n", 1090 printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
1091 p->index, errlog); 1091 p->index, errlog);
1092 1092
1093 schizo_clear_other_err_intr(p, irq); 1093 schizo_clear_other_err_intr(p, irq);
1094 return IRQ_HANDLED; 1094 return IRQ_HANDLED;
1095 } 1095 }
1096 1096
1097 printk("PCI%d: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n", 1097 printk("PCI%d: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
1098 p->index); 1098 p->index);
1099 schizo_check_iommu_error(p, SAFARI_ERR); 1099 schizo_check_iommu_error(p, SAFARI_ERR);
1100 1100
1101 schizo_clear_other_err_intr(p, irq); 1101 schizo_clear_other_err_intr(p, irq);
1102 return IRQ_HANDLED; 1102 return IRQ_HANDLED;
1103 } 1103 }
1104 1104
/* Nearly identical to PSYCHO equivalents... */
/* ECC control and Safari error/interrupt control registers. */
#define SCHIZO_ECC_CTRL		0x10020UL
#define SCHIZO_ECCCTRL_EE	0x8000000000000000UL /* Enable ECC Checking */
#define SCHIZO_ECCCTRL_UE	0x4000000000000000UL /* Enable UE Interrupts */
#define SCHIZO_ECCCTRL_CE	0x2000000000000000UL /* Enable CE Interrupts */

#define SCHIZO_SAFARI_ERRCTRL	0x10008UL
#define SCHIZO_SAFERRCTRL_EN	0x8000000000000000UL
#define SCHIZO_SAFARI_IRQCTRL	0x10010UL
#define SCHIZO_SAFIRQCTRL_EN	0x8000000000000000UL
1115 1115
/* How the Tomatillo IRQs are routed around is pure guesswork here.
 *
 * All the Tomatillo devices I see in prtconf dumps seem to have only
 * a single PCI bus unit attached to it.  It would seem they are separate
 * devices because their PortID (ie. JBUS ID) values are all different
 * and thus the registers are mapped to totally different locations.
 *
 * However, two Tomatillos look "similar" in that the only difference
 * in their PortID is the lowest bit.
 *
 * So if we were to ignore this lower bit, it certainly looks like two
 * PCI bus units of the same Tomatillo.  I still have not really
 * figured this out...
 */
/* Build and register the error interrupt handlers for a Tomatillo
 * controller, then enable error reporting in the hardware.
 *
 * For each error INO (UE, CE, PBM A/B PCI errors, Safari/JBUS error)
 * the IRQ is built relative to the PBM that owns that INO and the
 * handler is installed; failure to register is fatal via prom_halt().
 * After each registration the latched IMAP value is copied to
 * pbm_regs at imap_offset + 4 (presumably the low 32 bits of the
 * 64-bit IMAP register -- TODO confirm against Tomatillo docs; see
 * also the routing guesswork comment above).
 */
static void tomatillo_register_error_handlers(struct pci_controller_info *p)
{
	struct pci_pbm_info *pbm;
	unsigned int irq;
	u64 tmp, err_mask, err_no_mask;

	/* Build IRQs and register handlers. */

	/* Uncorrectable ECC error. */
	pbm = pbm_for_ino(p, SCHIZO_UE_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
	if (request_irq(irq, schizo_ue_intr,
			SA_SHIRQ, "TOMATILLO UE", p) < 0) {
		prom_printf("%s: Cannot register UE interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_UE_INO) + 4));

	/* Correctable ECC error. */
	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
	if (request_irq(irq, schizo_ce_intr,
			SA_SHIRQ, "TOMATILLO CE", p) < 0) {
		prom_printf("%s: Cannot register CE interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_CE_INO) + 4));

	/* PCI error, PBM A. */
	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
					   SCHIZO_PCIERR_A_INO));
	if (request_irq(irq, schizo_pcierr_intr,
			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
		prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
						 SCHIZO_PCIERR_A_INO)));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));

	/* PCI error, PBM B. */
	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
					   SCHIZO_PCIERR_B_INO));
	if (request_irq(irq, schizo_pcierr_intr,
			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
		prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
						 SCHIZO_PCIERR_B_INO)));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));

	/* Safari/JBUS error. */
	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
	if (request_irq(irq, schizo_safarierr_intr,
			SA_SHIRQ, "TOMATILLO SERR", p) < 0) {
		prom_printf("%s: Cannot register SafariERR interrupt.\n",
			    pbm->name);
		prom_halt();
	}
	tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
						 SCHIZO_SERR_INO)));
	upa_writel(tmp, (pbm->pbm_regs +
			 schizo_imap_offset(SCHIZO_SERR_INO) + 4));

	/* Enable UE and CE interrupts for controller. */
	schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
		     (SCHIZO_ECCCTRL_EE |
		      SCHIZO_ECCCTRL_UE |
		      SCHIZO_ECCCTRL_CE));

	schizo_write(p->pbm_B.controller_regs + SCHIZO_ECC_CTRL,
		     (SCHIZO_ECCCTRL_EE |
		      SCHIZO_ECCCTRL_UE |
		      SCHIZO_ECCCTRL_CE));

	/* Enable PCI Error interrupts and clear error
	 * bits.
	 */
	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
		    SCHIZO_PCICTRL_TTO_ERR |
		    SCHIZO_PCICTRL_RTRY_ERR |
		    SCHIZO_PCICTRL_SERR |
		    SCHIZO_PCICTRL_EEN);

	/* DTO errors are explicitly left disabled on Tomatillo. */
	err_no_mask = SCHIZO_PCICTRL_DTO_ERR;

	tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);

	tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
	tmp |= err_mask;
	tmp &= ~err_no_mask;
	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);

	/* Clear any stale primary/secondary AFSR error bits on both
	 * PBMs before errors start being reported.
	 */
	err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
		    SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
		    SCHIZO_PCIAFSR_PTTO |
		    SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
		    SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
		    SCHIZO_PCIAFSR_STTO);

	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR, err_mask);

	/* Select which JBUS error conditions are logged. */
	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
		    BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
		    BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
		    BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
		    BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
		    BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
		    BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
		    BUS_ERROR_APERR | BUS_ERROR_UNMAP |
		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);

	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
		     (SCHIZO_SAFERRCTRL_EN | err_mask));
	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRCTRL,
		     (SCHIZO_SAFERRCTRL_EN | err_mask));

	/* Only UNMAP errors raise the Safari/JBUS error interrupt;
	 * this matches the expectation in schizo_safarierr_intr().
	 */
	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_IRQCTRL,
		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
}
1264 1264
1265 static void schizo_register_error_handlers(struct pci_controller_info *p) 1265 static void schizo_register_error_handlers(struct pci_controller_info *p)
1266 { 1266 {
1267 struct pci_pbm_info *pbm; 1267 struct pci_pbm_info *pbm;
1268 unsigned int irq; 1268 unsigned int irq;
1269 u64 tmp, err_mask, err_no_mask; 1269 u64 tmp, err_mask, err_no_mask;
1270 1270
1271 /* Build IRQs and register handlers. */ 1271 /* Build IRQs and register handlers. */
1272 pbm = pbm_for_ino(p, SCHIZO_UE_INO); 1272 pbm = pbm_for_ino(p, SCHIZO_UE_INO);
1273 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO); 1273 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
1274 if (request_irq(irq, schizo_ue_intr, 1274 if (request_irq(irq, schizo_ue_intr,
1275 SA_SHIRQ, "SCHIZO UE", p) < 0) { 1275 SA_SHIRQ, "SCHIZO UE", p) < 0) {
1276 prom_printf("%s: Cannot register UE interrupt.\n", 1276 prom_printf("%s: Cannot register UE interrupt.\n",
1277 pbm->name); 1277 pbm->name);
1278 prom_halt(); 1278 prom_halt();
1279 } 1279 }
1280 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO)); 1280 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
1281 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4)); 1281 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
1282 1282
1283 pbm = pbm_for_ino(p, SCHIZO_CE_INO); 1283 pbm = pbm_for_ino(p, SCHIZO_CE_INO);
1284 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO); 1284 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
1285 if (request_irq(irq, schizo_ce_intr, 1285 if (request_irq(irq, schizo_ce_intr,
1286 SA_SHIRQ, "SCHIZO CE", p) < 0) { 1286 SA_SHIRQ, "SCHIZO CE", p) < 0) {
1287 prom_printf("%s: Cannot register CE interrupt.\n", 1287 prom_printf("%s: Cannot register CE interrupt.\n",
1288 pbm->name); 1288 pbm->name);
1289 prom_halt(); 1289 prom_halt();
1290 } 1290 }
1291 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO)); 1291 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
1292 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4)); 1292 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
1293 1293
1294 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO); 1294 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
1295 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO); 1295 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO);
1296 if (request_irq(irq, schizo_pcierr_intr, 1296 if (request_irq(irq, schizo_pcierr_intr,
1297 SA_SHIRQ, "SCHIZO PCIERR", pbm) < 0) { 1297 SA_SHIRQ, "SCHIZO PCIERR", pbm) < 0) {
1298 prom_printf("%s: Cannot register PBM A PciERR interrupt.\n", 1298 prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
1299 pbm->name); 1299 pbm->name);
1300 prom_halt(); 1300 prom_halt();
1301 } 1301 }
1302 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO)); 1302 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO));
1303 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4)); 1303 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
1304 1304
1305 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO); 1305 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
1306 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO); 1306 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO);
1307 if (request_irq(irq, schizo_pcierr_intr, 1307 if (request_irq(irq, schizo_pcierr_intr,
1308 SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) { 1308 SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) {
1309 prom_printf("%s: Cannot register PBM B PciERR interrupt.\n", 1309 prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
1310 pbm->name); 1310 pbm->name);
1311 prom_halt(); 1311 prom_halt();
1312 } 1312 }
1313 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO)); 1313 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO));
1314 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4)); 1314 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
1315 1315
1316 pbm = pbm_for_ino(p, SCHIZO_SERR_INO); 1316 pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
1317 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO); 1317 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
1318 if (request_irq(irq, schizo_safarierr_intr, 1318 if (request_irq(irq, schizo_safarierr_intr,
1319 SA_SHIRQ, "SCHIZO SERR", p) < 0) { 1319 SA_SHIRQ, "SCHIZO SERR", p) < 0) {
1320 prom_printf("%s: Cannot register SafariERR interrupt.\n", 1320 prom_printf("%s: Cannot register SafariERR interrupt.\n",
1321 pbm->name); 1321 pbm->name);
1322 prom_halt(); 1322 prom_halt();
1323 } 1323 }
1324 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_SERR_INO)); 1324 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_SERR_INO));
1325 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4)); 1325 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
1326 1326
1327 /* Enable UE and CE interrupts for controller. */ 1327 /* Enable UE and CE interrupts for controller. */
1328 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL, 1328 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
1329 (SCHIZO_ECCCTRL_EE | 1329 (SCHIZO_ECCCTRL_EE |
1330 SCHIZO_ECCCTRL_UE | 1330 SCHIZO_ECCCTRL_UE |
1331 SCHIZO_ECCCTRL_CE)); 1331 SCHIZO_ECCCTRL_CE));
1332 1332
1333 err_mask = (SCHIZO_PCICTRL_BUS_UNUS | 1333 err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
1334 SCHIZO_PCICTRL_ESLCK | 1334 SCHIZO_PCICTRL_ESLCK |
1335 SCHIZO_PCICTRL_TTO_ERR | 1335 SCHIZO_PCICTRL_TTO_ERR |
1336 SCHIZO_PCICTRL_RTRY_ERR | 1336 SCHIZO_PCICTRL_RTRY_ERR |
1337 SCHIZO_PCICTRL_SBH_ERR | 1337 SCHIZO_PCICTRL_SBH_ERR |
1338 SCHIZO_PCICTRL_SERR | 1338 SCHIZO_PCICTRL_SERR |
1339 SCHIZO_PCICTRL_EEN); 1339 SCHIZO_PCICTRL_EEN);
1340 1340
1341 err_no_mask = (SCHIZO_PCICTRL_DTO_ERR | 1341 err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
1342 SCHIZO_PCICTRL_SBH_INT); 1342 SCHIZO_PCICTRL_SBH_INT);
1343 1343
1344 /* Enable PCI Error interrupts and clear error 1344 /* Enable PCI Error interrupts and clear error
1345 * bits for each PBM. 1345 * bits for each PBM.
1346 */ 1346 */
1347 tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL); 1347 tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
1348 tmp |= err_mask; 1348 tmp |= err_mask;
1349 tmp &= ~err_no_mask; 1349 tmp &= ~err_no_mask;
1350 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp); 1350 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);
1351 1351
1352 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR, 1352 schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR,
1353 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | 1353 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
1354 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | 1354 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
1355 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | 1355 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
1356 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | 1356 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
1357 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | 1357 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
1358 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS)); 1358 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
1359 1359
1360 tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL); 1360 tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
1361 tmp |= err_mask; 1361 tmp |= err_mask;
1362 tmp &= ~err_no_mask; 1362 tmp &= ~err_no_mask;
1363 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp); 1363 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);
1364 1364
1365 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR, 1365 schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR,
1366 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | 1366 (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
1367 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | 1367 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
1368 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | 1368 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
1369 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | 1369 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
1370 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | 1370 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
1371 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS)); 1371 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
1372 1372
1373 /* Make all Safari error conditions fatal except unmapped 1373 /* Make all Safari error conditions fatal except unmapped
1374 * errors which we make generate interrupts. 1374 * errors which we make generate interrupts.
1375 */ 1375 */
1376 err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS | 1376 err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
1377 BUS_ERROR_BADMA | BUS_ERROR_BADMB | 1377 BUS_ERROR_BADMA | BUS_ERROR_BADMB |
1378 BUS_ERROR_BADMC | 1378 BUS_ERROR_BADMC |
1379 BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | 1379 BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
1380 BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB | 1380 BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
1381 BUS_ERROR_CIQTO | 1381 BUS_ERROR_CIQTO |
1382 BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO | 1382 BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
1383 BUS_ERROR_UFPQTO | BUS_ERROR_APERR | 1383 BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
1384 BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT | 1384 BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
1385 BUS_ERROR_ILL); 1385 BUS_ERROR_ILL);
1386 #if 1 1386 #if 1
1387 /* XXX Something wrong with some Excalibur systems 1387 /* XXX Something wrong with some Excalibur systems
1388 * XXX Sun is shipping. The behavior on a 2-cpu 1388 * XXX Sun is shipping. The behavior on a 2-cpu
1389 * XXX machine is that both CPU1 parity error bits 1389 * XXX machine is that both CPU1 parity error bits
1390 * XXX are set and are immediately set again when 1390 * XXX are set and are immediately set again when
1391 * XXX their error status bits are cleared. Just 1391 * XXX their error status bits are cleared. Just
1392 * XXX ignore them for now. -DaveM 1392 * XXX ignore them for now. -DaveM
1393 */ 1393 */
1394 err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | 1394 err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
1395 BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB); 1395 BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
1396 #endif 1396 #endif
1397 1397
1398 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL, 1398 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
1399 (SCHIZO_SAFERRCTRL_EN | err_mask)); 1399 (SCHIZO_SAFERRCTRL_EN | err_mask));
1400 1400
1401 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL, 1401 schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
1402 (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP))); 1402 (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
1403 } 1403 }
1404 1404
1405 static void pbm_config_busmastering(struct pci_pbm_info *pbm) 1405 static void pbm_config_busmastering(struct pci_pbm_info *pbm)
1406 { 1406 {
1407 u8 *addr; 1407 u8 *addr;
1408 1408
1409 /* Set cache-line size to 64 bytes, this is actually 1409 /* Set cache-line size to 64 bytes, this is actually
1410 * a nop but I do it for completeness. 1410 * a nop but I do it for completeness.
1411 */ 1411 */
1412 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, 1412 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1413 0, PCI_CACHE_LINE_SIZE); 1413 0, PCI_CACHE_LINE_SIZE);
1414 pci_config_write8(addr, 64 / sizeof(u32)); 1414 pci_config_write8(addr, 64 / sizeof(u32));
1415 1415
1416 /* Set PBM latency timer to 64 PCI clocks. */ 1416 /* Set PBM latency timer to 64 PCI clocks. */
1417 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, 1417 addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
1418 0, PCI_LATENCY_TIMER); 1418 0, PCI_LATENCY_TIMER);
1419 pci_config_write8(addr, 64); 1419 pci_config_write8(addr, 64);
1420 } 1420 }
1421 1421
1422 static void pbm_scan_bus(struct pci_controller_info *p, 1422 static void pbm_scan_bus(struct pci_controller_info *p,
1423 struct pci_pbm_info *pbm) 1423 struct pci_pbm_info *pbm)
1424 { 1424 {
1425 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); 1425 struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
1426 1426
1427 if (!cookie) { 1427 if (!cookie) {
1428 prom_printf("%s: Critical allocation failure.\n", pbm->name); 1428 prom_printf("%s: Critical allocation failure.\n", pbm->name);
1429 prom_halt(); 1429 prom_halt();
1430 } 1430 }
1431 1431
1432 /* All we care about is the PBM. */ 1432 /* All we care about is the PBM. */
1433 cookie->pbm = pbm; 1433 cookie->pbm = pbm;
1434 1434
1435 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, 1435 pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
1436 p->pci_ops, 1436 p->pci_ops,
1437 pbm); 1437 pbm);
1438 pci_fixup_host_bridge_self(pbm->pci_bus); 1438 pci_fixup_host_bridge_self(pbm->pci_bus);
1439 pbm->pci_bus->self->sysdata = cookie; 1439 pbm->pci_bus->self->sysdata = cookie;
1440 1440
1441 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node->node); 1441 pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
1442 pci_record_assignments(pbm, pbm->pci_bus); 1442 pci_record_assignments(pbm, pbm->pci_bus);
1443 pci_assign_unassigned(pbm, pbm->pci_bus); 1443 pci_assign_unassigned(pbm, pbm->pci_bus);
1444 pci_fixup_irq(pbm, pbm->pci_bus); 1444 pci_fixup_irq(pbm, pbm->pci_bus);
1445 pci_determine_66mhz_disposition(pbm, pbm->pci_bus); 1445 pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
1446 pci_setup_busmastering(pbm, pbm->pci_bus); 1446 pci_setup_busmastering(pbm, pbm->pci_bus);
1447 } 1447 }
1448 1448
1449 static void __schizo_scan_bus(struct pci_controller_info *p, 1449 static void __schizo_scan_bus(struct pci_controller_info *p,
1450 int chip_type) 1450 int chip_type)
1451 { 1451 {
1452 if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) { 1452 if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
1453 printk("PCI: Only one PCI bus module of controller found.\n"); 1453 printk("PCI: Only one PCI bus module of controller found.\n");
1454 printk("PCI: Ignoring entire controller.\n"); 1454 printk("PCI: Ignoring entire controller.\n");
1455 return; 1455 return;
1456 } 1456 }
1457 1457
1458 pbm_config_busmastering(&p->pbm_B); 1458 pbm_config_busmastering(&p->pbm_B);
1459 p->pbm_B.is_66mhz_capable = 1459 p->pbm_B.is_66mhz_capable =
1460 (of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL) 1460 (of_find_property(p->pbm_B.prom_node, "66mhz-capable", NULL)
1461 != NULL); 1461 != NULL);
1462 pbm_config_busmastering(&p->pbm_A); 1462 pbm_config_busmastering(&p->pbm_A);
1463 p->pbm_A.is_66mhz_capable = 1463 p->pbm_A.is_66mhz_capable =
1464 (of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL) 1464 (of_find_property(p->pbm_A.prom_node, "66mhz-capable", NULL)
1465 != NULL); 1465 != NULL);
1466 pbm_scan_bus(p, &p->pbm_B); 1466 pbm_scan_bus(p, &p->pbm_B);
1467 pbm_scan_bus(p, &p->pbm_A); 1467 pbm_scan_bus(p, &p->pbm_A);
1468 1468
1469 /* After the PCI bus scan is complete, we can register 1469 /* After the PCI bus scan is complete, we can register
1470 * the error interrupt handlers. 1470 * the error interrupt handlers.
1471 */ 1471 */
1472 if (chip_type == PBM_CHIP_TYPE_TOMATILLO) 1472 if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
1473 tomatillo_register_error_handlers(p); 1473 tomatillo_register_error_handlers(p);
1474 else 1474 else
1475 schizo_register_error_handlers(p); 1475 schizo_register_error_handlers(p);
1476 } 1476 }
1477 1477
1478 static void schizo_scan_bus(struct pci_controller_info *p) 1478 static void schizo_scan_bus(struct pci_controller_info *p)
1479 { 1479 {
1480 __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO); 1480 __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
1481 } 1481 }
1482 1482
1483 static void tomatillo_scan_bus(struct pci_controller_info *p) 1483 static void tomatillo_scan_bus(struct pci_controller_info *p)
1484 { 1484 {
1485 __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO); 1485 __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
1486 } 1486 }
1487 1487
1488 static void schizo_base_address_update(struct pci_dev *pdev, int resource) 1488 static void schizo_base_address_update(struct pci_dev *pdev, int resource)
1489 { 1489 {
1490 struct pcidev_cookie *pcp = pdev->sysdata; 1490 struct pcidev_cookie *pcp = pdev->sysdata;
1491 struct pci_pbm_info *pbm = pcp->pbm; 1491 struct pci_pbm_info *pbm = pcp->pbm;
1492 struct resource *res, *root; 1492 struct resource *res, *root;
1493 u32 reg; 1493 u32 reg;
1494 int where, size, is_64bit; 1494 int where, size, is_64bit;
1495 1495
1496 res = &pdev->resource[resource]; 1496 res = &pdev->resource[resource];
1497 if (resource < 6) { 1497 if (resource < 6) {
1498 where = PCI_BASE_ADDRESS_0 + (resource * 4); 1498 where = PCI_BASE_ADDRESS_0 + (resource * 4);
1499 } else if (resource == PCI_ROM_RESOURCE) { 1499 } else if (resource == PCI_ROM_RESOURCE) {
1500 where = pdev->rom_base_reg; 1500 where = pdev->rom_base_reg;
1501 } else { 1501 } else {
1502 /* Somebody might have asked allocation of a non-standard resource */ 1502 /* Somebody might have asked allocation of a non-standard resource */
1503 return; 1503 return;
1504 } 1504 }
1505 1505
1506 is_64bit = 0; 1506 is_64bit = 0;
1507 if (res->flags & IORESOURCE_IO) 1507 if (res->flags & IORESOURCE_IO)
1508 root = &pbm->io_space; 1508 root = &pbm->io_space;
1509 else { 1509 else {
1510 root = &pbm->mem_space; 1510 root = &pbm->mem_space;
1511 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) 1511 if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
1512 == PCI_BASE_ADDRESS_MEM_TYPE_64) 1512 == PCI_BASE_ADDRESS_MEM_TYPE_64)
1513 is_64bit = 1; 1513 is_64bit = 1;
1514 } 1514 }
1515 1515
1516 size = res->end - res->start; 1516 size = res->end - res->start;
1517 pci_read_config_dword(pdev, where, &reg); 1517 pci_read_config_dword(pdev, where, &reg);
1518 reg = ((reg & size) | 1518 reg = ((reg & size) |
1519 (((u32)(res->start - root->start)) & ~size)); 1519 (((u32)(res->start - root->start)) & ~size));
1520 if (resource == PCI_ROM_RESOURCE) { 1520 if (resource == PCI_ROM_RESOURCE) {
1521 reg |= PCI_ROM_ADDRESS_ENABLE; 1521 reg |= PCI_ROM_ADDRESS_ENABLE;
1522 res->flags |= IORESOURCE_ROM_ENABLE; 1522 res->flags |= IORESOURCE_ROM_ENABLE;
1523 } 1523 }
1524 pci_write_config_dword(pdev, where, reg); 1524 pci_write_config_dword(pdev, where, reg);
1525 1525
1526 /* This knows that the upper 32-bits of the address 1526 /* This knows that the upper 32-bits of the address
1527 * must be zero. Our PCI common layer enforces this. 1527 * must be zero. Our PCI common layer enforces this.
1528 */ 1528 */
1529 if (is_64bit) 1529 if (is_64bit)
1530 pci_write_config_dword(pdev, where + 4, 0); 1530 pci_write_config_dword(pdev, where + 4, 0);
1531 } 1531 }
1532 1532
1533 static void schizo_resource_adjust(struct pci_dev *pdev, 1533 static void schizo_resource_adjust(struct pci_dev *pdev,
1534 struct resource *res, 1534 struct resource *res,
1535 struct resource *root) 1535 struct resource *root)
1536 { 1536 {
1537 res->start += root->start; 1537 res->start += root->start;
1538 res->end += root->start; 1538 res->end += root->start;
1539 } 1539 }
1540 1540
1541 /* Use ranges property to determine where PCI MEM, I/O, and Config 1541 /* Use ranges property to determine where PCI MEM, I/O, and Config
1542 * space are for this PCI bus module. 1542 * space are for this PCI bus module.
1543 */ 1543 */
1544 static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm) 1544 static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
1545 { 1545 {
1546 int i, saw_cfg, saw_mem, saw_io; 1546 int i, saw_cfg, saw_mem, saw_io;
1547 1547
1548 saw_cfg = saw_mem = saw_io = 0; 1548 saw_cfg = saw_mem = saw_io = 0;
1549 for (i = 0; i < pbm->num_pbm_ranges; i++) { 1549 for (i = 0; i < pbm->num_pbm_ranges; i++) {
1550 struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i]; 1550 struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
1551 unsigned long a; 1551 unsigned long a;
1552 int type; 1552 int type;
1553 1553
1554 type = (pr->child_phys_hi >> 24) & 0x3; 1554 type = (pr->child_phys_hi >> 24) & 0x3;
1555 a = (((unsigned long)pr->parent_phys_hi << 32UL) | 1555 a = (((unsigned long)pr->parent_phys_hi << 32UL) |
1556 ((unsigned long)pr->parent_phys_lo << 0UL)); 1556 ((unsigned long)pr->parent_phys_lo << 0UL));
1557 1557
1558 switch (type) { 1558 switch (type) {
1559 case 0: 1559 case 0:
1560 /* PCI config space, 16MB */ 1560 /* PCI config space, 16MB */
1561 pbm->config_space = a; 1561 pbm->config_space = a;
1562 saw_cfg = 1; 1562 saw_cfg = 1;
1563 break; 1563 break;
1564 1564
1565 case 1: 1565 case 1:
1566 /* 16-bit IO space, 16MB */ 1566 /* 16-bit IO space, 16MB */
1567 pbm->io_space.start = a; 1567 pbm->io_space.start = a;
1568 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL); 1568 pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
1569 pbm->io_space.flags = IORESOURCE_IO; 1569 pbm->io_space.flags = IORESOURCE_IO;
1570 saw_io = 1; 1570 saw_io = 1;
1571 break; 1571 break;
1572 1572
1573 case 2: 1573 case 2:
1574 /* 32-bit MEM space, 2GB */ 1574 /* 32-bit MEM space, 2GB */
1575 pbm->mem_space.start = a; 1575 pbm->mem_space.start = a;
1576 pbm->mem_space.end = a + (0x80000000UL - 1UL); 1576 pbm->mem_space.end = a + (0x80000000UL - 1UL);
1577 pbm->mem_space.flags = IORESOURCE_MEM; 1577 pbm->mem_space.flags = IORESOURCE_MEM;
1578 saw_mem = 1; 1578 saw_mem = 1;
1579 break; 1579 break;
1580 1580
1581 default: 1581 default:
1582 break; 1582 break;
1583 }; 1583 };
1584 } 1584 }
1585 1585
1586 if (!saw_cfg || !saw_io || !saw_mem) { 1586 if (!saw_cfg || !saw_io || !saw_mem) {
1587 prom_printf("%s: Fatal error, missing %s PBM range.\n", 1587 prom_printf("%s: Fatal error, missing %s PBM range.\n",
1588 pbm->name, 1588 pbm->name,
1589 ((!saw_cfg ? 1589 ((!saw_cfg ?
1590 "CFG" : 1590 "CFG" :
1591 (!saw_io ? 1591 (!saw_io ?
1592 "IO" : "MEM")))); 1592 "IO" : "MEM"))));
1593 prom_halt(); 1593 prom_halt();
1594 } 1594 }
1595 1595
1596 printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n", 1596 printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
1597 pbm->name, 1597 pbm->name,
1598 pbm->config_space, 1598 pbm->config_space,
1599 pbm->io_space.start, 1599 pbm->io_space.start,
1600 pbm->mem_space.start); 1600 pbm->mem_space.start);
1601 } 1601 }
1602 1602
1603 static void pbm_register_toplevel_resources(struct pci_controller_info *p, 1603 static void pbm_register_toplevel_resources(struct pci_controller_info *p,
1604 struct pci_pbm_info *pbm) 1604 struct pci_pbm_info *pbm)
1605 { 1605 {
1606 pbm->io_space.name = pbm->mem_space.name = pbm->name; 1606 pbm->io_space.name = pbm->mem_space.name = pbm->name;
1607 1607
1608 request_resource(&ioport_resource, &pbm->io_space); 1608 request_resource(&ioport_resource, &pbm->io_space);
1609 request_resource(&iomem_resource, &pbm->mem_space); 1609 request_resource(&iomem_resource, &pbm->mem_space);
1610 pci_register_legacy_regions(&pbm->io_space, 1610 pci_register_legacy_regions(&pbm->io_space,
1611 &pbm->mem_space); 1611 &pbm->mem_space);
1612 } 1612 }
1613 1613
1614 #define SCHIZO_STRBUF_CONTROL (0x02800UL) 1614 #define SCHIZO_STRBUF_CONTROL (0x02800UL)
1615 #define SCHIZO_STRBUF_FLUSH (0x02808UL) 1615 #define SCHIZO_STRBUF_FLUSH (0x02808UL)
1616 #define SCHIZO_STRBUF_FSYNC (0x02810UL) 1616 #define SCHIZO_STRBUF_FSYNC (0x02810UL)
1617 #define SCHIZO_STRBUF_CTXFLUSH (0x02818UL) 1617 #define SCHIZO_STRBUF_CTXFLUSH (0x02818UL)
1618 #define SCHIZO_STRBUF_CTXMATCH (0x10000UL) 1618 #define SCHIZO_STRBUF_CTXMATCH (0x10000UL)
1619 1619
1620 static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm) 1620 static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
1621 { 1621 {
1622 unsigned long base = pbm->pbm_regs; 1622 unsigned long base = pbm->pbm_regs;
1623 u64 control; 1623 u64 control;
1624 1624
1625 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { 1625 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
1626 /* TOMATILLO lacks streaming cache. */ 1626 /* TOMATILLO lacks streaming cache. */
1627 return; 1627 return;
1628 } 1628 }
1629 1629
1630 /* SCHIZO has context flushing. */ 1630 /* SCHIZO has context flushing. */
1631 pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL; 1631 pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL;
1632 pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH; 1632 pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH;
1633 pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC; 1633 pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC;
1634 pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH; 1634 pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH;
1635 pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH; 1635 pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH;
1636 1636
1637 pbm->stc.strbuf_flushflag = (volatile unsigned long *) 1637 pbm->stc.strbuf_flushflag = (volatile unsigned long *)
1638 ((((unsigned long)&pbm->stc.__flushflag_buf[0]) 1638 ((((unsigned long)&pbm->stc.__flushflag_buf[0])
1639 + 63UL) 1639 + 63UL)
1640 & ~63UL); 1640 & ~63UL);
1641 pbm->stc.strbuf_flushflag_pa = (unsigned long) 1641 pbm->stc.strbuf_flushflag_pa = (unsigned long)
1642 __pa(pbm->stc.strbuf_flushflag); 1642 __pa(pbm->stc.strbuf_flushflag);
1643 1643
1644 /* Turn off LRU locking and diag mode, enable the 1644 /* Turn off LRU locking and diag mode, enable the
1645 * streaming buffer and leave the rerun-disable 1645 * streaming buffer and leave the rerun-disable
1646 * setting however OBP set it. 1646 * setting however OBP set it.
1647 */ 1647 */
1648 control = schizo_read(pbm->stc.strbuf_control); 1648 control = schizo_read(pbm->stc.strbuf_control);
1649 control &= ~(SCHIZO_STRBUF_CTRL_LPTR | 1649 control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
1650 SCHIZO_STRBUF_CTRL_LENAB | 1650 SCHIZO_STRBUF_CTRL_LENAB |
1651 SCHIZO_STRBUF_CTRL_DENAB); 1651 SCHIZO_STRBUF_CTRL_DENAB);
1652 control |= SCHIZO_STRBUF_CTRL_ENAB; 1652 control |= SCHIZO_STRBUF_CTRL_ENAB;
1653 schizo_write(pbm->stc.strbuf_control, control); 1653 schizo_write(pbm->stc.strbuf_control, control);
1654 1654
1655 pbm->stc.strbuf_enabled = 1; 1655 pbm->stc.strbuf_enabled = 1;
1656 } 1656 }
1657 1657
1658 #define SCHIZO_IOMMU_CONTROL (0x00200UL) 1658 #define SCHIZO_IOMMU_CONTROL (0x00200UL)
1659 #define SCHIZO_IOMMU_TSBBASE (0x00208UL) 1659 #define SCHIZO_IOMMU_TSBBASE (0x00208UL)
1660 #define SCHIZO_IOMMU_FLUSH (0x00210UL) 1660 #define SCHIZO_IOMMU_FLUSH (0x00210UL)
1661 #define SCHIZO_IOMMU_CTXFLUSH (0x00218UL) 1661 #define SCHIZO_IOMMU_CTXFLUSH (0x00218UL)
1662 1662
1663 static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) 1663 static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
1664 { 1664 {
1665 struct pci_iommu *iommu = pbm->iommu; 1665 struct pci_iommu *iommu = pbm->iommu;
1666 unsigned long i, tagbase, database; 1666 unsigned long i, tagbase, database;
1667 struct property *prop; 1667 struct property *prop;
1668 u32 vdma[2], dma_mask; 1668 u32 vdma[2], dma_mask;
1669 u64 control; 1669 u64 control;
1670 int tsbsize; 1670 int tsbsize;
1671 1671
1672 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); 1672 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
1673 if (prop) { 1673 if (prop) {
1674 u32 *val = prop->value; 1674 u32 *val = prop->value;
1675 1675
1676 vdma[0] = val[0]; 1676 vdma[0] = val[0];
1677 vdma[1] = val[1]; 1677 vdma[1] = val[1];
1678 } else { 1678 } else {
1679 /* No property, use default values. */ 1679 /* No property, use default values. */
1680 vdma[0] = 0xc0000000; 1680 vdma[0] = 0xc0000000;
1681 vdma[1] = 0x40000000; 1681 vdma[1] = 0x40000000;
1682 } 1682 }
1683 1683
1684 dma_mask = vdma[0]; 1684 dma_mask = vdma[0];
1685 switch (vdma[1]) { 1685 switch (vdma[1]) {
1686 case 0x20000000: 1686 case 0x20000000:
1687 dma_mask |= 0x1fffffff; 1687 dma_mask |= 0x1fffffff;
1688 tsbsize = 64; 1688 tsbsize = 64;
1689 break; 1689 break;
1690 1690
1691 case 0x40000000: 1691 case 0x40000000:
1692 dma_mask |= 0x3fffffff; 1692 dma_mask |= 0x3fffffff;
1693 tsbsize = 128; 1693 tsbsize = 128;
1694 break; 1694 break;
1695 1695
1696 case 0x80000000: 1696 case 0x80000000:
1697 dma_mask |= 0x7fffffff; 1697 dma_mask |= 0x7fffffff;
1698 tsbsize = 128; 1698 tsbsize = 128;
1699 break; 1699 break;
1700 1700
1701 default: 1701 default:
1702 prom_printf("SCHIZO: strange virtual-dma size.\n"); 1702 prom_printf("SCHIZO: strange virtual-dma size.\n");
1703 prom_halt(); 1703 prom_halt();
1704 }; 1704 };
1705 1705
1706 /* Register addresses, SCHIZO has iommu ctx flushing. */ 1706 /* Register addresses, SCHIZO has iommu ctx flushing. */
1707 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL; 1707 iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
1708 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE; 1708 iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
1709 iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH; 1709 iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
1710 iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH; 1710 iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
1711 1711
1712 /* We use the main control/status register of SCHIZO as the write 1712 /* We use the main control/status register of SCHIZO as the write
1713 * completion register. 1713 * completion register.
1714 */ 1714 */
1715 iommu->write_complete_reg = pbm->controller_regs + 0x10000UL; 1715 iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;
1716 1716
1717 /* 1717 /*
1718 * Invalidate TLB Entries. 1718 * Invalidate TLB Entries.
1719 */ 1719 */
1720 control = schizo_read(iommu->iommu_control); 1720 control = schizo_read(iommu->iommu_control);
1721 control |= SCHIZO_IOMMU_CTRL_DENAB; 1721 control |= SCHIZO_IOMMU_CTRL_DENAB;
1722 schizo_write(iommu->iommu_control, control); 1722 schizo_write(iommu->iommu_control, control);
1723 1723
1724 tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA; 1724 tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;
1725 1725
1726 for(i = 0; i < 16; i++) { 1726 for(i = 0; i < 16; i++) {
1727 schizo_write(pbm->pbm_regs + tagbase + (i * 8UL), 0); 1727 schizo_write(pbm->pbm_regs + tagbase + (i * 8UL), 0);
1728 schizo_write(pbm->pbm_regs + database + (i * 8UL), 0); 1728 schizo_write(pbm->pbm_regs + database + (i * 8UL), 0);
1729 } 1729 }
1730 1730
1731 /* Leave diag mode enabled for full-flushing done 1731 /* Leave diag mode enabled for full-flushing done
1732 * in pci_iommu.c 1732 * in pci_iommu.c
1733 */ 1733 */
1734 pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask); 1734 pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
1735 1735
1736 schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table)); 1736 schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
1737 1737
1738 control = schizo_read(iommu->iommu_control); 1738 control = schizo_read(iommu->iommu_control);
1739 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ); 1739 control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
1740 switch (tsbsize) { 1740 switch (tsbsize) {
1741 case 64: 1741 case 64:
1742 control |= SCHIZO_IOMMU_TSBSZ_64K; 1742 control |= SCHIZO_IOMMU_TSBSZ_64K;
1743 break; 1743 break;
1744 case 128: 1744 case 128:
1745 control |= SCHIZO_IOMMU_TSBSZ_128K; 1745 control |= SCHIZO_IOMMU_TSBSZ_128K;
1746 break; 1746 break;
1747 }; 1747 };
1748 1748
1749 control |= SCHIZO_IOMMU_CTRL_ENAB; 1749 control |= SCHIZO_IOMMU_CTRL_ENAB;
1750 schizo_write(iommu->iommu_control, control); 1750 schizo_write(iommu->iommu_control, control);
1751 } 1751 }
1752 1752
1753 #define SCHIZO_PCI_IRQ_RETRY (0x1a00UL) 1753 #define SCHIZO_PCI_IRQ_RETRY (0x1a00UL)
1754 #define SCHIZO_IRQ_RETRY_INF 0xffUL 1754 #define SCHIZO_IRQ_RETRY_INF 0xffUL
1755 1755
1756 #define SCHIZO_PCI_DIAG (0x2020UL) 1756 #define SCHIZO_PCI_DIAG (0x2020UL)
1757 #define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */ 1757 #define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */
1758 #define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */ 1758 #define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */
1759 #define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */ 1759 #define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */
1760 #define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */ 1760 #define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */
1761 #define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */ 1761 #define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */
1762 #define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */ 1762 #define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */
1763 #define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */ 1763 #define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */
1764 #define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */ 1764 #define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */
1765 #define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */ 1765 #define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */
1766 1766
1767 #define TOMATILLO_PCI_IOC_CSR (0x2248UL) 1767 #define TOMATILLO_PCI_IOC_CSR (0x2248UL)
1768 #define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL 1768 #define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL
1769 #define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL 1769 #define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL
1770 #define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL 1770 #define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL
1771 #define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL 1771 #define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL
1772 #define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL 1772 #define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL
1773 #define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL 1773 #define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL
1774 #define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL 1774 #define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL
1775 #define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL 1775 #define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL
1776 #define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL 1776 #define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL
1777 #define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL 1777 #define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL
1778 #define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL 1778 #define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL
1779 #define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL 1779 #define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL
1780 #define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL 1780 #define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL
1781 #define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL 1781 #define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL
1782 #define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL 1782 #define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL
1783 1783
1784 #define TOMATILLO_PCI_IOC_TDIAG (0x2250UL) 1784 #define TOMATILLO_PCI_IOC_TDIAG (0x2250UL)
1785 #define TOMATILLO_PCI_IOC_DDIAG (0x2290UL) 1785 #define TOMATILLO_PCI_IOC_DDIAG (0x2290UL)
1786 1786
1787 static void schizo_pbm_hw_init(struct pci_pbm_info *pbm) 1787 static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
1788 { 1788 {
1789 struct property *prop; 1789 struct property *prop;
1790 u64 tmp; 1790 u64 tmp;
1791 1791
1792 schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, 5); 1792 schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, 5);
1793 1793
1794 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL); 1794 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
1795 1795
1796 /* Enable arbiter for all PCI slots. */ 1796 /* Enable arbiter for all PCI slots. */
1797 tmp |= 0xff; 1797 tmp |= 0xff;
1798 1798
1799 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && 1799 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
1800 pbm->chip_version >= 0x2) 1800 pbm->chip_version >= 0x2)
1801 tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; 1801 tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
1802 1802
1803 prop = of_find_property(pbm->prom_node, "no-bus-parking", NULL); 1803 prop = of_find_property(pbm->prom_node, "no-bus-parking", NULL);
1804 if (!prop) 1804 if (!prop)
1805 tmp |= SCHIZO_PCICTRL_PARK; 1805 tmp |= SCHIZO_PCICTRL_PARK;
1806 else 1806 else
1807 tmp &= ~SCHIZO_PCICTRL_PARK; 1807 tmp &= ~SCHIZO_PCICTRL_PARK;
1808 1808
1809 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && 1809 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
1810 pbm->chip_version <= 0x1) 1810 pbm->chip_version <= 0x1)
1811 tmp |= SCHIZO_PCICTRL_DTO_INT; 1811 tmp |= SCHIZO_PCICTRL_DTO_INT;
1812 else 1812 else
1813 tmp &= ~SCHIZO_PCICTRL_DTO_INT; 1813 tmp &= ~SCHIZO_PCICTRL_DTO_INT;
1814 1814
1815 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) 1815 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
1816 tmp |= (SCHIZO_PCICTRL_MRM_PREF | 1816 tmp |= (SCHIZO_PCICTRL_MRM_PREF |
1817 SCHIZO_PCICTRL_RDO_PREF | 1817 SCHIZO_PCICTRL_RDO_PREF |
1818 SCHIZO_PCICTRL_RDL_PREF); 1818 SCHIZO_PCICTRL_RDL_PREF);
1819 1819
1820 schizo_write(pbm->pbm_regs + SCHIZO_PCI_CTRL, tmp); 1820 schizo_write(pbm->pbm_regs + SCHIZO_PCI_CTRL, tmp);
1821 1821
1822 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_DIAG); 1822 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_DIAG);
1823 tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB | 1823 tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
1824 SCHIZO_PCIDIAG_D_RETRY | 1824 SCHIZO_PCIDIAG_D_RETRY |
1825 SCHIZO_PCIDIAG_D_INTSYNC); 1825 SCHIZO_PCIDIAG_D_INTSYNC);
1826 schizo_write(pbm->pbm_regs + SCHIZO_PCI_DIAG, tmp); 1826 schizo_write(pbm->pbm_regs + SCHIZO_PCI_DIAG, tmp);
1827 1827
1828 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { 1828 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
1829 /* Clear prefetch lengths to workaround a bug in 1829 /* Clear prefetch lengths to workaround a bug in
1830 * Jalapeno... 1830 * Jalapeno...
1831 */ 1831 */
1832 tmp = (TOMATILLO_IOC_PART_WPENAB | 1832 tmp = (TOMATILLO_IOC_PART_WPENAB |
1833 (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) | 1833 (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
1834 TOMATILLO_IOC_RDMULT_CPENAB | 1834 TOMATILLO_IOC_RDMULT_CPENAB |
1835 TOMATILLO_IOC_RDONE_CPENAB | 1835 TOMATILLO_IOC_RDONE_CPENAB |
1836 TOMATILLO_IOC_RDLINE_CPENAB); 1836 TOMATILLO_IOC_RDLINE_CPENAB);
1837 1837
1838 schizo_write(pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR, 1838 schizo_write(pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR,
1839 tmp); 1839 tmp);
1840 } 1840 }
1841 } 1841 }
1842 1842
1843 static void schizo_pbm_init(struct pci_controller_info *p, 1843 static void schizo_pbm_init(struct pci_controller_info *p,
1844 struct device_node *dp, u32 portid, 1844 struct device_node *dp, u32 portid,
1845 int chip_type) 1845 int chip_type)
1846 { 1846 {
1847 struct linux_prom64_registers *regs; 1847 struct linux_prom64_registers *regs;
1848 struct property *prop; 1848 struct property *prop;
1849 unsigned int *busrange; 1849 unsigned int *busrange;
1850 struct pci_pbm_info *pbm; 1850 struct pci_pbm_info *pbm;
1851 const char *chipset_name; 1851 const char *chipset_name;
1852 u32 *ino_bitmap; 1852 u32 *ino_bitmap;
1853 int is_pbm_a; 1853 int is_pbm_a;
1854 int len; 1854 int len;
1855 1855
1856 switch (chip_type) { 1856 switch (chip_type) {
1857 case PBM_CHIP_TYPE_TOMATILLO: 1857 case PBM_CHIP_TYPE_TOMATILLO:
1858 chipset_name = "TOMATILLO"; 1858 chipset_name = "TOMATILLO";
1859 break; 1859 break;
1860 1860
1861 case PBM_CHIP_TYPE_SCHIZO_PLUS: 1861 case PBM_CHIP_TYPE_SCHIZO_PLUS:
1862 chipset_name = "SCHIZO+"; 1862 chipset_name = "SCHIZO+";
1863 break; 1863 break;
1864 1864
1865 case PBM_CHIP_TYPE_SCHIZO: 1865 case PBM_CHIP_TYPE_SCHIZO:
1866 default: 1866 default:
1867 chipset_name = "SCHIZO"; 1867 chipset_name = "SCHIZO";
1868 break; 1868 break;
1869 }; 1869 };
1870 1870
1871 /* For SCHIZO, three OBP regs: 1871 /* For SCHIZO, three OBP regs:
1872 * 1) PBM controller regs 1872 * 1) PBM controller regs
1873 * 2) Schizo front-end controller regs (same for both PBMs) 1873 * 2) Schizo front-end controller regs (same for both PBMs)
1874 * 3) PBM PCI config space 1874 * 3) PBM PCI config space
1875 * 1875 *
1876 * For TOMATILLO, four OBP regs: 1876 * For TOMATILLO, four OBP regs:
1877 * 1) PBM controller regs 1877 * 1) PBM controller regs
1878 * 2) Tomatillo front-end controller regs 1878 * 2) Tomatillo front-end controller regs
1879 * 3) PBM PCI config space 1879 * 3) PBM PCI config space
1880 * 4) Ichip regs 1880 * 4) Ichip regs
1881 */ 1881 */
1882 prop = of_find_property(dp, "reg", NULL); 1882 prop = of_find_property(dp, "reg", NULL);
1883 regs = prop->value; 1883 regs = prop->value;
1884 1884
1885 is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000); 1885 is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
1886 1886
1887 if (is_pbm_a) 1887 if (is_pbm_a)
1888 pbm = &p->pbm_A; 1888 pbm = &p->pbm_A;
1889 else 1889 else
1890 pbm = &p->pbm_B; 1890 pbm = &p->pbm_B;
1891 1891
1892 pbm->portid = portid; 1892 pbm->portid = portid;
1893 pbm->parent = p; 1893 pbm->parent = p;
1894 pbm->prom_node = dp; 1894 pbm->prom_node = dp;
1895 pbm->pci_first_slot = 1; 1895 pbm->pci_first_slot = 1;
1896 1896
1897 pbm->chip_type = chip_type; 1897 pbm->chip_type = chip_type;
1898 pbm->chip_version = 0; 1898 pbm->chip_version = 0;
1899 prop = of_find_property(dp, "version#", NULL); 1899 prop = of_find_property(dp, "version#", NULL);
1900 if (prop) 1900 if (prop)
1901 pbm->chip_version = *(int *) prop->value; 1901 pbm->chip_version = *(int *) prop->value;
1902 pbm->chip_revision = 0; 1902 pbm->chip_revision = 0;
1903 prop = of_find_property(dp, "module-revision#", NULL); 1903 prop = of_find_property(dp, "module-revision#", NULL);
1904 if (prop) 1904 if (prop)
1905 pbm->chip_revision = *(int *) prop->value; 1905 pbm->chip_revision = *(int *) prop->value;
1906 1906
1907 pbm->pbm_regs = regs[0].phys_addr; 1907 pbm->pbm_regs = regs[0].phys_addr;
1908 pbm->controller_regs = regs[1].phys_addr - 0x10000UL; 1908 pbm->controller_regs = regs[1].phys_addr - 0x10000UL;
1909 1909
1910 if (chip_type == PBM_CHIP_TYPE_TOMATILLO) 1910 if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
1911 pbm->sync_reg = regs[3].phys_addr + 0x1a18UL; 1911 pbm->sync_reg = regs[3].phys_addr + 0x1a18UL;
1912 1912
1913 pbm->name = dp->full_name; 1913 pbm->name = dp->full_name;
1914 1914
1915 printk("%s: %s PCI Bus Module ver[%x:%x]\n", 1915 printk("%s: %s PCI Bus Module ver[%x:%x]\n",
1916 pbm->name, 1916 pbm->name,
1917 (chip_type == PBM_CHIP_TYPE_TOMATILLO ? 1917 (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
1918 "TOMATILLO" : "SCHIZO"), 1918 "TOMATILLO" : "SCHIZO"),
1919 pbm->chip_version, pbm->chip_revision); 1919 pbm->chip_version, pbm->chip_revision);
1920 1920
1921 schizo_pbm_hw_init(pbm); 1921 schizo_pbm_hw_init(pbm);
1922 1922
1923 prop = of_find_property(dp, "ranges", &len); 1923 prop = of_find_property(dp, "ranges", &len);
1924 pbm->pbm_ranges = prop->value; 1924 pbm->pbm_ranges = prop->value;
1925 pbm->num_pbm_ranges = 1925 pbm->num_pbm_ranges =
1926 (len / sizeof(struct linux_prom_pci_ranges)); 1926 (len / sizeof(struct linux_prom_pci_ranges));
1927 1927
1928 schizo_determine_mem_io_space(pbm); 1928 schizo_determine_mem_io_space(pbm);
1929 pbm_register_toplevel_resources(p, pbm); 1929 pbm_register_toplevel_resources(p, pbm);
1930 1930
1931 prop = of_find_property(dp, "interrupt-map", &len); 1931 prop = of_find_property(dp, "interrupt-map", &len);
1932 if (prop) { 1932 if (prop) {
1933 pbm->pbm_intmap = prop->value; 1933 pbm->pbm_intmap = prop->value;
1934 pbm->num_pbm_intmap = 1934 pbm->num_pbm_intmap =
1935 (len / sizeof(struct linux_prom_pci_intmap)); 1935 (len / sizeof(struct linux_prom_pci_intmap));
1936 1936
1937 prop = of_find_property(dp, "interrupt-map-mask", NULL); 1937 prop = of_find_property(dp, "interrupt-map-mask", NULL);
1938 pbm->pbm_intmask = prop->value; 1938 pbm->pbm_intmask = prop->value;
1939 } else { 1939 } else {
1940 pbm->num_pbm_intmap = 0; 1940 pbm->num_pbm_intmap = 0;
1941 } 1941 }
1942 1942
1943 prop = of_find_property(dp, "ino-bitmap", NULL); 1943 prop = of_find_property(dp, "ino-bitmap", NULL);
1944 ino_bitmap = prop->value; 1944 ino_bitmap = prop->value;
1945 pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) | 1945 pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
1946 ((u64)ino_bitmap[0] << 0UL)); 1946 ((u64)ino_bitmap[0] << 0UL));
1947 1947
1948 prop = of_find_property(dp, "bus-range", NULL); 1948 prop = of_find_property(dp, "bus-range", NULL);
1949 busrange = prop->value; 1949 busrange = prop->value;
1950 pbm->pci_first_busno = busrange[0]; 1950 pbm->pci_first_busno = busrange[0];
1951 pbm->pci_last_busno = busrange[1]; 1951 pbm->pci_last_busno = busrange[1];
1952 1952
1953 schizo_pbm_iommu_init(pbm); 1953 schizo_pbm_iommu_init(pbm);
1954 schizo_pbm_strbuf_init(pbm); 1954 schizo_pbm_strbuf_init(pbm);
1955 } 1955 }
1956 1956
1957 static inline int portid_compare(u32 x, u32 y, int chip_type) 1957 static inline int portid_compare(u32 x, u32 y, int chip_type)
1958 { 1958 {
1959 if (chip_type == PBM_CHIP_TYPE_TOMATILLO) { 1959 if (chip_type == PBM_CHIP_TYPE_TOMATILLO) {
1960 if (x == (y ^ 1)) 1960 if (x == (y ^ 1))
1961 return 1; 1961 return 1;
1962 return 0; 1962 return 0;
1963 } 1963 }
1964 return (x == y); 1964 return (x == y);
1965 } 1965 }
1966 1966
1967 static void __schizo_init(struct device_node *dp, char *model_name, int chip_type) 1967 static void __schizo_init(struct device_node *dp, char *model_name, int chip_type)
1968 { 1968 {
1969 struct pci_controller_info *p; 1969 struct pci_controller_info *p;
1970 struct pci_iommu *iommu; 1970 struct pci_iommu *iommu;
1971 struct property *prop; 1971 struct property *prop;
1972 int is_pbm_a; 1972 int is_pbm_a;
1973 u32 portid; 1973 u32 portid;
1974 1974
1975 portid = 0xff; 1975 portid = 0xff;
1976 prop = of_find_property(dp, "portid", NULL); 1976 prop = of_find_property(dp, "portid", NULL);
1977 if (prop) 1977 if (prop)
1978 portid = *(u32 *) prop->value; 1978 portid = *(u32 *) prop->value;
1979 1979
1980 for (p = pci_controller_root; p; p = p->next) { 1980 for (p = pci_controller_root; p; p = p->next) {
1981 struct pci_pbm_info *pbm; 1981 struct pci_pbm_info *pbm;
1982 1982
1983 if (p->pbm_A.prom_node && p->pbm_B.prom_node) 1983 if (p->pbm_A.prom_node && p->pbm_B.prom_node)
1984 continue; 1984 continue;
1985 1985
1986 pbm = (p->pbm_A.prom_node ? 1986 pbm = (p->pbm_A.prom_node ?
1987 &p->pbm_A : 1987 &p->pbm_A :
1988 &p->pbm_B); 1988 &p->pbm_B);
1989 1989
1990 if (portid_compare(pbm->portid, portid, chip_type)) { 1990 if (portid_compare(pbm->portid, portid, chip_type)) {
1991 is_pbm_a = (p->pbm_A.prom_node == NULL); 1991 is_pbm_a = (p->pbm_A.prom_node == NULL);
1992 schizo_pbm_init(p, dp, portid, chip_type); 1992 schizo_pbm_init(p, dp, portid, chip_type);
1993 return; 1993 return;
1994 } 1994 }
1995 } 1995 }
1996 1996
1997 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 1997 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1998 if (!p) { 1998 if (!p) {
1999 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 1999 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2000 prom_halt(); 2000 prom_halt();
2001 } 2001 }
2002 2002
2003 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 2003 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2004 if (!iommu) { 2004 if (!iommu) {
2005 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 2005 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2006 prom_halt(); 2006 prom_halt();
2007 } 2007 }
2008 p->pbm_A.iommu = iommu; 2008 p->pbm_A.iommu = iommu;
2009 2009
2010 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 2010 iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
2011 if (!iommu) { 2011 if (!iommu) {
2012 prom_printf("SCHIZO: Fatal memory allocation error.\n"); 2012 prom_printf("SCHIZO: Fatal memory allocation error.\n");
2013 prom_halt(); 2013 prom_halt();
2014 } 2014 }
2015 p->pbm_B.iommu = iommu; 2015 p->pbm_B.iommu = iommu;
2016 2016
2017 p->next = pci_controller_root; 2017 p->next = pci_controller_root;
2018 pci_controller_root = p; 2018 pci_controller_root = p;
2019 2019
2020 p->index = pci_num_controllers++; 2020 p->index = pci_num_controllers++;
2021 p->pbms_same_domain = 0; 2021 p->pbms_same_domain = 0;
2022 p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ? 2022 p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
2023 tomatillo_scan_bus : 2023 tomatillo_scan_bus :
2024 schizo_scan_bus); 2024 schizo_scan_bus);
2025 p->irq_build = schizo_irq_build; 2025 p->irq_build = schizo_irq_build;
2026 p->base_address_update = schizo_base_address_update; 2026 p->base_address_update = schizo_base_address_update;
2027 p->resource_adjust = schizo_resource_adjust; 2027 p->resource_adjust = schizo_resource_adjust;
2028 p->pci_ops = &schizo_ops; 2028 p->pci_ops = &schizo_ops;
2029 2029
2030 /* Like PSYCHO we have a 2GB aligned area for memory space. */ 2030 /* Like PSYCHO we have a 2GB aligned area for memory space. */
2031 pci_memspace_mask = 0x7fffffffUL; 2031 pci_memspace_mask = 0x7fffffffUL;
2032 2032
2033 schizo_pbm_init(p, dp, portid, chip_type); 2033 schizo_pbm_init(p, dp, portid, chip_type);
2034 } 2034 }
2035 2035
/* Probe entry point for plain SCHIZO host bridge nodes. */
void schizo_init(struct device_node *dp, char *model_name)
{
	__schizo_init(dp, model_name, PBM_CHIP_TYPE_SCHIZO);
}
2040 2040
/* Probe entry point for SCHIZO+ host bridge nodes. */
void schizo_plus_init(struct device_node *dp, char *model_name)
{
	__schizo_init(dp, model_name, PBM_CHIP_TYPE_SCHIZO_PLUS);
}
2045 2045
/* Probe entry point for TOMATILLO host bridge nodes. */
void tomatillo_init(struct device_node *dp, char *model_name)
{
	__schizo_init(dp, model_name, PBM_CHIP_TYPE_TOMATILLO);
}
2050 2050
arch/sparc64/kernel/pci_sun4v.c
1 /* pci_sun4v.c: SUN4V specific PCI controller support. 1 /* pci_sun4v.c: SUN4V specific PCI controller support.
2 * 2 *
3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net) 3 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
4 */ 4 */
5 5
6 #include <linux/kernel.h> 6 #include <linux/kernel.h>
7 #include <linux/types.h> 7 #include <linux/types.h>
8 #include <linux/pci.h> 8 #include <linux/pci.h>
9 #include <linux/init.h> 9 #include <linux/init.h>
10 #include <linux/slab.h> 10 #include <linux/slab.h>
11 #include <linux/interrupt.h> 11 #include <linux/interrupt.h>
12 #include <linux/percpu.h> 12 #include <linux/percpu.h>
13 13
14 #include <asm/pbm.h> 14 #include <asm/pbm.h>
15 #include <asm/iommu.h> 15 #include <asm/iommu.h>
16 #include <asm/irq.h> 16 #include <asm/irq.h>
17 #include <asm/upa.h> 17 #include <asm/upa.h>
18 #include <asm/pstate.h> 18 #include <asm/pstate.h>
19 #include <asm/oplib.h> 19 #include <asm/oplib.h>
20 #include <asm/hypervisor.h> 20 #include <asm/hypervisor.h>
21 #include <asm/prom.h> 21 #include <asm/prom.h>
22 22
23 #include "pci_impl.h" 23 #include "pci_impl.h"
24 #include "iommu_common.h" 24 #include "iommu_common.h"
25 25
26 #include "pci_sun4v.h" 26 #include "pci_sun4v.h"
27 27
28 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 28 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
29 29
/* Per-cpu staging area used to accumulate IOMMU mapping requests so
 * they can be issued to the hypervisor in bulk (see
 * pci_iommu_batch_flush()).  Filled/flushed with interrupts disabled.
 */
struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};
37 37
38 static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); 38 static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
39 39
40 /* Interrupts must be disabled. */ 40 /* Interrupts must be disabled. */
41 static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) 41 static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
42 { 42 {
43 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 43 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
44 44
45 p->pdev = pdev; 45 p->pdev = pdev;
46 p->prot = prot; 46 p->prot = prot;
47 p->entry = entry; 47 p->entry = entry;
48 p->npages = 0; 48 p->npages = 0;
49 } 49 }
50 50
/* Interrupts must be disabled. */
/* Push the staged page list to the hypervisor, mapping it into the
 * device's IOTSB starting at p->entry.  The hypervisor may map fewer
 * pages than requested per call, so loop until the list is consumed.
 * Returns 0 on success, -1 if the hypervisor reports an error.
 */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		/* num < 0 is a hypervisor error status; otherwise it is
		 * the count of pages actually mapped by this call.
		 */
		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	/* Advance the batch past what was mapped and reset the list. */
	p->entry = entry;
	p->npages = 0;

	return 0;
}
86 86
87 /* Interrupts must be disabled. */ 87 /* Interrupts must be disabled. */
88 static inline long pci_iommu_batch_add(u64 phys_page) 88 static inline long pci_iommu_batch_add(u64 phys_page)
89 { 89 {
90 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 90 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
91 91
92 BUG_ON(p->npages >= PGLIST_NENTS); 92 BUG_ON(p->npages >= PGLIST_NENTS);
93 93
94 p->pglist[p->npages++] = phys_page; 94 p->pglist[p->npages++] = phys_page;
95 if (p->npages == PGLIST_NENTS) 95 if (p->npages == PGLIST_NENTS)
96 return pci_iommu_batch_flush(p); 96 return pci_iommu_batch_flush(p);
97 97
98 return 0; 98 return 0;
99 } 99 }
100 100
101 /* Interrupts must be disabled. */ 101 /* Interrupts must be disabled. */
102 static inline long pci_iommu_batch_end(void) 102 static inline long pci_iommu_batch_end(void)
103 { 103 {
104 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); 104 struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
105 105
106 BUG_ON(p->npages >= PGLIST_NENTS); 106 BUG_ON(p->npages >= PGLIST_NENTS);
107 107
108 return pci_iommu_batch_flush(p); 108 return pci_iommu_batch_flush(p);
109 } 109 }
110 110
/* Allocate a run of @npages contiguous entries from the arena bitmap.
 * Returns the index of the first entry, or -1 if no run is available.
 * Caller must hold the owning iommu's lock (all callers in this file
 * do).
 *
 * The search starts at arena->hint; a second pass wraps back to the
 * start of the map so the whole arena is scanned exactly once.
 *
 * NOTE(review): the 'end >= limit' test also rejects a run ending
 * exactly at limit, wasting the final entry -- presumably harmless;
 * confirm before tightening to 'end > limit'.
 */
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			/* Wrap around and rescan [0, old start) once. */
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	/* find_next_zero_bit() only guarantees the first bit is clear;
	 * verify the rest of the run, restarting just past any set bit.
	 */
	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	/* Remember where we stopped, to spread allocations around. */
	arena->hint = end;

	return n;
}
149 149
150 static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) 150 static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
151 { 151 {
152 unsigned long i; 152 unsigned long i;
153 153
154 for (i = base; i < (base + npages); i++) 154 for (i = base; i < (base + npages); i++)
155 __clear_bit(i, arena->map); 155 __clear_bit(i, arena->map);
156 } 156 }
157 157
158 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) 158 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
159 { 159 {
160 struct pcidev_cookie *pcp; 160 struct pcidev_cookie *pcp;
161 struct pci_iommu *iommu; 161 struct pci_iommu *iommu;
162 unsigned long flags, order, first_page, npages, n; 162 unsigned long flags, order, first_page, npages, n;
163 void *ret; 163 void *ret;
164 long entry; 164 long entry;
165 165
166 size = IO_PAGE_ALIGN(size); 166 size = IO_PAGE_ALIGN(size);
167 order = get_order(size); 167 order = get_order(size);
168 if (unlikely(order >= MAX_ORDER)) 168 if (unlikely(order >= MAX_ORDER))
169 return NULL; 169 return NULL;
170 170
171 npages = size >> IO_PAGE_SHIFT; 171 npages = size >> IO_PAGE_SHIFT;
172 172
173 first_page = __get_free_pages(gfp, order); 173 first_page = __get_free_pages(gfp, order);
174 if (unlikely(first_page == 0UL)) 174 if (unlikely(first_page == 0UL))
175 return NULL; 175 return NULL;
176 176
177 memset((char *)first_page, 0, PAGE_SIZE << order); 177 memset((char *)first_page, 0, PAGE_SIZE << order);
178 178
179 pcp = pdev->sysdata; 179 pcp = pdev->sysdata;
180 iommu = pcp->pbm->iommu; 180 iommu = pcp->pbm->iommu;
181 181
182 spin_lock_irqsave(&iommu->lock, flags); 182 spin_lock_irqsave(&iommu->lock, flags);
183 entry = pci_arena_alloc(&iommu->arena, npages); 183 entry = pci_arena_alloc(&iommu->arena, npages);
184 spin_unlock_irqrestore(&iommu->lock, flags); 184 spin_unlock_irqrestore(&iommu->lock, flags);
185 185
186 if (unlikely(entry < 0L)) 186 if (unlikely(entry < 0L))
187 goto arena_alloc_fail; 187 goto arena_alloc_fail;
188 188
189 *dma_addrp = (iommu->page_table_map_base + 189 *dma_addrp = (iommu->page_table_map_base +
190 (entry << IO_PAGE_SHIFT)); 190 (entry << IO_PAGE_SHIFT));
191 ret = (void *) first_page; 191 ret = (void *) first_page;
192 first_page = __pa(first_page); 192 first_page = __pa(first_page);
193 193
194 local_irq_save(flags); 194 local_irq_save(flags);
195 195
196 pci_iommu_batch_start(pdev, 196 pci_iommu_batch_start(pdev,
197 (HV_PCI_MAP_ATTR_READ | 197 (HV_PCI_MAP_ATTR_READ |
198 HV_PCI_MAP_ATTR_WRITE), 198 HV_PCI_MAP_ATTR_WRITE),
199 entry); 199 entry);
200 200
201 for (n = 0; n < npages; n++) { 201 for (n = 0; n < npages; n++) {
202 long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE)); 202 long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
203 if (unlikely(err < 0L)) 203 if (unlikely(err < 0L))
204 goto iommu_map_fail; 204 goto iommu_map_fail;
205 } 205 }
206 206
207 if (unlikely(pci_iommu_batch_end() < 0L)) 207 if (unlikely(pci_iommu_batch_end() < 0L))
208 goto iommu_map_fail; 208 goto iommu_map_fail;
209 209
210 local_irq_restore(flags); 210 local_irq_restore(flags);
211 211
212 return ret; 212 return ret;
213 213
214 iommu_map_fail: 214 iommu_map_fail:
215 /* Interrupts are disabled. */ 215 /* Interrupts are disabled. */
216 spin_lock(&iommu->lock); 216 spin_lock(&iommu->lock);
217 pci_arena_free(&iommu->arena, entry, npages); 217 pci_arena_free(&iommu->arena, entry, npages);
218 spin_unlock_irqrestore(&iommu->lock, flags); 218 spin_unlock_irqrestore(&iommu->lock, flags);
219 219
220 arena_alloc_fail: 220 arena_alloc_fail:
221 free_pages(first_page, order); 221 free_pages(first_page, order);
222 return NULL; 222 return NULL;
223 } 223 }
224 224
/* pci_free_consistent() for sun4v: demap the IOMMU entries backing
 * bus address @dvma and return the pages at @cpu to the allocator.
 */
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	/* The hypervisor may demap fewer entries than requested per
	 * call, so iterate until the whole range is torn down.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	/* NOTE(review): orders >= 10 are silently not freed here --
	 * presumably unreachable given the MAX_ORDER check in
	 * pci_4v_alloc_consistent(); confirm before relying on it.
	 */
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
257 257
/* pci_map_single() for sun4v: create an IOMMU mapping covering the
 * buffer at @ptr of @sz bytes and return the device-visible bus
 * address, or PCI_DMA_ERROR_CODE on failure.
 *
 * NOTE(review): bus_addr/ret are u32, so this assumes the IOMMU DVMA
 * window lives entirely below 4GB -- confirm against the arena setup.
 */
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Number of whole IO pages spanned by [oaddr, oaddr + sz). */
	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	/* Preserve the sub-page offset of the original buffer. */
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	/* Write access is granted for everything except TODEVICE. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	/* The batch state is per-cpu, hence interrupts must be off. */
	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled; flags still holds the state saved
	 * by local_irq_save() above, so irqrestore below is correct.
	 */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}
322 322
/* pci_unmap_single() for sun4v: tear down the IOMMU mapping that
 * pci_4v_map_single() created for @bus_addr/@sz.
 */
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	/* Round the region out to whole IO pages, mirroring the
	 * npages computation done at map time.
	 */
	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	/* The hypervisor may demap fewer entries than requested per
	 * call, so loop until the whole range is gone.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
361 361
362 #define SG_ENT_PHYS_ADDRESS(SG) \ 362 #define SG_ENT_PHYS_ADDRESS(SG) \
363 (__pa(page_address((SG)->page)) + (SG)->offset) 363 (__pa(page_address((SG)->page)) + (SG)->offset)
364 364
365 static inline long fill_sg(long entry, struct pci_dev *pdev, 365 static inline long fill_sg(long entry, struct pci_dev *pdev,
366 struct scatterlist *sg, 366 struct scatterlist *sg,
367 int nused, int nelems, unsigned long prot) 367 int nused, int nelems, unsigned long prot)
368 { 368 {
369 struct scatterlist *dma_sg = sg; 369 struct scatterlist *dma_sg = sg;
370 struct scatterlist *sg_end = sg + nelems; 370 struct scatterlist *sg_end = sg + nelems;
371 unsigned long flags; 371 unsigned long flags;
372 int i; 372 int i;
373 373
374 local_irq_save(flags); 374 local_irq_save(flags);
375 375
376 pci_iommu_batch_start(pdev, prot, entry); 376 pci_iommu_batch_start(pdev, prot, entry);
377 377
378 for (i = 0; i < nused; i++) { 378 for (i = 0; i < nused; i++) {
379 unsigned long pteval = ~0UL; 379 unsigned long pteval = ~0UL;
380 u32 dma_npages; 380 u32 dma_npages;
381 381
382 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) + 382 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
383 dma_sg->dma_length + 383 dma_sg->dma_length +
384 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT; 384 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
385 do { 385 do {
386 unsigned long offset; 386 unsigned long offset;
387 signed int len; 387 signed int len;
388 388
389 /* If we are here, we know we have at least one 389 /* If we are here, we know we have at least one
390 * more page to map. So walk forward until we 390 * more page to map. So walk forward until we
391 * hit a page crossing, and begin creating new 391 * hit a page crossing, and begin creating new
392 * mappings from that spot. 392 * mappings from that spot.
393 */ 393 */
394 for (;;) { 394 for (;;) {
395 unsigned long tmp; 395 unsigned long tmp;
396 396
397 tmp = SG_ENT_PHYS_ADDRESS(sg); 397 tmp = SG_ENT_PHYS_ADDRESS(sg);
398 len = sg->length; 398 len = sg->length;
399 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) { 399 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
400 pteval = tmp & IO_PAGE_MASK; 400 pteval = tmp & IO_PAGE_MASK;
401 offset = tmp & (IO_PAGE_SIZE - 1UL); 401 offset = tmp & (IO_PAGE_SIZE - 1UL);
402 break; 402 break;
403 } 403 }
404 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) { 404 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
405 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK; 405 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
406 offset = 0UL; 406 offset = 0UL;
407 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); 407 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
408 break; 408 break;
409 } 409 }
410 sg++; 410 sg++;
411 } 411 }
412 412
413 pteval = (pteval & IOPTE_PAGE); 413 pteval = (pteval & IOPTE_PAGE);
414 while (len > 0) { 414 while (len > 0) {
415 long err; 415 long err;
416 416
417 err = pci_iommu_batch_add(pteval); 417 err = pci_iommu_batch_add(pteval);
418 if (unlikely(err < 0L)) 418 if (unlikely(err < 0L))
419 goto iommu_map_failed; 419 goto iommu_map_failed;
420 420
421 pteval += IO_PAGE_SIZE; 421 pteval += IO_PAGE_SIZE;
422 len -= (IO_PAGE_SIZE - offset); 422 len -= (IO_PAGE_SIZE - offset);
423 offset = 0; 423 offset = 0;
424 dma_npages--; 424 dma_npages--;
425 } 425 }
426 426
427 pteval = (pteval & IOPTE_PAGE) + len; 427 pteval = (pteval & IOPTE_PAGE) + len;
428 sg++; 428 sg++;
429 429
430 /* Skip over any tail mappings we've fully mapped, 430 /* Skip over any tail mappings we've fully mapped,
431 * adjusting pteval along the way. Stop when we 431 * adjusting pteval along the way. Stop when we
432 * detect a page crossing event. 432 * detect a page crossing event.
433 */ 433 */
434 while (sg < sg_end && 434 while (sg < sg_end &&
435 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && 435 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
436 (pteval == SG_ENT_PHYS_ADDRESS(sg)) && 436 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
437 ((pteval ^ 437 ((pteval ^
438 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { 438 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
439 pteval += sg->length; 439 pteval += sg->length;
440 sg++; 440 sg++;
441 } 441 }
442 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) 442 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
443 pteval = ~0UL; 443 pteval = ~0UL;
444 } while (dma_npages != 0); 444 } while (dma_npages != 0);
445 dma_sg++; 445 dma_sg++;
446 } 446 }
447 447
448 if (unlikely(pci_iommu_batch_end() < 0L)) 448 if (unlikely(pci_iommu_batch_end() < 0L))
449 goto iommu_map_failed; 449 goto iommu_map_failed;
450 450
451 local_irq_restore(flags); 451 local_irq_restore(flags);
452 return 0; 452 return 0;
453 453
454 iommu_map_failed: 454 iommu_map_failed:
455 local_irq_restore(flags); 455 local_irq_restore(flags);
456 return -1L; 456 return -1L;
457 } 457 }
458 458
/* Map a scatterlist for DMA through the sun4v hypervisor IOMMU.
 *
 * Returns the number of coalesced DMA segments in use, or 0 on
 * failure (bad direction, arena exhaustion, or hypervisor map error).
 */
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary.
	 * Only the arena bitmap needs the lock; the hypervisor calls
	 * in fill_sg() run with IRQs disabled instead.
	 */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses.
	 * prepare_sg() left bus-relative offsets in dma_address and a
	 * zero dma_length on unused trailing entries; rebase the used
	 * ones onto the allocated IOMMU range.
	 */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
535 535
/* Unmap a previously mapped scatterlist.
 *
 * The extent to demap runs from the first entry's page-aligned base
 * address through the end of the last entry with a non-zero
 * dma_length (the map path zeroes dma_length on unused tail entries).
 */
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		/* NOTE(review): this only warns and then falls through
		 * to perform the unmap anyway -- confirm that is the
		 * intended behavior.
		 */
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	/* Locate the last used segment. */
	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	/* The hypervisor may demap fewer pages than requested per
	 * call, so loop until the whole range is torn down.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
579 579
/* No-op on sun4v: there is no explicit sync work to perform here. */
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}
584 584
/* No-op on sun4v: there is no explicit sync work to perform here. */
static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
589 589
/* DMA operations vector for hypervisor-mediated (sun4v) PCI. */
struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
600 600
601 /* SUN4V PCI configuration space accessors. */ 601 /* SUN4V PCI configuration space accessors. */
602 602
/* Hash table recording each (devhandle, bus, device, func) tuple found
 * in the OBP device tree; pci_sun4v_out_of_range() consults it to
 * reject config space accesses to devices that do not exist.
 */
struct pdev_entry {
	struct pdev_entry *next;	/* hash-chain link */
	u32 devhandle;
	unsigned int bus;
	unsigned int device;
	unsigned int func;
};

#define PDEV_HTAB_SIZE 16
#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
614 614
615 static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) 615 static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
616 { 616 {
617 unsigned int val; 617 unsigned int val;
618 618
619 val = (devhandle ^ (devhandle >> 4)); 619 val = (devhandle ^ (devhandle >> 4));
620 val ^= bus; 620 val ^= bus;
621 val ^= device; 621 val ^= device;
622 val ^= func; 622 val ^= func;
623 623
624 return val & PDEV_HTAB_MASK; 624 return val & PDEV_HTAB_MASK;
625 } 625 }
626 626
627 static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) 627 static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
628 { 628 {
629 struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL); 629 struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
630 struct pdev_entry **slot; 630 struct pdev_entry **slot;
631 631
632 if (!p) 632 if (!p)
633 return -ENOMEM; 633 return -ENOMEM;
634 634
635 slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)]; 635 slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
636 p->next = *slot; 636 p->next = *slot;
637 *slot = p; 637 *slot = p;
638 638
639 p->devhandle = devhandle; 639 p->devhandle = devhandle;
640 p->bus = bus; 640 p->bus = bus;
641 p->device = device; 641 p->device = device;
642 p->func = func; 642 p->func = func;
643 643
644 return 0; 644 return 0;
645 } 645 }
646 646
/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 *
 * Returns non-zero if a matching node exists anywhere beneath
 * toplevel_node, zero otherwise.
 */
static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
{
	toplevel_node = toplevel_node->child;

	while (toplevel_node != NULL) {
		struct linux_prom_pci_registers *regs;
		struct property *prop;
		int ret;

		/* Depth-first: search this child's subtree before
		 * examining the child itself.
		 */
		ret = obp_find(toplevel_node, bus, devfn);
		if (ret != 0)
			return ret;

		/* A node with no "reg" property cannot describe a PCI
		 * device; move on to the next sibling.
		 */
		prop = of_find_property(toplevel_node, "reg", NULL);
		if (!prop)
			goto next_sibling;

		/* OF PCI "reg" encoding: phys_hi bits 23:16 hold the
		 * bus number and bits 15:8 hold the devfn.
		 */
		regs = prop->value;
		if (((regs->phys_hi >> 16) & 0xff) == bus &&
		    ((regs->phys_hi >> 8) & 0xff) == devfn)
			break;

	next_sibling:
		toplevel_node = toplevel_node->sibling;
	}

	return toplevel_node != NULL;
}
678 678
/* Walk every possible (bus, devfn) under this PBM's bus range and
 * record in the pdev hash table those which actually appear in the
 * OBP device tree.
 *
 * Returns 0 on success, or the pdev_htab_add() error (-ENOMEM) on
 * allocation failure.
 */
static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
	u32 devhandle = pbm->devhandle;
	unsigned int bus;

	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
		unsigned int devfn;

		/* 256 devfn values per bus: 32 devices x 8 functions. */
		for (devfn = 0; devfn < 256; devfn++) {
			unsigned int device = PCI_SLOT(devfn);
			unsigned int func = PCI_FUNC(devfn);

			if (obp_find(pbm->prom_node, bus, devfn)) {
				int err = pdev_htab_add(devhandle, bus,
							device, func);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
702 702
703 static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func) 703 static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
704 { 704 {
705 struct pdev_entry *p; 705 struct pdev_entry *p;
706 706
707 p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)]; 707 p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
708 while (p) { 708 while (p) {
709 if (p->devhandle == devhandle && 709 if (p->devhandle == devhandle &&
710 p->bus == bus && 710 p->bus == bus &&
711 p->device == device && 711 p->device == device &&
712 p->func == func) 712 p->func == func)
713 break; 713 break;
714 714
715 p = p->next; 715 p = p->next;
716 } 716 }
717 717
718 return p; 718 return p;
719 } 719 }
720 720
721 static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) 721 static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
722 { 722 {
723 if (bus < pbm->pci_first_busno || 723 if (bus < pbm->pci_first_busno ||
724 bus > pbm->pci_last_busno) 724 bus > pbm->pci_last_busno)
725 return 1; 725 return 1;
726 return pdev_find(pbm->devhandle, bus, device, func) == NULL; 726 return pdev_find(pbm->devhandle, bus, device, func) == NULL;
727 } 727 }
728 728
/* Read PCI config space through the sun4v hypervisor.
 *
 * Accesses to devices outside the PBM's range, or absent from the
 * OBP tree, return all-ones (master-abort semantics) without touching
 * the hypervisor.  Always returns PCIBIOS_SUCCESSFUL.
 */
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	/* Mask the result down to the requested access width. */
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};


	return PCIBIOS_SUCCESSFUL;
}
766 766
/* Write PCI config space through the sun4v hypervisor.
 *
 * Writes to devices outside the PBM's range, or absent from the OBP
 * tree, are silently dropped.  Always returns PCIBIOS_SUCCESSFUL.
 */
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}
791 791
/* Config space accessors handed to the generic PCI layer. */
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
796 796
797 797
/* Scan one PBM's PCI bus and run the post-scan fixup passes
 * (cookies, resource assignment, IRQ fixup, 66MHz, busmastering).
 *
 * NOTE(review): "cookie" is only consumed by the #if 0'd host-bridge
 * fixup below, so with that code disabled the allocation appears to
 * be leaked -- confirm whether it can be dropped.
 */
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}
825 824
/* Scan both PBMs of this controller, noting 66MHz capability from the
 * presence of the "66mhz-capable" device tree property.  A PBM with a
 * NULL prom_node is absent and skipped.
 */
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	struct property *prop;
	struct device_node *dp;

	if ((dp = p->pbm_A.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_A.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_A);
	}
	if ((dp = p->pbm_B.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_B.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
846 845
847 static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, 846 static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
848 struct pci_dev *pdev, 847 struct pci_dev *pdev,
849 unsigned int devino) 848 unsigned int devino)
850 { 849 {
851 u32 devhandle = pbm->devhandle; 850 u32 devhandle = pbm->devhandle;
852 851
853 return sun4v_build_irq(devhandle, devino); 852 return sun4v_build_irq(devhandle, devino);
854 } 853 }
855 854
/* Write a device's BAR (or expansion ROM base) register so it matches
 * the kernel's resource assignment, expressed relative to the PBM's
 * IO or MEM root resource.
 */
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		/* Standard BARs live at 4-byte strides from BAR0. */
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	/* Preserve the BAR's low flag bits (reg & size) and splice in
	 * the bus-relative base address.
	 */
	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}
901 900
902 static void pci_sun4v_resource_adjust(struct pci_dev *pdev, 901 static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
903 struct resource *res, 902 struct resource *res,
904 struct resource *root) 903 struct resource *root)
905 { 904 {
906 res->start += root->start; 905 res->start += root->start;
907 res->end += root->start; 906 res->end += root->start;
908 } 907 }
909 908
/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 *
 * Halts via the PROM if either the IO or the MEM range is missing,
 * since the PBM is unusable without both.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		/* Space code is in bits 25:24 of child_phys_hi; the
		 * parent address is a 64-bit value split hi/lo.
		 */
		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo  <<  0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}
964 963
/* Register the PBM's IO and MEM apertures with the generic resource
 * tree, then carve out the legacy (VGA/ISA) regions.
 *
 * NOTE(review): request_resource() return values are ignored here --
 * confirm a conflict at this stage is impossible or acceptable.
 */
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
975 974
/* Scan the hypervisor IOMMU TSB for mappings established before the
 * kernel took over (e.g. by firmware).  Mappings whose target real
 * address lies in ordinary available memory are torn down; the rest
 * are preserved and marked busy in the arena bitmap.
 *
 * Returns the number of preserved entries.
 */
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
1003 1002
1004 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) 1003 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
1005 { 1004 {
1006 struct pci_iommu *iommu = pbm->iommu; 1005 struct pci_iommu *iommu = pbm->iommu;
1007 struct property *prop; 1006 struct property *prop;
1008 unsigned long num_tsb_entries, sz; 1007 unsigned long num_tsb_entries, sz;
1009 u32 vdma[2], dma_mask, dma_offset; 1008 u32 vdma[2], dma_mask, dma_offset;
1010 int tsbsize; 1009 int tsbsize;
1011 1010
1012 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); 1011 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
1013 if (prop) { 1012 if (prop) {
1014 u32 *val = prop->value; 1013 u32 *val = prop->value;
1015 1014
1016 vdma[0] = val[0]; 1015 vdma[0] = val[0];
1017 vdma[1] = val[1]; 1016 vdma[1] = val[1];
1018 } else { 1017 } else {
1019 /* No property, use default values. */ 1018 /* No property, use default values. */
1020 vdma[0] = 0x80000000; 1019 vdma[0] = 0x80000000;
1021 vdma[1] = 0x80000000; 1020 vdma[1] = 0x80000000;
1022 } 1021 }
1023 1022
1024 dma_mask = vdma[0]; 1023 dma_mask = vdma[0];
1025 switch (vdma[1]) { 1024 switch (vdma[1]) {
1026 case 0x20000000: 1025 case 0x20000000:
1027 dma_mask |= 0x1fffffff; 1026 dma_mask |= 0x1fffffff;
1028 tsbsize = 64; 1027 tsbsize = 64;
1029 break; 1028 break;
1030 1029
1031 case 0x40000000: 1030 case 0x40000000:
1032 dma_mask |= 0x3fffffff; 1031 dma_mask |= 0x3fffffff;
1033 tsbsize = 128; 1032 tsbsize = 128;
1034 break; 1033 break;
1035 1034
1036 case 0x80000000: 1035 case 0x80000000:
1037 dma_mask |= 0x7fffffff; 1036 dma_mask |= 0x7fffffff;
1038 tsbsize = 256; 1037 tsbsize = 256;
1039 break; 1038 break;
1040 1039
1041 default: 1040 default:
1042 prom_printf("PCI-SUN4V: strange virtual-dma size.\n"); 1041 prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
1043 prom_halt(); 1042 prom_halt();
1044 }; 1043 };
1045 1044
1046 tsbsize *= (8 * 1024); 1045 tsbsize *= (8 * 1024);
1047 1046
1048 num_tsb_entries = tsbsize / sizeof(iopte_t); 1047 num_tsb_entries = tsbsize / sizeof(iopte_t);
1049 1048
1050 dma_offset = vdma[0]; 1049 dma_offset = vdma[0];
1051 1050
1052 /* Setup initial software IOMMU state. */ 1051 /* Setup initial software IOMMU state. */
1053 spin_lock_init(&iommu->lock); 1052 spin_lock_init(&iommu->lock);
1054 iommu->ctx_lowest_free = 1; 1053 iommu->ctx_lowest_free = 1;
1055 iommu->page_table_map_base = dma_offset; 1054 iommu->page_table_map_base = dma_offset;
1056 iommu->dma_addr_mask = dma_mask; 1055 iommu->dma_addr_mask = dma_mask;
1057 1056
1058 /* Allocate and initialize the free area map. */ 1057 /* Allocate and initialize the free area map. */
1059 sz = num_tsb_entries / 8; 1058 sz = num_tsb_entries / 8;
1060 sz = (sz + 7UL) & ~7UL; 1059 sz = (sz + 7UL) & ~7UL;
1061 iommu->arena.map = kmalloc(sz, GFP_KERNEL); 1060 iommu->arena.map = kmalloc(sz, GFP_KERNEL);
1062 if (!iommu->arena.map) { 1061 if (!iommu->arena.map) {
1063 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); 1062 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
1064 prom_halt(); 1063 prom_halt();
1065 } 1064 }
1066 memset(iommu->arena.map, 0, sz); 1065 memset(iommu->arena.map, 0, sz);
1067 iommu->arena.limit = num_tsb_entries; 1066 iommu->arena.limit = num_tsb_entries;
1068 1067
1069 sz = probe_existing_entries(pbm, iommu); 1068 sz = probe_existing_entries(pbm, iommu);
1070 if (sz) 1069 if (sz)
1071 printk("%s: Imported %lu TSB entries from OBP\n", 1070 printk("%s: Imported %lu TSB entries from OBP\n",
1072 pbm->name, sz); 1071 pbm->name, sz);
1073 } 1072 }
1074 1073
1075 static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) 1074 static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
1076 { 1075 {
1077 struct property *prop; 1076 struct property *prop;
1078 unsigned int *busrange; 1077 unsigned int *busrange;
1079 1078
1080 prop = of_find_property(pbm->prom_node, "bus-range", NULL); 1079 prop = of_find_property(pbm->prom_node, "bus-range", NULL);
1081 1080
1082 busrange = prop->value; 1081 busrange = prop->value;
1083 1082
1084 pbm->pci_first_busno = busrange[0]; 1083 pbm->pci_first_busno = busrange[0];
1085 pbm->pci_last_busno = busrange[1]; 1084 pbm->pci_last_busno = busrange[1];
1086 1085
1087 } 1086 }
1088 1087
1089 static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) 1088 static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
1090 { 1089 {
1091 struct pci_pbm_info *pbm; 1090 struct pci_pbm_info *pbm;
1092 struct property *prop; 1091 struct property *prop;
1093 int len, i; 1092 int len, i;
1094 1093
1095 if (devhandle & 0x40) 1094 if (devhandle & 0x40)
1096 pbm = &p->pbm_B; 1095 pbm = &p->pbm_B;
1097 else 1096 else
1098 pbm = &p->pbm_A; 1097 pbm = &p->pbm_A;
1099 1098
1100 pbm->parent = p; 1099 pbm->parent = p;
1101 pbm->prom_node = dp; 1100 pbm->prom_node = dp;
1102 pbm->pci_first_slot = 1; 1101 pbm->pci_first_slot = 1;
1103 1102
1104 pbm->devhandle = devhandle; 1103 pbm->devhandle = devhandle;
1105 1104
1106 pbm->name = dp->full_name; 1105 pbm->name = dp->full_name;
1107 1106
1108 printk("%s: SUN4V PCI Bus Module\n", pbm->name); 1107 printk("%s: SUN4V PCI Bus Module\n", pbm->name);
1109 1108
1110 prop = of_find_property(dp, "ranges", &len); 1109 prop = of_find_property(dp, "ranges", &len);
1111 pbm->pbm_ranges = prop->value; 1110 pbm->pbm_ranges = prop->value;
1112 pbm->num_pbm_ranges = 1111 pbm->num_pbm_ranges =
1113 (len / sizeof(struct linux_prom_pci_ranges)); 1112 (len / sizeof(struct linux_prom_pci_ranges));
1114 1113
1115 /* Mask out the top 8 bits of the ranges, leaving the real 1114 /* Mask out the top 8 bits of the ranges, leaving the real
1116 * physical address. 1115 * physical address.
1117 */ 1116 */
1118 for (i = 0; i < pbm->num_pbm_ranges; i++) 1117 for (i = 0; i < pbm->num_pbm_ranges; i++)
1119 pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff; 1118 pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;
1120 1119
1121 pci_sun4v_determine_mem_io_space(pbm); 1120 pci_sun4v_determine_mem_io_space(pbm);
1122 pbm_register_toplevel_resources(p, pbm); 1121 pbm_register_toplevel_resources(p, pbm);
1123 1122
1124 prop = of_find_property(dp, "interrupt-map", &len); 1123 prop = of_find_property(dp, "interrupt-map", &len);
1125 pbm->pbm_intmap = prop->value; 1124 pbm->pbm_intmap = prop->value;
1126 pbm->num_pbm_intmap = 1125 pbm->num_pbm_intmap =
1127 (len / sizeof(struct linux_prom_pci_intmap)); 1126 (len / sizeof(struct linux_prom_pci_intmap));
1128 1127
1129 prop = of_find_property(dp, "interrupt-map-mask", NULL); 1128 prop = of_find_property(dp, "interrupt-map-mask", NULL);
1130 pbm->pbm_intmask = prop->value; 1129 pbm->pbm_intmask = prop->value;
1131 1130
1132 pci_sun4v_get_bus_range(pbm); 1131 pci_sun4v_get_bus_range(pbm);
1133 pci_sun4v_iommu_init(pbm); 1132 pci_sun4v_iommu_init(pbm);
1134 1133
1135 pdev_htab_populate(pbm); 1134 pdev_htab_populate(pbm);
1136 } 1135 }
1137 1136
1138 void sun4v_pci_init(struct device_node *dp, char *model_name) 1137 void sun4v_pci_init(struct device_node *dp, char *model_name)
1139 { 1138 {
1140 struct pci_controller_info *p; 1139 struct pci_controller_info *p;
1141 struct pci_iommu *iommu; 1140 struct pci_iommu *iommu;
1142 struct property *prop; 1141 struct property *prop;
1143 struct linux_prom64_registers *regs; 1142 struct linux_prom64_registers *regs;
1144 u32 devhandle; 1143 u32 devhandle;
1145 int i; 1144 int i;
1146 1145
1147 prop = of_find_property(dp, "reg", NULL); 1146 prop = of_find_property(dp, "reg", NULL);
1148 regs = prop->value; 1147 regs = prop->value;
1149 1148
1150 devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff; 1149 devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1151 1150
1152 for (p = pci_controller_root; p; p = p->next) { 1151 for (p = pci_controller_root; p; p = p->next) {
1153 struct pci_pbm_info *pbm; 1152 struct pci_pbm_info *pbm;
1154 1153
1155 if (p->pbm_A.prom_node && p->pbm_B.prom_node) 1154 if (p->pbm_A.prom_node && p->pbm_B.prom_node)
1156 continue; 1155 continue;
1157 1156
1158 pbm = (p->pbm_A.prom_node ? 1157 pbm = (p->pbm_A.prom_node ?
1159 &p->pbm_A : 1158 &p->pbm_A :
1160 &p->pbm_B); 1159 &p->pbm_B);
1161 1160
1162 if (pbm->devhandle == (devhandle ^ 0x40)) { 1161 if (pbm->devhandle == (devhandle ^ 0x40)) {
1163 pci_sun4v_pbm_init(p, dp, devhandle); 1162 pci_sun4v_pbm_init(p, dp, devhandle);
1164 return; 1163 return;
1165 } 1164 }
1166 } 1165 }
1167 1166
1168 for_each_possible_cpu(i) { 1167 for_each_possible_cpu(i) {
1169 unsigned long page = get_zeroed_page(GFP_ATOMIC); 1168 unsigned long page = get_zeroed_page(GFP_ATOMIC);
1170 1169
1171 if (!page) 1170 if (!page)
1172 goto fatal_memory_error; 1171 goto fatal_memory_error;
1173 1172
1174 per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; 1173 per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
1175 } 1174 }
1176 1175
1177 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); 1176 p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1178 if (!p) 1177 if (!p)
1179 goto fatal_memory_error; 1178 goto fatal_memory_error;
1180 1179
1181 memset(p, 0, sizeof(*p)); 1180 memset(p, 0, sizeof(*p));
1182 1181
1183 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1182 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1184 if (!iommu) 1183 if (!iommu)
1185 goto fatal_memory_error; 1184 goto fatal_memory_error;
1186 1185
1187 memset(iommu, 0, sizeof(*iommu)); 1186 memset(iommu, 0, sizeof(*iommu));
1188 p->pbm_A.iommu = iommu; 1187 p->pbm_A.iommu = iommu;
1189 1188
1190 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); 1189 iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
1191 if (!iommu) 1190 if (!iommu)
1192 goto fatal_memory_error; 1191 goto fatal_memory_error;
1193 1192
1194 memset(iommu, 0, sizeof(*iommu)); 1193 memset(iommu, 0, sizeof(*iommu));
1195 p->pbm_B.iommu = iommu; 1194 p->pbm_B.iommu = iommu;
1196 1195
1197 p->next = pci_controller_root; 1196 p->next = pci_controller_root;
1198 pci_controller_root = p; 1197 pci_controller_root = p;
1199 1198
1200 p->index = pci_num_controllers++; 1199 p->index = pci_num_controllers++;
1201 p->pbms_same_domain = 0; 1200 p->pbms_same_domain = 0;
1202 1201
1203 p->scan_bus = pci_sun4v_scan_bus; 1202 p->scan_bus = pci_sun4v_scan_bus;
1204 p->irq_build = pci_sun4v_irq_build; 1203 p->irq_build = pci_sun4v_irq_build;
1205 p->base_address_update = pci_sun4v_base_address_update; 1204 p->base_address_update = pci_sun4v_base_address_update;
1206 p->resource_adjust = pci_sun4v_resource_adjust; 1205 p->resource_adjust = pci_sun4v_resource_adjust;
1207 p->pci_ops = &pci_sun4v_ops; 1206 p->pci_ops = &pci_sun4v_ops;
1208 1207
1209 /* Like PSYCHO and SCHIZO we have a 2GB aligned area 1208 /* Like PSYCHO and SCHIZO we have a 2GB aligned area
1210 * for memory space. 1209 * for memory space.
1211 */ 1210 */
1212 pci_memspace_mask = 0x7fffffffUL; 1211 pci_memspace_mask = 0x7fffffffUL;
1213 1212
1214 pci_sun4v_pbm_init(p, dp, devhandle); 1213 pci_sun4v_pbm_init(p, dp, devhandle);
1215 return; 1214 return;
1216 1215
1217 fatal_memory_error: 1216 fatal_memory_error:
1218 prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); 1217 prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
1219 prom_halt(); 1218 prom_halt();
1220 } 1219 }
1221 1220
arch/sparc64/kernel/prom.c
1 /* 1 /*
2 * Procedures for creating, accessing and interpreting the device tree. 2 * Procedures for creating, accessing and interpreting the device tree.
3 * 3 *
4 * Paul Mackerras August 1996. 4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras. 5 * Copyright (C) 1996-2005 Paul Mackerras.
6 * 6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. 7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com 8 * {engebret|bergner}@us.ibm.com
9 * 9 *
10 * Adapted for sparc64 by David S. Miller davem@davemloft.net 10 * Adapted for sparc64 by David S. Miller davem@davemloft.net
11 * 11 *
12 * This program is free software; you can redistribute it and/or 12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17 17
18 #include <linux/kernel.h> 18 #include <linux/kernel.h>
19 #include <linux/types.h> 19 #include <linux/types.h>
20 #include <linux/string.h> 20 #include <linux/string.h>
21 #include <linux/mm.h> 21 #include <linux/mm.h>
22 #include <linux/bootmem.h> 22 #include <linux/bootmem.h>
23 #include <linux/module.h>
23 24
24 #include <asm/prom.h> 25 #include <asm/prom.h>
25 #include <asm/oplib.h> 26 #include <asm/oplib.h>
26 27
27 static struct device_node *allnodes; 28 static struct device_node *allnodes;
28 29
29 struct device_node *of_get_parent(const struct device_node *node) 30 struct device_node *of_get_parent(const struct device_node *node)
30 { 31 {
31 struct device_node *np; 32 struct device_node *np;
32 33
33 if (!node) 34 if (!node)
34 return NULL; 35 return NULL;
35 36
36 np = node->parent; 37 np = node->parent;
37 38
38 return np; 39 return np;
39 } 40 }
40 41
41 struct device_node *of_get_next_child(const struct device_node *node, 42 struct device_node *of_get_next_child(const struct device_node *node,
42 struct device_node *prev) 43 struct device_node *prev)
43 { 44 {
44 struct device_node *next; 45 struct device_node *next;
45 46
46 next = prev ? prev->sibling : node->child; 47 next = prev ? prev->sibling : node->child;
47 for (; next != 0; next = next->sibling) { 48 for (; next != 0; next = next->sibling) {
48 break; 49 break;
49 } 50 }
50 51
51 return next; 52 return next;
52 } 53 }
53 54
54 struct device_node *of_find_node_by_path(const char *path) 55 struct device_node *of_find_node_by_path(const char *path)
55 { 56 {
56 struct device_node *np = allnodes; 57 struct device_node *np = allnodes;
57 58
58 for (; np != 0; np = np->allnext) { 59 for (; np != 0; np = np->allnext) {
59 if (np->full_name != 0 && strcmp(np->full_name, path) == 0) 60 if (np->full_name != 0 && strcmp(np->full_name, path) == 0)
60 break; 61 break;
61 } 62 }
62 63
63 return np; 64 return np;
64 } 65 }
65 66
67 struct device_node *of_find_node_by_phandle(phandle handle)
68 {
69 struct device_node *np;
70
71 for (np = allnodes; np != 0; np = np->allnext)
72 if (np->node == handle)
73 break;
74
75 return np;
76 }
77
66 struct device_node *of_find_node_by_name(struct device_node *from, 78 struct device_node *of_find_node_by_name(struct device_node *from,
67 const char *name) 79 const char *name)
68 { 80 {
69 struct device_node *np; 81 struct device_node *np;
70 82
71 np = from ? from->allnext : allnodes; 83 np = from ? from->allnext : allnodes;
72 for (; np != NULL; np = np->allnext) 84 for (; np != NULL; np = np->allnext)
73 if (np->name != NULL && strcmp(np->name, name) == 0) 85 if (np->name != NULL && strcmp(np->name, name) == 0)
74 break; 86 break;
75 87
76 return np; 88 return np;
77 } 89 }
78 90
79 struct device_node *of_find_node_by_type(struct device_node *from, 91 struct device_node *of_find_node_by_type(struct device_node *from,
80 const char *type) 92 const char *type)
81 { 93 {
82 struct device_node *np; 94 struct device_node *np;
83 95
84 np = from ? from->allnext : allnodes; 96 np = from ? from->allnext : allnodes;
85 for (; np != 0; np = np->allnext) 97 for (; np != 0; np = np->allnext)
86 if (np->type != 0 && strcmp(np->type, type) == 0) 98 if (np->type != 0 && strcmp(np->type, type) == 0)
87 break; 99 break;
88 100
89 return np; 101 return np;
90 } 102 }
91 103
92 struct property *of_find_property(struct device_node *np, const char *name, 104 struct property *of_find_property(struct device_node *np, const char *name,
93 int *lenp) 105 int *lenp)
94 { 106 {
95 struct property *pp; 107 struct property *pp;
96 108
97 for (pp = np->properties; pp != 0; pp = pp->next) { 109 for (pp = np->properties; pp != 0; pp = pp->next) {
98 if (strcmp(pp->name, name) == 0) { 110 if (strcmp(pp->name, name) == 0) {
99 if (lenp != 0) 111 if (lenp != 0)
100 *lenp = pp->length; 112 *lenp = pp->length;
101 break; 113 break;
102 } 114 }
103 } 115 }
104 return pp; 116 return pp;
105 } 117 }
118 EXPORT_SYMBOL(of_find_property);
106 119
120 /*
121 * Find a property with a given name for a given node
122 * and return the value.
123 */
124 void *of_get_property(struct device_node *np, const char *name, int *lenp)
125 {
126 struct property *pp = of_find_property(np,name,lenp);
127 return pp ? pp->value : NULL;
128 }
129 EXPORT_SYMBOL(of_get_property);
130
107 int of_getintprop_default(struct device_node *np, const char *name, int def) 131 int of_getintprop_default(struct device_node *np, const char *name, int def)
108 { 132 {
109 struct property *prop; 133 struct property *prop;
110 int len; 134 int len;
111 135
112 prop = of_find_property(np, name, &len); 136 prop = of_find_property(np, name, &len);
113 if (!prop || len != 4) 137 if (!prop || len != 4)
114 return def; 138 return def;
115 139
116 return *(int *) prop->value; 140 return *(int *) prop->value;
117 } 141 }
142 EXPORT_SYMBOL(of_getintprop_default);
118 143
119 static unsigned int prom_early_allocated; 144 static unsigned int prom_early_allocated;
120 145
121 static void * __init prom_early_alloc(unsigned long size) 146 static void * __init prom_early_alloc(unsigned long size)
122 { 147 {
123 void *ret; 148 void *ret;
124 149
125 ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL); 150 ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
126 if (ret != NULL) 151 if (ret != NULL)
127 memset(ret, 0, size); 152 memset(ret, 0, size);
128 153
129 prom_early_allocated += size; 154 prom_early_allocated += size;
130 155
131 return ret; 156 return ret;
132 } 157 }
133 158
134 static int is_root_node(const struct device_node *dp) 159 static int is_root_node(const struct device_node *dp)
135 { 160 {
136 if (!dp) 161 if (!dp)
137 return 0; 162 return 0;
138 163
139 return (dp->parent == NULL); 164 return (dp->parent == NULL);
140 } 165 }
141 166
142 /* The following routines deal with the black magic of fully naming a 167 /* The following routines deal with the black magic of fully naming a
143 * node. 168 * node.
144 * 169 *
145 * Certain well known named nodes are just the simple name string. 170 * Certain well known named nodes are just the simple name string.
146 * 171 *
147 * Actual devices have an address specifier appended to the base name 172 * Actual devices have an address specifier appended to the base name
148 * string, like this "foo@addr". The "addr" can be in any number of 173 * string, like this "foo@addr". The "addr" can be in any number of
149 * formats, and the platform plus the type of the node determine the 174 * formats, and the platform plus the type of the node determine the
150 * format and how it is constructed. 175 * format and how it is constructed.
151 * 176 *
152 * For children of the ROOT node, the naming convention is fixed and 177 * For children of the ROOT node, the naming convention is fixed and
153 * determined by whether this is a sun4u or sun4v system. 178 * determined by whether this is a sun4u or sun4v system.
154 * 179 *
155 * For children of other nodes, it is bus type specific. So 180 * For children of other nodes, it is bus type specific. So
156 * we walk up the tree until we discover a "device_type" property 181 * we walk up the tree until we discover a "device_type" property
157 * we recognize and we go from there. 182 * we recognize and we go from there.
158 * 183 *
159 * As an example, the boot device on my workstation has a full path: 184 * As an example, the boot device on my workstation has a full path:
160 * 185 *
161 * /pci@1e,600000/ide@d/disk@0,0:c 186 * /pci@1e,600000/ide@d/disk@0,0:c
162 */ 187 */
163 static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) 188 static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
164 { 189 {
165 struct linux_prom64_registers *regs; 190 struct linux_prom64_registers *regs;
166 struct property *rprop; 191 struct property *rprop;
167 u32 high_bits, low_bits, type; 192 u32 high_bits, low_bits, type;
168 193
169 rprop = of_find_property(dp, "reg", NULL); 194 rprop = of_find_property(dp, "reg", NULL);
170 if (!rprop) 195 if (!rprop)
171 return; 196 return;
172 197
173 regs = rprop->value; 198 regs = rprop->value;
174 if (!is_root_node(dp->parent)) { 199 if (!is_root_node(dp->parent)) {
175 sprintf(tmp_buf, "%s@%x,%x", 200 sprintf(tmp_buf, "%s@%x,%x",
176 dp->name, 201 dp->name,
177 (unsigned int) (regs->phys_addr >> 32UL), 202 (unsigned int) (regs->phys_addr >> 32UL),
178 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 203 (unsigned int) (regs->phys_addr & 0xffffffffUL));
179 return; 204 return;
180 } 205 }
181 206
182 type = regs->phys_addr >> 60UL; 207 type = regs->phys_addr >> 60UL;
183 high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL; 208 high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL;
184 low_bits = (regs->phys_addr & 0xffffffffUL); 209 low_bits = (regs->phys_addr & 0xffffffffUL);
185 210
186 if (type == 0 || type == 8) { 211 if (type == 0 || type == 8) {
187 const char *prefix = (type == 0) ? "m" : "i"; 212 const char *prefix = (type == 0) ? "m" : "i";
188 213
189 if (low_bits) 214 if (low_bits)
190 sprintf(tmp_buf, "%s@%s%x,%x", 215 sprintf(tmp_buf, "%s@%s%x,%x",
191 dp->name, prefix, 216 dp->name, prefix,
192 high_bits, low_bits); 217 high_bits, low_bits);
193 else 218 else
194 sprintf(tmp_buf, "%s@%s%x", 219 sprintf(tmp_buf, "%s@%s%x",
195 dp->name, 220 dp->name,
196 prefix, 221 prefix,
197 high_bits); 222 high_bits);
198 } else if (type == 12) { 223 } else if (type == 12) {
199 sprintf(tmp_buf, "%s@%x", 224 sprintf(tmp_buf, "%s@%x",
200 dp->name, high_bits); 225 dp->name, high_bits);
201 } 226 }
202 } 227 }
203 228
204 static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) 229 static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
205 { 230 {
206 struct linux_prom64_registers *regs; 231 struct linux_prom64_registers *regs;
207 struct property *prop; 232 struct property *prop;
208 233
209 prop = of_find_property(dp, "reg", NULL); 234 prop = of_find_property(dp, "reg", NULL);
210 if (!prop) 235 if (!prop)
211 return; 236 return;
212 237
213 regs = prop->value; 238 regs = prop->value;
214 if (!is_root_node(dp->parent)) { 239 if (!is_root_node(dp->parent)) {
215 sprintf(tmp_buf, "%s@%x,%x", 240 sprintf(tmp_buf, "%s@%x,%x",
216 dp->name, 241 dp->name,
217 (unsigned int) (regs->phys_addr >> 32UL), 242 (unsigned int) (regs->phys_addr >> 32UL),
218 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 243 (unsigned int) (regs->phys_addr & 0xffffffffUL));
219 return; 244 return;
220 } 245 }
221 246
222 prop = of_find_property(dp, "upa-portid", NULL); 247 prop = of_find_property(dp, "upa-portid", NULL);
223 if (!prop) 248 if (!prop)
224 prop = of_find_property(dp, "portid", NULL); 249 prop = of_find_property(dp, "portid", NULL);
225 if (prop) { 250 if (prop) {
226 unsigned long mask = 0xffffffffUL; 251 unsigned long mask = 0xffffffffUL;
227 252
228 if (tlb_type >= cheetah) 253 if (tlb_type >= cheetah)
229 mask = 0x7fffff; 254 mask = 0x7fffff;
230 255
231 sprintf(tmp_buf, "%s@%x,%x", 256 sprintf(tmp_buf, "%s@%x,%x",
232 dp->name, 257 dp->name,
233 *(u32 *)prop->value, 258 *(u32 *)prop->value,
234 (unsigned int) (regs->phys_addr & mask)); 259 (unsigned int) (regs->phys_addr & mask));
235 } 260 }
236 } 261 }
237 262
238 /* "name@slot,offset" */ 263 /* "name@slot,offset" */
239 static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) 264 static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
240 { 265 {
241 struct linux_prom_registers *regs; 266 struct linux_prom_registers *regs;
242 struct property *prop; 267 struct property *prop;
243 268
244 prop = of_find_property(dp, "reg", NULL); 269 prop = of_find_property(dp, "reg", NULL);
245 if (!prop) 270 if (!prop)
246 return; 271 return;
247 272
248 regs = prop->value; 273 regs = prop->value;
249 sprintf(tmp_buf, "%s@%x,%x", 274 sprintf(tmp_buf, "%s@%x,%x",
250 dp->name, 275 dp->name,
251 regs->which_io, 276 regs->which_io,
252 regs->phys_addr); 277 regs->phys_addr);
253 } 278 }
254 279
255 /* "name@devnum[,func]" */ 280 /* "name@devnum[,func]" */
256 static void __init pci_path_component(struct device_node *dp, char *tmp_buf) 281 static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
257 { 282 {
258 struct linux_prom_pci_registers *regs; 283 struct linux_prom_pci_registers *regs;
259 struct property *prop; 284 struct property *prop;
260 unsigned int devfn; 285 unsigned int devfn;
261 286
262 prop = of_find_property(dp, "reg", NULL); 287 prop = of_find_property(dp, "reg", NULL);
263 if (!prop) 288 if (!prop)
264 return; 289 return;
265 290
266 regs = prop->value; 291 regs = prop->value;
267 devfn = (regs->phys_hi >> 8) & 0xff; 292 devfn = (regs->phys_hi >> 8) & 0xff;
268 if (devfn & 0x07) { 293 if (devfn & 0x07) {
269 sprintf(tmp_buf, "%s@%x,%x", 294 sprintf(tmp_buf, "%s@%x,%x",
270 dp->name, 295 dp->name,
271 devfn >> 3, 296 devfn >> 3,
272 devfn & 0x07); 297 devfn & 0x07);
273 } else { 298 } else {
274 sprintf(tmp_buf, "%s@%x", 299 sprintf(tmp_buf, "%s@%x",
275 dp->name, 300 dp->name,
276 devfn >> 3); 301 devfn >> 3);
277 } 302 }
278 } 303 }
279 304
280 /* "name@UPA_PORTID,offset" */ 305 /* "name@UPA_PORTID,offset" */
281 static void __init upa_path_component(struct device_node *dp, char *tmp_buf) 306 static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
282 { 307 {
283 struct linux_prom64_registers *regs; 308 struct linux_prom64_registers *regs;
284 struct property *prop; 309 struct property *prop;
285 310
286 prop = of_find_property(dp, "reg", NULL); 311 prop = of_find_property(dp, "reg", NULL);
287 if (!prop) 312 if (!prop)
288 return; 313 return;
289 314
290 regs = prop->value; 315 regs = prop->value;
291 316
292 prop = of_find_property(dp, "upa-portid", NULL); 317 prop = of_find_property(dp, "upa-portid", NULL);
293 if (!prop) 318 if (!prop)
294 return; 319 return;
295 320
296 sprintf(tmp_buf, "%s@%x,%x", 321 sprintf(tmp_buf, "%s@%x,%x",
297 dp->name, 322 dp->name,
298 *(u32 *) prop->value, 323 *(u32 *) prop->value,
299 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 324 (unsigned int) (regs->phys_addr & 0xffffffffUL));
300 } 325 }
301 326
302 /* "name@reg" */ 327 /* "name@reg" */
303 static void __init vdev_path_component(struct device_node *dp, char *tmp_buf) 328 static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
304 { 329 {
305 struct property *prop; 330 struct property *prop;
306 u32 *regs; 331 u32 *regs;
307 332
308 prop = of_find_property(dp, "reg", NULL); 333 prop = of_find_property(dp, "reg", NULL);
309 if (!prop) 334 if (!prop)
310 return; 335 return;
311 336
312 regs = prop->value; 337 regs = prop->value;
313 338
314 sprintf(tmp_buf, "%s@%x", dp->name, *regs); 339 sprintf(tmp_buf, "%s@%x", dp->name, *regs);
315 } 340 }
316 341
317 /* "name@addrhi,addrlo" */ 342 /* "name@addrhi,addrlo" */
318 static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) 343 static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
319 { 344 {
320 struct linux_prom64_registers *regs; 345 struct linux_prom64_registers *regs;
321 struct property *prop; 346 struct property *prop;
322 347
323 prop = of_find_property(dp, "reg", NULL); 348 prop = of_find_property(dp, "reg", NULL);
324 if (!prop) 349 if (!prop)
325 return; 350 return;
326 351
327 regs = prop->value; 352 regs = prop->value;
328 353
329 sprintf(tmp_buf, "%s@%x,%x", 354 sprintf(tmp_buf, "%s@%x,%x",
330 dp->name, 355 dp->name,
331 (unsigned int) (regs->phys_addr >> 32UL), 356 (unsigned int) (regs->phys_addr >> 32UL),
332 (unsigned int) (regs->phys_addr & 0xffffffffUL)); 357 (unsigned int) (regs->phys_addr & 0xffffffffUL));
333 } 358 }
334 359
335 /* "name@bus,addr" */ 360 /* "name@bus,addr" */
336 static void __init i2c_path_component(struct device_node *dp, char *tmp_buf) 361 static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
337 { 362 {
338 struct property *prop; 363 struct property *prop;
339 u32 *regs; 364 u32 *regs;
340 365
341 prop = of_find_property(dp, "reg", NULL); 366 prop = of_find_property(dp, "reg", NULL);
342 if (!prop) 367 if (!prop)
343 return; 368 return;
344 369
345 regs = prop->value; 370 regs = prop->value;
346 371
347 /* This actually isn't right... should look at the #address-cells 372 /* This actually isn't right... should look at the #address-cells
348 * property of the i2c bus node etc. etc. 373 * property of the i2c bus node etc. etc.
349 */ 374 */
350 sprintf(tmp_buf, "%s@%x,%x", 375 sprintf(tmp_buf, "%s@%x,%x",
351 dp->name, regs[0], regs[1]); 376 dp->name, regs[0], regs[1]);
352 } 377 }
353 378
354 /* "name@reg0[,reg1]" */ 379 /* "name@reg0[,reg1]" */
355 static void __init usb_path_component(struct device_node *dp, char *tmp_buf) 380 static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
356 { 381 {
357 struct property *prop; 382 struct property *prop;
358 u32 *regs; 383 u32 *regs;
359 384
360 prop = of_find_property(dp, "reg", NULL); 385 prop = of_find_property(dp, "reg", NULL);
361 if (!prop) 386 if (!prop)
362 return; 387 return;
363 388
364 regs = prop->value; 389 regs = prop->value;
365 390
366 if (prop->length == sizeof(u32) || regs[1] == 1) { 391 if (prop->length == sizeof(u32) || regs[1] == 1) {
367 sprintf(tmp_buf, "%s@%x", 392 sprintf(tmp_buf, "%s@%x",
368 dp->name, regs[0]); 393 dp->name, regs[0]);
369 } else { 394 } else {
370 sprintf(tmp_buf, "%s@%x,%x", 395 sprintf(tmp_buf, "%s@%x,%x",
371 dp->name, regs[0], regs[1]); 396 dp->name, regs[0], regs[1]);
372 } 397 }
373 } 398 }
374 399
375 /* "name@reg0reg1[,reg2reg3]" */ 400 /* "name@reg0reg1[,reg2reg3]" */
376 static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf) 401 static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
377 { 402 {
378 struct property *prop; 403 struct property *prop;
379 u32 *regs; 404 u32 *regs;
380 405
381 prop = of_find_property(dp, "reg", NULL); 406 prop = of_find_property(dp, "reg", NULL);
382 if (!prop) 407 if (!prop)
383 return; 408 return;
384 409
385 regs = prop->value; 410 regs = prop->value;
386 411
387 if (regs[2] || regs[3]) { 412 if (regs[2] || regs[3]) {
388 sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", 413 sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
389 dp->name, regs[0], regs[1], regs[2], regs[3]); 414 dp->name, regs[0], regs[1], regs[2], regs[3]);
390 } else { 415 } else {
391 sprintf(tmp_buf, "%s@%08x%08x", 416 sprintf(tmp_buf, "%s@%08x%08x",
392 dp->name, regs[0], regs[1]); 417 dp->name, regs[0], regs[1]);
393 } 418 }
394 } 419 }
395 420
396 static void __init __build_path_component(struct device_node *dp, char *tmp_buf) 421 static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
397 { 422 {
398 struct device_node *parent = dp->parent; 423 struct device_node *parent = dp->parent;
399 424
400 if (parent != NULL) { 425 if (parent != NULL) {
401 if (!strcmp(parent->type, "pci") || 426 if (!strcmp(parent->type, "pci") ||
402 !strcmp(parent->type, "pciex")) 427 !strcmp(parent->type, "pciex"))
403 return pci_path_component(dp, tmp_buf); 428 return pci_path_component(dp, tmp_buf);
404 if (!strcmp(parent->type, "sbus")) 429 if (!strcmp(parent->type, "sbus"))
405 return sbus_path_component(dp, tmp_buf); 430 return sbus_path_component(dp, tmp_buf);
406 if (!strcmp(parent->type, "upa")) 431 if (!strcmp(parent->type, "upa"))
407 return upa_path_component(dp, tmp_buf); 432 return upa_path_component(dp, tmp_buf);
408 if (!strcmp(parent->type, "ebus")) 433 if (!strcmp(parent->type, "ebus"))
409 return ebus_path_component(dp, tmp_buf); 434 return ebus_path_component(dp, tmp_buf);
410 if (!strcmp(parent->name, "usb") || 435 if (!strcmp(parent->name, "usb") ||
411 !strcmp(parent->name, "hub")) 436 !strcmp(parent->name, "hub"))
412 return usb_path_component(dp, tmp_buf); 437 return usb_path_component(dp, tmp_buf);
413 if (!strcmp(parent->type, "i2c")) 438 if (!strcmp(parent->type, "i2c"))
414 return i2c_path_component(dp, tmp_buf); 439 return i2c_path_component(dp, tmp_buf);
415 if (!strcmp(parent->type, "firewire")) 440 if (!strcmp(parent->type, "firewire"))
416 return ieee1394_path_component(dp, tmp_buf); 441 return ieee1394_path_component(dp, tmp_buf);
417 if (!strcmp(parent->type, "virtual-devices")) 442 if (!strcmp(parent->type, "virtual-devices"))
418 return vdev_path_component(dp, tmp_buf); 443 return vdev_path_component(dp, tmp_buf);
419 444
420 /* "isa" is handled with platform naming */ 445 /* "isa" is handled with platform naming */
421 } 446 }
422 447
423 /* Use platform naming convention. */ 448 /* Use platform naming convention. */
424 if (tlb_type == hypervisor) 449 if (tlb_type == hypervisor)
425 return sun4v_path_component(dp, tmp_buf); 450 return sun4v_path_component(dp, tmp_buf);
426 else 451 else
427 return sun4u_path_component(dp, tmp_buf); 452 return sun4u_path_component(dp, tmp_buf);
428 } 453 }
429 454
/* Return a prom_early_alloc'd copy of dp's path component string
 * (typically "name@unit-address").  Falls back to the bare node name
 * when no bus-specific formatter produced anything.
 */
static char * __init build_path_component(struct device_node *dp)
{
	/* NOTE(review): fixed 64-byte scratch buffer with no bounds
	 * checking in the sprintf-based formatters; assumes no path
	 * component ever exceeds 63 characters -- TODO confirm.
	 */
	char tmp_buf[64], *n;

	tmp_buf[0] = '\0';
	__build_path_component(dp, tmp_buf);
	/* Empty result means no formatter matched; use the raw name. */
	if (tmp_buf[0] == '\0')
		strcpy(tmp_buf, dp->name);

	n = prom_early_alloc(strlen(tmp_buf) + 1);
	strcpy(n, tmp_buf);

	return n;
}
444 469
445 static char * __init build_full_name(struct device_node *dp) 470 static char * __init build_full_name(struct device_node *dp)
446 { 471 {
447 int len, ourlen, plen; 472 int len, ourlen, plen;
448 char *n; 473 char *n;
449 474
450 plen = strlen(dp->parent->full_name); 475 plen = strlen(dp->parent->full_name);
451 ourlen = strlen(dp->path_component_name); 476 ourlen = strlen(dp->path_component_name);
452 len = ourlen + plen + 2; 477 len = ourlen + plen + 2;
453 478
454 n = prom_early_alloc(len); 479 n = prom_early_alloc(len);
455 strcpy(n, dp->parent->full_name); 480 strcpy(n, dp->parent->full_name);
456 if (!is_root_node(dp->parent)) { 481 if (!is_root_node(dp->parent)) {
457 strcpy(n + plen, "/"); 482 strcpy(n + plen, "/");
458 plen++; 483 plen++;
459 } 484 }
460 strcpy(n + plen, dp->path_component_name); 485 strcpy(n + plen, dp->path_component_name);
461 486
462 return n; 487 return n;
463 } 488 }
464 489
/* Fetch the property following @prev on @node (or the first property
 * when @prev is NULL) into a newly-built struct property.
 *
 * Returns NULL when the property list is exhausted.  The allocation
 * that discovered the end of a list is cached in the static @tmp and
 * reused on the next call, so the sentinel probe never leaks memory.
 */
static struct property * __init build_one_prop(phandle node, char *prev)
{
	static struct property *tmp = NULL;
	struct property *p;

	if (tmp) {
		/* Recycle the allocation left over from the previous
		 * end-of-list probe.
		 */
		p = tmp;
		memset(p, 0, sizeof(*p) + 32);
		tmp = NULL;
	} else
		p = prom_early_alloc(sizeof(struct property) + 32);

	/* The name string lives in the 32 bytes allocated directly
	 * past the structure.  NOTE(review): assumes PROM property
	 * names fit in 32 bytes including the NUL -- TODO confirm.
	 */
	p->name = (char *) (p + 1);
	if (prev == NULL) {
		prom_firstprop(node, p->name);
	} else {
		prom_nextprop(node, prev, p->name);
	}
	/* An empty name means there are no more properties. */
	if (strlen(p->name) == 0) {
		tmp = p;
		return NULL;
	}
	p->length = prom_getproplen(node, p->name);
	if (p->length <= 0) {
		/* Property exists but carries no value. */
		p->length = 0;
	} else {
		p->value = prom_early_alloc(p->length);
		prom_getproperty(node, p->name, p->value, p->length);
	}
	return p;
}
496 521
497 static struct property * __init build_prop_list(phandle node) 522 static struct property * __init build_prop_list(phandle node)
498 { 523 {
499 struct property *head, *tail; 524 struct property *head, *tail;
500 525
501 head = tail = build_one_prop(node, NULL); 526 head = tail = build_one_prop(node, NULL);
502 while(tail) { 527 while(tail) {
503 tail->next = build_one_prop(node, tail->name); 528 tail->next = build_one_prop(node, tail->name);
504 tail = tail->next; 529 tail = tail->next;
505 } 530 }
506 531
507 return head; 532 return head;
508 } 533 }
509 534
510 static char * __init get_one_property(phandle node, const char *name) 535 static char * __init get_one_property(phandle node, const char *name)
511 { 536 {
512 char *buf = "<NULL>"; 537 char *buf = "<NULL>";
513 int len; 538 int len;
514 539
515 len = prom_getproplen(node, name); 540 len = prom_getproplen(node, name);
516 if (len > 0) { 541 if (len > 0) {
517 buf = prom_early_alloc(len); 542 buf = prom_early_alloc(len);
518 prom_getproperty(node, name, buf, len); 543 prom_getproperty(node, name, buf, len);
519 } 544 }
520 545
521 return buf; 546 return buf;
522 } 547 }
523 548
524 static struct device_node * __init create_node(phandle node) 549 static struct device_node * __init create_node(phandle node)
525 { 550 {
526 struct device_node *dp; 551 struct device_node *dp;
527 552
528 if (!node) 553 if (!node)
529 return NULL; 554 return NULL;
530 555
531 dp = prom_early_alloc(sizeof(*dp)); 556 dp = prom_early_alloc(sizeof(*dp));
532 557
533 kref_init(&dp->kref); 558 kref_init(&dp->kref);
534 559
535 dp->name = get_one_property(node, "name"); 560 dp->name = get_one_property(node, "name");
536 dp->type = get_one_property(node, "device_type"); 561 dp->type = get_one_property(node, "device_type");
537 dp->node = node; 562 dp->node = node;
538 563
539 /* Build interrupts later... */ 564 /* Build interrupts later... */
540 565
541 dp->properties = build_prop_list(node); 566 dp->properties = build_prop_list(node);
542 567
543 return dp; 568 return dp;
544 } 569 }
545 570
/* Recursively mirror the PROM subtree rooted at @node beneath
 * @parent.
 *
 * @nextp threads every created node onto the global 'allnodes' list
 * in depth-first order: it points at the location where the next
 * node's address must be stored, and is advanced to that node's
 * 'allnext' field after each creation.
 *
 * Returns the device_node built for @node, or NULL for phandle 0
 * (which also terminates the child/sibling recursion).
 */
static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
{
	struct device_node *dp;

	dp = create_node(node);
	if (dp) {
		/* Link onto the flat allnodes list. */
		*(*nextp) = dp;
		*nextp = &dp->allnext;

		dp->parent = parent;
		dp->path_component_name = build_path_component(dp);
		dp->full_name = build_full_name(dp);

		/* Children hang off this node... */
		dp->child = build_tree(dp, prom_getchild(node), nextp);

		/* ...while siblings share our parent. */
		dp->sibling = build_tree(parent, prom_getsibling(node), nextp);
	}

	return dp;
}
566 591
/* Build the in-kernel mirror of the OBP device tree, rooted at the
 * global 'allnodes'.  Runs early in boot; all memory comes from
 * prom_early_alloc, and the total is reported at the end.
 */
void __init prom_build_devicetree(void)
{
	struct device_node **nextp;

	allnodes = create_node(prom_root_node);
	/* The root node has no path component; its full name is "/". */
	allnodes->path_component_name = "";
	allnodes->full_name = "/";

	nextp = &allnodes->allnext;
	allnodes->child = build_tree(allnodes,
				     prom_getchild(allnodes->node),
				     &nextp);
	printk("PROM: Built device tree with %u bytes of memory.\n",
	       prom_early_allocated);
}
582 607
drivers/net/sungem.c
1 /* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ 1 /* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
2 * sungem.c: Sun GEM ethernet driver. 2 * sungem.c: Sun GEM ethernet driver.
3 * 3 *
4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5 * 5 *
6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management 6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
7 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) 7 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org)
8 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. 8 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
9 * 9 *
10 * NAPI and NETPOLL support 10 * NAPI and NETPOLL support
11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) 11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
12 * 12 *
13 * TODO: 13 * TODO:
14 * - Now that the driver was significantly simplified, I need to rework 14 * - Now that the driver was significantly simplified, I need to rework
15 * the locking. I'm sure we don't need _2_ spinlocks, and we probably 15 * the locking. I'm sure we don't need _2_ spinlocks, and we probably
16 * can avoid taking most of them for so long period of time (and schedule 16 * can avoid taking most of them for so long period of time (and schedule
17 * instead). The main issues at this point are caused by the netdev layer 17 * instead). The main issues at this point are caused by the netdev layer
18 * though: 18 * though:
19 * 19 *
20 * gem_change_mtu() and gem_set_multicast() are called with a read_lock() 20 * gem_change_mtu() and gem_set_multicast() are called with a read_lock()
21 * help by net/core/dev.c, thus they can't schedule. That means they can't 21 * help by net/core/dev.c, thus they can't schedule. That means they can't
22 * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock 22 * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock
23 * where it could have been dropped. change_mtu especially would love also to 23 * where it could have been dropped. change_mtu especially would love also to
24 * be able to msleep instead of horrid locked delays when resetting the HW, 24 * be able to msleep instead of horrid locked delays when resetting the HW,
25 * but that read_lock() makes it impossible, unless I defer it's action to 25 * but that read_lock() makes it impossible, unless I defer it's action to
26 * the reset task, which means it'll be asynchronous (won't take effect until 26 * the reset task, which means it'll be asynchronous (won't take effect until
27 * the system schedules a bit). 27 * the system schedules a bit).
28 * 28 *
29 * Also, it would probably be possible to also remove most of the long-life 29 * Also, it would probably be possible to also remove most of the long-life
30 * locking in open/resume code path (gem_reinit_chip) by beeing more careful 30 * locking in open/resume code path (gem_reinit_chip) by beeing more careful
31 * about when we can start taking interrupts or get xmit() called... 31 * about when we can start taking interrupts or get xmit() called...
32 */ 32 */
33 33
34 #include <linux/module.h> 34 #include <linux/module.h>
35 #include <linux/kernel.h> 35 #include <linux/kernel.h>
36 #include <linux/types.h> 36 #include <linux/types.h>
37 #include <linux/fcntl.h> 37 #include <linux/fcntl.h>
38 #include <linux/interrupt.h> 38 #include <linux/interrupt.h>
39 #include <linux/ioport.h> 39 #include <linux/ioport.h>
40 #include <linux/in.h> 40 #include <linux/in.h>
41 #include <linux/slab.h> 41 #include <linux/slab.h>
42 #include <linux/string.h> 42 #include <linux/string.h>
43 #include <linux/delay.h> 43 #include <linux/delay.h>
44 #include <linux/init.h> 44 #include <linux/init.h>
45 #include <linux/errno.h> 45 #include <linux/errno.h>
46 #include <linux/pci.h> 46 #include <linux/pci.h>
47 #include <linux/dma-mapping.h> 47 #include <linux/dma-mapping.h>
48 #include <linux/netdevice.h> 48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h> 49 #include <linux/etherdevice.h>
50 #include <linux/skbuff.h> 50 #include <linux/skbuff.h>
51 #include <linux/mii.h> 51 #include <linux/mii.h>
52 #include <linux/ethtool.h> 52 #include <linux/ethtool.h>
53 #include <linux/crc32.h> 53 #include <linux/crc32.h>
54 #include <linux/random.h> 54 #include <linux/random.h>
55 #include <linux/workqueue.h> 55 #include <linux/workqueue.h>
56 #include <linux/if_vlan.h> 56 #include <linux/if_vlan.h>
57 #include <linux/bitops.h> 57 #include <linux/bitops.h>
58 #include <linux/mutex.h> 58 #include <linux/mutex.h>
59 59
60 #include <asm/system.h> 60 #include <asm/system.h>
61 #include <asm/io.h> 61 #include <asm/io.h>
62 #include <asm/byteorder.h> 62 #include <asm/byteorder.h>
63 #include <asm/uaccess.h> 63 #include <asm/uaccess.h>
64 #include <asm/irq.h> 64 #include <asm/irq.h>
65 65
66 #ifdef __sparc__ 66 #ifdef __sparc__
67 #include <asm/idprom.h> 67 #include <asm/idprom.h>
68 #include <asm/openprom.h> 68 #include <asm/openprom.h>
69 #include <asm/oplib.h> 69 #include <asm/oplib.h>
70 #include <asm/pbm.h> 70 #include <asm/pbm.h>
71 #endif 71 #endif
72 72
73 #ifdef CONFIG_PPC_PMAC 73 #ifdef CONFIG_PPC_PMAC
74 #include <asm/pci-bridge.h> 74 #include <asm/pci-bridge.h>
75 #include <asm/prom.h> 75 #include <asm/prom.h>
76 #include <asm/machdep.h> 76 #include <asm/machdep.h>
77 #include <asm/pmac_feature.h> 77 #include <asm/pmac_feature.h>
78 #endif 78 #endif
79 79
80 #include "sungem_phy.h" 80 #include "sungem_phy.h"
81 #include "sungem.h" 81 #include "sungem.h"
82 82
83 /* Stripping FCS is causing problems, disabled for now */ 83 /* Stripping FCS is causing problems, disabled for now */
84 #undef STRIP_FCS 84 #undef STRIP_FCS
85 85
86 #define DEFAULT_MSG (NETIF_MSG_DRV | \ 86 #define DEFAULT_MSG (NETIF_MSG_DRV | \
87 NETIF_MSG_PROBE | \ 87 NETIF_MSG_PROBE | \
88 NETIF_MSG_LINK) 88 NETIF_MSG_LINK)
89 89
90 #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 90 #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
91 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 91 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
92 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) 92 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
93 93
94 #define DRV_NAME "sungem" 94 #define DRV_NAME "sungem"
95 #define DRV_VERSION "0.98" 95 #define DRV_VERSION "0.98"
96 #define DRV_RELDATE "8/24/03" 96 #define DRV_RELDATE "8/24/03"
97 #define DRV_AUTHOR "David S. Miller (davem@redhat.com)" 97 #define DRV_AUTHOR "David S. Miller (davem@redhat.com)"
98 98
99 static char version[] __devinitdata = 99 static char version[] __devinitdata =
100 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 100 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
101 101
102 MODULE_AUTHOR(DRV_AUTHOR); 102 MODULE_AUTHOR(DRV_AUTHOR);
103 MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); 103 MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
104 MODULE_LICENSE("GPL"); 104 MODULE_LICENSE("GPL");
105 105
106 #define GEM_MODULE_NAME "gem" 106 #define GEM_MODULE_NAME "gem"
107 #define PFX GEM_MODULE_NAME ": " 107 #define PFX GEM_MODULE_NAME ": "
108 108
/* PCI IDs claimed by this driver: the original Sun GEM plus the
 * Apple GMAC variants.
 */
static struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }	/* sentinel */
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
138 138
139 static u16 __phy_read(struct gem *gp, int phy_addr, int reg) 139 static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
140 { 140 {
141 u32 cmd; 141 u32 cmd;
142 int limit = 10000; 142 int limit = 10000;
143 143
144 cmd = (1 << 30); 144 cmd = (1 << 30);
145 cmd |= (2 << 28); 145 cmd |= (2 << 28);
146 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 146 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
147 cmd |= (reg << 18) & MIF_FRAME_REGAD; 147 cmd |= (reg << 18) & MIF_FRAME_REGAD;
148 cmd |= (MIF_FRAME_TAMSB); 148 cmd |= (MIF_FRAME_TAMSB);
149 writel(cmd, gp->regs + MIF_FRAME); 149 writel(cmd, gp->regs + MIF_FRAME);
150 150
151 while (limit--) { 151 while (limit--) {
152 cmd = readl(gp->regs + MIF_FRAME); 152 cmd = readl(gp->regs + MIF_FRAME);
153 if (cmd & MIF_FRAME_TALSB) 153 if (cmd & MIF_FRAME_TALSB)
154 break; 154 break;
155 155
156 udelay(10); 156 udelay(10);
157 } 157 }
158 158
159 if (!limit) 159 if (!limit)
160 cmd = 0xffff; 160 cmd = 0xffff;
161 161
162 return cmd & MIF_FRAME_DATA; 162 return cmd & MIF_FRAME_DATA;
163 } 163 }
164 164
165 static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 165 static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
166 { 166 {
167 struct gem *gp = dev->priv; 167 struct gem *gp = dev->priv;
168 return __phy_read(gp, mii_id, reg); 168 return __phy_read(gp, mii_id, reg);
169 } 169 }
170 170
171 static inline u16 phy_read(struct gem *gp, int reg) 171 static inline u16 phy_read(struct gem *gp, int reg)
172 { 172 {
173 return __phy_read(gp, gp->mii_phy_addr, reg); 173 return __phy_read(gp, gp->mii_phy_addr, reg);
174 } 174 }
175 175
176 static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) 176 static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
177 { 177 {
178 u32 cmd; 178 u32 cmd;
179 int limit = 10000; 179 int limit = 10000;
180 180
181 cmd = (1 << 30); 181 cmd = (1 << 30);
182 cmd |= (1 << 28); 182 cmd |= (1 << 28);
183 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; 183 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
184 cmd |= (reg << 18) & MIF_FRAME_REGAD; 184 cmd |= (reg << 18) & MIF_FRAME_REGAD;
185 cmd |= (MIF_FRAME_TAMSB); 185 cmd |= (MIF_FRAME_TAMSB);
186 cmd |= (val & MIF_FRAME_DATA); 186 cmd |= (val & MIF_FRAME_DATA);
187 writel(cmd, gp->regs + MIF_FRAME); 187 writel(cmd, gp->regs + MIF_FRAME);
188 188
189 while (limit--) { 189 while (limit--) {
190 cmd = readl(gp->regs + MIF_FRAME); 190 cmd = readl(gp->regs + MIF_FRAME);
191 if (cmd & MIF_FRAME_TALSB) 191 if (cmd & MIF_FRAME_TALSB)
192 break; 192 break;
193 193
194 udelay(10); 194 udelay(10);
195 } 195 }
196 } 196 }
197 197
198 static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 198 static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
199 { 199 {
200 struct gem *gp = dev->priv; 200 struct gem *gp = dev->priv;
201 __phy_write(gp, mii_id, reg, val & 0xffff); 201 __phy_write(gp, mii_id, reg, val & 0xffff);
202 } 202 }
203 203
204 static inline void phy_write(struct gem *gp, int reg, u16 val) 204 static inline void phy_write(struct gem *gp, int reg, u16 val)
205 { 205 {
206 __phy_write(gp, gp->mii_phy_addr, reg, val); 206 __phy_write(gp, gp->mii_phy_addr, reg, val);
207 } 207 }
208 208
/* NOTE(review): writing a bit to GREG_IMASK appears to *mask* that
 * source (see gem_disable_ints), so this leaves everything except
 * TXDONE enabled -- confirm against the GEM register spec.
 */
static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}
214 214
/* Mask the full NAPI interrupt set plus TXDONE, silencing the chip
 * while the poll loop runs.
 */
static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}
220 220
/* Bump the cell refcount; on the 0 -> 1 transition ask the PMU to
 * power the GMAC cell back up (PowerMac only -- a no-op elsewhere,
 * where only the refcount is maintained).
 */
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		/* Order the refcount store before the feature call. */
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
233 233
/* Turn off the chip's clock */
/* Drop the cell refcount; on the 1 -> 0 transition ask the PMU to
 * power the GMAC cell down (PowerMac only).  Counterpart of
 * gem_get_cell().
 */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		/* Order the refcount store before the feature call. */
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
247 247
248 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) 248 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
249 { 249 {
250 if (netif_msg_intr(gp)) 250 if (netif_msg_intr(gp))
251 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); 251 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
252 } 252 }
253 253
/* Handle a PCS (serdes link) interrupt: update carrier state from the
 * PCS MII status register and log autoneg results.
 *
 * Returns 1 when the caller should reset the chip to force a fresh
 * negotiation (link went down with no link timer pending), else 0.
 */
static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
		       gp->dev->name, pcs_istat);

	/* Link-status-change is the only PCS event we act on. */
	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
		       dev->name);
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			printk(KERN_INFO "%s: PCS AutoNEG complete, "
			       "RemoteFault\n", dev->name);
		else
			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
			       dev->name);
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		printk(KERN_INFO "%s: PCS link is now up.\n",
		       dev->name);
		netif_carrier_on(gp->dev);
	} else {
		printk(KERN_INFO "%s: PCS link is now down.\n",
		       dev->name);
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}
308 308
/* Handle a TX MAC interrupt: log fatal conditions and fold expired
 * 16-bit hardware counters into the software net_stats.
 *
 * Always returns 0 (no chip reset requested from this path).
 */
static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
		       gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		gp->net_stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		gp->net_stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	/* Normal-collision counter rolled over. */
	if (txmac_stat & MAC_TXSTAT_NCE)
		gp->net_stats.collisions += 0x10000;

	/* Excessive-collision counter rolled over. */
	if (txmac_stat & MAC_TXSTAT_ECE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* Late-collision counter rolled over. */
	if (txmac_stat & MAC_TXSTAT_LCE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}
357 357
358 /* When we get a RX fifo overflow, the RX unit in GEM is probably hung 358 /* When we get a RX fifo overflow, the RX unit in GEM is probably hung
359 * so we do the following. 359 * so we do the following.
360 * 360 *
361 * If any part of the reset goes wrong, we return 1 and that causes the 361 * If any part of the reset goes wrong, we return 1 and that causes the
362 * whole chip to be reset. 362 * whole chip to be reset.
363 */ 363 */
364 static int gem_rxmac_reset(struct gem *gp) 364 static int gem_rxmac_reset(struct gem *gp)
365 { 365 {
366 struct net_device *dev = gp->dev; 366 struct net_device *dev = gp->dev;
367 int limit, i; 367 int limit, i;
368 u64 desc_dma; 368 u64 desc_dma;
369 u32 val; 369 u32 val;
370 370
371 /* First, reset & disable MAC RX. */ 371 /* First, reset & disable MAC RX. */
372 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 372 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
373 for (limit = 0; limit < 5000; limit++) { 373 for (limit = 0; limit < 5000; limit++) {
374 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) 374 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
375 break; 375 break;
376 udelay(10); 376 udelay(10);
377 } 377 }
378 if (limit == 5000) { 378 if (limit == 5000) {
379 printk(KERN_ERR "%s: RX MAC will not reset, resetting whole " 379 printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
380 "chip.\n", dev->name); 380 "chip.\n", dev->name);
381 return 1; 381 return 1;
382 } 382 }
383 383
384 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, 384 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
385 gp->regs + MAC_RXCFG); 385 gp->regs + MAC_RXCFG);
386 for (limit = 0; limit < 5000; limit++) { 386 for (limit = 0; limit < 5000; limit++) {
387 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) 387 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
388 break; 388 break;
389 udelay(10); 389 udelay(10);
390 } 390 }
391 if (limit == 5000) { 391 if (limit == 5000) {
392 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " 392 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
393 "chip.\n", dev->name); 393 "chip.\n", dev->name);
394 return 1; 394 return 1;
395 } 395 }
396 396
397 /* Second, disable RX DMA. */ 397 /* Second, disable RX DMA. */
398 writel(0, gp->regs + RXDMA_CFG); 398 writel(0, gp->regs + RXDMA_CFG);
399 for (limit = 0; limit < 5000; limit++) { 399 for (limit = 0; limit < 5000; limit++) {
400 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) 400 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
401 break; 401 break;
402 udelay(10); 402 udelay(10);
403 } 403 }
404 if (limit == 5000) { 404 if (limit == 5000) {
405 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " 405 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
406 "chip.\n", dev->name); 406 "chip.\n", dev->name);
407 return 1; 407 return 1;
408 } 408 }
409 409
410 udelay(5000); 410 udelay(5000);
411 411
412 /* Execute RX reset command. */ 412 /* Execute RX reset command. */
413 writel(gp->swrst_base | GREG_SWRST_RXRST, 413 writel(gp->swrst_base | GREG_SWRST_RXRST,
414 gp->regs + GREG_SWRST); 414 gp->regs + GREG_SWRST);
415 for (limit = 0; limit < 5000; limit++) { 415 for (limit = 0; limit < 5000; limit++) {
416 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) 416 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
417 break; 417 break;
418 udelay(10); 418 udelay(10);
419 } 419 }
420 if (limit == 5000) { 420 if (limit == 5000) {
421 printk(KERN_ERR "%s: RX reset command will not execute, resetting " 421 printk(KERN_ERR "%s: RX reset command will not execute, resetting "
422 "whole chip.\n", dev->name); 422 "whole chip.\n", dev->name);
423 return 1; 423 return 1;
424 } 424 }
425 425
426 /* Refresh the RX ring. */ 426 /* Refresh the RX ring. */
427 for (i = 0; i < RX_RING_SIZE; i++) { 427 for (i = 0; i < RX_RING_SIZE; i++) {
428 struct gem_rxd *rxd = &gp->init_block->rxd[i]; 428 struct gem_rxd *rxd = &gp->init_block->rxd[i];
429 429
430 if (gp->rx_skbs[i] == NULL) { 430 if (gp->rx_skbs[i] == NULL) {
431 printk(KERN_ERR "%s: Parts of RX ring empty, resetting " 431 printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
432 "whole chip.\n", dev->name); 432 "whole chip.\n", dev->name);
433 return 1; 433 return 1;
434 } 434 }
435 435
436 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); 436 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
437 } 437 }
438 gp->rx_new = gp->rx_old = 0; 438 gp->rx_new = gp->rx_old = 0;
439 439
440 /* Now we must reprogram the rest of RX unit. */ 440 /* Now we must reprogram the rest of RX unit. */
441 desc_dma = (u64) gp->gblock_dvma; 441 desc_dma = (u64) gp->gblock_dvma;
442 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); 442 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
443 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); 443 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
444 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); 444 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
445 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 445 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
446 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | 446 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
447 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); 447 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
448 writel(val, gp->regs + RXDMA_CFG); 448 writel(val, gp->regs + RXDMA_CFG);
449 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) 449 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
450 writel(((5 & RXDMA_BLANK_IPKTS) | 450 writel(((5 & RXDMA_BLANK_IPKTS) |
451 ((8 << 12) & RXDMA_BLANK_ITIME)), 451 ((8 << 12) & RXDMA_BLANK_ITIME)),
452 gp->regs + RXDMA_BLANK); 452 gp->regs + RXDMA_BLANK);
453 else 453 else
454 writel(((5 & RXDMA_BLANK_IPKTS) | 454 writel(((5 & RXDMA_BLANK_IPKTS) |
455 ((4 << 12) & RXDMA_BLANK_ITIME)), 455 ((4 << 12) & RXDMA_BLANK_ITIME)),
456 gp->regs + RXDMA_BLANK); 456 gp->regs + RXDMA_BLANK);
457 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); 457 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
458 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); 458 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
459 writel(val, gp->regs + RXDMA_PTHRESH); 459 writel(val, gp->regs + RXDMA_PTHRESH);
460 val = readl(gp->regs + RXDMA_CFG); 460 val = readl(gp->regs + RXDMA_CFG);
461 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 461 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
462 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); 462 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
463 val = readl(gp->regs + MAC_RXCFG); 463 val = readl(gp->regs + MAC_RXCFG);
464 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 464 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
465 465
466 return 0; 466 return 0;
467 } 467 }
468 468
469 static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 469 static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
470 { 470 {
471 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); 471 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
472 int ret = 0; 472 int ret = 0;
473 473
474 if (netif_msg_intr(gp)) 474 if (netif_msg_intr(gp))
475 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", 475 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
476 gp->dev->name, rxmac_stat); 476 gp->dev->name, rxmac_stat);
477 477
478 if (rxmac_stat & MAC_RXSTAT_OFLW) { 478 if (rxmac_stat & MAC_RXSTAT_OFLW) {
479 u32 smac = readl(gp->regs + MAC_SMACHINE); 479 u32 smac = readl(gp->regs + MAC_SMACHINE);
480 480
481 printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n", 481 printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
482 dev->name, smac); 482 dev->name, smac);
483 gp->net_stats.rx_over_errors++; 483 gp->net_stats.rx_over_errors++;
484 gp->net_stats.rx_fifo_errors++; 484 gp->net_stats.rx_fifo_errors++;
485 485
486 ret = gem_rxmac_reset(gp); 486 ret = gem_rxmac_reset(gp);
487 } 487 }
488 488
489 if (rxmac_stat & MAC_RXSTAT_ACE) 489 if (rxmac_stat & MAC_RXSTAT_ACE)
490 gp->net_stats.rx_frame_errors += 0x10000; 490 gp->net_stats.rx_frame_errors += 0x10000;
491 491
492 if (rxmac_stat & MAC_RXSTAT_CCE) 492 if (rxmac_stat & MAC_RXSTAT_CCE)
493 gp->net_stats.rx_crc_errors += 0x10000; 493 gp->net_stats.rx_crc_errors += 0x10000;
494 494
495 if (rxmac_stat & MAC_RXSTAT_LCE) 495 if (rxmac_stat & MAC_RXSTAT_LCE)
496 gp->net_stats.rx_length_errors += 0x10000; 496 gp->net_stats.rx_length_errors += 0x10000;
497 497
498 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 498 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
499 * events. 499 * events.
500 */ 500 */
501 return ret; 501 return ret;
502 } 502 }
503 503
504 static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 504 static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
505 { 505 {
506 u32 mac_cstat = readl(gp->regs + MAC_CSTAT); 506 u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
507 507
508 if (netif_msg_intr(gp)) 508 if (netif_msg_intr(gp))
509 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", 509 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
510 gp->dev->name, mac_cstat); 510 gp->dev->name, mac_cstat);
511 511
512 /* This interrupt is just for pause frame and pause 512 /* This interrupt is just for pause frame and pause
513 * tracking. It is useful for diagnostics and debug 513 * tracking. It is useful for diagnostics and debug
514 * but probably by default we will mask these events. 514 * but probably by default we will mask these events.
515 */ 515 */
516 if (mac_cstat & MAC_CSTAT_PS) 516 if (mac_cstat & MAC_CSTAT_PS)
517 gp->pause_entered++; 517 gp->pause_entered++;
518 518
519 if (mac_cstat & MAC_CSTAT_PRCV) 519 if (mac_cstat & MAC_CSTAT_PRCV)
520 gp->pause_last_time_recvd = (mac_cstat >> 16); 520 gp->pause_last_time_recvd = (mac_cstat >> 16);
521 521
522 return 0; 522 return 0;
523 } 523 }
524 524
525 static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 525 static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
526 { 526 {
527 u32 mif_status = readl(gp->regs + MIF_STATUS); 527 u32 mif_status = readl(gp->regs + MIF_STATUS);
528 u32 reg_val, changed_bits; 528 u32 reg_val, changed_bits;
529 529
530 reg_val = (mif_status & MIF_STATUS_DATA) >> 16; 530 reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
531 changed_bits = (mif_status & MIF_STATUS_STAT); 531 changed_bits = (mif_status & MIF_STATUS_STAT);
532 532
533 gem_handle_mif_event(gp, reg_val, changed_bits); 533 gem_handle_mif_event(gp, reg_val, changed_bits);
534 534
535 return 0; 535 return 0;
536 } 536 }
537 537
538 static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) 538 static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
539 { 539 {
540 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); 540 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
541 541
542 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 542 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
543 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 543 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
544 printk(KERN_ERR "%s: PCI error [%04x] ", 544 printk(KERN_ERR "%s: PCI error [%04x] ",
545 dev->name, pci_estat); 545 dev->name, pci_estat);
546 546
547 if (pci_estat & GREG_PCIESTAT_BADACK) 547 if (pci_estat & GREG_PCIESTAT_BADACK)
548 printk("<No ACK64# during ABS64 cycle> "); 548 printk("<No ACK64# during ABS64 cycle> ");
549 if (pci_estat & GREG_PCIESTAT_DTRTO) 549 if (pci_estat & GREG_PCIESTAT_DTRTO)
550 printk("<Delayed transaction timeout> "); 550 printk("<Delayed transaction timeout> ");
551 if (pci_estat & GREG_PCIESTAT_OTHER) 551 if (pci_estat & GREG_PCIESTAT_OTHER)
552 printk("<other>"); 552 printk("<other>");
553 printk("\n"); 553 printk("\n");
554 } else { 554 } else {
555 pci_estat |= GREG_PCIESTAT_OTHER; 555 pci_estat |= GREG_PCIESTAT_OTHER;
556 printk(KERN_ERR "%s: PCI error\n", dev->name); 556 printk(KERN_ERR "%s: PCI error\n", dev->name);
557 } 557 }
558 558
559 if (pci_estat & GREG_PCIESTAT_OTHER) { 559 if (pci_estat & GREG_PCIESTAT_OTHER) {
560 u16 pci_cfg_stat; 560 u16 pci_cfg_stat;
561 561
562 /* Interrogate PCI config space for the 562 /* Interrogate PCI config space for the
563 * true cause. 563 * true cause.
564 */ 564 */
565 pci_read_config_word(gp->pdev, PCI_STATUS, 565 pci_read_config_word(gp->pdev, PCI_STATUS,
566 &pci_cfg_stat); 566 &pci_cfg_stat);
567 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", 567 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
568 dev->name, pci_cfg_stat); 568 dev->name, pci_cfg_stat);
569 if (pci_cfg_stat & PCI_STATUS_PARITY) 569 if (pci_cfg_stat & PCI_STATUS_PARITY)
570 printk(KERN_ERR "%s: PCI parity error detected.\n", 570 printk(KERN_ERR "%s: PCI parity error detected.\n",
571 dev->name); 571 dev->name);
572 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) 572 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
573 printk(KERN_ERR "%s: PCI target abort.\n", 573 printk(KERN_ERR "%s: PCI target abort.\n",
574 dev->name); 574 dev->name);
575 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) 575 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
576 printk(KERN_ERR "%s: PCI master acks target abort.\n", 576 printk(KERN_ERR "%s: PCI master acks target abort.\n",
577 dev->name); 577 dev->name);
578 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) 578 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
579 printk(KERN_ERR "%s: PCI master abort.\n", 579 printk(KERN_ERR "%s: PCI master abort.\n",
580 dev->name); 580 dev->name);
581 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) 581 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
582 printk(KERN_ERR "%s: PCI system error SERR#.\n", 582 printk(KERN_ERR "%s: PCI system error SERR#.\n",
583 dev->name); 583 dev->name);
584 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) 584 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
585 printk(KERN_ERR "%s: PCI parity error.\n", 585 printk(KERN_ERR "%s: PCI parity error.\n",
586 dev->name); 586 dev->name);
587 587
588 /* Write the error bits back to clear them. */ 588 /* Write the error bits back to clear them. */
589 pci_cfg_stat &= (PCI_STATUS_PARITY | 589 pci_cfg_stat &= (PCI_STATUS_PARITY |
590 PCI_STATUS_SIG_TARGET_ABORT | 590 PCI_STATUS_SIG_TARGET_ABORT |
591 PCI_STATUS_REC_TARGET_ABORT | 591 PCI_STATUS_REC_TARGET_ABORT |
592 PCI_STATUS_REC_MASTER_ABORT | 592 PCI_STATUS_REC_MASTER_ABORT |
593 PCI_STATUS_SIG_SYSTEM_ERROR | 593 PCI_STATUS_SIG_SYSTEM_ERROR |
594 PCI_STATUS_DETECTED_PARITY); 594 PCI_STATUS_DETECTED_PARITY);
595 pci_write_config_word(gp->pdev, 595 pci_write_config_word(gp->pdev,
596 PCI_STATUS, pci_cfg_stat); 596 PCI_STATUS, pci_cfg_stat);
597 } 597 }
598 598
599 /* For all PCI errors, we should reset the chip. */ 599 /* For all PCI errors, we should reset the chip. */
600 return 1; 600 return 1;
601 } 601 }
602 602
603 /* All non-normal interrupt conditions get serviced here. 603 /* All non-normal interrupt conditions get serviced here.
604 * Returns non-zero if we should just exit the interrupt 604 * Returns non-zero if we should just exit the interrupt
605 * handler right now (ie. if we reset the card which invalidates 605 * handler right now (ie. if we reset the card which invalidates
606 * all of the other original irq status bits). 606 * all of the other original irq status bits).
607 */ 607 */
608 static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) 608 static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
609 { 609 {
610 if (gem_status & GREG_STAT_RXNOBUF) { 610 if (gem_status & GREG_STAT_RXNOBUF) {
611 /* Frame arrived, no free RX buffers available. */ 611 /* Frame arrived, no free RX buffers available. */
612 if (netif_msg_rx_err(gp)) 612 if (netif_msg_rx_err(gp))
613 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 613 printk(KERN_DEBUG "%s: no buffer for rx frame\n",
614 gp->dev->name); 614 gp->dev->name);
615 gp->net_stats.rx_dropped++; 615 gp->net_stats.rx_dropped++;
616 } 616 }
617 617
618 if (gem_status & GREG_STAT_RXTAGERR) { 618 if (gem_status & GREG_STAT_RXTAGERR) {
619 /* corrupt RX tag framing */ 619 /* corrupt RX tag framing */
620 if (netif_msg_rx_err(gp)) 620 if (netif_msg_rx_err(gp))
621 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 621 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
622 gp->dev->name); 622 gp->dev->name);
623 gp->net_stats.rx_errors++; 623 gp->net_stats.rx_errors++;
624 624
625 goto do_reset; 625 goto do_reset;
626 } 626 }
627 627
628 if (gem_status & GREG_STAT_PCS) { 628 if (gem_status & GREG_STAT_PCS) {
629 if (gem_pcs_interrupt(dev, gp, gem_status)) 629 if (gem_pcs_interrupt(dev, gp, gem_status))
630 goto do_reset; 630 goto do_reset;
631 } 631 }
632 632
633 if (gem_status & GREG_STAT_TXMAC) { 633 if (gem_status & GREG_STAT_TXMAC) {
634 if (gem_txmac_interrupt(dev, gp, gem_status)) 634 if (gem_txmac_interrupt(dev, gp, gem_status))
635 goto do_reset; 635 goto do_reset;
636 } 636 }
637 637
638 if (gem_status & GREG_STAT_RXMAC) { 638 if (gem_status & GREG_STAT_RXMAC) {
639 if (gem_rxmac_interrupt(dev, gp, gem_status)) 639 if (gem_rxmac_interrupt(dev, gp, gem_status))
640 goto do_reset; 640 goto do_reset;
641 } 641 }
642 642
643 if (gem_status & GREG_STAT_MAC) { 643 if (gem_status & GREG_STAT_MAC) {
644 if (gem_mac_interrupt(dev, gp, gem_status)) 644 if (gem_mac_interrupt(dev, gp, gem_status))
645 goto do_reset; 645 goto do_reset;
646 } 646 }
647 647
648 if (gem_status & GREG_STAT_MIF) { 648 if (gem_status & GREG_STAT_MIF) {
649 if (gem_mif_interrupt(dev, gp, gem_status)) 649 if (gem_mif_interrupt(dev, gp, gem_status))
650 goto do_reset; 650 goto do_reset;
651 } 651 }
652 652
653 if (gem_status & GREG_STAT_PCIERR) { 653 if (gem_status & GREG_STAT_PCIERR) {
654 if (gem_pci_interrupt(dev, gp, gem_status)) 654 if (gem_pci_interrupt(dev, gp, gem_status))
655 goto do_reset; 655 goto do_reset;
656 } 656 }
657 657
658 return 0; 658 return 0;
659 659
660 do_reset: 660 do_reset:
661 gp->reset_task_pending = 1; 661 gp->reset_task_pending = 1;
662 schedule_work(&gp->reset_task); 662 schedule_work(&gp->reset_task);
663 663
664 return 1; 664 return 1;
665 } 665 }
666 666
/* Reclaim completed TX descriptors and free their skbs.
 *
 * 'gem_status' carries the hardware completion index in its
 * GREG_STAT_TXNR field; every ring slot from gp->tx_old up to (but not
 * including) that index has been transmitted.  Caller holds
 * gp->tx_lock.  May wake a queue that was stopped on a full ring.
 */
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	/* Hardware's TX completion index, extracted from the status word. */
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			/* A fragmented skb spans several consecutive
			 * descriptors; only reclaim it once the hardware
			 * has completed ALL of them.  Walk from this slot
			 * to the skb's last slot and stop reclaiming if
			 * the completion index lies inside that span.
			 */
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		/* Unmap every descriptor (head + all fragments) that
		 * this skb occupied, advancing 'entry' past them.
		 */
		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	/* Restart a stopped queue once there is room for another
	 * maximally-fragmented packet.
	 */
	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}
726 726
/* Return RX descriptors to the hardware in aligned clusters of four.
 *
 * The GEM RX kick register is advanced four descriptors at a time, so
 * entries are marked FRESH (hardware-owned) one full 4-slot cluster at
 * a time, remembering the furthest index that may be kicked.  Partial
 * clusters are left for a later call.
 */
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	/* Round down to the start of the current 4-descriptor cluster. */
	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();	/* buffer writes must be visible before ownership flips */
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			/* Hand the whole cluster back to the chip. */
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();	/* order status_word stores before the kick MMIO write */
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
756 756
757 static int gem_rx(struct gem *gp, int work_to_do) 757 static int gem_rx(struct gem *gp, int work_to_do)
758 { 758 {
759 int entry, drops, work_done = 0; 759 int entry, drops, work_done = 0;
760 u32 done; 760 u32 done;
761 761
762 if (netif_msg_rx_status(gp)) 762 if (netif_msg_rx_status(gp))
763 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", 763 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
764 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); 764 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
765 765
766 entry = gp->rx_new; 766 entry = gp->rx_new;
767 drops = 0; 767 drops = 0;
768 done = readl(gp->regs + RXDMA_DONE); 768 done = readl(gp->regs + RXDMA_DONE);
769 for (;;) { 769 for (;;) {
770 struct gem_rxd *rxd = &gp->init_block->rxd[entry]; 770 struct gem_rxd *rxd = &gp->init_block->rxd[entry];
771 struct sk_buff *skb; 771 struct sk_buff *skb;
772 u64 status = cpu_to_le64(rxd->status_word); 772 u64 status = cpu_to_le64(rxd->status_word);
773 dma_addr_t dma_addr; 773 dma_addr_t dma_addr;
774 int len; 774 int len;
775 775
776 if ((status & RXDCTRL_OWN) != 0) 776 if ((status & RXDCTRL_OWN) != 0)
777 break; 777 break;
778 778
779 if (work_done >= RX_RING_SIZE || work_done >= work_to_do) 779 if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
780 break; 780 break;
781 781
782 /* When writing back RX descriptor, GEM writes status 782 /* When writing back RX descriptor, GEM writes status
783 * then buffer address, possibly in seperate transactions. 783 * then buffer address, possibly in seperate transactions.
784 * If we don't wait for the chip to write both, we could 784 * If we don't wait for the chip to write both, we could
785 * post a new buffer to this descriptor then have GEM spam 785 * post a new buffer to this descriptor then have GEM spam
786 * on the buffer address. We sync on the RX completion 786 * on the buffer address. We sync on the RX completion
787 * register to prevent this from happening. 787 * register to prevent this from happening.
788 */ 788 */
789 if (entry == done) { 789 if (entry == done) {
790 done = readl(gp->regs + RXDMA_DONE); 790 done = readl(gp->regs + RXDMA_DONE);
791 if (entry == done) 791 if (entry == done)
792 break; 792 break;
793 } 793 }
794 794
795 /* We can now account for the work we're about to do */ 795 /* We can now account for the work we're about to do */
796 work_done++; 796 work_done++;
797 797
798 skb = gp->rx_skbs[entry]; 798 skb = gp->rx_skbs[entry];
799 799
800 len = (status & RXDCTRL_BUFSZ) >> 16; 800 len = (status & RXDCTRL_BUFSZ) >> 16;
801 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 801 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
802 gp->net_stats.rx_errors++; 802 gp->net_stats.rx_errors++;
803 if (len < ETH_ZLEN) 803 if (len < ETH_ZLEN)
804 gp->net_stats.rx_length_errors++; 804 gp->net_stats.rx_length_errors++;
805 if (len & RXDCTRL_BAD) 805 if (len & RXDCTRL_BAD)
806 gp->net_stats.rx_crc_errors++; 806 gp->net_stats.rx_crc_errors++;
807 807
808 /* We'll just return it to GEM. */ 808 /* We'll just return it to GEM. */
809 drop_it: 809 drop_it:
810 gp->net_stats.rx_dropped++; 810 gp->net_stats.rx_dropped++;
811 goto next; 811 goto next;
812 } 812 }
813 813
814 dma_addr = cpu_to_le64(rxd->buffer); 814 dma_addr = cpu_to_le64(rxd->buffer);
815 if (len > RX_COPY_THRESHOLD) { 815 if (len > RX_COPY_THRESHOLD) {
816 struct sk_buff *new_skb; 816 struct sk_buff *new_skb;
817 817
818 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 818 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
819 if (new_skb == NULL) { 819 if (new_skb == NULL) {
820 drops++; 820 drops++;
821 goto drop_it; 821 goto drop_it;
822 } 822 }
823 pci_unmap_page(gp->pdev, dma_addr, 823 pci_unmap_page(gp->pdev, dma_addr,
824 RX_BUF_ALLOC_SIZE(gp), 824 RX_BUF_ALLOC_SIZE(gp),
825 PCI_DMA_FROMDEVICE); 825 PCI_DMA_FROMDEVICE);
826 gp->rx_skbs[entry] = new_skb; 826 gp->rx_skbs[entry] = new_skb;
827 new_skb->dev = gp->dev; 827 new_skb->dev = gp->dev;
828 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); 828 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
829 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, 829 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
830 virt_to_page(new_skb->data), 830 virt_to_page(new_skb->data),
831 offset_in_page(new_skb->data), 831 offset_in_page(new_skb->data),
832 RX_BUF_ALLOC_SIZE(gp), 832 RX_BUF_ALLOC_SIZE(gp),
833 PCI_DMA_FROMDEVICE)); 833 PCI_DMA_FROMDEVICE));
834 skb_reserve(new_skb, RX_OFFSET); 834 skb_reserve(new_skb, RX_OFFSET);
835 835
836 /* Trim the original skb for the netif. */ 836 /* Trim the original skb for the netif. */
837 skb_trim(skb, len); 837 skb_trim(skb, len);
838 } else { 838 } else {
839 struct sk_buff *copy_skb = dev_alloc_skb(len + 2); 839 struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
840 840
841 if (copy_skb == NULL) { 841 if (copy_skb == NULL) {
842 drops++; 842 drops++;
843 goto drop_it; 843 goto drop_it;
844 } 844 }
845 845
846 copy_skb->dev = gp->dev; 846 copy_skb->dev = gp->dev;
847 skb_reserve(copy_skb, 2); 847 skb_reserve(copy_skb, 2);
848 skb_put(copy_skb, len); 848 skb_put(copy_skb, len);
849 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 849 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
850 memcpy(copy_skb->data, skb->data, len); 850 memcpy(copy_skb->data, skb->data, len);
851 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 851 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
852 852
853 /* We'll reuse the original ring buffer. */ 853 /* We'll reuse the original ring buffer. */
854 skb = copy_skb; 854 skb = copy_skb;
855 } 855 }
856 856
857 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff); 857 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
858 skb->ip_summed = CHECKSUM_HW; 858 skb->ip_summed = CHECKSUM_HW;
859 skb->protocol = eth_type_trans(skb, gp->dev); 859 skb->protocol = eth_type_trans(skb, gp->dev);
860 860
861 netif_receive_skb(skb); 861 netif_receive_skb(skb);
862 862
863 gp->net_stats.rx_packets++; 863 gp->net_stats.rx_packets++;
864 gp->net_stats.rx_bytes += len; 864 gp->net_stats.rx_bytes += len;
865 gp->dev->last_rx = jiffies; 865 gp->dev->last_rx = jiffies;
866 866
867 next: 867 next:
868 entry = NEXT_RX(entry); 868 entry = NEXT_RX(entry);
869 } 869 }
870 870
871 gem_post_rxds(gp, entry); 871 gem_post_rxds(gp, entry);
872 872
873 gp->rx_new = entry; 873 gp->rx_new = entry;
874 874
875 if (drops) 875 if (drops)
876 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 876 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
877 gp->dev->name); 877 gp->dev->name);
878 878
879 return work_done; 879 return work_done;
880 } 880 }
881 881
/* NAPI poll callback: service abnormal conditions, reap completed TX
 * descriptors, then run RX up to the allotted budget.
 *
 * Returns 1 when there is still work pending (device stays on the poll
 * list), 0 when all events were drained and chip interrupts have been
 * re-enabled.
 */
static int gem_poll(struct net_device *dev, int *budget)
{
	struct gem *gp = dev->priv;
	unsigned long flags;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	do {
		int work_to_do, work_done;

		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			/* A non-zero return means the chip is being
			 * reset; abandon this poll round entirely.
			 */
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here, 
		 * code willing to do bad things - like cleaning the 
		 * rx ring - must call netif_poll_disable(), which 
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_to_do = min(*budget, dev->quota);

		work_done = gem_rx(gp, work_to_do);

		/* Account the work against both the global and the
		 * per-device quota.
		 */
		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= work_to_do)
			return 1;	/* budget exhausted; poll again later */

		spin_lock_irqsave(&gp->lock, flags);

		/* Re-read status: new events may have arrived while the
		 * lock was dropped for RX processing.
		 */
		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__netif_rx_complete(dev);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);
	return 0;
}
934 934
935 static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs) 935 static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
936 { 936 {
937 struct net_device *dev = dev_id; 937 struct net_device *dev = dev_id;
938 struct gem *gp = dev->priv; 938 struct gem *gp = dev->priv;
939 unsigned long flags; 939 unsigned long flags;
940 940
941 /* Swallow interrupts when shutting the chip down, though 941 /* Swallow interrupts when shutting the chip down, though
942 * that shouldn't happen, we should have done free_irq() at 942 * that shouldn't happen, we should have done free_irq() at
943 * this point... 943 * this point...
944 */ 944 */
945 if (!gp->running) 945 if (!gp->running)
946 return IRQ_HANDLED; 946 return IRQ_HANDLED;
947 947
948 spin_lock_irqsave(&gp->lock, flags); 948 spin_lock_irqsave(&gp->lock, flags);
949 949
950 if (netif_rx_schedule_prep(dev)) { 950 if (netif_rx_schedule_prep(dev)) {
951 u32 gem_status = readl(gp->regs + GREG_STAT); 951 u32 gem_status = readl(gp->regs + GREG_STAT);
952 952
953 if (gem_status == 0) { 953 if (gem_status == 0) {
954 netif_poll_enable(dev); 954 netif_poll_enable(dev);
955 spin_unlock_irqrestore(&gp->lock, flags); 955 spin_unlock_irqrestore(&gp->lock, flags);
956 return IRQ_NONE; 956 return IRQ_NONE;
957 } 957 }
958 gp->status = gem_status; 958 gp->status = gem_status;
959 gem_disable_ints(gp); 959 gem_disable_ints(gp);
960 __netif_rx_schedule(dev); 960 __netif_rx_schedule(dev);
961 } 961 }
962 962
963 spin_unlock_irqrestore(&gp->lock, flags); 963 spin_unlock_irqrestore(&gp->lock, flags);
964 964
965 /* If polling was disabled at the time we received that 965 /* If polling was disabled at the time we received that
966 * interrupt, we may return IRQ_HANDLED here while we 966 * interrupt, we may return IRQ_HANDLED here while we
967 * should return IRQ_NONE. No big deal... 967 * should return IRQ_NONE. No big deal...
968 */ 968 */
969 return IRQ_HANDLED; 969 return IRQ_HANDLED;
970 } 970 }
971 971
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook (netconsole/kgdboe): invoke the interrupt handler
 * directly to drain pending events without a hardware interrupt.
 */
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt is safe to reentrance so no need
	 * to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev, NULL);
}
#endif
981 981
982 static void gem_tx_timeout(struct net_device *dev) 982 static void gem_tx_timeout(struct net_device *dev)
983 { 983 {
984 struct gem *gp = dev->priv; 984 struct gem *gp = dev->priv;
985 985
986 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 986 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
987 if (!gp->running) { 987 if (!gp->running) {
988 printk("%s: hrm.. hw not running !\n", dev->name); 988 printk("%s: hrm.. hw not running !\n", dev->name);
989 return; 989 return;
990 } 990 }
991 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n", 991 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
992 dev->name, 992 dev->name,
993 readl(gp->regs + TXDMA_CFG), 993 readl(gp->regs + TXDMA_CFG),
994 readl(gp->regs + MAC_TXSTAT), 994 readl(gp->regs + MAC_TXSTAT),
995 readl(gp->regs + MAC_TXCFG)); 995 readl(gp->regs + MAC_TXCFG));
996 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 996 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
997 dev->name, 997 dev->name,
998 readl(gp->regs + RXDMA_CFG), 998 readl(gp->regs + RXDMA_CFG),
999 readl(gp->regs + MAC_RXSTAT), 999 readl(gp->regs + MAC_RXSTAT),
1000 readl(gp->regs + MAC_RXCFG)); 1000 readl(gp->regs + MAC_RXCFG));
1001 1001
1002 spin_lock_irq(&gp->lock); 1002 spin_lock_irq(&gp->lock);
1003 spin_lock(&gp->tx_lock); 1003 spin_lock(&gp->tx_lock);
1004 1004
1005 gp->reset_task_pending = 1; 1005 gp->reset_task_pending = 1;
1006 schedule_work(&gp->reset_task); 1006 schedule_work(&gp->reset_task);
1007 1007
1008 spin_unlock(&gp->tx_lock); 1008 spin_unlock(&gp->tx_lock);
1009 spin_unlock_irq(&gp->lock); 1009 spin_unlock_irq(&gp->lock);
1010 } 1010 }
1011 1011
1012 static __inline__ int gem_intme(int entry) 1012 static __inline__ int gem_intme(int entry)
1013 { 1013 {
1014 /* Algorithm: IRQ every 1/2 of descriptors. */ 1014 /* Algorithm: IRQ every 1/2 of descriptors. */
1015 if (!(entry & ((TX_RING_SIZE>>1)-1))) 1015 if (!(entry & ((TX_RING_SIZE>>1)-1)))
1016 return 1; 1016 return 1;
1017 1017
1018 return 0; 1018 return 0;
1019 } 1019 }
1020 1020
/* hard_start_xmit: queue one skb on the TX descriptor ring.
 *
 * Returns NETDEV_TX_OK on success, NETDEV_TX_LOCKED when the tx_lock
 * could not be taken (upper layer will requeue), or NETDEV_TX_BUSY when
 * the device is stopping or the ring is unexpectedly full.
 *
 * Descriptor-ordering rules observed below: each descriptor's buffer
 * address is written before its control word (separated by wmb()), and
 * for multi-fragment skbs the first descriptor (the one carrying
 * TXDCTRL_SOF) is filled in last so the chip never sees a partial chain.
 */
static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gem *gp = dev->priv;
	int entry;
	u64 ctrl;
	unsigned long flags;

	/* If the stack computed a partial checksum, tell the chip where
	 * the checksummed region starts and where to stuff the result
	 * (offsets are encoded into the control word).
	 */
	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		u64 csum_start_off, csum_stuff_off;

		csum_start_off = (u64) (skb->h.raw - skb->data);
		csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	/* Lock-less-friendly TX: try the lock rather than spinning. */
	local_irq_save(flags);
	if (!spin_trylock(&gp->tx_lock)) {
		/* Tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
	/* We raced with gem_do_stop() */
	if (!gp->running) {
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	/* Claim the next free slot and remember the skb for completion. */
	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: a single descriptor carries both SOF and EOF. */
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();	/* buffer address must be visible before control word */
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		/* Fragmented skb: one descriptor per fragment plus the head. */
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;	/* last fragment */

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();	/* address before control word, as above */
			txd->control_word = cpu_to_le64(this_ctrl | len);

			/* Accumulate the interrupt request onto the SOF
			 * descriptor if any slot in the chain wants one.
			 */
			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		/* Now publish the head descriptor (SOF) — written last so
		 * the chip only sees a complete chain.
		 */
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	/* Advance the producer index; stop the queue if a maximally
	 * fragmented skb would no longer fit.
	 */
	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();	/* descriptors must be visible before kicking the chip */
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irqrestore(&gp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
1148 1148
1149 #define STOP_TRIES 32 1149 #define STOP_TRIES 32
1150 1150
/* Must be invoked under gp->lock and gp->tx_lock.
 *
 * Perform a software reset of the chip: mask all interrupts, assert the
 * TX+RX reset bits, then poll (up to STOP_TRIES iterations, 20us apart)
 * for the hardware to clear them.
 */
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	/* Busy-wait for the reset bits to self-clear. */
	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	/* Timed out: the chip never acknowledged the reset. */
	if (limit <= 0)
		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
}
1176 1176
/* Must be invoked under gp->lock and gp->tx_lock.
 *
 * Enable the data path: TX DMA, RX DMA, TX MAC and RX MAC (in that
 * order), re-enable interrupts, then prime the RX ring by kicking the
 * RX descriptor pointer.
 */
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	/* Read back to flush the posted writes, then let things settle. */
	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	/* Hand (almost) the whole RX ring to the chip. */
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}
1199 1199
1200 /* Must be invoked under gp->lock and gp->tx_lock. DMA won't be 1200 /* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
1201 * actually stopped before about 4ms tho ... 1201 * actually stopped before about 4ms tho ...
1202 */ 1202 */
1203 static void gem_stop_dma(struct gem *gp) 1203 static void gem_stop_dma(struct gem *gp)
1204 { 1204 {
1205 u32 val; 1205 u32 val;
1206 1206
1207 /* We are done rocking, turn everything off. */ 1207 /* We are done rocking, turn everything off. */
1208 val = readl(gp->regs + TXDMA_CFG); 1208 val = readl(gp->regs + TXDMA_CFG);
1209 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); 1209 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1210 val = readl(gp->regs + RXDMA_CFG); 1210 val = readl(gp->regs + RXDMA_CFG);
1211 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); 1211 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1212 val = readl(gp->regs + MAC_TXCFG); 1212 val = readl(gp->regs + MAC_TXCFG);
1213 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); 1213 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1214 val = readl(gp->regs + MAC_RXCFG); 1214 val = readl(gp->regs + MAC_RXCFG);
1215 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); 1215 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
1216 1216
1217 (void) readl(gp->regs + MAC_RXCFG); 1217 (void) readl(gp->regs + MAC_RXCFG);
1218 1218
1219 /* Need to wait a bit ... done by the caller */ 1219 /* Need to wait a bit ... done by the caller */
1220 } 1220 }
1221 1221
1222 1222
/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
/* (Re)start link negotiation.  @ep, when non-NULL, carries new user
 * settings from ethtool; otherwise the current/stored settings are
 * reused.  The requested speed/duplex/autoneg are sanitized against the
 * PHY's capability mask before being programmed.  If the chip is asleep
 * the settings are only stored for later.  In all cases the link timer
 * is (re)armed to poll for the outcome.
 */
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	/* Non-MDIO PHYs (serialink/serdes) have nothing to program here;
	 * just restart the link timer.
	 */
	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	/* Honor any user-restricted advertising mask. */
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	/* Start from the currently stored settings ... */
	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* ... and override them with ethtool's request, if any. */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->speed;
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	/* Step the forced speed down until the PHY supports it. */
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (gp->asleep) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	/* Arm the link poll timer (~1.2s). */
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}
1306 1306
/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Reads the negotiated speed/duplex/pause from the PHY (or from the
 * PCS registers for serialink/serdes), then programs the TX MAC, XIF,
 * carrier-extension, slot-time and pause configuration to match, and
 * finally starts the DMA engines.
 *
 * Returns 1 when the MII link could not be read (caller should restart
 * autonegotiation), 0 otherwise.
 *
 * Must be invoked under gp->lock and gp->tx_lock.
 */
static int gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	/* Conservative defaults until we learn better. */
	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		/* MII PHY: ask the PHY driver what was negotiated. */
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		/* Fiber/serdes: read the PCS link partner ability. */
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & PCS_MIIADV_FD)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	if (netif_msg_link(gp))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
		       gp->dev->name, speed, (full_duplex ? "full" : "half"));

	/* Device may have been stopped while we were negotiating. */
	if (!gp->running)
		return 0;

	/* TX MAC: in full duplex, ignore carrier sense and collisions. */
	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	/* XIF: output enable + link LED; echo-disable for half-duplex
	 * MII, full-duplex LED otherwise; GMII mode at gigabit.
	 */
	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	/* For PCS links, derive pause capability from the partner's
	 * symmetric/asymmetric pause advertisement bits.
	 */
	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (netif_msg_link(gp)) {
		if (pause) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       gp->dev->name,
			       gp->rx_fifo_sz,
			       gp->rx_pause_off,
			       gp->rx_pause_on);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
			       gp->dev->name);
		}
	}

	/* Slot time: 512 for half duplex, 64 for full duplex. */
	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	/* Enable or disable send/receive of pause frames. */
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	return 0;
}
1419 1419
/* Must be invoked under gp->lock and gp->tx_lock.
 *
 * Link-down fallback state machine, driven from the link timer when an
 * MDIO PHY fails to come up.  Progression: failed autoneg -> forced
 * 100bt half -> forced 10bt half.  Returns 1 when the caller should
 * restart autonegotiation, 0 when a forced mode was (re)programmed and
 * we should keep waiting.
 */
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		/* The retried autoneg (after a forced-mode success) failed
		 * again: go back to the forced speed that worked.
		 */
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", gp->dev->name);
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have "magic_aneg" bit set, which means they internally do the
		 * while forced-mode thingy. On these, we just restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: switching to forced 100bt\n",
				gp->dev->name);
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
			DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
				DUPLEX_HALF);
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: switching to forced 10bt\n",
					gp->dev->name);
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}
1468 1468
/* Periodic (~1.2s) link poll timer.  Checks link state via the PCS
 * registers (serialink/serdes) or the MII PHY driver, transitions the
 * netif carrier, programs link modes on link-up, and drives the
 * forced-mode fallback machinery on link-down.  Takes gp->lock and
 * gp->tx_lock, and holds a cell reference for the duration.
 */
static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	int restart_aneg = 0;

	/* Nothing to do while suspended. */
	if (gp->asleep)
		return;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);

	/* If the reset task is still pending, we just
	 * reschedule the link timer
	 */
	if (gp->reset_task_pending)
		goto restart;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		/* Link-status bit is latched; re-read once if clear. */
		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
					" autoneg once...\n", gp->dev->name);
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			/* Fresh link-up: bring the carrier up and program
			 * the MAC for the negotiated modes.
			 */
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Link down\n",
					gp->dev->name);
			netif_carrier_off(gp->dev);
			gp->reset_task_pending = 1;
			schedule_work(&gp->reset_task);
			restart_aneg = 1;
		} else if (++gp->timer_ticks > 10) {
			/* Still no link after 10 ticks: try the fallback
			 * state machine (or restart aneg for non-MII).
			 */
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		/* gem_begin_auto_negotiation() rearms the timer itself. */
		gem_begin_auto_negotiation(gp, NULL);
		goto out_unlock;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock:
	gem_put_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}
1552 1552
/* Must be invoked under gp->lock and gp->tx_lock. */
/* Free every skb still attached to the RX and TX descriptor rings,
 * unmapping their DMA buffers, and clear the descriptors so the
 * hardware never sees a stale entry as usable.  Leaves the rings
 * empty; gem_init_rings() repopulates them.
 */
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			/* The descriptor still holds the DMA address the
			 * buffer was mapped at; recover it for unmapping.
			 */
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		/* Clear the status/ownership word before the buffer
		 * pointer; the barrier orders the two stores.
		 */
		rxd->status_word = 0;
		wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			/* A TX skb occupies one descriptor for the head plus
			 * one per page fragment; walk and unmap all of them,
			 * advancing the outer index for each extra fragment.
			 */
			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}
1603 1603
/* Must be invoked under gp->lock and gp->tx_lock. */
/* (Re)build the RX and TX descriptor rings: reset the ring indices,
 * free any leftover buffers, allocate and DMA-map a receive skb for
 * every RX descriptor, and zero out all TX descriptors.
 */
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	/* Release anything still attached from a previous run. */
	gem_clean_rings(gp);

	/* RX buffer must fit MTU + ethernet header + VLAN tag, but
	 * never less than a full VLAN ethernet frame.
	 */
	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
		if (!skb) {
			/* Allocation failure: leave this slot empty so
			 * the hardware never owns the descriptor.
			 */
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		/* Publish the buffer address before marking the
		 * descriptor fresh (hardware-owned); wmb() orders it.
		 */
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		/* Clear the control word before the buffer pointer so a
		 * stale control word is never paired with a zero buffer.
		 */
		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}
1653 1653
1654 /* Init PHY interface and start link poll state machine */ 1654 /* Init PHY interface and start link poll state machine */
1655 static void gem_init_phy(struct gem *gp) 1655 static void gem_init_phy(struct gem *gp)
1656 { 1656 {
1657 u32 mifcfg; 1657 u32 mifcfg;
1658 1658
1659 /* Revert MIF CFG setting done on stop_phy */ 1659 /* Revert MIF CFG setting done on stop_phy */
1660 mifcfg = readl(gp->regs + MIF_CFG); 1660 mifcfg = readl(gp->regs + MIF_CFG);
1661 mifcfg &= ~MIF_CFG_BBMODE; 1661 mifcfg &= ~MIF_CFG_BBMODE;
1662 writel(mifcfg, gp->regs + MIF_CFG); 1662 writel(mifcfg, gp->regs + MIF_CFG);
1663 1663
1664 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { 1664 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
1665 int i; 1665 int i;
1666 1666
1667 /* Those delay sucks, the HW seem to love them though, I'll 1667 /* Those delay sucks, the HW seem to love them though, I'll
1668 * serisouly consider breaking some locks here to be able 1668 * serisouly consider breaking some locks here to be able
1669 * to schedule instead 1669 * to schedule instead
1670 */ 1670 */
1671 for (i = 0; i < 3; i++) { 1671 for (i = 0; i < 3; i++) {
1672 #ifdef CONFIG_PPC_PMAC 1672 #ifdef CONFIG_PPC_PMAC
1673 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); 1673 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
1674 msleep(20); 1674 msleep(20);
1675 #endif 1675 #endif
1676 /* Some PHYs used by apple have problem getting back to us, 1676 /* Some PHYs used by apple have problem getting back to us,
1677 * we do an additional reset here 1677 * we do an additional reset here
1678 */ 1678 */
1679 phy_write(gp, MII_BMCR, BMCR_RESET); 1679 phy_write(gp, MII_BMCR, BMCR_RESET);
1680 msleep(20); 1680 msleep(20);
1681 if (phy_read(gp, MII_BMCR) != 0xffff) 1681 if (phy_read(gp, MII_BMCR) != 0xffff)
1682 break; 1682 break;
1683 if (i == 2) 1683 if (i == 2)
1684 printk(KERN_WARNING "%s: GMAC PHY not responding !\n", 1684 printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
1685 gp->dev->name); 1685 gp->dev->name);
1686 } 1686 }
1687 } 1687 }
1688 1688
1689 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 1689 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
1690 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 1690 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
1691 u32 val; 1691 u32 val;
1692 1692
1693 /* Init datapath mode register. */ 1693 /* Init datapath mode register. */
1694 if (gp->phy_type == phy_mii_mdio0 || 1694 if (gp->phy_type == phy_mii_mdio0 ||
1695 gp->phy_type == phy_mii_mdio1) { 1695 gp->phy_type == phy_mii_mdio1) {
1696 val = PCS_DMODE_MGM; 1696 val = PCS_DMODE_MGM;
1697 } else if (gp->phy_type == phy_serialink) { 1697 } else if (gp->phy_type == phy_serialink) {
1698 val = PCS_DMODE_SM | PCS_DMODE_GMOE; 1698 val = PCS_DMODE_SM | PCS_DMODE_GMOE;
1699 } else { 1699 } else {
1700 val = PCS_DMODE_ESM; 1700 val = PCS_DMODE_ESM;
1701 } 1701 }
1702 1702
1703 writel(val, gp->regs + PCS_DMODE); 1703 writel(val, gp->regs + PCS_DMODE);
1704 } 1704 }
1705 1705
1706 if (gp->phy_type == phy_mii_mdio0 || 1706 if (gp->phy_type == phy_mii_mdio0 ||
1707 gp->phy_type == phy_mii_mdio1) { 1707 gp->phy_type == phy_mii_mdio1) {
1708 // XXX check for errors 1708 // XXX check for errors
1709 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); 1709 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
1710 1710
1711 /* Init PHY */ 1711 /* Init PHY */
1712 if (gp->phy_mii.def && gp->phy_mii.def->ops->init) 1712 if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
1713 gp->phy_mii.def->ops->init(&gp->phy_mii); 1713 gp->phy_mii.def->ops->init(&gp->phy_mii);
1714 } else { 1714 } else {
1715 u32 val; 1715 u32 val;
1716 int limit; 1716 int limit;
1717 1717
1718 /* Reset PCS unit. */ 1718 /* Reset PCS unit. */
1719 val = readl(gp->regs + PCS_MIICTRL); 1719 val = readl(gp->regs + PCS_MIICTRL);
1720 val |= PCS_MIICTRL_RST; 1720 val |= PCS_MIICTRL_RST;
1721 writeb(val, gp->regs + PCS_MIICTRL); 1721 writeb(val, gp->regs + PCS_MIICTRL);
1722 1722
1723 limit = 32; 1723 limit = 32;
1724 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { 1724 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1725 udelay(100); 1725 udelay(100);
1726 if (limit-- <= 0) 1726 if (limit-- <= 0)
1727 break; 1727 break;
1728 } 1728 }
1729 if (limit <= 0) 1729 if (limit <= 0)
1730 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", 1730 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1731 gp->dev->name); 1731 gp->dev->name);
1732 1732
1733 /* Make sure PCS is disabled while changing advertisement 1733 /* Make sure PCS is disabled while changing advertisement
1734 * configuration. 1734 * configuration.
1735 */ 1735 */
1736 val = readl(gp->regs + PCS_CFG); 1736 val = readl(gp->regs + PCS_CFG);
1737 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); 1737 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1738 writel(val, gp->regs + PCS_CFG); 1738 writel(val, gp->regs + PCS_CFG);
1739 1739
1740 /* Advertise all capabilities except assymetric 1740 /* Advertise all capabilities except assymetric
1741 * pause. 1741 * pause.
1742 */ 1742 */
1743 val = readl(gp->regs + PCS_MIIADV); 1743 val = readl(gp->regs + PCS_MIIADV);
1744 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | 1744 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1745 PCS_MIIADV_SP | PCS_MIIADV_AP); 1745 PCS_MIIADV_SP | PCS_MIIADV_AP);
1746 writel(val, gp->regs + PCS_MIIADV); 1746 writel(val, gp->regs + PCS_MIIADV);
1747 1747
1748 /* Enable and restart auto-negotiation, disable wrapback/loopback, 1748 /* Enable and restart auto-negotiation, disable wrapback/loopback,
1749 * and re-enable PCS. 1749 * and re-enable PCS.
1750 */ 1750 */
1751 val = readl(gp->regs + PCS_MIICTRL); 1751 val = readl(gp->regs + PCS_MIICTRL);
1752 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); 1752 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1753 val &= ~PCS_MIICTRL_WB; 1753 val &= ~PCS_MIICTRL_WB;
1754 writel(val, gp->regs + PCS_MIICTRL); 1754 writel(val, gp->regs + PCS_MIICTRL);
1755 1755
1756 val = readl(gp->regs + PCS_CFG); 1756 val = readl(gp->regs + PCS_CFG);
1757 val |= PCS_CFG_ENABLE; 1757 val |= PCS_CFG_ENABLE;
1758 writel(val, gp->regs + PCS_CFG); 1758 writel(val, gp->regs + PCS_CFG);
1759 1759
1760 /* Make sure serialink loopback is off. The meaning 1760 /* Make sure serialink loopback is off. The meaning
1761 * of this bit is logically inverted based upon whether 1761 * of this bit is logically inverted based upon whether
1762 * you are in Serialink or SERDES mode. 1762 * you are in Serialink or SERDES mode.
1763 */ 1763 */
1764 val = readl(gp->regs + PCS_SCTRL); 1764 val = readl(gp->regs + PCS_SCTRL);
1765 if (gp->phy_type == phy_serialink) 1765 if (gp->phy_type == phy_serialink)
1766 val &= ~PCS_SCTRL_LOOP; 1766 val &= ~PCS_SCTRL_LOOP;
1767 else 1767 else
1768 val |= PCS_SCTRL_LOOP; 1768 val |= PCS_SCTRL_LOOP;
1769 writel(val, gp->regs + PCS_SCTRL); 1769 writel(val, gp->regs + PCS_SCTRL);
1770 } 1770 }
1771 1771
1772 /* Default aneg parameters */ 1772 /* Default aneg parameters */
1773 gp->timer_ticks = 0; 1773 gp->timer_ticks = 0;
1774 gp->lstate = link_down; 1774 gp->lstate = link_down;
1775 netif_carrier_off(gp->dev); 1775 netif_carrier_off(gp->dev);
1776 1776
1777 /* Can I advertise gigabit here ? I'd need BCM PHY docs... */ 1777 /* Can I advertise gigabit here ? I'd need BCM PHY docs... */
1778 spin_lock_irq(&gp->lock); 1778 spin_lock_irq(&gp->lock);
1779 gem_begin_auto_negotiation(gp, NULL); 1779 gem_begin_auto_negotiation(gp, NULL);
1780 spin_unlock_irq(&gp->lock); 1780 spin_unlock_irq(&gp->lock);
1781 } 1781 }
1782 1782
/* Must be invoked under gp->lock and gp->tx_lock. */
/* Program the TX and RX DMA engines: descriptor ring base addresses
 * (TX ring first, RX ring immediately after it in the same DMA
 * block), initial kick values, pause thresholds and RX interrupt
 * blanking.
 */
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	/* RX descriptors follow the TX ring within the init block. */
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	/* PAUSE on/off FIFO occupancy thresholds, in 64-byte units. */
	val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	/* RX interrupt blanking: 5 packets, with the time component
	 * scaled by the bus clock (M66EN set means 66MHz PCI).
	 */
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}
1820 1820
1821 /* Must be invoked under gp->lock and gp->tx_lock. */ 1821 /* Must be invoked under gp->lock and gp->tx_lock. */
1822 static u32 gem_setup_multicast(struct gem *gp) 1822 static u32 gem_setup_multicast(struct gem *gp)
1823 { 1823 {
1824 u32 rxcfg = 0; 1824 u32 rxcfg = 0;
1825 int i; 1825 int i;
1826 1826
1827 if ((gp->dev->flags & IFF_ALLMULTI) || 1827 if ((gp->dev->flags & IFF_ALLMULTI) ||
1828 (gp->dev->mc_count > 256)) { 1828 (gp->dev->mc_count > 256)) {
1829 for (i=0; i<16; i++) 1829 for (i=0; i<16; i++)
1830 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); 1830 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
1831 rxcfg |= MAC_RXCFG_HFE; 1831 rxcfg |= MAC_RXCFG_HFE;
1832 } else if (gp->dev->flags & IFF_PROMISC) { 1832 } else if (gp->dev->flags & IFF_PROMISC) {
1833 rxcfg |= MAC_RXCFG_PROM; 1833 rxcfg |= MAC_RXCFG_PROM;
1834 } else { 1834 } else {
1835 u16 hash_table[16]; 1835 u16 hash_table[16];
1836 u32 crc; 1836 u32 crc;
1837 struct dev_mc_list *dmi = gp->dev->mc_list; 1837 struct dev_mc_list *dmi = gp->dev->mc_list;
1838 int i; 1838 int i;
1839 1839
1840 for (i = 0; i < 16; i++) 1840 for (i = 0; i < 16; i++)
1841 hash_table[i] = 0; 1841 hash_table[i] = 0;
1842 1842
1843 for (i = 0; i < gp->dev->mc_count; i++) { 1843 for (i = 0; i < gp->dev->mc_count; i++) {
1844 char *addrs = dmi->dmi_addr; 1844 char *addrs = dmi->dmi_addr;
1845 1845
1846 dmi = dmi->next; 1846 dmi = dmi->next;
1847 1847
1848 if (!(*addrs & 1)) 1848 if (!(*addrs & 1))
1849 continue; 1849 continue;
1850 1850
1851 crc = ether_crc_le(6, addrs); 1851 crc = ether_crc_le(6, addrs);
1852 crc >>= 24; 1852 crc >>= 24;
1853 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 1853 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
1854 } 1854 }
1855 for (i=0; i<16; i++) 1855 for (i=0; i<16; i++)
1856 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); 1856 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
1857 rxcfg |= MAC_RXCFG_HFE; 1857 rxcfg |= MAC_RXCFG_HFE;
1858 } 1858 }
1859 1859
1860 return rxcfg; 1860 return rxcfg;
1861 } 1861 }
1862 1862
/* Must be invoked under gp->lock and gp->tx_lock. */
/* Program the MAC block: inter-packet gaps and frame size limits,
 * the station address, address filters, the multicast hash, clear
 * the statistics counters, and set up the MAC interrupt masks.
 * RX/TX/XIF configs are left disabled until link-up.
 */
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	/* Seed the backoff RNG from the low MAC address bits. */
	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	/* Station address, 16 bits per register, last octets first. */
	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	/* In the same layout as ADDR0-2 these three form
	 * 01:80:c2:00:00:01 — the 802.3x flow-control address.
	 */
	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif
	/* Clear the hardware statistics/error counters. */
	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}
1945 1945
/* Must be invoked under gp->lock and gp->tx_lock. */
/* Derive the RX-FIFO occupancy levels at which PAUSE frames start
 * (on) and stop (off) being sent, and program the global burst-DMA
 * configuration, falling back to small DMA limits when infinite
 * burst does not stick.
 */
static void gem_init_pause_thresholds(struct gem *gp)
{
       	u32 cfg;

	/* Calculate pause thresholds.  Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		/* OFF = fifo size minus room for two max-size frames
		 * (64-byte aligned); ON = one more frame below that.
		 */
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}


	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
       	cfg  = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
       	cfg |= GREG_CFG_IBURST;
#endif
       	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
       	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
       	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}
1990 1990
/* Probe the chip's wiring invariants: decide the PHY interface type
 * (external MII via MDIO0/MDIO1, Serialink, or SERDES), locate the
 * MII PHY address, and read/validate the FIFO sizes.
 * Returns 0 on success, -1 when a required PHY is missing or the
 * FIFO sizes don't match the expected values for the chip variant.
 */
static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware.  The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		/* Force the MIF onto the MDIO0 pins, polling and
		 * bit-bang mode off.
		 */
		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess.  MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */

	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
		gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		/* Scan all 32 MII addresses for a responding PHY. */
		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			/* No PHY found: full GEM falls back to SERDES,
			 * RIO GEM has nothing else and fails.
			 */
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			/* Full GEM: expect 9K TX / 20K RX FIFOs. */
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			/* RIO GEM: expect 2K/2K FIFOs. */
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}
2098 2098
/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reinit_chip(struct gem *gp)
{
	/* Full re-initialization sequence.  The order below matters: the
	 * chip is reset and quiesced before the rings are (re)written and
	 * the DMA/MAC engines are brought back up.
	 */

	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}
2118 2118
2119 2119
/* Must be invoked with no lock held. */
static void gem_stop_phy(struct gem *gp, int wol)
{
	/* Shut the MAC and DMA engines down.  When @wol is set (and the
	 * chip supports it) the wake-on-lan MAGIC-packet logic is armed
	 * instead of fully resetting the chip and suspending the PHY.
	 */
	u32 mifcfg;
	unsigned long flags;

	/* Let the chip settle down a bit, it seems that helps
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		/* Load our MAC address into the match registers,
		 * 16 bits per register, low-order bytes first.
		 */
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		/* Tell the wake logic whether the link is MII (10/100),
		 * judging from the current XIF GMII bit.
		 */
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);	/* post the write */
		/* Machine sleep will die in strange ways if we
		 * dont wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	/* Stop the TX MAC, XIF and both DMA engines. */
	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		/* No need to take the lock here */

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);	/* post the write */
	}
}
2194 2194
2195 2195
static int gem_do_start(struct net_device *dev)
{
	/* Bring the chip and the netif queue up.  Caller holds pm_mutex;
	 * no spinlocks are held on entry.  Returns 0 on success or
	 * -EAGAIN when the interrupt line cannot be obtained (in which
	 * case everything done here is undone again).
	 */
	struct gem *gp = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* Enable the cell */
	gem_get_cell(gp);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	gp->running = 1;

	/* If the PHY already reported link up, reflect that on the
	 * carrier state and program the MAC link parameters now.
	 */
	if (gp->lstate == link_up) {
		netif_carrier_on(gp->dev);
		gem_set_link_modes(gp);
	}

	netif_wake_queue(gp->dev);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* NOTE(review): the irq is requested after the queue has been
	 * woken; presumably safe because gem_reinit_chip() left chip
	 * interrupts disabled - confirm.
	 */
	if (request_irq(gp->pdev->irq, gem_interrupt,
			SA_SHIRQ, dev->name, (void *)dev)) {
		printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);

		/* Roll back: stop the chip, free the rings and drop the
		 * cell reference taken above.
		 */
		spin_lock_irqsave(&gp->lock, flags);
		spin_lock(&gp->tx_lock);

		gp->running = 0;
		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);

		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

		return -EAGAIN;
	}

	return 0;
}
2242 2242
static void gem_do_stop(struct net_device *dev, int wol)
{
	/* Tear the interface down: stop the queue, mask interrupts, stop
	 * DMA, free the rings and release the irq.  When @wol is set the
	 * chip reset is skipped and the cell reference is kept so the
	 * wake-on-lan logic stays powered.  Caller holds pm_mutex.
	 */
	struct gem *gp = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	gp->running = 0;

	/* Stop netif queue */
	netif_stop_queue(dev);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* We can drop the lock now */
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Cell not needed neither if no WOL */
	if (!wol) {
		spin_lock_irqsave(&gp->lock, flags);
		gem_put_cell(gp);
		spin_unlock_irqrestore(&gp->lock, flags);
	}
}
2283 2283
2284 static void gem_reset_task(void *data) 2284 static void gem_reset_task(void *data)
2285 { 2285 {
2286 struct gem *gp = (struct gem *) data; 2286 struct gem *gp = (struct gem *) data;
2287 2287
2288 mutex_lock(&gp->pm_mutex); 2288 mutex_lock(&gp->pm_mutex);
2289 2289
2290 netif_poll_disable(gp->dev); 2290 netif_poll_disable(gp->dev);
2291 2291
2292 spin_lock_irq(&gp->lock); 2292 spin_lock_irq(&gp->lock);
2293 spin_lock(&gp->tx_lock); 2293 spin_lock(&gp->tx_lock);
2294 2294
2295 if (gp->running == 0) 2295 if (gp->running == 0)
2296 goto not_running; 2296 goto not_running;
2297 2297
2298 if (gp->running) { 2298 if (gp->running) {
2299 netif_stop_queue(gp->dev); 2299 netif_stop_queue(gp->dev);
2300 2300
2301 /* Reset the chip & rings */ 2301 /* Reset the chip & rings */
2302 gem_reinit_chip(gp); 2302 gem_reinit_chip(gp);
2303 if (gp->lstate == link_up) 2303 if (gp->lstate == link_up)
2304 gem_set_link_modes(gp); 2304 gem_set_link_modes(gp);
2305 netif_wake_queue(gp->dev); 2305 netif_wake_queue(gp->dev);
2306 } 2306 }
2307 not_running: 2307 not_running:
2308 gp->reset_task_pending = 0; 2308 gp->reset_task_pending = 0;
2309 2309
2310 spin_unlock(&gp->tx_lock); 2310 spin_unlock(&gp->tx_lock);
2311 spin_unlock_irq(&gp->lock); 2311 spin_unlock_irq(&gp->lock);
2312 2312
2313 netif_poll_enable(gp->dev); 2313 netif_poll_enable(gp->dev);
2314 2314
2315 mutex_unlock(&gp->pm_mutex); 2315 mutex_unlock(&gp->pm_mutex);
2316 } 2316 }
2317 2317
2318 2318
2319 static int gem_open(struct net_device *dev) 2319 static int gem_open(struct net_device *dev)
2320 { 2320 {
2321 struct gem *gp = dev->priv; 2321 struct gem *gp = dev->priv;
2322 int rc = 0; 2322 int rc = 0;
2323 2323
2324 mutex_lock(&gp->pm_mutex); 2324 mutex_lock(&gp->pm_mutex);
2325 2325
2326 /* We need the cell enabled */ 2326 /* We need the cell enabled */
2327 if (!gp->asleep) 2327 if (!gp->asleep)
2328 rc = gem_do_start(dev); 2328 rc = gem_do_start(dev);
2329 gp->opened = (rc == 0); 2329 gp->opened = (rc == 0);
2330 2330
2331 mutex_unlock(&gp->pm_mutex); 2331 mutex_unlock(&gp->pm_mutex);
2332 2332
2333 return rc; 2333 return rc;
2334 } 2334 }
2335 2335
2336 static int gem_close(struct net_device *dev) 2336 static int gem_close(struct net_device *dev)
2337 { 2337 {
2338 struct gem *gp = dev->priv; 2338 struct gem *gp = dev->priv;
2339 2339
2340 /* Note: we don't need to call netif_poll_disable() here because 2340 /* Note: we don't need to call netif_poll_disable() here because
2341 * our caller (dev_close) already did it for us 2341 * our caller (dev_close) already did it for us
2342 */ 2342 */
2343 2343
2344 mutex_lock(&gp->pm_mutex); 2344 mutex_lock(&gp->pm_mutex);
2345 2345
2346 gp->opened = 0; 2346 gp->opened = 0;
2347 if (!gp->asleep) 2347 if (!gp->asleep)
2348 gem_do_stop(dev, 0); 2348 gem_do_stop(dev, 0);
2349 2349
2350 mutex_unlock(&gp->pm_mutex); 2350 mutex_unlock(&gp->pm_mutex);
2351 2351
2352 return 0; 2352 return 0;
2353 } 2353 }
2354 2354
2355 #ifdef CONFIG_PM 2355 #ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* PCI suspend hook.  Detaches and stops the MAC when the device
	 * is open (remembering the WOL setting), waits for any pending
	 * reset task, then powers the PHY and the cell down.
	 */
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = dev->priv;
	unsigned long flags;

	mutex_lock(&gp->pm_mutex);

	netif_poll_disable(dev);

	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
	       dev->name,
	       (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");

	/* Keep the cell enabled during the entire operation */
	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	/* If the driver is opened, we stop the MAC */
	if (gp->opened) {
		/* Stop traffic, mark us closed */
		netif_device_detach(dev);

		/* Switch off MAC, remember WOL setting */
		gp->asleep_wol = gp->wake_on_lan;
		gem_do_stop(dev, gp->asleep_wol);
	} else
		gp->asleep_wol = 0;

	/* Mark us asleep */
	gp->asleep = 1;
	wmb();

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Now we release the mutex to not block the reset task who
	 * can take it too. We are marked asleep, so there will be no
	 * conflict here
	 */
	mutex_unlock(&gp->pm_mutex);

	/* Wait for a pending reset task to complete */
	while (gp->reset_task_pending)
		yield();
	flush_scheduled_work();

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, gp->asleep_wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Release the cell, no need to take a lock at this point since
	 * nothing else can happen now
	 */
	gem_put_cell(gp);

	return 0;
}
2419 2419
static int gem_resume(struct pci_dev *pdev)
{
	/* PCI resume hook, the mirror of gem_suspend(): re-enables the
	 * PCI device, resets the chip, re-inits the PHY and restarts
	 * the MAC when the device was open.  Returns 0 even on failure
	 * to re-enable (the device is then simply left asleep).
	 */
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = dev->priv;
	unsigned long flags;

	printk(KERN_INFO "%s: resuming\n", dev->name);

	mutex_lock(&gp->pm_mutex);

	/* Keep the cell enabled during the entire operation, no need to
	 * take a lock here tho since nothing else can happen while we are
	 * marked asleep
	 */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	if (pci_enable_device(gp->pdev)) {
		printk(KERN_ERR "%s: Can't re-enable chip !\n",
		       dev->name);
		/* Put cell and forget it for now, it will be considered as
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		mutex_unlock(&gp->pm_mutex);
		return 0;
	}
	pci_set_master(gp->pdev);

	/* Reset everything */
	gem_reset(gp);

	/* Mark us woken up */
	gp->asleep = 0;
	wmb();

	/* Bring the PHY back. Again, lock is useless at this point as
	 * nothing can be happening until we restart the whole thing
	 */
	gem_init_phy(gp);

	/* If we were opened, bring everything back */
	if (gp->opened) {
		/* Restart MAC */
		gem_do_start(dev);

		/* Re-attach net device */
		netif_device_attach(dev);

	}

	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* This function doesn't need to hold the cell, it will be held if the
	 * driver is open by gem_do_start().
	 */
	gem_put_cell(gp);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irqrestore(&gp->lock, flags);

	netif_poll_enable(dev);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}
2494 #endif /* CONFIG_PM */ 2494 #endif /* CONFIG_PM */
2495 2495
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	/* Fold the chip's hardware error counters into the software
	 * statistics, zeroing the hardware registers as we go.
	 */
	struct gem *gp = dev->priv;
	struct net_device_stats *stats = &gp->net_stats;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this
	 */
	if (gp->running) {
		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
		writel(0, gp->regs + MAC_FCSERR);

		stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
		writel(0, gp->regs + MAC_AERR);

		stats->rx_length_errors += readl(gp->regs + MAC_LERR);
		writel(0, gp->regs + MAC_LERR);

		/* NOTE(review): MAC_ECOLL is read twice below (once for
		 * tx_aborted_errors, once for collisions); if the counter
		 * ticks between the two reads the sums diverge slightly -
		 * presumably acceptable, confirm before changing.
		 */
		stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
		stats->collisions +=
			(readl(gp->regs + MAC_ECOLL) +
			 readl(gp->regs + MAC_LCOLL));
		writel(0, gp->regs + MAC_ECOLL);
		writel(0, gp->regs + MAC_LCOLL);
	}

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);

	return &gp->net_stats;
}
2530 2530
static void gem_set_multicast(struct net_device *dev)
{
	/* Reprogram the RX filter (promiscuous / multicast hash).  The
	 * RX MAC must be disabled while the configuration is rewritten,
	 * hence the polled wait below.
	 */
	struct gem *gp = dev->priv;
	u32 rxcfg, rxcfg_new;
	int limit = 10000;	/* 10us per poll -> ~100ms worst case */


	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	/* Nothing to do while the chip is stopped (e.g. during PM). */
	if (!gp->running)
		goto bail;

	netif_stop_queue(dev);

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	/* Remember the new RX config so other paths can restore it. */
	gp->mac_rx_cfg = rxcfg_new;

	/* Disable the RX MAC and wait for the disable to take effect. */
	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* Merge the new filter bits into the saved config. */
	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);

	netif_wake_queue(dev);

 bail:
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}
2571 2571
2572 /* Jumbo-grams don't seem to work :-( */ 2572 /* Jumbo-grams don't seem to work :-( */
2573 #define GEM_MIN_MTU 68 2573 #define GEM_MIN_MTU 68
2574 #if 1 2574 #if 1
2575 #define GEM_MAX_MTU 1500 2575 #define GEM_MAX_MTU 1500
2576 #else 2576 #else
2577 #define GEM_MAX_MTU 9000 2577 #define GEM_MAX_MTU 9000
2578 #endif 2578 #endif
2579 2579
2580 static int gem_change_mtu(struct net_device *dev, int new_mtu) 2580 static int gem_change_mtu(struct net_device *dev, int new_mtu)
2581 { 2581 {
2582 struct gem *gp = dev->priv; 2582 struct gem *gp = dev->priv;
2583 2583
2584 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) 2584 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
2585 return -EINVAL; 2585 return -EINVAL;
2586 2586
2587 if (!netif_running(dev) || !netif_device_present(dev)) { 2587 if (!netif_running(dev) || !netif_device_present(dev)) {
2588 /* We'll just catch it later when the 2588 /* We'll just catch it later when the
2589 * device is up'd or resumed. 2589 * device is up'd or resumed.
2590 */ 2590 */
2591 dev->mtu = new_mtu; 2591 dev->mtu = new_mtu;
2592 return 0; 2592 return 0;
2593 } 2593 }
2594 2594
2595 mutex_lock(&gp->pm_mutex); 2595 mutex_lock(&gp->pm_mutex);
2596 spin_lock_irq(&gp->lock); 2596 spin_lock_irq(&gp->lock);
2597 spin_lock(&gp->tx_lock); 2597 spin_lock(&gp->tx_lock);
2598 dev->mtu = new_mtu; 2598 dev->mtu = new_mtu;
2599 if (gp->running) { 2599 if (gp->running) {
2600 gem_reinit_chip(gp); 2600 gem_reinit_chip(gp);
2601 if (gp->lstate == link_up) 2601 if (gp->lstate == link_up)
2602 gem_set_link_modes(gp); 2602 gem_set_link_modes(gp);
2603 } 2603 }
2604 spin_unlock(&gp->tx_lock); 2604 spin_unlock(&gp->tx_lock);
2605 spin_unlock_irq(&gp->lock); 2605 spin_unlock_irq(&gp->lock);
2606 mutex_unlock(&gp->pm_mutex); 2606 mutex_unlock(&gp->pm_mutex);
2607 2607
2608 return 0; 2608 return 0;
2609 } 2609 }
2610 2610
static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = dev->priv;

	/* ethtool driver identification: name, version and PCI address.
	 * NOTE(review): the ethtool_drvinfo fields are fixed-size arrays;
	 * DRV_NAME / DRV_VERSION / pci_name() are assumed to fit - confirm.
	 */
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(gp->pdev));
}
2619 2619
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	/* ethtool get_settings: report PHY capabilities and the current
	 * link parameters.  Real values are only available for the MDIO
	 * PHY types; the PCS/serialink case returns fixed placeholders.
	 */
	struct gem *gp = dev->priv;

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			cmd->supported = gp->phy_mii.def->features;
		else
			cmd->supported = (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		spin_lock_irq(&gp->lock);
		cmd->autoneg = gp->want_autoneg;
		cmd->speed = gp->phy_mii.speed;
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (cmd->advertising == 0)
			cmd->advertising = cmd->supported;
		spin_unlock_irq(&gp->lock);
	} else { // XXX PCS ?
		cmd->supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		cmd->advertising = cmd->supported;
		cmd->speed = 0;
		cmd->duplex = cmd->port = cmd->phy_address =
			cmd->transceiver = cmd->autoneg = 0;
	}
	cmd->maxtxpkt = cmd->maxrxpkt = 0;

	return 0;
}
2665 2665
2666 static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2666 static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2667 { 2667 {
2668 struct gem *gp = dev->priv; 2668 struct gem *gp = dev->priv;
2669 2669
2670 /* Verify the settings we care about. */ 2670 /* Verify the settings we care about. */
2671 if (cmd->autoneg != AUTONEG_ENABLE && 2671 if (cmd->autoneg != AUTONEG_ENABLE &&
2672 cmd->autoneg != AUTONEG_DISABLE) 2672 cmd->autoneg != AUTONEG_DISABLE)
2673 return -EINVAL; 2673 return -EINVAL;
2674 2674
2675 if (cmd->autoneg == AUTONEG_ENABLE && 2675 if (cmd->autoneg == AUTONEG_ENABLE &&
2676 cmd->advertising == 0) 2676 cmd->advertising == 0)
2677 return -EINVAL; 2677 return -EINVAL;
2678 2678
2679 if (cmd->autoneg == AUTONEG_DISABLE && 2679 if (cmd->autoneg == AUTONEG_DISABLE &&
2680 ((cmd->speed != SPEED_1000 && 2680 ((cmd->speed != SPEED_1000 &&
2681 cmd->speed != SPEED_100 && 2681 cmd->speed != SPEED_100 &&
2682 cmd->speed != SPEED_10) || 2682 cmd->speed != SPEED_10) ||
2683 (cmd->duplex != DUPLEX_HALF && 2683 (cmd->duplex != DUPLEX_HALF &&
2684 cmd->duplex != DUPLEX_FULL))) 2684 cmd->duplex != DUPLEX_FULL)))
2685 return -EINVAL; 2685 return -EINVAL;
2686 2686
2687 /* Apply settings and restart link process. */ 2687 /* Apply settings and restart link process. */
2688 spin_lock_irq(&gp->lock); 2688 spin_lock_irq(&gp->lock);
2689 gem_get_cell(gp); 2689 gem_get_cell(gp);
2690 gem_begin_auto_negotiation(gp, cmd); 2690 gem_begin_auto_negotiation(gp, cmd);
2691 gem_put_cell(gp); 2691 gem_put_cell(gp);
2692 spin_unlock_irq(&gp->lock); 2692 spin_unlock_irq(&gp->lock);
2693 2693
2694 return 0; 2694 return 0;
2695 } 2695 }
2696 2696
2697 static int gem_nway_reset(struct net_device *dev) 2697 static int gem_nway_reset(struct net_device *dev)
2698 { 2698 {
2699 struct gem *gp = dev->priv; 2699 struct gem *gp = dev->priv;
2700 2700
2701 if (!gp->want_autoneg) 2701 if (!gp->want_autoneg)
2702 return -EINVAL; 2702 return -EINVAL;
2703 2703
2704 /* Restart link process. */ 2704 /* Restart link process. */
2705 spin_lock_irq(&gp->lock); 2705 spin_lock_irq(&gp->lock);
2706 gem_get_cell(gp); 2706 gem_get_cell(gp);
2707 gem_begin_auto_negotiation(gp, NULL); 2707 gem_begin_auto_negotiation(gp, NULL);
2708 gem_put_cell(gp); 2708 gem_put_cell(gp);
2709 spin_unlock_irq(&gp->lock); 2709 spin_unlock_irq(&gp->lock);
2710 2710
2711 return 0; 2711 return 0;
2712 } 2712 }
2713 2713
2714 static u32 gem_get_msglevel(struct net_device *dev) 2714 static u32 gem_get_msglevel(struct net_device *dev)
2715 { 2715 {
2716 struct gem *gp = dev->priv; 2716 struct gem *gp = dev->priv;
2717 return gp->msg_enable; 2717 return gp->msg_enable;
2718 } 2718 }
2719 2719
2720 static void gem_set_msglevel(struct net_device *dev, u32 value) 2720 static void gem_set_msglevel(struct net_device *dev, u32 value)
2721 { 2721 {
2722 struct gem *gp = dev->priv; 2722 struct gem *gp = dev->priv;
2723 gp->msg_enable = value; 2723 gp->msg_enable = value;
2724 } 2724 }
2725 2725
2726 2726
2727 /* Add more when I understand how to program the chip */ 2727 /* Add more when I understand how to program the chip */
2728 /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ 2728 /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
2729 2729
2730 #define WOL_SUPPORTED_MASK (WAKE_MAGIC) 2730 #define WOL_SUPPORTED_MASK (WAKE_MAGIC)
2731 2731
2732 static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2732 static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2733 { 2733 {
2734 struct gem *gp = dev->priv; 2734 struct gem *gp = dev->priv;
2735 2735
2736 /* Add more when I understand how to program the chip */ 2736 /* Add more when I understand how to program the chip */
2737 if (gp->has_wol) { 2737 if (gp->has_wol) {
2738 wol->supported = WOL_SUPPORTED_MASK; 2738 wol->supported = WOL_SUPPORTED_MASK;
2739 wol->wolopts = gp->wake_on_lan; 2739 wol->wolopts = gp->wake_on_lan;
2740 } else { 2740 } else {
2741 wol->supported = 0; 2741 wol->supported = 0;
2742 wol->wolopts = 0; 2742 wol->wolopts = 0;
2743 } 2743 }
2744 } 2744 }
2745 2745
2746 static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2746 static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2747 { 2747 {
2748 struct gem *gp = dev->priv; 2748 struct gem *gp = dev->priv;
2749 2749
2750 if (!gp->has_wol) 2750 if (!gp->has_wol)
2751 return -EOPNOTSUPP; 2751 return -EOPNOTSUPP;
2752 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; 2752 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
2753 return 0; 2753 return 0;
2754 } 2754 }
2755 2755
2756 static struct ethtool_ops gem_ethtool_ops = { 2756 static struct ethtool_ops gem_ethtool_ops = {
2757 .get_drvinfo = gem_get_drvinfo, 2757 .get_drvinfo = gem_get_drvinfo,
2758 .get_link = ethtool_op_get_link, 2758 .get_link = ethtool_op_get_link,
2759 .get_settings = gem_get_settings, 2759 .get_settings = gem_get_settings,
2760 .set_settings = gem_set_settings, 2760 .set_settings = gem_set_settings,
2761 .nway_reset = gem_nway_reset, 2761 .nway_reset = gem_nway_reset,
2762 .get_msglevel = gem_get_msglevel, 2762 .get_msglevel = gem_get_msglevel,
2763 .set_msglevel = gem_set_msglevel, 2763 .set_msglevel = gem_set_msglevel,
2764 .get_wol = gem_get_wol, 2764 .get_wol = gem_get_wol,
2765 .set_wol = gem_set_wol, 2765 .set_wol = gem_set_wol,
2766 }; 2766 };
2767 2767
2768 static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2768 static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2769 { 2769 {
2770 struct gem *gp = dev->priv; 2770 struct gem *gp = dev->priv;
2771 struct mii_ioctl_data *data = if_mii(ifr); 2771 struct mii_ioctl_data *data = if_mii(ifr);
2772 int rc = -EOPNOTSUPP; 2772 int rc = -EOPNOTSUPP;
2773 unsigned long flags; 2773 unsigned long flags;
2774 2774
2775 /* Hold the PM mutex while doing ioctl's or we may collide 2775 /* Hold the PM mutex while doing ioctl's or we may collide
2776 * with power management. 2776 * with power management.
2777 */ 2777 */
2778 mutex_lock(&gp->pm_mutex); 2778 mutex_lock(&gp->pm_mutex);
2779 2779
2780 spin_lock_irqsave(&gp->lock, flags); 2780 spin_lock_irqsave(&gp->lock, flags);
2781 gem_get_cell(gp); 2781 gem_get_cell(gp);
2782 spin_unlock_irqrestore(&gp->lock, flags); 2782 spin_unlock_irqrestore(&gp->lock, flags);
2783 2783
2784 switch (cmd) { 2784 switch (cmd) {
2785 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 2785 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2786 data->phy_id = gp->mii_phy_addr; 2786 data->phy_id = gp->mii_phy_addr;
2787 /* Fallthrough... */ 2787 /* Fallthrough... */
2788 2788
2789 case SIOCGMIIREG: /* Read MII PHY register. */ 2789 case SIOCGMIIREG: /* Read MII PHY register. */
2790 if (!gp->running) 2790 if (!gp->running)
2791 rc = -EAGAIN; 2791 rc = -EAGAIN;
2792 else { 2792 else {
2793 data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2793 data->val_out = __phy_read(gp, data->phy_id & 0x1f,
2794 data->reg_num & 0x1f); 2794 data->reg_num & 0x1f);
2795 rc = 0; 2795 rc = 0;
2796 } 2796 }
2797 break; 2797 break;
2798 2798
2799 case SIOCSMIIREG: /* Write MII PHY register. */ 2799 case SIOCSMIIREG: /* Write MII PHY register. */
2800 if (!capable(CAP_NET_ADMIN)) 2800 if (!capable(CAP_NET_ADMIN))
2801 rc = -EPERM; 2801 rc = -EPERM;
2802 else if (!gp->running) 2802 else if (!gp->running)
2803 rc = -EAGAIN; 2803 rc = -EAGAIN;
2804 else { 2804 else {
2805 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2805 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
2806 data->val_in); 2806 data->val_in);
2807 rc = 0; 2807 rc = 0;
2808 } 2808 }
2809 break; 2809 break;
2810 }; 2810 };
2811 2811
2812 spin_lock_irqsave(&gp->lock, flags); 2812 spin_lock_irqsave(&gp->lock, flags);
2813 gem_put_cell(gp); 2813 gem_put_cell(gp);
2814 spin_unlock_irqrestore(&gp->lock, flags); 2814 spin_unlock_irqrestore(&gp->lock, flags);
2815 2815
2816 mutex_unlock(&gp->pm_mutex); 2816 mutex_unlock(&gp->pm_mutex);
2817 2817
2818 return rc; 2818 return rc;
2819 } 2819 }
2820 2820
2821 #if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) 2821 #if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC))
2822 /* Fetch MAC address from vital product data of PCI ROM. */ 2822 /* Fetch MAC address from vital product data of PCI ROM. */
2823 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) 2823 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
2824 { 2824 {
2825 int this_offset; 2825 int this_offset;
2826 2826
2827 for (this_offset = 0x20; this_offset < len; this_offset++) { 2827 for (this_offset = 0x20; this_offset < len; this_offset++) {
2828 void __iomem *p = rom_base + this_offset; 2828 void __iomem *p = rom_base + this_offset;
2829 int i; 2829 int i;
2830 2830
2831 if (readb(p + 0) != 0x90 || 2831 if (readb(p + 0) != 0x90 ||
2832 readb(p + 1) != 0x00 || 2832 readb(p + 1) != 0x00 ||
2833 readb(p + 2) != 0x09 || 2833 readb(p + 2) != 0x09 ||
2834 readb(p + 3) != 0x4e || 2834 readb(p + 3) != 0x4e ||
2835 readb(p + 4) != 0x41 || 2835 readb(p + 4) != 0x41 ||
2836 readb(p + 5) != 0x06) 2836 readb(p + 5) != 0x06)
2837 continue; 2837 continue;
2838 2838
2839 this_offset += 6; 2839 this_offset += 6;
2840 p += 6; 2840 p += 6;
2841 2841
2842 for (i = 0; i < 6; i++) 2842 for (i = 0; i < 6; i++)
2843 dev_addr[i] = readb(p + i); 2843 dev_addr[i] = readb(p + i);
2844 return 1; 2844 return 1;
2845 } 2845 }
2846 return 0; 2846 return 0;
2847 } 2847 }
2848 2848
2849 static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) 2849 static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2850 { 2850 {
2851 size_t size; 2851 size_t size;
2852 void __iomem *p = pci_map_rom(pdev, &size); 2852 void __iomem *p = pci_map_rom(pdev, &size);
2853 2853
2854 if (p) { 2854 if (p) {
2855 int found; 2855 int found;
2856 2856
2857 found = readb(p) == 0x55 && 2857 found = readb(p) == 0x55 &&
2858 readb(p + 1) == 0xaa && 2858 readb(p + 1) == 0xaa &&
2859 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); 2859 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
2860 pci_unmap_rom(pdev, p); 2860 pci_unmap_rom(pdev, p);
2861 if (found) 2861 if (found)
2862 return; 2862 return;
2863 } 2863 }
2864 2864
2865 /* Sun MAC prefix then 3 random bytes. */ 2865 /* Sun MAC prefix then 3 random bytes. */
2866 dev_addr[0] = 0x08; 2866 dev_addr[0] = 0x08;
2867 dev_addr[1] = 0x00; 2867 dev_addr[1] = 0x00;
2868 dev_addr[2] = 0x20; 2868 dev_addr[2] = 0x20;
2869 get_random_bytes(dev_addr + 3, 3); 2869 get_random_bytes(dev_addr + 3, 3);
2870 return; 2870 return;
2871 } 2871 }
2872 #endif /* not Sparc and not PPC */ 2872 #endif /* not Sparc and not PPC */
2873 2873
2874 static int __devinit gem_get_device_address(struct gem *gp) 2874 static int __devinit gem_get_device_address(struct gem *gp)
2875 { 2875 {
2876 #if defined(__sparc__) || defined(CONFIG_PPC_PMAC) 2876 #if defined(__sparc__) || defined(CONFIG_PPC_PMAC)
2877 struct net_device *dev = gp->dev; 2877 struct net_device *dev = gp->dev;
2878 #endif 2878 #endif
2879 2879
2880 #if defined(__sparc__) 2880 #if defined(__sparc__)
2881 struct pci_dev *pdev = gp->pdev; 2881 struct pci_dev *pdev = gp->pdev;
2882 struct pcidev_cookie *pcp = pdev->sysdata; 2882 struct pcidev_cookie *pcp = pdev->sysdata;
2883 int node = -1; 2883 int use_idprom = 1;
2884 2884
2885 if (pcp != NULL) { 2885 if (pcp != NULL) {
2886 node = pcp->prom_node; 2886 unsigned char *addr;
2887 if (prom_getproplen(node, "local-mac-address") == 6) 2887 int len;
2888 prom_getproperty(node, "local-mac-address", 2888
2889 dev->dev_addr, 6); 2889 addr = of_get_property(pcp->prom_node, "local-mac-address",
2890 else 2890 &len);
2891 node = -1; 2891 if (addr && len == 6) {
2892 use_idprom = 0;
2893 memcpy(dev->dev_addr, addr, 6);
2894 }
2892 } 2895 }
2893 if (node == -1) 2896 if (use_idprom)
2894 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 2897 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2895 #elif defined(CONFIG_PPC_PMAC) 2898 #elif defined(CONFIG_PPC_PMAC)
2896 unsigned char *addr; 2899 unsigned char *addr;
2897 2900
2898 addr = get_property(gp->of_node, "local-mac-address", NULL); 2901 addr = get_property(gp->of_node, "local-mac-address", NULL);
2899 if (addr == NULL) { 2902 if (addr == NULL) {
2900 printk("\n"); 2903 printk("\n");
2901 printk(KERN_ERR "%s: can't get mac-address\n", dev->name); 2904 printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
2902 return -1; 2905 return -1;
2903 } 2906 }
2904 memcpy(dev->dev_addr, addr, 6); 2907 memcpy(dev->dev_addr, addr, 6);
2905 #else 2908 #else
2906 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); 2909 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
2907 #endif 2910 #endif
2908 return 0; 2911 return 0;
2909 } 2912 }
2910 2913
2911 static void gem_remove_one(struct pci_dev *pdev) 2914 static void gem_remove_one(struct pci_dev *pdev)
2912 { 2915 {
2913 struct net_device *dev = pci_get_drvdata(pdev); 2916 struct net_device *dev = pci_get_drvdata(pdev);
2914 2917
2915 if (dev) { 2918 if (dev) {
2916 struct gem *gp = dev->priv; 2919 struct gem *gp = dev->priv;
2917 2920
2918 unregister_netdev(dev); 2921 unregister_netdev(dev);
2919 2922
2920 /* Stop the link timer */ 2923 /* Stop the link timer */
2921 del_timer_sync(&gp->link_timer); 2924 del_timer_sync(&gp->link_timer);
2922 2925
2923 /* We shouldn't need any locking here */ 2926 /* We shouldn't need any locking here */
2924 gem_get_cell(gp); 2927 gem_get_cell(gp);
2925 2928
2926 /* Wait for a pending reset task to complete */ 2929 /* Wait for a pending reset task to complete */
2927 while (gp->reset_task_pending) 2930 while (gp->reset_task_pending)
2928 yield(); 2931 yield();
2929 flush_scheduled_work(); 2932 flush_scheduled_work();
2930 2933
2931 /* Shut the PHY down */ 2934 /* Shut the PHY down */
2932 gem_stop_phy(gp, 0); 2935 gem_stop_phy(gp, 0);
2933 2936
2934 gem_put_cell(gp); 2937 gem_put_cell(gp);
2935 2938
2936 /* Make sure bus master is disabled */ 2939 /* Make sure bus master is disabled */
2937 pci_disable_device(gp->pdev); 2940 pci_disable_device(gp->pdev);
2938 2941
2939 /* Free resources */ 2942 /* Free resources */
2940 pci_free_consistent(pdev, 2943 pci_free_consistent(pdev,
2941 sizeof(struct gem_init_block), 2944 sizeof(struct gem_init_block),
2942 gp->init_block, 2945 gp->init_block,
2943 gp->gblock_dvma); 2946 gp->gblock_dvma);
2944 iounmap(gp->regs); 2947 iounmap(gp->regs);
2945 pci_release_regions(pdev); 2948 pci_release_regions(pdev);
2946 free_netdev(dev); 2949 free_netdev(dev);
2947 2950
2948 pci_set_drvdata(pdev, NULL); 2951 pci_set_drvdata(pdev, NULL);
2949 } 2952 }
2950 } 2953 }
2951 2954
2952 static int __devinit gem_init_one(struct pci_dev *pdev, 2955 static int __devinit gem_init_one(struct pci_dev *pdev,
2953 const struct pci_device_id *ent) 2956 const struct pci_device_id *ent)
2954 { 2957 {
2955 static int gem_version_printed = 0; 2958 static int gem_version_printed = 0;
2956 unsigned long gemreg_base, gemreg_len; 2959 unsigned long gemreg_base, gemreg_len;
2957 struct net_device *dev; 2960 struct net_device *dev;
2958 struct gem *gp; 2961 struct gem *gp;
2959 int i, err, pci_using_dac; 2962 int i, err, pci_using_dac;
2960 2963
2961 if (gem_version_printed++ == 0) 2964 if (gem_version_printed++ == 0)
2962 printk(KERN_INFO "%s", version); 2965 printk(KERN_INFO "%s", version);
2963 2966
2964 /* Apple gmac note: during probe, the chip is powered up by 2967 /* Apple gmac note: during probe, the chip is powered up by
2965 * the arch code to allow the code below to work (and to let 2968 * the arch code to allow the code below to work (and to let
2966 * the chip be probed on the config space. It won't stay powered 2969 * the chip be probed on the config space. It won't stay powered
2967 * up until the interface is brought up however, so we can't rely 2970 * up until the interface is brought up however, so we can't rely
2968 * on register configuration done at this point. 2971 * on register configuration done at this point.
2969 */ 2972 */
2970 err = pci_enable_device(pdev); 2973 err = pci_enable_device(pdev);
2971 if (err) { 2974 if (err) {
2972 printk(KERN_ERR PFX "Cannot enable MMIO operation, " 2975 printk(KERN_ERR PFX "Cannot enable MMIO operation, "
2973 "aborting.\n"); 2976 "aborting.\n");
2974 return err; 2977 return err;
2975 } 2978 }
2976 pci_set_master(pdev); 2979 pci_set_master(pdev);
2977 2980
2978 /* Configure DMA attributes. */ 2981 /* Configure DMA attributes. */
2979 2982
2980 /* All of the GEM documentation states that 64-bit DMA addressing 2983 /* All of the GEM documentation states that 64-bit DMA addressing
2981 * is fully supported and should work just fine. However the 2984 * is fully supported and should work just fine. However the
2982 * front end for RIO based GEMs is different and only supports 2985 * front end for RIO based GEMs is different and only supports
2983 * 32-bit addressing. 2986 * 32-bit addressing.
2984 * 2987 *
2985 * For now we assume the various PPC GEMs are 32-bit only as well. 2988 * For now we assume the various PPC GEMs are 32-bit only as well.
2986 */ 2989 */
2987 if (pdev->vendor == PCI_VENDOR_ID_SUN && 2990 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
2988 pdev->device == PCI_DEVICE_ID_SUN_GEM && 2991 pdev->device == PCI_DEVICE_ID_SUN_GEM &&
2989 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 2992 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2990 pci_using_dac = 1; 2993 pci_using_dac = 1;
2991 } else { 2994 } else {
2992 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 2995 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2993 if (err) { 2996 if (err) {
2994 printk(KERN_ERR PFX "No usable DMA configuration, " 2997 printk(KERN_ERR PFX "No usable DMA configuration, "
2995 "aborting.\n"); 2998 "aborting.\n");
2996 goto err_disable_device; 2999 goto err_disable_device;
2997 } 3000 }
2998 pci_using_dac = 0; 3001 pci_using_dac = 0;
2999 } 3002 }
3000 3003
3001 gemreg_base = pci_resource_start(pdev, 0); 3004 gemreg_base = pci_resource_start(pdev, 0);
3002 gemreg_len = pci_resource_len(pdev, 0); 3005 gemreg_len = pci_resource_len(pdev, 0);
3003 3006
3004 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3007 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3005 printk(KERN_ERR PFX "Cannot find proper PCI device " 3008 printk(KERN_ERR PFX "Cannot find proper PCI device "
3006 "base address, aborting.\n"); 3009 "base address, aborting.\n");
3007 err = -ENODEV; 3010 err = -ENODEV;
3008 goto err_disable_device; 3011 goto err_disable_device;
3009 } 3012 }
3010 3013
3011 dev = alloc_etherdev(sizeof(*gp)); 3014 dev = alloc_etherdev(sizeof(*gp));
3012 if (!dev) { 3015 if (!dev) {
3013 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 3016 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
3014 err = -ENOMEM; 3017 err = -ENOMEM;
3015 goto err_disable_device; 3018 goto err_disable_device;
3016 } 3019 }
3017 SET_MODULE_OWNER(dev); 3020 SET_MODULE_OWNER(dev);
3018 SET_NETDEV_DEV(dev, &pdev->dev); 3021 SET_NETDEV_DEV(dev, &pdev->dev);
3019 3022
3020 gp = dev->priv; 3023 gp = dev->priv;
3021 3024
3022 err = pci_request_regions(pdev, DRV_NAME); 3025 err = pci_request_regions(pdev, DRV_NAME);
3023 if (err) { 3026 if (err) {
3024 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 3027 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
3025 "aborting.\n"); 3028 "aborting.\n");
3026 goto err_out_free_netdev; 3029 goto err_out_free_netdev;
3027 } 3030 }
3028 3031
3029 gp->pdev = pdev; 3032 gp->pdev = pdev;
3030 dev->base_addr = (long) pdev; 3033 dev->base_addr = (long) pdev;
3031 gp->dev = dev; 3034 gp->dev = dev;
3032 3035
3033 gp->msg_enable = DEFAULT_MSG; 3036 gp->msg_enable = DEFAULT_MSG;
3034 3037
3035 spin_lock_init(&gp->lock); 3038 spin_lock_init(&gp->lock);
3036 spin_lock_init(&gp->tx_lock); 3039 spin_lock_init(&gp->tx_lock);
3037 mutex_init(&gp->pm_mutex); 3040 mutex_init(&gp->pm_mutex);
3038 3041
3039 init_timer(&gp->link_timer); 3042 init_timer(&gp->link_timer);
3040 gp->link_timer.function = gem_link_timer; 3043 gp->link_timer.function = gem_link_timer;
3041 gp->link_timer.data = (unsigned long) gp; 3044 gp->link_timer.data = (unsigned long) gp;
3042 3045
3043 INIT_WORK(&gp->reset_task, gem_reset_task, gp); 3046 INIT_WORK(&gp->reset_task, gem_reset_task, gp);
3044 3047
3045 gp->lstate = link_down; 3048 gp->lstate = link_down;
3046 gp->timer_ticks = 0; 3049 gp->timer_ticks = 0;
3047 netif_carrier_off(dev); 3050 netif_carrier_off(dev);
3048 3051
3049 gp->regs = ioremap(gemreg_base, gemreg_len); 3052 gp->regs = ioremap(gemreg_base, gemreg_len);
3050 if (gp->regs == 0UL) { 3053 if (gp->regs == 0UL) {
3051 printk(KERN_ERR PFX "Cannot map device registers, " 3054 printk(KERN_ERR PFX "Cannot map device registers, "
3052 "aborting.\n"); 3055 "aborting.\n");
3053 err = -EIO; 3056 err = -EIO;
3054 goto err_out_free_res; 3057 goto err_out_free_res;
3055 } 3058 }
3056 3059
3057 /* On Apple, we want a reference to the Open Firmware device-tree 3060 /* On Apple, we want a reference to the Open Firmware device-tree
3058 * node. We use it for clock control. 3061 * node. We use it for clock control.
3059 */ 3062 */
3060 #ifdef CONFIG_PPC_PMAC 3063 #ifdef CONFIG_PPC_PMAC
3061 gp->of_node = pci_device_to_OF_node(pdev); 3064 gp->of_node = pci_device_to_OF_node(pdev);
3062 #endif 3065 #endif
3063 3066
3064 /* Only Apple version supports WOL afaik */ 3067 /* Only Apple version supports WOL afaik */
3065 if (pdev->vendor == PCI_VENDOR_ID_APPLE) 3068 if (pdev->vendor == PCI_VENDOR_ID_APPLE)
3066 gp->has_wol = 1; 3069 gp->has_wol = 1;
3067 3070
3068 /* Make sure cell is enabled */ 3071 /* Make sure cell is enabled */
3069 gem_get_cell(gp); 3072 gem_get_cell(gp);
3070 3073
3071 /* Make sure everything is stopped and in init state */ 3074 /* Make sure everything is stopped and in init state */
3072 gem_reset(gp); 3075 gem_reset(gp);
3073 3076
3074 /* Fill up the mii_phy structure (even if we won't use it) */ 3077 /* Fill up the mii_phy structure (even if we won't use it) */
3075 gp->phy_mii.dev = dev; 3078 gp->phy_mii.dev = dev;
3076 gp->phy_mii.mdio_read = _phy_read; 3079 gp->phy_mii.mdio_read = _phy_read;
3077 gp->phy_mii.mdio_write = _phy_write; 3080 gp->phy_mii.mdio_write = _phy_write;
3078 #ifdef CONFIG_PPC_PMAC 3081 #ifdef CONFIG_PPC_PMAC
3079 gp->phy_mii.platform_data = gp->of_node; 3082 gp->phy_mii.platform_data = gp->of_node;
3080 #endif 3083 #endif
3081 /* By default, we start with autoneg */ 3084 /* By default, we start with autoneg */
3082 gp->want_autoneg = 1; 3085 gp->want_autoneg = 1;
3083 3086
3084 /* Check fifo sizes, PHY type, etc... */ 3087 /* Check fifo sizes, PHY type, etc... */
3085 if (gem_check_invariants(gp)) { 3088 if (gem_check_invariants(gp)) {
3086 err = -ENODEV; 3089 err = -ENODEV;
3087 goto err_out_iounmap; 3090 goto err_out_iounmap;
3088 } 3091 }
3089 3092
3090 /* It is guaranteed that the returned buffer will be at least 3093 /* It is guaranteed that the returned buffer will be at least
3091 * PAGE_SIZE aligned. 3094 * PAGE_SIZE aligned.
3092 */ 3095 */
3093 gp->init_block = (struct gem_init_block *) 3096 gp->init_block = (struct gem_init_block *)
3094 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 3097 pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
3095 &gp->gblock_dvma); 3098 &gp->gblock_dvma);
3096 if (!gp->init_block) { 3099 if (!gp->init_block) {
3097 printk(KERN_ERR PFX "Cannot allocate init block, " 3100 printk(KERN_ERR PFX "Cannot allocate init block, "
3098 "aborting.\n"); 3101 "aborting.\n");
3099 err = -ENOMEM; 3102 err = -ENOMEM;
3100 goto err_out_iounmap; 3103 goto err_out_iounmap;
3101 } 3104 }
3102 3105
3103 if (gem_get_device_address(gp)) 3106 if (gem_get_device_address(gp))
3104 goto err_out_free_consistent; 3107 goto err_out_free_consistent;
3105 3108
3106 dev->open = gem_open; 3109 dev->open = gem_open;
3107 dev->stop = gem_close; 3110 dev->stop = gem_close;
3108 dev->hard_start_xmit = gem_start_xmit; 3111 dev->hard_start_xmit = gem_start_xmit;
3109 dev->get_stats = gem_get_stats; 3112 dev->get_stats = gem_get_stats;
3110 dev->set_multicast_list = gem_set_multicast; 3113 dev->set_multicast_list = gem_set_multicast;
3111 dev->do_ioctl = gem_ioctl; 3114 dev->do_ioctl = gem_ioctl;
3112 dev->poll = gem_poll; 3115 dev->poll = gem_poll;
3113 dev->weight = 64; 3116 dev->weight = 64;
3114 dev->ethtool_ops = &gem_ethtool_ops; 3117 dev->ethtool_ops = &gem_ethtool_ops;
3115 dev->tx_timeout = gem_tx_timeout; 3118 dev->tx_timeout = gem_tx_timeout;
3116 dev->watchdog_timeo = 5 * HZ; 3119 dev->watchdog_timeo = 5 * HZ;
3117 dev->change_mtu = gem_change_mtu; 3120 dev->change_mtu = gem_change_mtu;
3118 dev->irq = pdev->irq; 3121 dev->irq = pdev->irq;
3119 dev->dma = 0; 3122 dev->dma = 0;
3120 #ifdef CONFIG_NET_POLL_CONTROLLER 3123 #ifdef CONFIG_NET_POLL_CONTROLLER
3121 dev->poll_controller = gem_poll_controller; 3124 dev->poll_controller = gem_poll_controller;
3122 #endif 3125 #endif
3123 3126
3124 /* Set that now, in case PM kicks in now */ 3127 /* Set that now, in case PM kicks in now */
3125 pci_set_drvdata(pdev, dev); 3128 pci_set_drvdata(pdev, dev);
3126 3129
3127 /* Detect & init PHY, start autoneg, we release the cell now 3130 /* Detect & init PHY, start autoneg, we release the cell now
3128 * too, it will be managed by whoever needs it 3131 * too, it will be managed by whoever needs it
3129 */ 3132 */
3130 gem_init_phy(gp); 3133 gem_init_phy(gp);
3131 3134
3132 spin_lock_irq(&gp->lock); 3135 spin_lock_irq(&gp->lock);
3133 gem_put_cell(gp); 3136 gem_put_cell(gp);
3134 spin_unlock_irq(&gp->lock); 3137 spin_unlock_irq(&gp->lock);
3135 3138
3136 /* Register with kernel */ 3139 /* Register with kernel */
3137 if (register_netdev(dev)) { 3140 if (register_netdev(dev)) {
3138 printk(KERN_ERR PFX "Cannot register net device, " 3141 printk(KERN_ERR PFX "Cannot register net device, "
3139 "aborting.\n"); 3142 "aborting.\n");
3140 err = -ENOMEM; 3143 err = -ENOMEM;
3141 goto err_out_free_consistent; 3144 goto err_out_free_consistent;
3142 } 3145 }
3143 3146
3144 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ", 3147 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ",
3145 dev->name); 3148 dev->name);
3146 for (i = 0; i < 6; i++) 3149 for (i = 0; i < 6; i++)
3147 printk("%2.2x%c", dev->dev_addr[i], 3150 printk("%2.2x%c", dev->dev_addr[i],
3148 i == 5 ? ' ' : ':'); 3151 i == 5 ? ' ' : ':');
3149 printk("\n"); 3152 printk("\n");
3150 3153
3151 if (gp->phy_type == phy_mii_mdio0 || 3154 if (gp->phy_type == phy_mii_mdio0 ||
3152 gp->phy_type == phy_mii_mdio1) 3155 gp->phy_type == phy_mii_mdio1)
3153 printk(KERN_INFO "%s: Found %s PHY\n", dev->name, 3156 printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
3154 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3157 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3155 3158
3156 /* GEM can do it all... */ 3159 /* GEM can do it all... */
3157 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3160 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
3158 if (pci_using_dac) 3161 if (pci_using_dac)
3159 dev->features |= NETIF_F_HIGHDMA; 3162 dev->features |= NETIF_F_HIGHDMA;
3160 3163
3161 return 0; 3164 return 0;
3162 3165
3163 err_out_free_consistent: 3166 err_out_free_consistent:
3164 gem_remove_one(pdev); 3167 gem_remove_one(pdev);
3165 err_out_iounmap: 3168 err_out_iounmap:
3166 gem_put_cell(gp); 3169 gem_put_cell(gp);
3167 iounmap(gp->regs); 3170 iounmap(gp->regs);
3168 3171
3169 err_out_free_res: 3172 err_out_free_res:
3170 pci_release_regions(pdev); 3173 pci_release_regions(pdev);
3171 3174
3172 err_out_free_netdev: 3175 err_out_free_netdev:
3173 free_netdev(dev); 3176 free_netdev(dev);
3174 err_disable_device: 3177 err_disable_device:
3175 pci_disable_device(pdev); 3178 pci_disable_device(pdev);
3176 return err; 3179 return err;
3177 3180
3178 } 3181 }
3179 3182
3180 3183
3181 static struct pci_driver gem_driver = { 3184 static struct pci_driver gem_driver = {
3182 .name = GEM_MODULE_NAME, 3185 .name = GEM_MODULE_NAME,
3183 .id_table = gem_pci_tbl, 3186 .id_table = gem_pci_tbl,
3184 .probe = gem_init_one, 3187 .probe = gem_init_one,
3185 .remove = gem_remove_one, 3188 .remove = gem_remove_one,
3186 #ifdef CONFIG_PM 3189 #ifdef CONFIG_PM
3187 .suspend = gem_suspend, 3190 .suspend = gem_suspend,
3188 .resume = gem_resume, 3191 .resume = gem_resume,
3189 #endif /* CONFIG_PM */ 3192 #endif /* CONFIG_PM */
3190 }; 3193 };
3191 3194
3192 static int __init gem_init(void) 3195 static int __init gem_init(void)
3193 { 3196 {
3194 return pci_module_init(&gem_driver); 3197 return pci_module_init(&gem_driver);
3195 } 3198 }
3196 3199
3197 static void __exit gem_cleanup(void) 3200 static void __exit gem_cleanup(void)
3198 { 3201 {
3199 pci_unregister_driver(&gem_driver); 3202 pci_unregister_driver(&gem_driver);
3200 } 3203 }
3201 3204
3202 module_init(gem_init); 3205 module_init(gem_init);
3203 module_exit(gem_cleanup); 3206 module_exit(gem_cleanup);
3204 3207
drivers/net/sunhme.c
1 /* $Id: sunhme.c,v 1.124 2002/01/15 06:25:51 davem Exp $ 1 /* $Id: sunhme.c,v 1.124 2002/01/15 06:25:51 davem Exp $
2 * sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching, 2 * sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
3 * auto carrier detecting ethernet driver. Also known as the 3 * auto carrier detecting ethernet driver. Also known as the
4 * "Happy Meal Ethernet" found on SunSwift SBUS cards. 4 * "Happy Meal Ethernet" found on SunSwift SBUS cards.
5 * 5 *
6 * Copyright (C) 1996, 1998, 1999, 2002, 2003 David S. Miller (davem@redhat.com) 6 * Copyright (C) 1996, 1998, 1999, 2002, 2003 David S. Miller (davem@redhat.com)
7 * 7 *
8 * Changes : 8 * Changes :
9 * 2000/11/11 Willy Tarreau <willy AT meta-x.org> 9 * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
10 * - port to non-sparc architectures. Tested only on x86 and 10 * - port to non-sparc architectures. Tested only on x86 and
11 * only currently works with QFE PCI cards. 11 * only currently works with QFE PCI cards.
12 * - ability to specify the MAC address at module load time by passing this 12 * - ability to specify the MAC address at module load time by passing this
13 * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50 13 * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
14 */ 14 */
15 15
16 #include <linux/config.h> 16 #include <linux/config.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/kernel.h> 18 #include <linux/kernel.h>
19 #include <linux/types.h> 19 #include <linux/types.h>
20 #include <linux/fcntl.h> 20 #include <linux/fcntl.h>
21 #include <linux/interrupt.h> 21 #include <linux/interrupt.h>
22 #include <linux/ioport.h> 22 #include <linux/ioport.h>
23 #include <linux/in.h> 23 #include <linux/in.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 #include <linux/string.h> 25 #include <linux/string.h>
26 #include <linux/delay.h> 26 #include <linux/delay.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 #include <linux/ethtool.h> 28 #include <linux/ethtool.h>
29 #include <linux/mii.h> 29 #include <linux/mii.h>
30 #include <linux/crc32.h> 30 #include <linux/crc32.h>
31 #include <linux/random.h> 31 #include <linux/random.h>
32 #include <linux/errno.h> 32 #include <linux/errno.h>
33 #include <linux/netdevice.h> 33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h> 34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h> 35 #include <linux/skbuff.h>
36 #include <linux/bitops.h> 36 #include <linux/bitops.h>
37 37
38 #include <asm/system.h> 38 #include <asm/system.h>
39 #include <asm/io.h> 39 #include <asm/io.h>
40 #include <asm/dma.h> 40 #include <asm/dma.h>
41 #include <asm/byteorder.h> 41 #include <asm/byteorder.h>
42 42
43 #ifdef __sparc__ 43 #ifdef __sparc__
44 #include <asm/idprom.h> 44 #include <asm/idprom.h>
45 #include <asm/sbus.h> 45 #include <asm/sbus.h>
46 #include <asm/openprom.h> 46 #include <asm/openprom.h>
47 #include <asm/oplib.h> 47 #include <asm/oplib.h>
48 #include <asm/auxio.h> 48 #include <asm/auxio.h>
49 #ifndef __sparc_v9__ 49 #ifndef __sparc_v9__
50 #include <asm/io-unit.h> 50 #include <asm/io-unit.h>
51 #endif 51 #endif
52 #endif 52 #endif
53 #include <asm/uaccess.h> 53 #include <asm/uaccess.h>
54 54
55 #include <asm/pgtable.h> 55 #include <asm/pgtable.h>
56 #include <asm/irq.h> 56 #include <asm/irq.h>
57 57
58 #ifdef CONFIG_PCI 58 #ifdef CONFIG_PCI
59 #include <linux/pci.h> 59 #include <linux/pci.h>
60 #ifdef __sparc__ 60 #ifdef __sparc__
61 #include <asm/pbm.h> 61 #include <asm/pbm.h>
62 #endif 62 #endif
63 #endif 63 #endif
64 64
65 #include "sunhme.h" 65 #include "sunhme.h"
66 66
67 #define DRV_NAME "sunhme" 67 #define DRV_NAME "sunhme"
68 #define DRV_VERSION "2.02" 68 #define DRV_VERSION "2.02"
69 #define DRV_RELDATE "8/24/03" 69 #define DRV_RELDATE "8/24/03"
70 #define DRV_AUTHOR "David S. Miller (davem@redhat.com)" 70 #define DRV_AUTHOR "David S. Miller (davem@redhat.com)"
71 71
72 static char version[] = 72 static char version[] =
73 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 73 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
74 74
75 MODULE_VERSION(DRV_VERSION); 75 MODULE_VERSION(DRV_VERSION);
76 MODULE_AUTHOR(DRV_AUTHOR); 76 MODULE_AUTHOR(DRV_AUTHOR);
77 MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver"); 77 MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
78 MODULE_LICENSE("GPL"); 78 MODULE_LICENSE("GPL");
79 79
80 static int macaddr[6]; 80 static int macaddr[6];
81 81
82 /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */ 82 /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
83 module_param_array(macaddr, int, NULL, 0); 83 module_param_array(macaddr, int, NULL, 0);
84 MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set"); 84 MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
85 85
86 static struct happy_meal *root_happy_dev; 86 static struct happy_meal *root_happy_dev;
87 87
88 #ifdef CONFIG_SBUS 88 #ifdef CONFIG_SBUS
89 static struct quattro *qfe_sbus_list; 89 static struct quattro *qfe_sbus_list;
90 #endif 90 #endif
91 91
92 #ifdef CONFIG_PCI 92 #ifdef CONFIG_PCI
93 static struct quattro *qfe_pci_list; 93 static struct quattro *qfe_pci_list;
94 #endif 94 #endif
95 95
96 #undef HMEDEBUG 96 #undef HMEDEBUG
97 #undef SXDEBUG 97 #undef SXDEBUG
98 #undef RXDEBUG 98 #undef RXDEBUG
99 #undef TXDEBUG 99 #undef TXDEBUG
100 #undef TXLOGGING 100 #undef TXLOGGING
101 101
102 #ifdef TXLOGGING 102 #ifdef TXLOGGING
/* One entry of the TX debug trace ring (compiled in only under TXLOGGING).
 * Snapshots the TX ring head/tail and a reason code at each event.
 */
struct hme_tx_logent {
	unsigned int tstamp;		/* low 32 bits of jiffies at log time */
	int tx_new, tx_old;		/* TX ring producer/consumer indices */
	unsigned int action;		/* bitmask of TXLOG_ACTION_* below */
#define TXLOG_ACTION_IRQ	0x01
#define TXLOG_ACTION_TXMIT	0x02
#define TXLOG_ACTION_TBUSY	0x04
#define TXLOG_ACTION_NBUFS	0x08
	unsigned int status;		/* caller-supplied status word */
};
/* Must stay a power of two: indexing below masks with (TX_LOG_LEN - 1). */
#define TX_LOG_LEN	128
static struct hme_tx_logent tx_log[TX_LOG_LEN];
static int txlog_cur_entry;		/* next slot to overwrite */
/* Append one entry to the TX trace ring, overwriting the oldest slot.
 * NOTE(review): save_and_cli()/restore_flags() are the old global-IRQ
 * primitives (pre-local_irq_save); fine for this era of kernel, but worth
 * confirming against the tree's interrupt API if this debug code is revived.
 */
static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
{
	struct hme_tx_logent *tlp;
	unsigned long flags;

	save_and_cli(flags);
	tlp = &tx_log[txlog_cur_entry];
	tlp->tstamp = (unsigned int)jiffies;
	tlp->tx_new = hp->tx_new;
	tlp->tx_old = hp->tx_old;
	tlp->action = a;
	tlp->status = s;
	/* Power-of-two ring: wrap with a mask instead of a modulo. */
	txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
	restore_flags(flags);
}
131 static __inline__ void tx_dump_log(void) 131 static __inline__ void tx_dump_log(void)
132 { 132 {
133 int i, this; 133 int i, this;
134 134
135 this = txlog_cur_entry; 135 this = txlog_cur_entry;
136 for (i = 0; i < TX_LOG_LEN; i++) { 136 for (i = 0; i < TX_LOG_LEN; i++) {
137 printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i, 137 printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
138 tx_log[this].tstamp, 138 tx_log[this].tstamp,
139 tx_log[this].tx_new, tx_log[this].tx_old, 139 tx_log[this].tx_new, tx_log[this].tx_old,
140 tx_log[this].action, tx_log[this].status); 140 tx_log[this].action, tx_log[this].status);
141 this = (this + 1) & (TX_LOG_LEN - 1); 141 this = (this + 1) & (TX_LOG_LEN - 1);
142 } 142 }
143 } 143 }
144 static __inline__ void tx_dump_ring(struct happy_meal *hp) 144 static __inline__ void tx_dump_ring(struct happy_meal *hp)
145 { 145 {
146 struct hmeal_init_block *hb = hp->happy_block; 146 struct hmeal_init_block *hb = hp->happy_block;
147 struct happy_meal_txd *tp = &hb->happy_meal_txd[0]; 147 struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
148 int i; 148 int i;
149 149
150 for (i = 0; i < TX_RING_SIZE; i+=4) { 150 for (i = 0; i < TX_RING_SIZE; i+=4) {
151 printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n", 151 printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
152 i, i + 4, 152 i, i + 4,
153 le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr), 153 le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
154 le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr), 154 le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
155 le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr), 155 le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
156 le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr)); 156 le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
157 } 157 }
158 } 158 }
159 #else 159 #else
160 #define tx_add_log(hp, a, s) do { } while(0) 160 #define tx_add_log(hp, a, s) do { } while(0)
161 #define tx_dump_log() do { } while(0) 161 #define tx_dump_log() do { } while(0)
162 #define tx_dump_ring(hp) do { } while(0) 162 #define tx_dump_ring(hp) do { } while(0)
163 #endif 163 #endif
164 164
165 #ifdef HMEDEBUG 165 #ifdef HMEDEBUG
166 #define HMD(x) printk x 166 #define HMD(x) printk x
167 #else 167 #else
168 #define HMD(x) 168 #define HMD(x)
169 #endif 169 #endif
170 170
171 /* #define AUTO_SWITCH_DEBUG */ 171 /* #define AUTO_SWITCH_DEBUG */
172 172
173 #ifdef AUTO_SWITCH_DEBUG 173 #ifdef AUTO_SWITCH_DEBUG
174 #define ASD(x) printk x 174 #define ASD(x) printk x
175 #else 175 #else
176 #define ASD(x) 176 #define ASD(x)
177 #endif 177 #endif
178 178
179 #define DEFAULT_IPG0 16 /* For lance-mode only */ 179 #define DEFAULT_IPG0 16 /* For lance-mode only */
180 #define DEFAULT_IPG1 8 /* For all modes */ 180 #define DEFAULT_IPG1 8 /* For all modes */
181 #define DEFAULT_IPG2 4 /* For all modes */ 181 #define DEFAULT_IPG2 4 /* For all modes */
182 #define DEFAULT_JAMSIZE 4 /* Toe jam */ 182 #define DEFAULT_JAMSIZE 4 /* Toe jam */
183 183
184 #if defined(CONFIG_PCI) && defined(MODULE) 184 #if defined(CONFIG_PCI) && defined(MODULE)
185 /* This happy_pci_ids is declared __initdata because it is only used 185 /* This happy_pci_ids is declared __initdata because it is only used
186 as an advisory to depmod. If this is ported to the new PCI interface 186 as an advisory to depmod. If this is ported to the new PCI interface
187 where it could be referenced at any time due to hot plugging, 187 where it could be referenced at any time due to hot plugging,
188 the __initdata reference should be removed. */ 188 the __initdata reference should be removed. */
189 189
/* Match every Sun HME PCI function regardless of subsystem IDs; used only
 * as a module alias table for depmod (see comment above).
 */
static struct pci_device_id happymeal_pci_ids[] = {
	{
		.vendor = PCI_VENDOR_ID_SUN,
		.device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }			/* Terminating entry */
};
199 199
200 MODULE_DEVICE_TABLE(pci, happymeal_pci_ids); 200 MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
201 201
202 #endif 202 #endif
203 203
204 /* NOTE: In the descriptor writes one _must_ write the address 204 /* NOTE: In the descriptor writes one _must_ write the address
205 * member _first_. The card must not be allowed to see 205 * member _first_. The card must not be allowed to see
206 * the updated descriptor flags until the address is 206 * the updated descriptor flags until the address is
207 * correct. I've added a write memory barrier between 207 * correct. I've added a write memory barrier between
208 * the two stores so that I can sleep well at night... -DaveM 208 * the two stores so that I can sleep well at night... -DaveM
209 */ 209 */
210 210
211 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 211 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
/* SBUS flavour of the 32-bit register write; installed into hp->write32
 * when a dual SBUS+PCI kernel probes an SBUS card.
 */
static void sbus_hme_write32(void __iomem *reg, u32 val)
{
	sbus_writel(val, reg);
}
216 216
/* SBUS flavour of the 32-bit register read (hp->read32 hook). */
static u32 sbus_hme_read32(void __iomem *reg)
{
	return sbus_readl(reg);
}
221 221
/* Fill an RX descriptor, SBUS (native-endian) layout.  Address must be
 * visible before flags per the NOTE above; the wmb() enforces that order.
 */
static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
	rxd->rx_addr = addr;
	wmb();
	rxd->rx_flags = flags;
}
228 228
/* Fill a TX descriptor, SBUS (native-endian) layout; same address-before-
 * flags ordering rule as the RX variant.
 */
static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
	txd->tx_addr = addr;
	wmb();
	txd->tx_flags = flags;
}
235 235
/* Read a descriptor word on SBUS: descriptors are kept native-endian,
 * so no byte swap is needed.
 */
static u32 sbus_hme_read_desc32(u32 *p)
{
	return *p;
}
240 240
/* PCI flavour of the 32-bit register write (hp->write32 hook). */
static void pci_hme_write32(void __iomem *reg, u32 val)
{
	writel(val, reg);
}
245 245
/* PCI flavour of the 32-bit register read (hp->read32 hook). */
static u32 pci_hme_read32(void __iomem *reg)
{
	return readl(reg);
}
250 250
/* Fill an RX descriptor, PCI layout: descriptors are little-endian in
 * memory, hence the cpu_to_le32() on both words.  Address is written
 * before flags, with a wmb() between, per the ordering NOTE above.
 */
static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
	rxd->rx_addr = cpu_to_le32(addr);
	wmb();
	rxd->rx_flags = cpu_to_le32(flags);
}
257 257
/* Fill a TX descriptor, PCI (little-endian) layout; same address-before-
 * flags ordering rule as the RX variant.
 */
static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
	txd->tx_addr = cpu_to_le32(addr);
	wmb();
	txd->tx_flags = cpu_to_le32(flags);
}
264 264
265 static u32 pci_hme_read_desc32(u32 *p) 265 static u32 pci_hme_read_desc32(u32 *p)
266 { 266 {
267 return cpu_to_le32p(p); 267 return cpu_to_le32p(p);
268 } 268 }
269 269
270 #define hme_write32(__hp, __reg, __val) \ 270 #define hme_write32(__hp, __reg, __val) \
271 ((__hp)->write32((__reg), (__val))) 271 ((__hp)->write32((__reg), (__val)))
272 #define hme_read32(__hp, __reg) \ 272 #define hme_read32(__hp, __reg) \
273 ((__hp)->read32(__reg)) 273 ((__hp)->read32(__reg))
274 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ 274 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
275 ((__hp)->write_rxd((__rxd), (__flags), (__addr))) 275 ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
276 #define hme_write_txd(__hp, __txd, __flags, __addr) \ 276 #define hme_write_txd(__hp, __txd, __flags, __addr) \
277 ((__hp)->write_txd((__txd), (__flags), (__addr))) 277 ((__hp)->write_txd((__txd), (__flags), (__addr)))
278 #define hme_read_desc32(__hp, __p) \ 278 #define hme_read_desc32(__hp, __p) \
279 ((__hp)->read_desc32(__p)) 279 ((__hp)->read_desc32(__p))
280 #define hme_dma_map(__hp, __ptr, __size, __dir) \ 280 #define hme_dma_map(__hp, __ptr, __size, __dir) \
281 ((__hp)->dma_map((__hp)->happy_dev, (__ptr), (__size), (__dir))) 281 ((__hp)->dma_map((__hp)->happy_dev, (__ptr), (__size), (__dir)))
282 #define hme_dma_unmap(__hp, __addr, __size, __dir) \ 282 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
283 ((__hp)->dma_unmap((__hp)->happy_dev, (__addr), (__size), (__dir))) 283 ((__hp)->dma_unmap((__hp)->happy_dev, (__addr), (__size), (__dir)))
284 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ 284 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
285 ((__hp)->dma_sync_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir))) 285 ((__hp)->dma_sync_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir)))
286 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ 286 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
287 ((__hp)->dma_sync_for_device((__hp)->happy_dev, (__addr), (__size), (__dir))) 287 ((__hp)->dma_sync_for_device((__hp)->happy_dev, (__addr), (__size), (__dir)))
288 #else 288 #else
289 #ifdef CONFIG_SBUS 289 #ifdef CONFIG_SBUS
290 /* SBUS only compilation */ 290 /* SBUS only compilation */
291 #define hme_write32(__hp, __reg, __val) \ 291 #define hme_write32(__hp, __reg, __val) \
292 sbus_writel((__val), (__reg)) 292 sbus_writel((__val), (__reg))
293 #define hme_read32(__hp, __reg) \ 293 #define hme_read32(__hp, __reg) \
294 sbus_readl(__reg) 294 sbus_readl(__reg)
295 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ 295 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
296 do { (__rxd)->rx_addr = (__addr); \ 296 do { (__rxd)->rx_addr = (__addr); \
297 wmb(); \ 297 wmb(); \
298 (__rxd)->rx_flags = (__flags); \ 298 (__rxd)->rx_flags = (__flags); \
299 } while(0) 299 } while(0)
300 #define hme_write_txd(__hp, __txd, __flags, __addr) \ 300 #define hme_write_txd(__hp, __txd, __flags, __addr) \
301 do { (__txd)->tx_addr = (__addr); \ 301 do { (__txd)->tx_addr = (__addr); \
302 wmb(); \ 302 wmb(); \
303 (__txd)->tx_flags = (__flags); \ 303 (__txd)->tx_flags = (__flags); \
304 } while(0) 304 } while(0)
305 #define hme_read_desc32(__hp, __p) (*(__p)) 305 #define hme_read_desc32(__hp, __p) (*(__p))
306 #define hme_dma_map(__hp, __ptr, __size, __dir) \ 306 #define hme_dma_map(__hp, __ptr, __size, __dir) \
307 sbus_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir)) 307 sbus_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir))
308 #define hme_dma_unmap(__hp, __addr, __size, __dir) \ 308 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
309 sbus_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir)) 309 sbus_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir))
310 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ 310 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
311 sbus_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir)) 311 sbus_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir))
312 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ 312 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
313 sbus_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir)) 313 sbus_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir))
314 #else 314 #else
315 /* PCI only compilation */ 315 /* PCI only compilation */
316 #define hme_write32(__hp, __reg, __val) \ 316 #define hme_write32(__hp, __reg, __val) \
317 writel((__val), (__reg)) 317 writel((__val), (__reg))
318 #define hme_read32(__hp, __reg) \ 318 #define hme_read32(__hp, __reg) \
319 readl(__reg) 319 readl(__reg)
320 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ 320 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
321 do { (__rxd)->rx_addr = cpu_to_le32(__addr); \ 321 do { (__rxd)->rx_addr = cpu_to_le32(__addr); \
322 wmb(); \ 322 wmb(); \
323 (__rxd)->rx_flags = cpu_to_le32(__flags); \ 323 (__rxd)->rx_flags = cpu_to_le32(__flags); \
324 } while(0) 324 } while(0)
325 #define hme_write_txd(__hp, __txd, __flags, __addr) \ 325 #define hme_write_txd(__hp, __txd, __flags, __addr) \
326 do { (__txd)->tx_addr = cpu_to_le32(__addr); \ 326 do { (__txd)->tx_addr = cpu_to_le32(__addr); \
327 wmb(); \ 327 wmb(); \
328 (__txd)->tx_flags = cpu_to_le32(__flags); \ 328 (__txd)->tx_flags = cpu_to_le32(__flags); \
329 } while(0) 329 } while(0)
330 #define hme_read_desc32(__hp, __p) cpu_to_le32p(__p) 330 #define hme_read_desc32(__hp, __p) cpu_to_le32p(__p)
331 #define hme_dma_map(__hp, __ptr, __size, __dir) \ 331 #define hme_dma_map(__hp, __ptr, __size, __dir) \
332 pci_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir)) 332 pci_map_single((__hp)->happy_dev, (__ptr), (__size), (__dir))
333 #define hme_dma_unmap(__hp, __addr, __size, __dir) \ 333 #define hme_dma_unmap(__hp, __addr, __size, __dir) \
334 pci_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir)) 334 pci_unmap_single((__hp)->happy_dev, (__addr), (__size), (__dir))
335 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ 335 #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
336 pci_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir)) 336 pci_dma_sync_single_for_cpu((__hp)->happy_dev, (__addr), (__size), (__dir))
337 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ 337 #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
338 pci_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir)) 338 pci_dma_sync_single_for_device((__hp)->happy_dev, (__addr), (__size), (__dir))
339 #endif 339 #endif
340 #endif 340 #endif
341 341
342 342
/* Map the generic DMA direction names onto the SBUS ones when those exist
 * (SBUS build), otherwise define the raw PCI DMA direction values.
 *
 * NOTE(review): the #else branches of the second and third guards are
 * crossed — the SBUS_DMA_FROMDEVICE guard's #else defines DMA_TODEVICE,
 * and vice versa.  The resulting values (TODEVICE=1, FROMDEVICE=2) are
 * correct only because the SBUS_DMA_* macros are all defined together or
 * not at all; confirm this crossing is intentional before relying on it.
 */
#ifdef SBUS_DMA_BIDIRECTIONAL
# define DMA_BIDIRECTIONAL	SBUS_DMA_BIDIRECTIONAL
#else
# define DMA_BIDIRECTIONAL	0
#endif

#ifdef SBUS_DMA_FROMDEVICE
# define DMA_FROMDEVICE	SBUS_DMA_FROMDEVICE
#else
# define DMA_TODEVICE	1
#endif

#ifdef SBUS_DMA_TODEVICE
# define DMA_TODEVICE	SBUS_DMA_TODEVICE
#else
# define DMA_FROMDEVICE	2
#endif
360 360
361 361
362 /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */ 362 /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
/* Clock one bit out on the bit-banged MIF: set MDIO data, then pulse the
 * clock low->high.  Statement order is the protocol; do not reorder.
 */
static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
{
	hme_write32(hp, tregs + TCVR_BBDATA, bit);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
}
369 369
#if 0
/* Dead code, kept for reference: earlier bit-read variant without the
 * udelay() settling time that BB_GET_BIT2 below adds.  Samples the MDIO
 * line for the internal or external transceiver and returns the raw
 * (unshifted) mask bit, not 0/1.
 */
static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
{
	u32 ret;

	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
	ret = hme_read32(hp, tregs + TCVR_CFG);
	if (internal)
		ret &= TCV_CFG_MDIO0;
	else
		ret &= TCV_CFG_MDIO1;

	return ret;
}
#endif
386 386
/* Clock one bit in on the bit-banged MIF: drop the clock, wait 1us for the
 * PHY to drive the line, sample the MDIO pin for the selected transceiver,
 * then raise the clock again.
 *
 * Returns the raw mask bit (TCV_CFG_MDIO0 or TCV_CFG_MDIO1) when the line
 * is high, 0 when low — callers must treat this as zero/non-zero, not as
 * a 0/1 value.
 */
static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
{
	u32 retval;

	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	udelay(1);
	retval = hme_read32(hp, tregs + TCVR_CFG);
	if (internal)
		retval &= TCV_CFG_MDIO0;
	else
		retval &= TCV_CFG_MDIO1;
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);

	return retval;
}
402 402
403 #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */ 403 #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
404 404
405 static int happy_meal_bb_read(struct happy_meal *hp, 405 static int happy_meal_bb_read(struct happy_meal *hp,
406 void __iomem *tregs, int reg) 406 void __iomem *tregs, int reg)
407 { 407 {
408 u32 tmp; 408 u32 tmp;
409 int retval = 0; 409 int retval = 0;
410 int i; 410 int i;
411 411
412 ASD(("happy_meal_bb_read: reg=%d ", reg)); 412 ASD(("happy_meal_bb_read: reg=%d ", reg));
413 413
414 /* Enable the MIF BitBang outputs. */ 414 /* Enable the MIF BitBang outputs. */
415 hme_write32(hp, tregs + TCVR_BBOENAB, 1); 415 hme_write32(hp, tregs + TCVR_BBOENAB, 1);
416 416
417 /* Force BitBang into the idle state. */ 417 /* Force BitBang into the idle state. */
418 for (i = 0; i < 32; i++) 418 for (i = 0; i < 32; i++)
419 BB_PUT_BIT(hp, tregs, 1); 419 BB_PUT_BIT(hp, tregs, 1);
420 420
421 /* Give it the read sequence. */ 421 /* Give it the read sequence. */
422 BB_PUT_BIT(hp, tregs, 0); 422 BB_PUT_BIT(hp, tregs, 0);
423 BB_PUT_BIT(hp, tregs, 1); 423 BB_PUT_BIT(hp, tregs, 1);
424 BB_PUT_BIT(hp, tregs, 1); 424 BB_PUT_BIT(hp, tregs, 1);
425 BB_PUT_BIT(hp, tregs, 0); 425 BB_PUT_BIT(hp, tregs, 0);
426 426
427 /* Give it the PHY address. */ 427 /* Give it the PHY address. */
428 tmp = hp->paddr & 0xff; 428 tmp = hp->paddr & 0xff;
429 for (i = 4; i >= 0; i--) 429 for (i = 4; i >= 0; i--)
430 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); 430 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
431 431
432 /* Tell it what register we want to read. */ 432 /* Tell it what register we want to read. */
433 tmp = (reg & 0xff); 433 tmp = (reg & 0xff);
434 for (i = 4; i >= 0; i--) 434 for (i = 4; i >= 0; i--)
435 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); 435 BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
436 436
437 /* Close down the MIF BitBang outputs. */ 437 /* Close down the MIF BitBang outputs. */
438 hme_write32(hp, tregs + TCVR_BBOENAB, 0); 438 hme_write32(hp, tregs + TCVR_BBOENAB, 0);
439 439
440 /* Now read in the value. */ 440 /* Now read in the value. */
441 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); 441 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
442 for (i = 15; i >= 0; i--) 442 for (i = 15; i >= 0; i--)
443 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); 443 retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
444 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); 444 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
445 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); 445 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
446 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); 446 (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
447 ASD(("value=%x\n", retval)); 447 ASD(("value=%x\n", retval));
448 return retval; 448 return retval;
449 } 449 }
450 450
/* Write a 16-bit MII register over the bit-banged MIF.  Emits the standard
 * MDIO write frame: 32-bit preamble, start/opcode 0101, 5-bit PHY address,
 * 5-bit register number, the 10 turnaround, then 16 data bits MSB-first.
 * The exact bit sequence is the wire protocol — statement order matters.
 */
static void happy_meal_bb_write(struct happy_meal *hp,
				void __iomem *tregs, int reg,
				unsigned short value)
{
	u32 tmp;
	int i;

	ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));

	/* Enable the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 1);

	/* Force BitBang into the idle state. */
	for (i = 0; i < 32; i++)
		BB_PUT_BIT(hp, tregs, 1);

	/* Give it write sequence. */
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);

	/* Give it the PHY address. */
	tmp = (hp->paddr & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it what register we will be writing. */
	tmp = (reg & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it to become ready for the bits (turnaround). */
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);

	/* Data bits, MSB first. */
	for (i = 15; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((value >> i) & 1));

	/* Close down the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 0);
}
493 493
494 #define TCVR_READ_TRIES 16 494 #define TCVR_READ_TRIES 16
495 495
/* Read an MII register from the transceiver.
 *
 * Returns the 16-bit register value, or TCVR_FAILURE (0x80000000, which no
 * real register can produce) when there is no transceiver or the MIF frame
 * never completes.  Falls back to the bit-bang path when frame mode is
 * disabled (HFLAG_FENABLE clear).
 */
static int happy_meal_tcvr_read(struct happy_meal *hp,
				void __iomem *tregs, int reg)
{
	int tries = TCVR_READ_TRIES;
	int retval;

	ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
	if (hp->tcvr_type == none) {
		ASD(("no transceiver, value=TCVR_FAILURE\n"));
		return TCVR_FAILURE;
	}

	if (!(hp->happy_flags & HFLAG_FENABLE)) {
		ASD(("doing bit bang\n"));
		return happy_meal_bb_read(hp, tregs, reg);
	}

	/* Launch the MIF read frame, then poll for completion.
	 * NOTE(review): bit 0x10000 is presumably the frame-valid/complete
	 * flag in TCVR_FRAME — confirm against the HME MIF documentation.
	 */
	hme_write32(hp, tregs + TCVR_FRAME,
		    (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
		udelay(20);
	if (!tries) {
		printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
		return TCVR_FAILURE;
	}
	/* Low 16 bits of the frame register hold the read data. */
	retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
	ASD(("value=%04x\n", retval));
	return retval;
}
525 525
526 #define TCVR_WRITE_TRIES 16 526 #define TCVR_WRITE_TRIES 16
527 527
/* Write an MII register on the transceiver.  Uses the bit-bang path when
 * frame mode is disabled; otherwise launches a MIF write frame and polls
 * for completion.  A timeout is only logged — the write is fire-and-forget.
 */
static void happy_meal_tcvr_write(struct happy_meal *hp,
				  void __iomem *tregs, int reg,
				  unsigned short value)
{
	int tries = TCVR_WRITE_TRIES;

	ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));

	/* Welcome to Sun Microsystems, can I take your order please? */
	if (!(hp->happy_flags & HFLAG_FENABLE)) {
		happy_meal_bb_write(hp, tregs, reg, value);
		return;
	}

	/* Would you like fries with that?  (Launch the MIF write frame and
	 * poll the same 0x10000 completion bit as the read path.)
	 */
	hme_write32(hp, tregs + TCVR_FRAME,
		    (FRAME_WRITE | (hp->paddr << 23) |
		     ((reg & 0xff) << 18) | (value & 0xffff)));
	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
		udelay(20);

	/* Anything else? */
	if (!tries)
		printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");

	/* Fifty-two cents is your change, have a nice day. */
}
555 555
556 /* Auto negotiation. The scheme is very simple. We have a timer routine 556 /* Auto negotiation. The scheme is very simple. We have a timer routine
557 * that keeps watching the auto negotiation process as it progresses. 557 * that keeps watching the auto negotiation process as it progresses.
558 * The DP83840 is first told to start doing it's thing, we set up the time 558 * The DP83840 is first told to start doing it's thing, we set up the time
559 * and place the timer state machine in it's initial state. 559 * and place the timer state machine in it's initial state.
560 * 560 *
561 * Here the timer peeks at the DP83840 status registers at each click to see 561 * Here the timer peeks at the DP83840 status registers at each click to see
562 * if the auto negotiation has completed, we assume here that the DP83840 PHY 562 * if the auto negotiation has completed, we assume here that the DP83840 PHY
563 * will time out at some point and just tell us what (didn't) happen. For 563 * will time out at some point and just tell us what (didn't) happen. For
564 * complete coverage we only allow so many of the ticks at this level to run, 564 * complete coverage we only allow so many of the ticks at this level to run,
565 * when this has expired we print a warning message and try another strategy. 565 * when this has expired we print a warning message and try another strategy.
566 * This "other" strategy is to force the interface into various speed/duplex 566 * This "other" strategy is to force the interface into various speed/duplex
567 * configurations and we stop when we see a link-up condition before the 567 * configurations and we stop when we see a link-up condition before the
568 * maximum number of "peek" ticks have occurred. 568 * maximum number of "peek" ticks have occurred.
569 * 569 *
570 * Once a valid link status has been detected we configure the BigMAC and 570 * Once a valid link status has been detected we configure the BigMAC and
571 * the rest of the Happy Meal to speak the most efficient protocol we could 571 * the rest of the Happy Meal to speak the most efficient protocol we could
572 * get a clean link for. The priority for link configurations, highest first 572 * get a clean link for. The priority for link configurations, highest first
573 * is: 573 * is:
574 * 100 Base-T Full Duplex 574 * 100 Base-T Full Duplex
575 * 100 Base-T Half Duplex 575 * 100 Base-T Half Duplex
576 * 10 Base-T Full Duplex 576 * 10 Base-T Full Duplex
577 * 10 Base-T Half Duplex 577 * 10 Base-T Half Duplex
578 * 578 *
579 * We start a new timer now, after a successful auto negotiation status has 579 * We start a new timer now, after a successful auto negotiation status has
580 * been detected. This timer just waits for the link-up bit to get set in 580 * been detected. This timer just waits for the link-up bit to get set in
581 * the BMCR of the DP83840. When this occurs we print a kernel log message 581 * the BMCR of the DP83840. When this occurs we print a kernel log message
582 * describing the link type in use and the fact that it is up. 582 * describing the link type in use and the fact that it is up.
583 * 583 *
584 * If a fatal error of some sort is signalled and detected in the interrupt 584 * If a fatal error of some sort is signalled and detected in the interrupt
585 * service routine, and the chip is reset, or the link is ifconfig'd down 585 * service routine, and the chip is reset, or the link is ifconfig'd down
586 * and then back up, this entire process repeats itself all over again. 586 * and then back up, this entire process repeats itself all over again.
587 */ 587 */
588 static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs) 588 static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
589 { 589 {
590 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); 590 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
591 591
592 /* Downgrade from full to half duplex. Only possible 592 /* Downgrade from full to half duplex. Only possible
593 * via ethtool. 593 * via ethtool.
594 */ 594 */
595 if (hp->sw_bmcr & BMCR_FULLDPLX) { 595 if (hp->sw_bmcr & BMCR_FULLDPLX) {
596 hp->sw_bmcr &= ~(BMCR_FULLDPLX); 596 hp->sw_bmcr &= ~(BMCR_FULLDPLX);
597 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); 597 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
598 return 0; 598 return 0;
599 } 599 }
600 600
601 /* Downgrade from 100 to 10. */ 601 /* Downgrade from 100 to 10. */
602 if (hp->sw_bmcr & BMCR_SPEED100) { 602 if (hp->sw_bmcr & BMCR_SPEED100) {
603 hp->sw_bmcr &= ~(BMCR_SPEED100); 603 hp->sw_bmcr &= ~(BMCR_SPEED100);
604 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); 604 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
605 return 0; 605 return 0;
606 } 606 }
607 607
608 /* We've tried everything. */ 608 /* We've tried everything. */
609 return -1; 609 return -1;
610 } 610 }
611 611
612 static void display_link_mode(struct happy_meal *hp, void __iomem *tregs) 612 static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
613 { 613 {
614 printk(KERN_INFO "%s: Link is up using ", hp->dev->name); 614 printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
615 if (hp->tcvr_type == external) 615 if (hp->tcvr_type == external)
616 printk("external "); 616 printk("external ");
617 else 617 else
618 printk("internal "); 618 printk("internal ");
619 printk("transceiver at "); 619 printk("transceiver at ");
620 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); 620 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
621 if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) { 621 if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
622 if (hp->sw_lpa & LPA_100FULL) 622 if (hp->sw_lpa & LPA_100FULL)
623 printk("100Mb/s, Full Duplex.\n"); 623 printk("100Mb/s, Full Duplex.\n");
624 else 624 else
625 printk("100Mb/s, Half Duplex.\n"); 625 printk("100Mb/s, Half Duplex.\n");
626 } else { 626 } else {
627 if (hp->sw_lpa & LPA_10FULL) 627 if (hp->sw_lpa & LPA_10FULL)
628 printk("10Mb/s, Full Duplex.\n"); 628 printk("10Mb/s, Full Duplex.\n");
629 else 629 else
630 printk("10Mb/s, Half Duplex.\n"); 630 printk("10Mb/s, Half Duplex.\n");
631 } 631 }
632 } 632 }
633 633
634 static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs) 634 static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
635 { 635 {
636 printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name); 636 printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
637 if (hp->tcvr_type == external) 637 if (hp->tcvr_type == external)
638 printk("external "); 638 printk("external ");
639 else 639 else
640 printk("internal "); 640 printk("internal ");
641 printk("transceiver at "); 641 printk("transceiver at ");
642 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); 642 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
643 if (hp->sw_bmcr & BMCR_SPEED100) 643 if (hp->sw_bmcr & BMCR_SPEED100)
644 printk("100Mb/s, "); 644 printk("100Mb/s, ");
645 else 645 else
646 printk("10Mb/s, "); 646 printk("10Mb/s, ");
647 if (hp->sw_bmcr & BMCR_FULLDPLX) 647 if (hp->sw_bmcr & BMCR_FULLDPLX)
648 printk("Full Duplex.\n"); 648 printk("Full Duplex.\n");
649 else 649 else
650 printk("Half Duplex.\n"); 650 printk("Half Duplex.\n");
651 } 651 }
652 652
/* Program the BigMAC TX-config duplex bit to match the link mode
 * that was just established: autonegotiated (arbwait state) or
 * forced (any other state).
 *
 * Returns 0 on success, 1 if the link partner advertised none of
 * the modes we understand (autoneg case only).
 *
 * hp->happy_lock must be held by the caller.
 */
static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
{
	int full;

	/* All we care about is making sure the bigmac tx_cfg has a
	 * proper duplex setting.
	 */
	if (hp->timer_state == arbwait) {
		/* Autoneg: pick duplex from the partner's abilities,
		 * preference order 100FULL > 100HALF > 10FULL > 10HALF.
		 */
		hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
		if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
			goto no_response;
		if (hp->sw_lpa & LPA_100FULL)
			full = 1;
		else if (hp->sw_lpa & LPA_100HALF)
			full = 0;
		else if (hp->sw_lpa & LPA_10FULL)
			full = 1;
		else
			full = 0;
	} else {
		/* Forcing a link mode: duplex comes from our own BMCR. */
		hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (hp->sw_bmcr & BMCR_FULLDPLX)
			full = 1;
		else
			full = 0;
	}

	/* Before changing other bits in the tx_cfg register, and in
	 * general any of the other TX config registers too, you
	 * must:
	 * 1) Clear Enable
	 * 2) Poll with reads until that bit reads back as zero
	 * 3) Make TX configuration changes
	 * 4) Set Enable once more
	 */
	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
		    ~(BIGMAC_TXCFG_ENABLE));
	/* Busy-wait for the enable bit to drop; no timeout here. */
	while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
		barrier();
	if (full) {
		/* Mirror the hardware duplex bit in the software flags. */
		hp->happy_flags |= HFLAG_FULL;
		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
			    BIGMAC_TXCFG_FULLDPLX);
	} else {
		hp->happy_flags &= ~(HFLAG_FULL);
		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
			    ~(BIGMAC_TXCFG_FULLDPLX));
	}
	/* Step 4: re-enable the transmitter. */
	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
		    BIGMAC_TXCFG_ENABLE);
	return 0;
no_response:
	return 1;
}
712 712
713 static int happy_meal_init(struct happy_meal *hp); 713 static int happy_meal_init(struct happy_meal *hp);
714 714
715 static int is_lucent_phy(struct happy_meal *hp) 715 static int is_lucent_phy(struct happy_meal *hp)
716 { 716 {
717 void __iomem *tregs = hp->tcvregs; 717 void __iomem *tregs = hp->tcvregs;
718 unsigned short mr2, mr3; 718 unsigned short mr2, mr3;
719 int ret = 0; 719 int ret = 0;
720 720
721 mr2 = happy_meal_tcvr_read(hp, tregs, 2); 721 mr2 = happy_meal_tcvr_read(hp, tregs, 2);
722 mr3 = happy_meal_tcvr_read(hp, tregs, 3); 722 mr3 = happy_meal_tcvr_read(hp, tregs, 3);
723 if ((mr2 & 0xffff) == 0x0180 && 723 if ((mr2 & 0xffff) == 0x0180 &&
724 ((mr3 & 0xffff) >> 10) == 0x1d) 724 ((mr3 & 0xffff) >> 10) == 0x1d)
725 ret = 1; 725 ret = 1;
726 726
727 return ret; 727 return ret;
728 } 728 }
729 729
/* Link negotiation/monitoring state machine, re-armed every 1.2
 * seconds while the link is coming up (see the large comment above
 * try_next_permutation() for the overall strategy).  States:
 *   arbwait  - waiting for autonegotiation to complete
 *   lupwait  - autoneg done, waiting for the link-up bit
 *   ltrywait - forcing speed/duplex permutations by hand
 *   asleep   - link is up; the timer should not fire
 *
 * Timer callback; 'data' is the struct happy_meal pointer.  All PHY
 * and MAC accesses happen under hp->happy_lock with IRQs disabled.
 */
static void happy_meal_timer(unsigned long data)
{
	struct happy_meal *hp = (struct happy_meal *) data;
	void __iomem *tregs = hp->tcvregs;
	int restart_timer = 0;

	spin_lock_irq(&hp->happy_lock);

	hp->timer_ticks++;
	switch(hp->timer_state) {
	case arbwait:
		/* Bail out of arbitration waiting after 10 ticks and
		 * fall back to forcing a link mode by hand.
		 * NOTE(review): the original comment said "5 ticks,
		 * thats 10 seconds" but the code tests >= 10 ticks
		 * (~12s at 1.2s/tick) — confirm which was intended.
		 */
		if (hp->timer_ticks >= 10) {
			/* Enter force mode. */
	do_force_mode:
			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
			       hp->dev->name);
			/* Start at 100Mb/s half duplex; ltrywait steps
			 * down through the permutations from here.
			 */
			hp->sw_bmcr = BMCR_SPEED100;
			happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

			if (!is_lucent_phy(hp)) {
				/* OK, seems we need do disable the transceiver for the first
				 * tick to make sure we get an accurate link state at the
				 * second tick.
				 * NOTE(review): the code clears
				 * CSCONFIG_TCVDISAB here — check the
				 * DP83840 docs on whether clearing the bit
				 * disables or enables the transceiver.
				 */
				hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
			}
			hp->timer_state = ltrywait;
			hp->timer_ticks = 0;
			restart_timer = 1;
		} else {
			/* Anything interesting happen? */
			hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
			if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
				int ret;

				/* Just what we've been waiting for... */
				ret = set_happy_link_modes(hp, tregs);
				if (ret) {
					/* Ooops, something bad happened, go to force
					 * mode.
					 *
					 * XXX Broken hubs which don't support 802.3u
					 * XXX auto-negotiation make this happen as well.
					 */
					goto do_force_mode;
				}

				/* Success, at least so far, advance our state engine. */
				hp->timer_state = lupwait;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case lupwait:
		/* Auto negotiation was successful and we are awaiting a
		 * link up status. I have decided to let this timer run
		 * forever until some sort of error is signalled, reporting
		 * a message to the user at 10 second intervals.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Wheee, it's up, display the link mode in use and put
			 * the timer to sleep.
			 */
			display_link_mode(hp, tregs);
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 10) {
				printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
				       "not completely up.\n", hp->dev->name);
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case ltrywait:
		/* Making the timeout here too long can make it take
		 * annoyingly long to attempt all of the link mode
		 * permutations, but then again this is essentially
		 * error recovery code for the most part.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
		if (hp->timer_ticks == 1) {
			if (!is_lucent_phy(hp)) {
				/* Re-enable transceiver, we'll re-enable the transceiver next
				 * tick, then check link state on the following tick.
				 * NOTE(review): this tick SETS CSCONFIG_TCVDISAB
				 * and the next tick clears it — the comment and
				 * the bit name appear inverted; verify against
				 * the PHY datasheet.
				 */
				hp->sw_csconfig |= CSCONFIG_TCVDISAB;
				happy_meal_tcvr_write(hp, tregs,
						      DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->timer_ticks == 2) {
			if (!is_lucent_phy(hp)) {
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs,
						      DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Force mode selection success. */
			display_forced_link_mode(hp, tregs);
			set_happy_link_modes(hp, tregs); /* XXX error? then what? */
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
				int ret;

				ret = try_next_permutation(hp, tregs);
				if (ret == -1) {
					/* Aieee, tried them all, reset the
					 * chip and try all over again.
					 */

					/* Let the user know... */
					printk(KERN_NOTICE "%s: Link down, cable problem?\n",
					       hp->dev->name);

					ret = happy_meal_init(hp);
					if (ret) {
						/* ho hum... */
						printk(KERN_ERR "%s: Error, cannot re-init the "
						       "Happy Meal.\n", hp->dev->name);
					}
					/* happy_meal_init() restarted the whole
					 * process; skip re-arming this timer here.
					 */
					goto out;
				}
				if (!is_lucent_phy(hp)) {
					hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
									       DP83840_CSCONFIG);
					hp->sw_csconfig |= CSCONFIG_TCVDISAB;
					happy_meal_tcvr_write(hp, tregs,
							      DP83840_CSCONFIG, hp->sw_csconfig);
				}
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case asleep:
	default:
		/* Can't happens.... */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
		       hp->dev->name);
		restart_timer = 0;
		hp->timer_ticks = 0;
		hp->timer_state = asleep; /* foo on you */
		break;
	};

	if (restart_timer) {
		hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
		add_timer(&hp->happy_timer);
	}

out:
	spin_unlock_irq(&hp->happy_lock);
}
909 909
910 #define TX_RESET_TRIES 32 910 #define TX_RESET_TRIES 32
911 #define RX_RESET_TRIES 32 911 #define RX_RESET_TRIES 32
912 912
913 /* hp->happy_lock must be held */ 913 /* hp->happy_lock must be held */
914 static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs) 914 static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
915 { 915 {
916 int tries = TX_RESET_TRIES; 916 int tries = TX_RESET_TRIES;
917 917
918 HMD(("happy_meal_tx_reset: reset, ")); 918 HMD(("happy_meal_tx_reset: reset, "));
919 919
920 /* Would you like to try our SMCC Delux? */ 920 /* Would you like to try our SMCC Delux? */
921 hme_write32(hp, bregs + BMAC_TXSWRESET, 0); 921 hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
922 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries) 922 while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
923 udelay(20); 923 udelay(20);
924 924
925 /* Lettuce, tomato, buggy hardware (no extra charge)? */ 925 /* Lettuce, tomato, buggy hardware (no extra charge)? */
926 if (!tries) 926 if (!tries)
927 printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!"); 927 printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
928 928
929 /* Take care. */ 929 /* Take care. */
930 HMD(("done\n")); 930 HMD(("done\n"));
931 } 931 }
932 932
933 /* hp->happy_lock must be held */ 933 /* hp->happy_lock must be held */
934 static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs) 934 static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
935 { 935 {
936 int tries = RX_RESET_TRIES; 936 int tries = RX_RESET_TRIES;
937 937
938 HMD(("happy_meal_rx_reset: reset, ")); 938 HMD(("happy_meal_rx_reset: reset, "));
939 939
940 /* We have a special on GNU/Viking hardware bugs today. */ 940 /* We have a special on GNU/Viking hardware bugs today. */
941 hme_write32(hp, bregs + BMAC_RXSWRESET, 0); 941 hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
942 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries) 942 while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
943 udelay(20); 943 udelay(20);
944 944
945 /* Will that be all? */ 945 /* Will that be all? */
946 if (!tries) 946 if (!tries)
947 printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!"); 947 printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
948 948
949 /* Don't forget your vik_1137125_wa. Have a nice day. */ 949 /* Don't forget your vik_1137125_wa. Have a nice day. */
950 HMD(("done\n")); 950 HMD(("done\n"));
951 } 951 }
952 952
953 #define STOP_TRIES 16 953 #define STOP_TRIES 16
954 954
955 /* hp->happy_lock must be held */ 955 /* hp->happy_lock must be held */
956 static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs) 956 static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
957 { 957 {
958 int tries = STOP_TRIES; 958 int tries = STOP_TRIES;
959 959
960 HMD(("happy_meal_stop: reset, ")); 960 HMD(("happy_meal_stop: reset, "));
961 961
962 /* We're consolidating our STB products, it's your lucky day. */ 962 /* We're consolidating our STB products, it's your lucky day. */
963 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL); 963 hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
964 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries) 964 while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
965 udelay(20); 965 udelay(20);
966 966
967 /* Come back next week when we are "Sun Microelectronics". */ 967 /* Come back next week when we are "Sun Microelectronics". */
968 if (!tries) 968 if (!tries)
969 printk(KERN_ERR "happy meal: Fry guys."); 969 printk(KERN_ERR "happy meal: Fry guys.");
970 970
971 /* Remember: "Different name, same old buggy as shit hardware." */ 971 /* Remember: "Different name, same old buggy as shit hardware." */
972 HMD(("done\n")); 972 HMD(("done\n"));
973 } 973 }
974 974
975 /* hp->happy_lock must be held */ 975 /* hp->happy_lock must be held */
976 static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs) 976 static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
977 { 977 {
978 struct net_device_stats *stats = &hp->net_stats; 978 struct net_device_stats *stats = &hp->net_stats;
979 979
980 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR); 980 stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
981 hme_write32(hp, bregs + BMAC_RCRCECTR, 0); 981 hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
982 982
983 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR); 983 stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
984 hme_write32(hp, bregs + BMAC_UNALECTR, 0); 984 hme_write32(hp, bregs + BMAC_UNALECTR, 0);
985 985
986 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR); 986 stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
987 hme_write32(hp, bregs + BMAC_GLECTR, 0); 987 hme_write32(hp, bregs + BMAC_GLECTR, 0);
988 988
989 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR); 989 stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
990 990
991 stats->collisions += 991 stats->collisions +=
992 (hme_read32(hp, bregs + BMAC_EXCTR) + 992 (hme_read32(hp, bregs + BMAC_EXCTR) +
993 hme_read32(hp, bregs + BMAC_LTCTR)); 993 hme_read32(hp, bregs + BMAC_LTCTR));
994 hme_write32(hp, bregs + BMAC_EXCTR, 0); 994 hme_write32(hp, bregs + BMAC_EXCTR, 0);
995 hme_write32(hp, bregs + BMAC_LTCTR, 0); 995 hme_write32(hp, bregs + BMAC_LTCTR, 0);
996 } 996 }
997 997
/* Stop MIF polling of the transceiver: mask MIF interrupts, clear
 * the poll-enable bit in TCVR_CFG, and drop HFLAG_POLL.  No-op if
 * polling is disabled or was not active.
 *
 * hp->happy_lock must be held.
 */
static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
{
	ASD(("happy_meal_poll_stop: "));

	/* If polling disabled or not polling already, nothing to do. */
	if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
	    (HFLAG_POLLENABLE | HFLAG_POLL)) {
		HMD(("not polling, return\n"));
		return;
	}

	/* Shut up the MIF: mask all of its interrupt sources. */
	ASD(("were polling, mif ints off, "));
	hme_write32(hp, tregs + TCVR_IMASK, 0xffff);

	/* Turn off polling. */
	ASD(("polling off, "));
	hme_write32(hp, tregs + TCVR_CFG,
		    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));

	/* We are no longer polling. */
	hp->happy_flags &= ~(HFLAG_POLL);

	/* Let the bits settle in the hardware before returning. */
	udelay(200);
	ASD(("done\n"));
}
1026 1026
1027 /* Only Sun can take such nice parts and fuck up the programming interface 1027 /* Only Sun can take such nice parts and fuck up the programming interface
1028 * like this. Good job guys... 1028 * like this. Good job guys...
1029 */ 1029 */
1030 #define TCVR_RESET_TRIES 16 /* It should reset quickly */ 1030 #define TCVR_RESET_TRIES 16 /* It should reset quickly */
1031 #define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */ 1031 #define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
1032 1032
/* Reset the currently selected transceiver (PHY).  Sequence:
 *   1) Isolate the OTHER transceiver (power-down + loopback +
 *      isolate via its BMCR) so it stays off the wire, flipping
 *      TCV_CFG_PSELECT back and forth to address each PHY.
 *   2) Issue BMCR_RESET to the selected PHY and poll the
 *      self-clearing bit (up to TCVR_RESET_TRIES * 20us).
 *   3) Refresh the software copies of the PHY registers.
 *   4) Un-isolate the PHY and wait for BMCR_ISOLATE to clear
 *      (up to TCVR_UNISOLATE_TRIES * 20us).
 *   5) On non-Lucent PHYs, set CSCONFIG_DFBYPASS in the
 *      DP83840-specific configuration register.
 *
 * Returns 0 on success, -1 on any MII read failure or timeout.
 *
 * hp->happy_lock must be held.
 */
static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
{
	u32 tconfig;
	int result, tries = TCVR_RESET_TRIES;

	tconfig = hme_read32(hp, tregs + TCVR_CFG);
	ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
	if (hp->tcvr_type == external) {
		/* Temporarily select the internal PHY so we can isolate it. */
		ASD(("external<"));
		hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
		hp->tcvr_type = internal;
		hp->paddr = TCV_PADDR_ITX;
		ASD(("ISOLATE,"));
		happy_meal_tcvr_write(hp, tregs, MII_BMCR,
				      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE) {
			ASD(("phyread_fail>\n"));
			return -1;
		}
		/* Switch back to the external PHY we actually use. */
		ASD(("phyread_ok,PSELECT>"));
		hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
		hp->tcvr_type = external;
		hp->paddr = TCV_PADDR_ETX;
	} else {
		if (tconfig & TCV_CFG_MDIO1) {
			/* An external PHY is present (MDIO1 sensed);
			 * isolate it before resetting the internal one.
			 */
			ASD(("internal<PSELECT,"));
			hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
			ASD(("ISOLATE,"));
			happy_meal_tcvr_write(hp, tregs, MII_BMCR,
					      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
			result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			if (result == TCVR_FAILURE) {
				ASD(("phyread_fail>\n"));
				return -1;
			}
			ASD(("phyread_ok,~PSELECT>"));
			hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
			hp->tcvr_type = internal;
			hp->paddr = TCV_PADDR_ITX;
		}
	}

	/* Reset the selected PHY; BMCR_RESET is self-clearing. */
	ASD(("BMCR_RESET "));
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);

	while (--tries) {
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE)
			return -1;
		hp->sw_bmcr = result;
		if (!(result & BMCR_RESET))
			break;
		udelay(20);
	}
	if (!tries) {
		ASD(("BMCR RESET FAILED!\n"));
		return -1;
	}
	ASD(("RESET_OK\n"));

	/* Get fresh copies of the PHY registers. */
	hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
	hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);

	/* Bring the PHY back on the wire. */
	ASD(("UNISOLATE"));
	hp->sw_bmcr &= ~(BMCR_ISOLATE);
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

	tries = TCVR_UNISOLATE_TRIES;
	while (--tries) {
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE)
			return -1;
		if (!(result & BMCR_ISOLATE))
			break;
		udelay(20);
	}
	if (!tries) {
		ASD((" FAILED!\n"));
		return -1;
	}
	ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
	if (!is_lucent_phy(hp)) {
		/* DP83840-specific: bypass the descrambler filter. */
		result = happy_meal_tcvr_read(hp, tregs,
					      DP83840_CSCONFIG);
		happy_meal_tcvr_write(hp, tregs,
				      DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
	}
	return 0;
}
1127 1127
/* Figure out whether we have an internal or external transceiver.
 *
 * Updates hp->tcvr_type (internal/external/none) and hp->paddr (the
 * MII management address used for subsequent tcvr reads/writes), and
 * steers the MIF via the TCV_CFG_PSELECT bit in the transceiver
 * config register.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
{
	unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);

	ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
	if (hp->happy_flags & HFLAG_POLL) {
		/* If we are polling, we must stop to get the transceiver type. */
		ASD(("<polling> "));
		if (hp->tcvr_type == internal) {
			/* MDIO1 set: switch over to the external transceiver.
			 * Polling must be stopped before reprogramming the MIF
			 * (PSELECT set, PENABLE cleared).
			 */
			if (tconfig & TCV_CFG_MDIO1) {
				ASD(("<internal> <poll stop> "));
				happy_meal_poll_stop(hp, tregs);
				hp->paddr = TCV_PADDR_ETX;
				hp->tcvr_type = external;
				ASD(("<external>\n"));
				tconfig &= ~(TCV_CFG_PENABLE);
				tconfig |= TCV_CFG_PSELECT;
				hme_write32(hp, tregs + TCVR_CFG, tconfig);
			}
		} else {
			if (hp->tcvr_type == external) {
				/* Upper half of TCVR_STATUS reading zero: fall
				 * back to the internal transceiver and deselect
				 * the external one (clear PSELECT).
				 */
				ASD(("<external> "));
				if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
					ASD(("<poll stop> "));
					happy_meal_poll_stop(hp, tregs);
					hp->paddr = TCV_PADDR_ITX;
					hp->tcvr_type = internal;
					ASD(("<internal>\n"));
					hme_write32(hp, tregs + TCVR_CFG,
						    hme_read32(hp, tregs + TCVR_CFG) &
						    ~(TCV_CFG_PSELECT));
				}
				ASD(("\n"));
			} else {
				ASD(("<none>\n"));
			}
		}
	} else {
		u32 reread = hme_read32(hp, tregs + TCVR_CFG);

		/* Else we can just work off of the MDIO bits. */
		ASD(("<not polling> "));
		if (reread & TCV_CFG_MDIO1) {
			/* MDIO1 set: select the external transceiver. */
			hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
			hp->paddr = TCV_PADDR_ETX;
			hp->tcvr_type = external;
			ASD(("<external>\n"));
		} else {
			if (reread & TCV_CFG_MDIO0) {
				/* MDIO0 set: use the internal transceiver. */
				hme_write32(hp, tregs + TCVR_CFG,
					    tconfig & ~(TCV_CFG_PSELECT));
				hp->paddr = TCV_PADDR_ITX;
				hp->tcvr_type = internal;
				ASD(("<internal>\n"));
			} else {
				/* Neither MDIO bit is set: no usable PHY. */
				printk(KERN_ERR "happy meal: Transceiver and a coke please.");
				hp->tcvr_type = none; /* Grrr... */
				ASD(("<none>\n"));
			}
		}
	}
}
1194 1194
1195 /* The receive ring buffers are a bit tricky to get right. Here goes... 1195 /* The receive ring buffers are a bit tricky to get right. Here goes...
1196 * 1196 *
1197 * The buffers we dma into must be 64 byte aligned. So we use a special 1197 * The buffers we dma into must be 64 byte aligned. So we use a special
1198 * alloc_skb() routine for the happy meal to allocate 64 bytes more than 1198 * alloc_skb() routine for the happy meal to allocate 64 bytes more than
1199 * we really need. 1199 * we really need.
1200 * 1200 *
1201 * We use skb_reserve() to align the data block we get in the skb. We 1201 * We use skb_reserve() to align the data block we get in the skb. We
1202 * also program the etxregs->cfg register to use an offset of 2. This 1202 * also program the etxregs->cfg register to use an offset of 2. This
 * empirical constant plus the ethernet header size will always leave
1204 * us with a nicely aligned ip header once we pass things up to the 1204 * us with a nicely aligned ip header once we pass things up to the
1205 * protocol layers. 1205 * protocol layers.
1206 * 1206 *
1207 * The numbers work out to: 1207 * The numbers work out to:
1208 * 1208 *
1209 * Max ethernet frame size 1518 1209 * Max ethernet frame size 1518
1210 * Ethernet header size 14 1210 * Ethernet header size 14
1211 * Happy Meal base offset 2 1211 * Happy Meal base offset 2
1212 * 1212 *
1213 * Say a skb data area is at 0xf001b010, and its size alloced is 1213 * Say a skb data area is at 0xf001b010, and its size alloced is
1214 * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes. 1214 * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
1215 * 1215 *
1216 * First our alloc_skb() routine aligns the data base to a 64 byte 1216 * First our alloc_skb() routine aligns the data base to a 64 byte
1217 * boundary. We now have 0xf001b040 as our skb data address. We 1217 * boundary. We now have 0xf001b040 as our skb data address. We
1218 * plug this into the receive descriptor address. 1218 * plug this into the receive descriptor address.
1219 * 1219 *
1220 * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset. 1220 * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
1221 * So now the data we will end up looking at starts at 0xf001b042. When 1221 * So now the data we will end up looking at starts at 0xf001b042. When
1222 * the packet arrives, we will check out the size received and subtract 1222 * the packet arrives, we will check out the size received and subtract
1223 * this from the skb->length. Then we just pass the packet up to the 1223 * this from the skb->length. Then we just pass the packet up to the
1224 * protocols as is, and allocate a new skb to replace this slot we have 1224 * protocols as is, and allocate a new skb to replace this slot we have
1225 * just received from. 1225 * just received from.
1226 * 1226 *
1227 * The ethernet layer will strip the ether header from the front of the 1227 * The ethernet layer will strip the ether header from the front of the
1228 * skb we just sent to it, this leaves us with the ip header sitting 1228 * skb we just sent to it, this leaves us with the ip header sitting
1229 * nicely aligned at 0xf001b050. Also, for tcp and udp packets the 1229 * nicely aligned at 0xf001b050. Also, for tcp and udp packets the
1230 * Happy Meal has even checksummed the tcp/udp data for us. The 16 1230 * Happy Meal has even checksummed the tcp/udp data for us. The 16
1231 * bit checksum is obtained from the low bits of the receive descriptor 1231 * bit checksum is obtained from the low bits of the receive descriptor
1232 * flags, thus: 1232 * flags, thus:
1233 * 1233 *
1234 * skb->csum = rxd->rx_flags & 0xffff; 1234 * skb->csum = rxd->rx_flags & 0xffff;
1235 * skb->ip_summed = CHECKSUM_HW; 1235 * skb->ip_summed = CHECKSUM_HW;
1236 * 1236 *
1237 * before sending off the skb to the protocols, and we are good as gold. 1237 * before sending off the skb to the protocols, and we are good as gold.
1238 */ 1238 */
/* Drop every skb still attached to the RX and TX rings, unmapping each
 * one's DMA buffer first and clearing the ring slot pointer.  Called
 * from ring (re)initialization, so descriptor contents may be read
 * without racing the hardware.
 */
static void happy_meal_clean_rings(struct happy_meal *hp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (hp->rx_skbs[i] != NULL) {
			struct sk_buff *skb = hp->rx_skbs[i];
			struct happy_meal_rxd *rxd;
			u32 dma_addr;

			/* The DMA address to unmap is recovered from the
			 * descriptor itself.
			 */
			rxd = &hp->happy_block->happy_meal_rxd[i];
			dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			hp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (hp->tx_skbs[i] != NULL) {
			struct sk_buff *skb = hp->tx_skbs[i];
			struct happy_meal_txd *txd;
			u32 dma_addr;
			int frag;

			hp->tx_skbs[i] = NULL;

			/* A fragmented skb occupies nr_frags + 1 consecutive
			 * descriptors (head buffer plus one per fragment);
			 * unmap each, sizing the unmap from the descriptor's
			 * own TXFLAG_SIZE field.  Note the inner loop advances
			 * the outer ring index 'i' to skip past this skb's
			 * extra descriptors.
			 */
			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				txd = &hp->happy_block->happy_meal_txd[i];
				dma_addr = hme_read_desc32(hp, &txd->tx_addr);
				hme_dma_unmap(hp, dma_addr,
					      (hme_read_desc32(hp, &txd->tx_flags)
					       & TXFLAG_SIZE),
					      DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}

			dev_kfree_skb_any(skb);
		}
	}
}
1282 1282
1283 /* hp->happy_lock must be held */ 1283 /* hp->happy_lock must be held */
1284 static void happy_meal_init_rings(struct happy_meal *hp) 1284 static void happy_meal_init_rings(struct happy_meal *hp)
1285 { 1285 {
1286 struct hmeal_init_block *hb = hp->happy_block; 1286 struct hmeal_init_block *hb = hp->happy_block;
1287 struct net_device *dev = hp->dev; 1287 struct net_device *dev = hp->dev;
1288 int i; 1288 int i;
1289 1289
1290 HMD(("happy_meal_init_rings: counters to zero, ")); 1290 HMD(("happy_meal_init_rings: counters to zero, "));
1291 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0; 1291 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1292 1292
1293 /* Free any skippy bufs left around in the rings. */ 1293 /* Free any skippy bufs left around in the rings. */
1294 HMD(("clean, ")); 1294 HMD(("clean, "));
1295 happy_meal_clean_rings(hp); 1295 happy_meal_clean_rings(hp);
1296 1296
1297 /* Now get new skippy bufs for the receive ring. */ 1297 /* Now get new skippy bufs for the receive ring. */
1298 HMD(("init rxring, ")); 1298 HMD(("init rxring, "));
1299 for (i = 0; i < RX_RING_SIZE; i++) { 1299 for (i = 0; i < RX_RING_SIZE; i++) {
1300 struct sk_buff *skb; 1300 struct sk_buff *skb;
1301 1301
1302 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 1302 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1303 if (!skb) { 1303 if (!skb) {
1304 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); 1304 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1305 continue; 1305 continue;
1306 } 1306 }
1307 hp->rx_skbs[i] = skb; 1307 hp->rx_skbs[i] = skb;
1308 skb->dev = dev; 1308 skb->dev = dev;
1309 1309
1310 /* Because we reserve afterwards. */ 1310 /* Because we reserve afterwards. */
1311 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET)); 1311 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
1312 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 1312 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1313 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), 1313 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1314 hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE)); 1314 hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
1315 skb_reserve(skb, RX_OFFSET); 1315 skb_reserve(skb, RX_OFFSET);
1316 } 1316 }
1317 1317
1318 HMD(("init txring, ")); 1318 HMD(("init txring, "));
1319 for (i = 0; i < TX_RING_SIZE; i++) 1319 for (i = 0; i < TX_RING_SIZE; i++)
1320 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0); 1320 hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1321 1321
1322 HMD(("done\n")); 1322 HMD(("done\n"));
1323 } 1323 }
1324 1324
/* Kick off link bring-up on the PHY and arm the link state-machine
 * timer (happy_meal_timer).
 *
 * If @ep is NULL or requests autonegotiation, advertise every mode the
 * PHY's BMSR claims support for and restart autoneg; if autoneg will
 * not start, fall through to forcing the link.  Otherwise force the
 * speed/duplex requested in @ep directly via BMCR.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
					      void __iomem *tregs,
					      struct ethtool_cmd *ep)
{
	int timeout;

	/* Read all of the registers we are interested in now. */
	hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
	hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
	hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);

	/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */

	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
	if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
		/* Advertise everything we can support.  Each ADVERTISE_*
		 * bit is set or cleared to mirror the corresponding BMSR
		 * capability bit.
		 */
		if (hp->sw_bmsr & BMSR_10HALF)
			hp->sw_advertise |= (ADVERTISE_10HALF);
		else
			hp->sw_advertise &= ~(ADVERTISE_10HALF);

		if (hp->sw_bmsr & BMSR_10FULL)
			hp->sw_advertise |= (ADVERTISE_10FULL);
		else
			hp->sw_advertise &= ~(ADVERTISE_10FULL);
		if (hp->sw_bmsr & BMSR_100HALF)
			hp->sw_advertise |= (ADVERTISE_100HALF);
		else
			hp->sw_advertise &= ~(ADVERTISE_100HALF);
		if (hp->sw_bmsr & BMSR_100FULL)
			hp->sw_advertise |= (ADVERTISE_100FULL);
		else
			hp->sw_advertise &= ~(ADVERTISE_100FULL);
		happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);

		/* XXX Currently no Happy Meal cards I know off support 100BaseT4,
		 * XXX and this is because the DP83840 does not support it, changes
		 * XXX would need to be made to the tx/rx logic in the driver as well
		 * XXX so I completely skip checking for it in the BMSR for now.
		 */

#ifdef AUTO_SWITCH_DEBUG
		ASD(("%s: Advertising [ ", hp->dev->name));
		if (hp->sw_advertise & ADVERTISE_10HALF)
			ASD(("10H "));
		if (hp->sw_advertise & ADVERTISE_10FULL)
			ASD(("10F "));
		if (hp->sw_advertise & ADVERTISE_100HALF)
			ASD(("100H "));
		if (hp->sw_advertise & ADVERTISE_100FULL)
			ASD(("100F "));
#endif

		/* Enable Auto-Negotiation, this is usually on already... */
		hp->sw_bmcr |= BMCR_ANENABLE;
		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

		/* Restart it to make sure it is going. */
		hp->sw_bmcr |= BMCR_ANRESTART;
		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

		/* BMCR_ANRESTART self clears when the process has begun. */

		timeout = 64;  /* More than enough. */
		while (--timeout) {
			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			if (!(hp->sw_bmcr & BMCR_ANRESTART))
				break; /* got it. */
			udelay(10);
		}
		if (!timeout) {
			printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
			       "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
			printk(KERN_NOTICE "%s: Performing force link detection.\n",
			       hp->dev->name);
			goto force_link;
		} else {
			hp->timer_state = arbwait;
		}
	} else {
force_link:
		/* Force the link up, trying first a particular mode.
		 * Either we are here at the request of ethtool or
		 * because the Happy Meal would not start to autoneg.
		 */

		/* Disable auto-negotiation in BMCR, enable the duplex and
		 * speed setting, init the timer state machine, and fire it off.
		 *
		 * When reached via the goto (autoneg refused to start) there
		 * is no forced mode in ep to honor, so default to 100Mbit
		 * half duplex.
		 */
		if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
			hp->sw_bmcr = BMCR_SPEED100;
		} else {
			if (ep->speed == SPEED_100)
				hp->sw_bmcr = BMCR_SPEED100;
			else
				hp->sw_bmcr = 0;
			if (ep->duplex == DUPLEX_FULL)
				hp->sw_bmcr |= BMCR_FULLDPLX;
		}
		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

		if (!is_lucent_phy(hp)) {
			/* OK, seems we need do disable the transceiver for the first
			 * tick to make sure we get an accurate link state at the
			 * second tick.
			 */
			hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
							       DP83840_CSCONFIG);
			hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
			happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
					      hp->sw_csconfig);
		}
		hp->timer_state = ltrywait;
	}

	/* Arm the PHY state-machine timer; it continues the arbwait /
	 * ltrywait sequence started above.
	 */
	hp->timer_ticks = 0;
	hp->happy_timer.expires = jiffies + (12 * HZ)/10;  /* 1.2 sec. */
	hp->happy_timer.data = (unsigned long) hp;
	hp->happy_timer.function = &happy_meal_timer;
	add_timer(&hp->happy_timer);
}
1448 1448
1449 /* hp->happy_lock must be held */ 1449 /* hp->happy_lock must be held */
1450 static int happy_meal_init(struct happy_meal *hp) 1450 static int happy_meal_init(struct happy_meal *hp)
1451 { 1451 {
1452 void __iomem *gregs = hp->gregs; 1452 void __iomem *gregs = hp->gregs;
1453 void __iomem *etxregs = hp->etxregs; 1453 void __iomem *etxregs = hp->etxregs;
1454 void __iomem *erxregs = hp->erxregs; 1454 void __iomem *erxregs = hp->erxregs;
1455 void __iomem *bregs = hp->bigmacregs; 1455 void __iomem *bregs = hp->bigmacregs;
1456 void __iomem *tregs = hp->tcvregs; 1456 void __iomem *tregs = hp->tcvregs;
1457 u32 regtmp, rxcfg; 1457 u32 regtmp, rxcfg;
1458 unsigned char *e = &hp->dev->dev_addr[0]; 1458 unsigned char *e = &hp->dev->dev_addr[0];
1459 1459
1460 /* If auto-negotiation timer is running, kill it. */ 1460 /* If auto-negotiation timer is running, kill it. */
1461 del_timer(&hp->happy_timer); 1461 del_timer(&hp->happy_timer);
1462 1462
1463 HMD(("happy_meal_init: happy_flags[%08x] ", 1463 HMD(("happy_meal_init: happy_flags[%08x] ",
1464 hp->happy_flags)); 1464 hp->happy_flags));
1465 if (!(hp->happy_flags & HFLAG_INIT)) { 1465 if (!(hp->happy_flags & HFLAG_INIT)) {
1466 HMD(("set HFLAG_INIT, ")); 1466 HMD(("set HFLAG_INIT, "));
1467 hp->happy_flags |= HFLAG_INIT; 1467 hp->happy_flags |= HFLAG_INIT;
1468 happy_meal_get_counters(hp, bregs); 1468 happy_meal_get_counters(hp, bregs);
1469 } 1469 }
1470 1470
1471 /* Stop polling. */ 1471 /* Stop polling. */
1472 HMD(("to happy_meal_poll_stop\n")); 1472 HMD(("to happy_meal_poll_stop\n"));
1473 happy_meal_poll_stop(hp, tregs); 1473 happy_meal_poll_stop(hp, tregs);
1474 1474
1475 /* Stop transmitter and receiver. */ 1475 /* Stop transmitter and receiver. */
1476 HMD(("happy_meal_init: to happy_meal_stop\n")); 1476 HMD(("happy_meal_init: to happy_meal_stop\n"));
1477 happy_meal_stop(hp, gregs); 1477 happy_meal_stop(hp, gregs);
1478 1478
1479 /* Alloc and reset the tx/rx descriptor chains. */ 1479 /* Alloc and reset the tx/rx descriptor chains. */
1480 HMD(("happy_meal_init: to happy_meal_init_rings\n")); 1480 HMD(("happy_meal_init: to happy_meal_init_rings\n"));
1481 happy_meal_init_rings(hp); 1481 happy_meal_init_rings(hp);
1482 1482
1483 /* Shut up the MIF. */ 1483 /* Shut up the MIF. */
1484 HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ", 1484 HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
1485 hme_read32(hp, tregs + TCVR_IMASK))); 1485 hme_read32(hp, tregs + TCVR_IMASK)));
1486 hme_write32(hp, tregs + TCVR_IMASK, 0xffff); 1486 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1487 1487
1488 /* See if we can enable the MIF frame on this card to speak to the DP83840. */ 1488 /* See if we can enable the MIF frame on this card to speak to the DP83840. */
1489 if (hp->happy_flags & HFLAG_FENABLE) { 1489 if (hp->happy_flags & HFLAG_FENABLE) {
1490 HMD(("use frame old[%08x], ", 1490 HMD(("use frame old[%08x], ",
1491 hme_read32(hp, tregs + TCVR_CFG))); 1491 hme_read32(hp, tregs + TCVR_CFG)));
1492 hme_write32(hp, tregs + TCVR_CFG, 1492 hme_write32(hp, tregs + TCVR_CFG,
1493 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); 1493 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1494 } else { 1494 } else {
1495 HMD(("use bitbang old[%08x], ", 1495 HMD(("use bitbang old[%08x], ",
1496 hme_read32(hp, tregs + TCVR_CFG))); 1496 hme_read32(hp, tregs + TCVR_CFG)));
1497 hme_write32(hp, tregs + TCVR_CFG, 1497 hme_write32(hp, tregs + TCVR_CFG,
1498 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); 1498 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1499 } 1499 }
1500 1500
1501 /* Check the state of the transceiver. */ 1501 /* Check the state of the transceiver. */
1502 HMD(("to happy_meal_transceiver_check\n")); 1502 HMD(("to happy_meal_transceiver_check\n"));
1503 happy_meal_transceiver_check(hp, tregs); 1503 happy_meal_transceiver_check(hp, tregs);
1504 1504
1505 /* Put the Big Mac into a sane state. */ 1505 /* Put the Big Mac into a sane state. */
1506 HMD(("happy_meal_init: ")); 1506 HMD(("happy_meal_init: "));
1507 switch(hp->tcvr_type) { 1507 switch(hp->tcvr_type) {
1508 case none: 1508 case none:
1509 /* Cannot operate if we don't know the transceiver type! */ 1509 /* Cannot operate if we don't know the transceiver type! */
1510 HMD(("AAIEEE no transceiver type, EAGAIN")); 1510 HMD(("AAIEEE no transceiver type, EAGAIN"));
1511 return -EAGAIN; 1511 return -EAGAIN;
1512 1512
1513 case internal: 1513 case internal:
1514 /* Using the MII buffers. */ 1514 /* Using the MII buffers. */
1515 HMD(("internal, using MII, ")); 1515 HMD(("internal, using MII, "));
1516 hme_write32(hp, bregs + BMAC_XIFCFG, 0); 1516 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1517 break; 1517 break;
1518 1518
1519 case external: 1519 case external:
1520 /* Not using the MII, disable it. */ 1520 /* Not using the MII, disable it. */
1521 HMD(("external, disable MII, ")); 1521 HMD(("external, disable MII, "));
1522 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); 1522 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1523 break; 1523 break;
1524 }; 1524 };
1525 1525
1526 if (happy_meal_tcvr_reset(hp, tregs)) 1526 if (happy_meal_tcvr_reset(hp, tregs))
1527 return -EAGAIN; 1527 return -EAGAIN;
1528 1528
1529 /* Reset the Happy Meal Big Mac transceiver and the receiver. */ 1529 /* Reset the Happy Meal Big Mac transceiver and the receiver. */
1530 HMD(("tx/rx reset, ")); 1530 HMD(("tx/rx reset, "));
1531 happy_meal_tx_reset(hp, bregs); 1531 happy_meal_tx_reset(hp, bregs);
1532 happy_meal_rx_reset(hp, bregs); 1532 happy_meal_rx_reset(hp, bregs);
1533 1533
1534 /* Set jam size and inter-packet gaps to reasonable defaults. */ 1534 /* Set jam size and inter-packet gaps to reasonable defaults. */
1535 HMD(("jsize/ipg1/ipg2, ")); 1535 HMD(("jsize/ipg1/ipg2, "));
1536 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE); 1536 hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1537 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1); 1537 hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1538 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2); 1538 hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1539 1539
1540 /* Load up the MAC address and random seed. */ 1540 /* Load up the MAC address and random seed. */
1541 HMD(("rseed/macaddr, ")); 1541 HMD(("rseed/macaddr, "));
1542 1542
1543 /* The docs recommend to use the 10LSB of our MAC here. */ 1543 /* The docs recommend to use the 10LSB of our MAC here. */
1544 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff)); 1544 hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1545 1545
1546 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5])); 1546 hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1547 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3])); 1547 hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1548 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1])); 1548 hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
1549 1549
1550 HMD(("htable, ")); 1550 HMD(("htable, "));
1551 if ((hp->dev->flags & IFF_ALLMULTI) || 1551 if ((hp->dev->flags & IFF_ALLMULTI) ||
1552 (hp->dev->mc_count > 64)) { 1552 (hp->dev->mc_count > 64)) {
1553 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); 1553 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1554 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); 1554 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1555 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); 1555 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1556 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); 1556 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1557 } else if ((hp->dev->flags & IFF_PROMISC) == 0) { 1557 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1558 u16 hash_table[4]; 1558 u16 hash_table[4];
1559 struct dev_mc_list *dmi = hp->dev->mc_list; 1559 struct dev_mc_list *dmi = hp->dev->mc_list;
1560 char *addrs; 1560 char *addrs;
1561 int i; 1561 int i;
1562 u32 crc; 1562 u32 crc;
1563 1563
1564 for (i = 0; i < 4; i++) 1564 for (i = 0; i < 4; i++)
1565 hash_table[i] = 0; 1565 hash_table[i] = 0;
1566 1566
1567 for (i = 0; i < hp->dev->mc_count; i++) { 1567 for (i = 0; i < hp->dev->mc_count; i++) {
1568 addrs = dmi->dmi_addr; 1568 addrs = dmi->dmi_addr;
1569 dmi = dmi->next; 1569 dmi = dmi->next;
1570 1570
1571 if (!(*addrs & 1)) 1571 if (!(*addrs & 1))
1572 continue; 1572 continue;
1573 1573
1574 crc = ether_crc_le(6, addrs); 1574 crc = ether_crc_le(6, addrs);
1575 crc >>= 26; 1575 crc >>= 26;
1576 hash_table[crc >> 4] |= 1 << (crc & 0xf); 1576 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1577 } 1577 }
1578 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); 1578 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1579 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); 1579 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1580 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); 1580 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1581 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); 1581 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1582 } else { 1582 } else {
1583 hme_write32(hp, bregs + BMAC_HTABLE3, 0); 1583 hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1584 hme_write32(hp, bregs + BMAC_HTABLE2, 0); 1584 hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1585 hme_write32(hp, bregs + BMAC_HTABLE1, 0); 1585 hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1586 hme_write32(hp, bregs + BMAC_HTABLE0, 0); 1586 hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1587 } 1587 }
1588 1588
1589 /* Set the RX and TX ring ptrs. */ 1589 /* Set the RX and TX ring ptrs. */
1590 HMD(("ring ptrs rxr[%08x] txr[%08x]\n", 1590 HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
1591 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)), 1591 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1592 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)))); 1592 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
1593 hme_write32(hp, erxregs + ERX_RING, 1593 hme_write32(hp, erxregs + ERX_RING,
1594 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))); 1594 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1595 hme_write32(hp, etxregs + ETX_RING, 1595 hme_write32(hp, etxregs + ETX_RING,
1596 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))); 1596 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1597 1597
1598 /* Parity issues in the ERX unit of some HME revisions can cause some 1598 /* Parity issues in the ERX unit of some HME revisions can cause some
1599 * registers to not be written unless their parity is even. Detect such 1599 * registers to not be written unless their parity is even. Detect such
1600 * lost writes and simply rewrite with a low bit set (which will be ignored 1600 * lost writes and simply rewrite with a low bit set (which will be ignored
1601 * since the rxring needs to be 2K aligned). 1601 * since the rxring needs to be 2K aligned).
1602 */ 1602 */
1603 if (hme_read32(hp, erxregs + ERX_RING) != 1603 if (hme_read32(hp, erxregs + ERX_RING) !=
1604 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))) 1604 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1605 hme_write32(hp, erxregs + ERX_RING, 1605 hme_write32(hp, erxregs + ERX_RING,
1606 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)) 1606 ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1607 | 0x4); 1607 | 0x4);
1608 1608
1609 /* Set the supported burst sizes. */ 1609 /* Set the supported burst sizes. */
1610 HMD(("happy_meal_init: old[%08x] bursts<", 1610 HMD(("happy_meal_init: old[%08x] bursts<",
1611 hme_read32(hp, gregs + GREG_CFG))); 1611 hme_read32(hp, gregs + GREG_CFG)));
1612 1612
1613 #ifndef __sparc__ 1613 #ifndef __sparc__
1614 /* It is always PCI and can handle 64byte bursts. */ 1614 /* It is always PCI and can handle 64byte bursts. */
1615 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64); 1615 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1616 #else 1616 #else
1617 if ((hp->happy_bursts & DMA_BURST64) && 1617 if ((hp->happy_bursts & DMA_BURST64) &&
1618 ((hp->happy_flags & HFLAG_PCI) != 0 1618 ((hp->happy_flags & HFLAG_PCI) != 0
1619 #ifdef CONFIG_SBUS 1619 #ifdef CONFIG_SBUS
1620 || sbus_can_burst64(hp->happy_dev) 1620 || sbus_can_burst64(hp->happy_dev)
1621 #endif 1621 #endif
1622 || 0)) { 1622 || 0)) {
1623 u32 gcfg = GREG_CFG_BURST64; 1623 u32 gcfg = GREG_CFG_BURST64;
1624 1624
1625 /* I have no idea if I should set the extended 1625 /* I have no idea if I should set the extended
1626 * transfer mode bit for Cheerio, so for now I 1626 * transfer mode bit for Cheerio, so for now I
1627 * do not. -DaveM 1627 * do not. -DaveM
1628 */ 1628 */
1629 #ifdef CONFIG_SBUS 1629 #ifdef CONFIG_SBUS
1630 if ((hp->happy_flags & HFLAG_PCI) == 0 && 1630 if ((hp->happy_flags & HFLAG_PCI) == 0 &&
1631 sbus_can_dma_64bit(hp->happy_dev)) { 1631 sbus_can_dma_64bit(hp->happy_dev)) {
1632 sbus_set_sbus64(hp->happy_dev, 1632 sbus_set_sbus64(hp->happy_dev,
1633 hp->happy_bursts); 1633 hp->happy_bursts);
1634 gcfg |= GREG_CFG_64BIT; 1634 gcfg |= GREG_CFG_64BIT;
1635 } 1635 }
1636 #endif 1636 #endif
1637 1637
1638 HMD(("64>")); 1638 HMD(("64>"));
1639 hme_write32(hp, gregs + GREG_CFG, gcfg); 1639 hme_write32(hp, gregs + GREG_CFG, gcfg);
1640 } else if (hp->happy_bursts & DMA_BURST32) { 1640 } else if (hp->happy_bursts & DMA_BURST32) {
1641 HMD(("32>")); 1641 HMD(("32>"));
1642 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32); 1642 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1643 } else if (hp->happy_bursts & DMA_BURST16) { 1643 } else if (hp->happy_bursts & DMA_BURST16) {
1644 HMD(("16>")); 1644 HMD(("16>"));
1645 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16); 1645 hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1646 } else { 1646 } else {
1647 HMD(("XXX>")); 1647 HMD(("XXX>"));
1648 hme_write32(hp, gregs + GREG_CFG, 0); 1648 hme_write32(hp, gregs + GREG_CFG, 0);
1649 } 1649 }
1650 #endif /* __sparc__ */ 1650 #endif /* __sparc__ */
1651 1651
1652 /* Turn off interrupts we do not want to hear. */ 1652 /* Turn off interrupts we do not want to hear. */
1653 HMD((", enable global interrupts, ")); 1653 HMD((", enable global interrupts, "));
1654 hme_write32(hp, gregs + GREG_IMASK, 1654 hme_write32(hp, gregs + GREG_IMASK,
1655 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP | 1655 (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1656 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR)); 1656 GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1657 1657
1658 /* Set the transmit ring buffer size. */ 1658 /* Set the transmit ring buffer size. */
1659 HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE, 1659 HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
1660 hme_read32(hp, etxregs + ETX_RSIZE))); 1660 hme_read32(hp, etxregs + ETX_RSIZE)));
1661 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1); 1661 hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1662 1662
1663 /* Enable transmitter DVMA. */ 1663 /* Enable transmitter DVMA. */
1664 HMD(("tx dma enable old[%08x], ", 1664 HMD(("tx dma enable old[%08x], ",
1665 hme_read32(hp, etxregs + ETX_CFG))); 1665 hme_read32(hp, etxregs + ETX_CFG)));
1666 hme_write32(hp, etxregs + ETX_CFG, 1666 hme_write32(hp, etxregs + ETX_CFG,
1667 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE); 1667 hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1668 1668
1669 /* This chip really rots, for the receiver sometimes when you 1669 /* This chip really rots, for the receiver sometimes when you
1670 * write to its control registers not all the bits get there 1670 * write to its control registers not all the bits get there
1671 * properly. I cannot think of a sane way to provide complete 1671 * properly. I cannot think of a sane way to provide complete
1672 * coverage for this hardware bug yet. 1672 * coverage for this hardware bug yet.
1673 */ 1673 */
1674 HMD(("erx regs bug old[%08x]\n", 1674 HMD(("erx regs bug old[%08x]\n",
1675 hme_read32(hp, erxregs + ERX_CFG))); 1675 hme_read32(hp, erxregs + ERX_CFG)));
1676 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); 1676 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1677 regtmp = hme_read32(hp, erxregs + ERX_CFG); 1677 regtmp = hme_read32(hp, erxregs + ERX_CFG);
1678 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); 1678 hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1679 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) { 1679 if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1680 printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n"); 1680 printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
1681 printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n", 1681 printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
1682 ERX_CFG_DEFAULT(RX_OFFSET), regtmp); 1682 ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1683 /* XXX Should return failure here... */ 1683 /* XXX Should return failure here... */
1684 } 1684 }
1685 1685
1686 /* Enable Big Mac hash table filter. */ 1686 /* Enable Big Mac hash table filter. */
1687 HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ", 1687 HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
1688 hme_read32(hp, bregs + BMAC_RXCFG))); 1688 hme_read32(hp, bregs + BMAC_RXCFG)));
1689 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME; 1689 rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1690 if (hp->dev->flags & IFF_PROMISC) 1690 if (hp->dev->flags & IFF_PROMISC)
1691 rxcfg |= BIGMAC_RXCFG_PMISC; 1691 rxcfg |= BIGMAC_RXCFG_PMISC;
1692 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg); 1692 hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1693 1693
1694 /* Let the bits settle in the chip. */ 1694 /* Let the bits settle in the chip. */
1695 udelay(10); 1695 udelay(10);
1696 1696
1697 /* Ok, configure the Big Mac transmitter. */ 1697 /* Ok, configure the Big Mac transmitter. */
1698 HMD(("BIGMAC init, ")); 1698 HMD(("BIGMAC init, "));
1699 regtmp = 0; 1699 regtmp = 0;
1700 if (hp->happy_flags & HFLAG_FULL) 1700 if (hp->happy_flags & HFLAG_FULL)
1701 regtmp |= BIGMAC_TXCFG_FULLDPLX; 1701 regtmp |= BIGMAC_TXCFG_FULLDPLX;
1702 1702
1703 /* Don't turn on the "don't give up" bit for now. It could cause hme 1703 /* Don't turn on the "don't give up" bit for now. It could cause hme
1704 * to deadlock with the PHY if a Jabber occurs. 1704 * to deadlock with the PHY if a Jabber occurs.
1705 */ 1705 */
1706 hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/); 1706 hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
1707 1707
1708 /* Give up after 16 TX attempts. */ 1708 /* Give up after 16 TX attempts. */
1709 hme_write32(hp, bregs + BMAC_ALIMIT, 16); 1709 hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1710 1710
1711 /* Enable the output drivers no matter what. */ 1711 /* Enable the output drivers no matter what. */
1712 regtmp = BIGMAC_XCFG_ODENABLE; 1712 regtmp = BIGMAC_XCFG_ODENABLE;
1713 1713
1714 /* If card can do lance mode, enable it. */ 1714 /* If card can do lance mode, enable it. */
1715 if (hp->happy_flags & HFLAG_LANCE) 1715 if (hp->happy_flags & HFLAG_LANCE)
1716 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE; 1716 regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1717 1717
1718 /* Disable the MII buffers if using external transceiver. */ 1718 /* Disable the MII buffers if using external transceiver. */
1719 if (hp->tcvr_type == external) 1719 if (hp->tcvr_type == external)
1720 regtmp |= BIGMAC_XCFG_MIIDISAB; 1720 regtmp |= BIGMAC_XCFG_MIIDISAB;
1721 1721
1722 HMD(("XIF config old[%08x], ", 1722 HMD(("XIF config old[%08x], ",
1723 hme_read32(hp, bregs + BMAC_XIFCFG))); 1723 hme_read32(hp, bregs + BMAC_XIFCFG)));
1724 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp); 1724 hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1725 1725
1726 /* Start things up. */ 1726 /* Start things up. */
1727 HMD(("tx old[%08x] and rx [%08x] ON!\n", 1727 HMD(("tx old[%08x] and rx [%08x] ON!\n",
1728 hme_read32(hp, bregs + BMAC_TXCFG), 1728 hme_read32(hp, bregs + BMAC_TXCFG),
1729 hme_read32(hp, bregs + BMAC_RXCFG))); 1729 hme_read32(hp, bregs + BMAC_RXCFG)));
1730 hme_write32(hp, bregs + BMAC_TXCFG, 1730 hme_write32(hp, bregs + BMAC_TXCFG,
1731 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE); 1731 hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1732 hme_write32(hp, bregs + BMAC_RXCFG, 1732 hme_write32(hp, bregs + BMAC_RXCFG,
1733 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE); 1733 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1734 1734
1735 /* Get the autonegotiation started, and the watch timer ticking. */ 1735 /* Get the autonegotiation started, and the watch timer ticking. */
1736 happy_meal_begin_auto_negotiation(hp, tregs, NULL); 1736 happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1737 1737
1738 /* Success. */ 1738 /* Success. */
1739 return 0; 1739 return 0;
1740 } 1740 }
1741 1741
1742 /* hp->happy_lock must be held */ 1742 /* hp->happy_lock must be held */
1743 static void happy_meal_set_initial_advertisement(struct happy_meal *hp) 1743 static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1744 { 1744 {
1745 void __iomem *tregs = hp->tcvregs; 1745 void __iomem *tregs = hp->tcvregs;
1746 void __iomem *bregs = hp->bigmacregs; 1746 void __iomem *bregs = hp->bigmacregs;
1747 void __iomem *gregs = hp->gregs; 1747 void __iomem *gregs = hp->gregs;
1748 1748
1749 happy_meal_stop(hp, gregs); 1749 happy_meal_stop(hp, gregs);
1750 hme_write32(hp, tregs + TCVR_IMASK, 0xffff); 1750 hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
1751 if (hp->happy_flags & HFLAG_FENABLE) 1751 if (hp->happy_flags & HFLAG_FENABLE)
1752 hme_write32(hp, tregs + TCVR_CFG, 1752 hme_write32(hp, tregs + TCVR_CFG,
1753 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); 1753 hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1754 else 1754 else
1755 hme_write32(hp, tregs + TCVR_CFG, 1755 hme_write32(hp, tregs + TCVR_CFG,
1756 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); 1756 hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1757 happy_meal_transceiver_check(hp, tregs); 1757 happy_meal_transceiver_check(hp, tregs);
1758 switch(hp->tcvr_type) { 1758 switch(hp->tcvr_type) {
1759 case none: 1759 case none:
1760 return; 1760 return;
1761 case internal: 1761 case internal:
1762 hme_write32(hp, bregs + BMAC_XIFCFG, 0); 1762 hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1763 break; 1763 break;
1764 case external: 1764 case external:
1765 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); 1765 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1766 break; 1766 break;
1767 }; 1767 };
1768 if (happy_meal_tcvr_reset(hp, tregs)) 1768 if (happy_meal_tcvr_reset(hp, tregs))
1769 return; 1769 return;
1770 1770
1771 /* Latch PHY registers as of now. */ 1771 /* Latch PHY registers as of now. */
1772 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); 1772 hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1773 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); 1773 hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1774 1774
1775 /* Advertise everything we can support. */ 1775 /* Advertise everything we can support. */
1776 if (hp->sw_bmsr & BMSR_10HALF) 1776 if (hp->sw_bmsr & BMSR_10HALF)
1777 hp->sw_advertise |= (ADVERTISE_10HALF); 1777 hp->sw_advertise |= (ADVERTISE_10HALF);
1778 else 1778 else
1779 hp->sw_advertise &= ~(ADVERTISE_10HALF); 1779 hp->sw_advertise &= ~(ADVERTISE_10HALF);
1780 1780
1781 if (hp->sw_bmsr & BMSR_10FULL) 1781 if (hp->sw_bmsr & BMSR_10FULL)
1782 hp->sw_advertise |= (ADVERTISE_10FULL); 1782 hp->sw_advertise |= (ADVERTISE_10FULL);
1783 else 1783 else
1784 hp->sw_advertise &= ~(ADVERTISE_10FULL); 1784 hp->sw_advertise &= ~(ADVERTISE_10FULL);
1785 if (hp->sw_bmsr & BMSR_100HALF) 1785 if (hp->sw_bmsr & BMSR_100HALF)
1786 hp->sw_advertise |= (ADVERTISE_100HALF); 1786 hp->sw_advertise |= (ADVERTISE_100HALF);
1787 else 1787 else
1788 hp->sw_advertise &= ~(ADVERTISE_100HALF); 1788 hp->sw_advertise &= ~(ADVERTISE_100HALF);
1789 if (hp->sw_bmsr & BMSR_100FULL) 1789 if (hp->sw_bmsr & BMSR_100FULL)
1790 hp->sw_advertise |= (ADVERTISE_100FULL); 1790 hp->sw_advertise |= (ADVERTISE_100FULL);
1791 else 1791 else
1792 hp->sw_advertise &= ~(ADVERTISE_100FULL); 1792 hp->sw_advertise &= ~(ADVERTISE_100FULL);
1793 1793
1794 /* Update the PHY advertisement register. */ 1794 /* Update the PHY advertisement register. */
1795 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise); 1795 happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1796 } 1796 }
1797 1797
1798 /* Once status is latched (by happy_meal_interrupt) it is cleared by 1798 /* Once status is latched (by happy_meal_interrupt) it is cleared by
1799 * the hardware, so we cannot re-read it and get a correct value. 1799 * the hardware, so we cannot re-read it and get a correct value.
1800 * 1800 *
1801 * hp->happy_lock must be held 1801 * hp->happy_lock must be held
1802 */ 1802 */
1803 static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status) 1803 static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1804 { 1804 {
1805 int reset = 0; 1805 int reset = 0;
1806 1806
1807 /* Only print messages for non-counter related interrupts. */ 1807 /* Only print messages for non-counter related interrupts. */
1808 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND | 1808 if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1809 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR | 1809 GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1810 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR | 1810 GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1811 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR | 1811 GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1812 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR | 1812 GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1813 GREG_STAT_SLVPERR)) 1813 GREG_STAT_SLVPERR))
1814 printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n", 1814 printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
1815 hp->dev->name, status); 1815 hp->dev->name, status);
1816 1816
1817 if (status & GREG_STAT_RFIFOVF) { 1817 if (status & GREG_STAT_RFIFOVF) {
1818 /* Receive FIFO overflow is harmless and the hardware will take 1818 /* Receive FIFO overflow is harmless and the hardware will take
1819 care of it, just some packets are lost. Who cares. */ 1819 care of it, just some packets are lost. Who cares. */
1820 printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name); 1820 printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
1821 } 1821 }
1822 1822
1823 if (status & GREG_STAT_STSTERR) { 1823 if (status & GREG_STAT_STSTERR) {
1824 /* BigMAC SQE link test failed. */ 1824 /* BigMAC SQE link test failed. */
1825 printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name); 1825 printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
1826 reset = 1; 1826 reset = 1;
1827 } 1827 }
1828 1828
1829 if (status & GREG_STAT_TFIFO_UND) { 1829 if (status & GREG_STAT_TFIFO_UND) {
1830 /* Transmit FIFO underrun, again DMA error likely. */ 1830 /* Transmit FIFO underrun, again DMA error likely. */
1831 printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n", 1831 printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
1832 hp->dev->name); 1832 hp->dev->name);
1833 reset = 1; 1833 reset = 1;
1834 } 1834 }
1835 1835
1836 if (status & GREG_STAT_MAXPKTERR) { 1836 if (status & GREG_STAT_MAXPKTERR) {
1837 /* Driver error, tried to transmit something larger 1837 /* Driver error, tried to transmit something larger
1838 * than ethernet max mtu. 1838 * than ethernet max mtu.
1839 */ 1839 */
1840 printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name); 1840 printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
1841 reset = 1; 1841 reset = 1;
1842 } 1842 }
1843 1843
1844 if (status & GREG_STAT_NORXD) { 1844 if (status & GREG_STAT_NORXD) {
1845 /* This is harmless, it just means the system is 1845 /* This is harmless, it just means the system is
1846 * quite loaded and the incoming packet rate was 1846 * quite loaded and the incoming packet rate was
1847 * faster than the interrupt handler could keep up 1847 * faster than the interrupt handler could keep up
1848 * with. 1848 * with.
1849 */ 1849 */
1850 printk(KERN_INFO "%s: Happy Meal out of receive " 1850 printk(KERN_INFO "%s: Happy Meal out of receive "
1851 "descriptors, packet dropped.\n", 1851 "descriptors, packet dropped.\n",
1852 hp->dev->name); 1852 hp->dev->name);
1853 } 1853 }
1854 1854
1855 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) { 1855 if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1856 /* All sorts of DMA receive errors. */ 1856 /* All sorts of DMA receive errors. */
1857 printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name); 1857 printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
1858 if (status & GREG_STAT_RXERR) 1858 if (status & GREG_STAT_RXERR)
1859 printk("GenericError "); 1859 printk("GenericError ");
1860 if (status & GREG_STAT_RXPERR) 1860 if (status & GREG_STAT_RXPERR)
1861 printk("ParityError "); 1861 printk("ParityError ");
1862 if (status & GREG_STAT_RXTERR) 1862 if (status & GREG_STAT_RXTERR)
1863 printk("RxTagBotch "); 1863 printk("RxTagBotch ");
1864 printk("]\n"); 1864 printk("]\n");
1865 reset = 1; 1865 reset = 1;
1866 } 1866 }
1867 1867
1868 if (status & GREG_STAT_EOPERR) { 1868 if (status & GREG_STAT_EOPERR) {
1869 /* Driver bug, didn't set EOP bit in tx descriptor given 1869 /* Driver bug, didn't set EOP bit in tx descriptor given
1870 * to the happy meal. 1870 * to the happy meal.
1871 */ 1871 */
1872 printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n", 1872 printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
1873 hp->dev->name); 1873 hp->dev->name);
1874 reset = 1; 1874 reset = 1;
1875 } 1875 }
1876 1876
1877 if (status & GREG_STAT_MIFIRQ) { 1877 if (status & GREG_STAT_MIFIRQ) {
1878 /* MIF signalled an interrupt, were we polling it? */ 1878 /* MIF signalled an interrupt, were we polling it? */
1879 printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name); 1879 printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
1880 } 1880 }
1881 1881
1882 if (status & 1882 if (status &
1883 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) { 1883 (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1884 /* All sorts of transmit DMA errors. */ 1884 /* All sorts of transmit DMA errors. */
1885 printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name); 1885 printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
1886 if (status & GREG_STAT_TXEACK) 1886 if (status & GREG_STAT_TXEACK)
1887 printk("GenericError "); 1887 printk("GenericError ");
1888 if (status & GREG_STAT_TXLERR) 1888 if (status & GREG_STAT_TXLERR)
1889 printk("LateError "); 1889 printk("LateError ");
1890 if (status & GREG_STAT_TXPERR) 1890 if (status & GREG_STAT_TXPERR)
1891 printk("ParityErro "); 1891 printk("ParityErro ");
1892 if (status & GREG_STAT_TXTERR) 1892 if (status & GREG_STAT_TXTERR)
1893 printk("TagBotch "); 1893 printk("TagBotch ");
1894 printk("]\n"); 1894 printk("]\n");
1895 reset = 1; 1895 reset = 1;
1896 } 1896 }
1897 1897
1898 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) { 1898 if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1899 /* Bus or parity error when cpu accessed happy meal registers 1899 /* Bus or parity error when cpu accessed happy meal registers
1900 * or it's internal FIFO's. Should never see this. 1900 * or it's internal FIFO's. Should never see this.
1901 */ 1901 */
1902 printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n", 1902 printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
1903 hp->dev->name, 1903 hp->dev->name,
1904 (status & GREG_STAT_SLVPERR) ? "parity" : "generic"); 1904 (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1905 reset = 1; 1905 reset = 1;
1906 } 1906 }
1907 1907
1908 if (reset) { 1908 if (reset) {
1909 printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name); 1909 printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
1910 happy_meal_init(hp); 1910 happy_meal_init(hp);
1911 return 1; 1911 return 1;
1912 } 1912 }
1913 return 0; 1913 return 0;
1914 } 1914 }
1915 1915
1916 /* hp->happy_lock must be held */ 1916 /* hp->happy_lock must be held */
1917 static void happy_meal_mif_interrupt(struct happy_meal *hp) 1917 static void happy_meal_mif_interrupt(struct happy_meal *hp)
1918 { 1918 {
1919 void __iomem *tregs = hp->tcvregs; 1919 void __iomem *tregs = hp->tcvregs;
1920 1920
1921 printk(KERN_INFO "%s: Link status change.\n", hp->dev->name); 1921 printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
1922 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); 1922 hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1923 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); 1923 hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
1924 1924
1925 /* Use the fastest transmission protocol possible. */ 1925 /* Use the fastest transmission protocol possible. */
1926 if (hp->sw_lpa & LPA_100FULL) { 1926 if (hp->sw_lpa & LPA_100FULL) {
1927 printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name); 1927 printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
1928 hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100); 1928 hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
1929 } else if (hp->sw_lpa & LPA_100HALF) { 1929 } else if (hp->sw_lpa & LPA_100HALF) {
1930 printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name); 1930 printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
1931 hp->sw_bmcr |= BMCR_SPEED100; 1931 hp->sw_bmcr |= BMCR_SPEED100;
1932 } else if (hp->sw_lpa & LPA_10FULL) { 1932 } else if (hp->sw_lpa & LPA_10FULL) {
1933 printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name); 1933 printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
1934 hp->sw_bmcr |= BMCR_FULLDPLX; 1934 hp->sw_bmcr |= BMCR_FULLDPLX;
1935 } else { 1935 } else {
1936 printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name); 1936 printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
1937 } 1937 }
1938 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); 1938 happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1939 1939
1940 /* Finally stop polling and shut up the MIF. */ 1940 /* Finally stop polling and shut up the MIF. */
1941 happy_meal_poll_stop(hp, tregs); 1941 happy_meal_poll_stop(hp, tregs);
1942 } 1942 }
1943 1943
/* Transmit-path debug tracing: build with TXDEBUG defined to expand
 * TXD(x) into printk x; otherwise the macro compiles away to nothing.
 */
#ifdef TXDEBUG
#define TXD(x)	printk x
#else
#define TXD(x)
#endif
1949 1949
/* hp->happy_lock must be held */
static void happy_meal_tx(struct happy_meal *hp)
{
	/* Reclaim transmit descriptors the chip has completed: walk the ring
	 * from tx_old toward tx_new, unmap each packet's DMA buffers, free
	 * the skb, bump the stats, and finally wake the queue if enough
	 * slots have opened up.
	 */
	struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
	struct happy_meal_txd *this;
	struct net_device *dev = hp->dev;
	int elem;

	elem = hp->tx_old;
	TXD(("TX<"));
	while (elem != hp->tx_new) {
		struct sk_buff *skb;
		u32 flags, dma_addr, dma_len;
		int frag;

		TXD(("[%d]", elem));
		this = &txbase[elem];
		flags = hme_read_desc32(hp, &this->tx_flags);
		/* Descriptor still owned by the chip: nothing more to reap. */
		if (flags & TXFLAG_OWN)
			break;
		skb = hp->tx_skbs[elem];
		if (skb_shinfo(skb)->nr_frags) {
			int last;

			/* Multi-fragment packet: only reclaim it once the
			 * chip has also released the LAST fragment's
			 * descriptor.
			 */
			last = elem + skb_shinfo(skb)->nr_frags;
			last &= (TX_RING_SIZE - 1);
			flags = hme_read_desc32(hp, &txbase[last].tx_flags);
			if (flags & TXFLAG_OWN)
				break;
		}
		hp->tx_skbs[elem] = NULL;
		hp->net_stats.tx_bytes += skb->len;

		/* Unmap the head buffer plus every fragment of the packet,
		 * advancing through consecutive ring entries as we go.
		 */
		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			dma_addr = hme_read_desc32(hp, &this->tx_addr);
			dma_len = hme_read_desc32(hp, &this->tx_flags);

			dma_len &= TXFLAG_SIZE;
			hme_dma_unmap(hp, dma_addr, dma_len, DMA_TODEVICE);

			elem = NEXT_TX(elem);
			this = &txbase[elem];
		}

		dev_kfree_skb_irq(skb);
		hp->net_stats.tx_packets++;
	}
	hp->tx_old = elem;
	TXD((">"));

	/* Restart the queue once there is room for a maximally
	 * fragmented packet.
	 */
	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}
2004 2004
/* Receive-path debug tracing: build with RXDEBUG defined to expand
 * RXD(x) into printk x; otherwise the macro compiles away to nothing.
 */
#ifdef RXDEBUG
#define RXD(x)	printk x
#else
#define RXD(x)
#endif
2010 2010
2011 /* Originally I used to handle the allocation failure by just giving back just 2011 /* Originally I used to handle the allocation failure by just giving back just
2012 * that one ring buffer to the happy meal. Problem is that usually when that 2012 * that one ring buffer to the happy meal. Problem is that usually when that
2013 * condition is triggered, the happy meal expects you to do something reasonable 2013 * condition is triggered, the happy meal expects you to do something reasonable
2014 * with all of the packets it has DMA'd in. So now I just drop the entire 2014 * with all of the packets it has DMA'd in. So now I just drop the entire
2015 * ring when we cannot get a new skb and give them all back to the happy meal, 2015 * ring when we cannot get a new skb and give them all back to the happy meal,
2016 * maybe things will be "happier" now. 2016 * maybe things will be "happier" now.
2017 * 2017 *
2018 * hp->happy_lock must be held 2018 * hp->happy_lock must be held
2019 */ 2019 */
static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
{
	struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
	struct happy_meal_rxd *this;
	int elem = hp->rx_new, drops = 0;
	u32 flags;

	RXD(("RX<"));
	this = &rxbase[elem];
	/* Walk the RX ring starting at rx_new; stop at the first
	 * descriptor the hardware still owns (RXFLAG_OWN set).
	 */
	while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
		struct sk_buff *skb;
		int len = flags >> 16;	/* frame length lives in the upper 16 bits */
		u16 csum = flags & RXFLAG_CSUM;
		u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);

		RXD(("[%d ", elem));

		/* Check for errors. */
		if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
			RXD(("ERR(%08x)]", flags));
			hp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				hp->net_stats.rx_length_errors++;
			if (len & (RXFLAG_OVERFLOW >> 16)) {
				hp->net_stats.rx_over_errors++;
				hp->net_stats.rx_fifo_errors++;
			}

			/* Return it to the Happy meal.  The allocation paths
			 * below also jump here when they cannot get memory;
			 * the buffer is simply handed back to the chip.
			 */
	drop_it:
			hp->net_stats.rx_dropped++;
			hme_write_rxd(hp, this,
				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
				      dma_addr);
			goto next;
		}
		skb = hp->rx_skbs[elem];
		if (len > RX_COPY_THRESHOLD) {
			/* Big frame: pass the current skb up the stack and
			 * refill the ring slot with a freshly allocated one.
			 */
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
			hp->rx_skbs[elem] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
			hme_write_rxd(hp, this,
				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
				      hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			/* Small frame: copy into a new skb and recycle the
			 * original DMA buffer in place.
			 */
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			copy_skb->dev = dev;
			skb_reserve(copy_skb, 2);	/* align payload */
			skb_put(copy_skb, len);
			hme_dma_sync_for_cpu(hp, dma_addr, len, DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			hme_dma_sync_for_device(hp, dma_addr, len, DMA_FROMDEVICE);

			/* Reuse original ring buffer. */
			hme_write_rxd(hp, this,
				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
				      dma_addr);

			skb = copy_skb;
		}

		/* This card is _fucking_ hot... */
		skb->csum = ntohs(csum ^ 0xffff);
		skb->ip_summed = CHECKSUM_HW;

		RXD(("len=%d csum=%4x]", len, csum));
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		dev->last_rx = jiffies;
		hp->net_stats.rx_packets++;
		hp->net_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	hp->rx_new = elem;
	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
	RXD((">"));
}
2120 2120
/* Interrupt handler for a single HME instance.  Reads the global
 * status register once and services each pending condition in turn
 * under hp->happy_lock.
 */
static irqreturn_t happy_meal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct happy_meal *hp = dev->priv;
	u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);

	HMD(("happy_meal_interrupt: status=%08x ", happy_status));

	spin_lock(&hp->happy_lock);

	if (happy_status & GREG_STAT_ERRORS) {
		HMD(("ERRORS "));
		/* If the error handler returns nonzero, skip the
		 * normal TX/RX servicing for this interrupt.
		 */
		if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
			goto out;
	}

	if (happy_status & GREG_STAT_MIFIRQ) {
		HMD(("MIFIRQ "));
		happy_meal_mif_interrupt(hp);
	}

	if (happy_status & GREG_STAT_TXALL) {
		HMD(("TXALL "));
		happy_meal_tx(hp);
	}

	if (happy_status & GREG_STAT_RXTOHOST) {
		HMD(("RXTOHOST "));
		happy_meal_rx(hp, dev);
	}

	HMD(("done\n"));
out:
	spin_unlock(&hp->happy_lock);

	return IRQ_HANDLED;
}
2158 2158
2159 #ifdef CONFIG_SBUS 2159 #ifdef CONFIG_SBUS
/* Shared interrupt handler for SBUS Quattro (QFE) cards: all four
 * ports raise the same IRQ line, so each port's status register is
 * polled and serviced in turn.
 */
static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie, struct pt_regs *ptregs)
{
	struct quattro *qp = (struct quattro *) cookie;
	int i;

	for (i = 0; i < 4; i++) {
		struct net_device *dev = qp->happy_meals[i];
		struct happy_meal *hp = dev->priv;
		u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);

		HMD(("quattro_interrupt: status=%08x ", happy_status));

		/* Nothing pending on this port; don't take its lock. */
		if (!(happy_status & (GREG_STAT_ERRORS |
				      GREG_STAT_MIFIRQ |
				      GREG_STAT_TXALL |
				      GREG_STAT_RXTOHOST)))
			continue;

		spin_lock(&hp->happy_lock);

		if (happy_status & GREG_STAT_ERRORS) {
			HMD(("ERRORS "));
			/* Nonzero means skip normal servicing of this port. */
			if (happy_meal_is_not_so_happy(hp, happy_status))
				goto next;
		}

		if (happy_status & GREG_STAT_MIFIRQ) {
			HMD(("MIFIRQ "));
			happy_meal_mif_interrupt(hp);
		}

		if (happy_status & GREG_STAT_TXALL) {
			HMD(("TXALL "));
			happy_meal_tx(hp);
		}

		if (happy_status & GREG_STAT_RXTOHOST) {
			HMD(("RXTOHOST "));
			happy_meal_rx(hp, dev);
		}

	next:
		spin_unlock(&hp->happy_lock);
	}
	HMD(("done\n"));

	return IRQ_HANDLED;
}
2208 #endif 2208 #endif
2209 2209
2210 static int happy_meal_open(struct net_device *dev) 2210 static int happy_meal_open(struct net_device *dev)
2211 { 2211 {
2212 struct happy_meal *hp = dev->priv; 2212 struct happy_meal *hp = dev->priv;
2213 int res; 2213 int res;
2214 2214
2215 HMD(("happy_meal_open: ")); 2215 HMD(("happy_meal_open: "));
2216 2216
2217 /* On SBUS Quattro QFE cards, all hme interrupts are concentrated 2217 /* On SBUS Quattro QFE cards, all hme interrupts are concentrated
2218 * into a single source which we register handling at probe time. 2218 * into a single source which we register handling at probe time.
2219 */ 2219 */
2220 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { 2220 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2221 if (request_irq(dev->irq, &happy_meal_interrupt, 2221 if (request_irq(dev->irq, &happy_meal_interrupt,
2222 SA_SHIRQ, dev->name, (void *)dev)) { 2222 SA_SHIRQ, dev->name, (void *)dev)) {
2223 HMD(("EAGAIN\n")); 2223 HMD(("EAGAIN\n"));
2224 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", 2224 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2225 dev->irq); 2225 dev->irq);
2226 2226
2227 return -EAGAIN; 2227 return -EAGAIN;
2228 } 2228 }
2229 } 2229 }
2230 2230
2231 HMD(("to happy_meal_init\n")); 2231 HMD(("to happy_meal_init\n"));
2232 2232
2233 spin_lock_irq(&hp->happy_lock); 2233 spin_lock_irq(&hp->happy_lock);
2234 res = happy_meal_init(hp); 2234 res = happy_meal_init(hp);
2235 spin_unlock_irq(&hp->happy_lock); 2235 spin_unlock_irq(&hp->happy_lock);
2236 2236
2237 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) 2237 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2238 free_irq(dev->irq, dev); 2238 free_irq(dev->irq, dev);
2239 return res; 2239 return res;
2240 } 2240 }
2241 2241
2242 static int happy_meal_close(struct net_device *dev) 2242 static int happy_meal_close(struct net_device *dev)
2243 { 2243 {
2244 struct happy_meal *hp = dev->priv; 2244 struct happy_meal *hp = dev->priv;
2245 2245
2246 spin_lock_irq(&hp->happy_lock); 2246 spin_lock_irq(&hp->happy_lock);
2247 happy_meal_stop(hp, hp->gregs); 2247 happy_meal_stop(hp, hp->gregs);
2248 happy_meal_clean_rings(hp); 2248 happy_meal_clean_rings(hp);
2249 2249
2250 /* If auto-negotiation timer is running, kill it. */ 2250 /* If auto-negotiation timer is running, kill it. */
2251 del_timer(&hp->happy_timer); 2251 del_timer(&hp->happy_timer);
2252 2252
2253 spin_unlock_irq(&hp->happy_lock); 2253 spin_unlock_irq(&hp->happy_lock);
2254 2254
2255 /* On Quattro QFE cards, all hme interrupts are concentrated 2255 /* On Quattro QFE cards, all hme interrupts are concentrated
2256 * into a single source which we register handling at probe 2256 * into a single source which we register handling at probe
2257 * time and never unregister. 2257 * time and never unregister.
2258 */ 2258 */
2259 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) 2259 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2260 free_irq(dev->irq, dev); 2260 free_irq(dev->irq, dev);
2261 2261
2262 return 0; 2262 return 0;
2263 } 2263 }
2264 2264
2265 #ifdef SXDEBUG 2265 #ifdef SXDEBUG
2266 #define SXD(x) printk x 2266 #define SXD(x) printk x
2267 #else 2267 #else
2268 #define SXD(x) 2268 #define SXD(x)
2269 #endif 2269 #endif
2270 2270
2271 static void happy_meal_tx_timeout(struct net_device *dev) 2271 static void happy_meal_tx_timeout(struct net_device *dev)
2272 { 2272 {
2273 struct happy_meal *hp = dev->priv; 2273 struct happy_meal *hp = dev->priv;
2274 2274
2275 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 2275 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2276 tx_dump_log(); 2276 tx_dump_log();
2277 printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name, 2277 printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
2278 hme_read32(hp, hp->gregs + GREG_STAT), 2278 hme_read32(hp, hp->gregs + GREG_STAT),
2279 hme_read32(hp, hp->etxregs + ETX_CFG), 2279 hme_read32(hp, hp->etxregs + ETX_CFG),
2280 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG)); 2280 hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
2281 2281
2282 spin_lock_irq(&hp->happy_lock); 2282 spin_lock_irq(&hp->happy_lock);
2283 happy_meal_init(hp); 2283 happy_meal_init(hp);
2284 spin_unlock_irq(&hp->happy_lock); 2284 spin_unlock_irq(&hp->happy_lock);
2285 2285
2286 netif_wake_queue(dev); 2286 netif_wake_queue(dev);
2287 } 2287 }
2288 2288
/* ndo hard_start_xmit: place one skb (head plus any page fragments)
 * on the TX descriptor ring and kick the chip.  Returns 0 on success,
 * 1 if the ring was unexpectedly full (queue is stopped in that case).
 */
static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct happy_meal *hp = dev->priv;
	int entry;
	u32 tx_flags;

	tx_flags = TXFLAG_OWN;
	if (skb->ip_summed == CHECKSUM_HW) {
		/* Hardware checksum offload: encode where the checksum
		 * computation starts and where the result is stuffed,
		 * both as byte offsets from the start of the frame.
		 */
		u32 csum_start_off, csum_stuff_off;

		csum_start_off = (u32) (skb->h.raw - skb->data);
		csum_stuff_off = (u32) ((skb->h.raw + skb->csum) - skb->data);

		tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
			    ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
			    ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
	}

	spin_lock_irq(&hp->happy_lock);

	/* The queue should have been stopped before the ring could
	 * fill; getting here with too few slots is a driver bug.
	 */
	if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&hp->happy_lock);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	entry = hp->tx_new;
	SXD(("SX<l[%d]e[%d]>", len, entry));
	hp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: a single descriptor carries the whole frame. */
		u32 mapping, len;

		len = skb->len;
		mapping = hme_dma_map(hp, skb->data, len, DMA_TODEVICE);
		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
			      (tx_flags | (len & TXFLAG_SIZE)),
			      mapping);
		entry = NEXT_TX(entry);
	} else {
		u32 first_len, first_mapping;
		int frag, first_entry = entry;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = hme_dma_map(hp, skb->data, first_len, DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len, mapping, this_txflags;

			len = this_frag->size;
			mapping = hme_dma_map(hp,
					      ((void *) page_address(this_frag->page) +
					       this_frag->page_offset),
					      len, DMA_TODEVICE);
			this_txflags = tx_flags;
			/* Only the last fragment carries end-of-packet. */
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_txflags |= TXFLAG_EOP;
			hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
				      (this_txflags | (len & TXFLAG_SIZE)),
				      mapping);
			entry = NEXT_TX(entry);
		}
		/* Writing the SOP descriptor last publishes the whole
		 * chain to the hardware atomically (see comment above).
		 */
		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
			      (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
			      first_mapping);
	}

	hp->tx_new = entry;

	/* Stop the queue if a maximally-fragmented skb could no longer fit. */
	if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	/* Get it going. */
	hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);

	spin_unlock_irq(&hp->happy_lock);

	dev->trans_start = jiffies;

	tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
	return 0;
}
2379 2379
2380 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) 2380 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2381 { 2381 {
2382 struct happy_meal *hp = dev->priv; 2382 struct happy_meal *hp = dev->priv;
2383 2383
2384 spin_lock_irq(&hp->happy_lock); 2384 spin_lock_irq(&hp->happy_lock);
2385 happy_meal_get_counters(hp, hp->bigmacregs); 2385 happy_meal_get_counters(hp, hp->bigmacregs);
2386 spin_unlock_irq(&hp->happy_lock); 2386 spin_unlock_irq(&hp->happy_lock);
2387 2387
2388 return &hp->net_stats; 2388 return &hp->net_stats;
2389 } 2389 }
2390 2390
2391 static void happy_meal_set_multicast(struct net_device *dev) 2391 static void happy_meal_set_multicast(struct net_device *dev)
2392 { 2392 {
2393 struct happy_meal *hp = dev->priv; 2393 struct happy_meal *hp = dev->priv;
2394 void __iomem *bregs = hp->bigmacregs; 2394 void __iomem *bregs = hp->bigmacregs;
2395 struct dev_mc_list *dmi = dev->mc_list; 2395 struct dev_mc_list *dmi = dev->mc_list;
2396 char *addrs; 2396 char *addrs;
2397 int i; 2397 int i;
2398 u32 crc; 2398 u32 crc;
2399 2399
2400 spin_lock_irq(&hp->happy_lock); 2400 spin_lock_irq(&hp->happy_lock);
2401 2401
2402 netif_stop_queue(dev); 2402 netif_stop_queue(dev);
2403 2403
2404 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 2404 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
2405 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); 2405 hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2406 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); 2406 hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2407 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); 2407 hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2408 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); 2408 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2409 } else if (dev->flags & IFF_PROMISC) { 2409 } else if (dev->flags & IFF_PROMISC) {
2410 hme_write32(hp, bregs + BMAC_RXCFG, 2410 hme_write32(hp, bregs + BMAC_RXCFG,
2411 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC); 2411 hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2412 } else { 2412 } else {
2413 u16 hash_table[4]; 2413 u16 hash_table[4];
2414 2414
2415 for (i = 0; i < 4; i++) 2415 for (i = 0; i < 4; i++)
2416 hash_table[i] = 0; 2416 hash_table[i] = 0;
2417 2417
2418 for (i = 0; i < dev->mc_count; i++) { 2418 for (i = 0; i < dev->mc_count; i++) {
2419 addrs = dmi->dmi_addr; 2419 addrs = dmi->dmi_addr;
2420 dmi = dmi->next; 2420 dmi = dmi->next;
2421 2421
2422 if (!(*addrs & 1)) 2422 if (!(*addrs & 1))
2423 continue; 2423 continue;
2424 2424
2425 crc = ether_crc_le(6, addrs); 2425 crc = ether_crc_le(6, addrs);
2426 crc >>= 26; 2426 crc >>= 26;
2427 hash_table[crc >> 4] |= 1 << (crc & 0xf); 2427 hash_table[crc >> 4] |= 1 << (crc & 0xf);
2428 } 2428 }
2429 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); 2429 hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2430 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); 2430 hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2431 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); 2431 hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2432 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); 2432 hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2433 } 2433 }
2434 2434
2435 netif_wake_queue(dev); 2435 netif_wake_queue(dev);
2436 2436
2437 spin_unlock_irq(&hp->happy_lock); 2437 spin_unlock_irq(&hp->happy_lock);
2438 } 2438 }
2439 2439
2440 /* Ethtool support... */ 2440 /* Ethtool support... */
2441 static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2441 static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2442 { 2442 {
2443 struct happy_meal *hp = dev->priv; 2443 struct happy_meal *hp = dev->priv;
2444 2444
2445 cmd->supported = 2445 cmd->supported =
2446 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2446 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2447 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2447 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2448 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); 2448 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2449 2449
2450 /* XXX hardcoded stuff for now */ 2450 /* XXX hardcoded stuff for now */
2451 cmd->port = PORT_TP; /* XXX no MII support */ 2451 cmd->port = PORT_TP; /* XXX no MII support */
2452 cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */ 2452 cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
2453 cmd->phy_address = 0; /* XXX fixed PHYAD */ 2453 cmd->phy_address = 0; /* XXX fixed PHYAD */
2454 2454
2455 /* Record PHY settings. */ 2455 /* Record PHY settings. */
2456 spin_lock_irq(&hp->happy_lock); 2456 spin_lock_irq(&hp->happy_lock);
2457 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); 2457 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2458 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA); 2458 hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2459 spin_unlock_irq(&hp->happy_lock); 2459 spin_unlock_irq(&hp->happy_lock);
2460 2460
2461 if (hp->sw_bmcr & BMCR_ANENABLE) { 2461 if (hp->sw_bmcr & BMCR_ANENABLE) {
2462 cmd->autoneg = AUTONEG_ENABLE; 2462 cmd->autoneg = AUTONEG_ENABLE;
2463 cmd->speed = 2463 cmd->speed =
2464 (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? 2464 (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2465 SPEED_100 : SPEED_10; 2465 SPEED_100 : SPEED_10;
2466 if (cmd->speed == SPEED_100) 2466 if (cmd->speed == SPEED_100)
2467 cmd->duplex = 2467 cmd->duplex =
2468 (hp->sw_lpa & (LPA_100FULL)) ? 2468 (hp->sw_lpa & (LPA_100FULL)) ?
2469 DUPLEX_FULL : DUPLEX_HALF; 2469 DUPLEX_FULL : DUPLEX_HALF;
2470 else 2470 else
2471 cmd->duplex = 2471 cmd->duplex =
2472 (hp->sw_lpa & (LPA_10FULL)) ? 2472 (hp->sw_lpa & (LPA_10FULL)) ?
2473 DUPLEX_FULL : DUPLEX_HALF; 2473 DUPLEX_FULL : DUPLEX_HALF;
2474 } else { 2474 } else {
2475 cmd->autoneg = AUTONEG_DISABLE; 2475 cmd->autoneg = AUTONEG_DISABLE;
2476 cmd->speed = 2476 cmd->speed =
2477 (hp->sw_bmcr & BMCR_SPEED100) ? 2477 (hp->sw_bmcr & BMCR_SPEED100) ?
2478 SPEED_100 : SPEED_10; 2478 SPEED_100 : SPEED_10;
2479 cmd->duplex = 2479 cmd->duplex =
2480 (hp->sw_bmcr & BMCR_FULLDPLX) ? 2480 (hp->sw_bmcr & BMCR_FULLDPLX) ?
2481 DUPLEX_FULL : DUPLEX_HALF; 2481 DUPLEX_FULL : DUPLEX_HALF;
2482 } 2482 }
2483 return 0; 2483 return 0;
2484 } 2484 }
2485 2485
2486 static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2486 static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2487 { 2487 {
2488 struct happy_meal *hp = dev->priv; 2488 struct happy_meal *hp = dev->priv;
2489 2489
2490 /* Verify the settings we care about. */ 2490 /* Verify the settings we care about. */
2491 if (cmd->autoneg != AUTONEG_ENABLE && 2491 if (cmd->autoneg != AUTONEG_ENABLE &&
2492 cmd->autoneg != AUTONEG_DISABLE) 2492 cmd->autoneg != AUTONEG_DISABLE)
2493 return -EINVAL; 2493 return -EINVAL;
2494 if (cmd->autoneg == AUTONEG_DISABLE && 2494 if (cmd->autoneg == AUTONEG_DISABLE &&
2495 ((cmd->speed != SPEED_100 && 2495 ((cmd->speed != SPEED_100 &&
2496 cmd->speed != SPEED_10) || 2496 cmd->speed != SPEED_10) ||
2497 (cmd->duplex != DUPLEX_HALF && 2497 (cmd->duplex != DUPLEX_HALF &&
2498 cmd->duplex != DUPLEX_FULL))) 2498 cmd->duplex != DUPLEX_FULL)))
2499 return -EINVAL; 2499 return -EINVAL;
2500 2500
2501 /* Ok, do it to it. */ 2501 /* Ok, do it to it. */
2502 spin_lock_irq(&hp->happy_lock); 2502 spin_lock_irq(&hp->happy_lock);
2503 del_timer(&hp->happy_timer); 2503 del_timer(&hp->happy_timer);
2504 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd); 2504 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2505 spin_unlock_irq(&hp->happy_lock); 2505 spin_unlock_irq(&hp->happy_lock);
2506 2506
2507 return 0; 2507 return 0;
2508 } 2508 }
2509 2509
2510 static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2510 static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2511 { 2511 {
2512 struct happy_meal *hp = dev->priv; 2512 struct happy_meal *hp = dev->priv;
2513 2513
2514 strcpy(info->driver, "sunhme"); 2514 strcpy(info->driver, "sunhme");
2515 strcpy(info->version, "2.02"); 2515 strcpy(info->version, "2.02");
2516 if (hp->happy_flags & HFLAG_PCI) { 2516 if (hp->happy_flags & HFLAG_PCI) {
2517 struct pci_dev *pdev = hp->happy_dev; 2517 struct pci_dev *pdev = hp->happy_dev;
2518 strcpy(info->bus_info, pci_name(pdev)); 2518 strcpy(info->bus_info, pci_name(pdev));
2519 } 2519 }
2520 #ifdef CONFIG_SBUS 2520 #ifdef CONFIG_SBUS
2521 else { 2521 else {
2522 struct sbus_dev *sdev = hp->happy_dev; 2522 struct sbus_dev *sdev = hp->happy_dev;
2523 sprintf(info->bus_info, "SBUS:%d", 2523 sprintf(info->bus_info, "SBUS:%d",
2524 sdev->slot); 2524 sdev->slot);
2525 } 2525 }
2526 #endif 2526 #endif
2527 } 2527 }
2528 2528
2529 static u32 hme_get_link(struct net_device *dev) 2529 static u32 hme_get_link(struct net_device *dev)
2530 { 2530 {
2531 struct happy_meal *hp = dev->priv; 2531 struct happy_meal *hp = dev->priv;
2532 2532
2533 spin_lock_irq(&hp->happy_lock); 2533 spin_lock_irq(&hp->happy_lock);
2534 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); 2534 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2535 spin_unlock_irq(&hp->happy_lock); 2535 spin_unlock_irq(&hp->happy_lock);
2536 2536
2537 return (hp->sw_bmsr & BMSR_LSTATUS); 2537 return (hp->sw_bmsr & BMSR_LSTATUS);
2538 } 2538 }
2539 2539
/* ethtool method table; hooked up to the net_device at registration
 * time (the registration site is elsewhere in this file).
 */
static struct ethtool_ops hme_ethtool_ops = {
	.get_settings = hme_get_settings,
	.set_settings = hme_set_settings,
	.get_drvinfo = hme_get_drvinfo,
	.get_link = hme_get_link,
};
2546 2546
2547 static int hme_version_printed; 2547 static int hme_version_printed;
2548 2548
2549 #ifdef CONFIG_SBUS 2549 #ifdef CONFIG_SBUS
2550 void __init quattro_get_ranges(struct quattro *qp) 2550 void __init quattro_get_ranges(struct quattro *qp)
2551 { 2551 {
2552 struct sbus_dev *sdev = qp->quattro_dev; 2552 struct sbus_dev *sdev = qp->quattro_dev;
2553 int err; 2553 int err;
2554 2554
2555 err = prom_getproperty(sdev->prom_node, 2555 err = prom_getproperty(sdev->prom_node,
2556 "ranges", 2556 "ranges",
2557 (char *)&qp->ranges[0], 2557 (char *)&qp->ranges[0],
2558 sizeof(qp->ranges)); 2558 sizeof(qp->ranges));
2559 if (err == 0 || err == -1) { 2559 if (err == 0 || err == -1) {
2560 qp->nranges = 0; 2560 qp->nranges = 0;
2561 return; 2561 return;
2562 } 2562 }
2563 qp->nranges = (err / sizeof(struct linux_prom_ranges)); 2563 qp->nranges = (err / sizeof(struct linux_prom_ranges));
2564 } 2564 }
2565 2565
/* Translate a happy meal's SBUS register addresses into the parent's
 * address space using the quattro's cached "ranges" entries.
 */
static void __init quattro_apply_ranges(struct quattro *qp, struct happy_meal *hp)
{
	struct sbus_dev *sdev = hp->happy_dev;
	int rng;

	for (rng = 0; rng < qp->nranges; rng++) {
		struct linux_prom_ranges *rngp = &qp->ranges[rng];
		int reg;

		/* Find a register entry (of the 5 this device has)
		 * whose I/O space matches this range's child space.
		 */
		for (reg = 0; reg < 5; reg++) {
			if (sdev->reg_addrs[reg].which_io ==
			    rngp->ot_child_space)
				break;
		}
		if (reg == 5)	/* no register uses this range */
			continue;

		/* Rewrite the entry in parent-space terms. */
		sdev->reg_addrs[reg].which_io = rngp->ot_parent_space;
		sdev->reg_addrs[reg].phys_addr += rngp->ot_parent_base;
	}
}
2587 2587
/* Given a happy meal sbus device, find it's quattro parent.
 * If none exist, allocate and return a new one.
 *
 * Return NULL on failure.
 */
static struct quattro * __init quattro_sbus_find(struct sbus_dev *goal_sdev)
{
	struct sbus_bus *sbus;
	struct sbus_dev *sdev;
	struct quattro *qp;
	int i;

	/* Nothing tracked yet: go straight to allocation. */
	if (qfe_sbus_list == NULL)
		goto found;

	/* Already tracked?  Each quattro covers up to four sdevs,
	 * linked via sdev->next.
	 */
	for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
		for (i = 0, sdev = qp->quattro_dev;
		     (sdev != NULL) && (i < 4);
		     sdev = sdev->next, i++) {
			if (sdev == goal_sdev)
				return qp;
		}
	}
	/* Verify goal_sdev really exists on some SBUS before
	 * allocating a tracking structure for it.
	 */
	for_each_sbus(sbus) {
		for_each_sbusdev(sdev, sbus) {
			if (sdev == goal_sdev)
				goto found;
		}
	}

	/* Cannot find quattro parent, fail. */
	return NULL;

found:
	/* Allocate and enqueue a fresh tracking structure; NULL is
	 * returned to the caller if the allocation fails.
	 */
	qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
	if (qp != NULL) {
		int i;

		for (i = 0; i < 4; i++)
			qp->happy_meals[i] = NULL;

		qp->quattro_dev = goal_sdev;
		qp->next = qfe_sbus_list;
		qfe_sbus_list = qp;
		quattro_get_ranges(qp);
	}
	return qp;
}
2636 2636
2637 /* After all quattro cards have been probed, we call these functions 2637 /* After all quattro cards have been probed, we call these functions
2638 * to register the IRQ handlers. 2638 * to register the IRQ handlers.
2639 */ 2639 */
2640 static void __init quattro_sbus_register_irqs(void) 2640 static void __init quattro_sbus_register_irqs(void)
2641 { 2641 {
2642 struct quattro *qp; 2642 struct quattro *qp;
2643 2643
2644 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { 2644 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
2645 struct sbus_dev *sdev = qp->quattro_dev; 2645 struct sbus_dev *sdev = qp->quattro_dev;
2646 int err; 2646 int err;
2647 2647
2648 err = request_irq(sdev->irqs[0], 2648 err = request_irq(sdev->irqs[0],
2649 quattro_sbus_interrupt, 2649 quattro_sbus_interrupt,
2650 SA_SHIRQ, "Quattro", 2650 SA_SHIRQ, "Quattro",
2651 qp); 2651 qp);
2652 if (err != 0) { 2652 if (err != 0) {
2653 printk(KERN_ERR "Quattro: Fatal IRQ registery error %d.\n", err); 2653 printk(KERN_ERR "Quattro: Fatal IRQ registery error %d.\n", err);
2654 panic("QFE request irq"); 2654 panic("QFE request irq");
2655 } 2655 }
2656 } 2656 }
2657 } 2657 }
2658 #endif /* CONFIG_SBUS */ 2658 #endif /* CONFIG_SBUS */
2659 2659
2660 #ifdef CONFIG_PCI 2660 #ifdef CONFIG_PCI
2661 static struct quattro * __init quattro_pci_find(struct pci_dev *pdev) 2661 static struct quattro * __init quattro_pci_find(struct pci_dev *pdev)
2662 { 2662 {
2663 struct pci_dev *bdev = pdev->bus->self; 2663 struct pci_dev *bdev = pdev->bus->self;
2664 struct quattro *qp; 2664 struct quattro *qp;
2665 2665
2666 if (!bdev) return NULL; 2666 if (!bdev) return NULL;
2667 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) { 2667 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2668 struct pci_dev *qpdev = qp->quattro_dev; 2668 struct pci_dev *qpdev = qp->quattro_dev;
2669 2669
2670 if (qpdev == bdev) 2670 if (qpdev == bdev)
2671 return qp; 2671 return qp;
2672 } 2672 }
2673 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); 2673 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2674 if (qp != NULL) { 2674 if (qp != NULL) {
2675 int i; 2675 int i;
2676 2676
2677 for (i = 0; i < 4; i++) 2677 for (i = 0; i < 4; i++)
2678 qp->happy_meals[i] = NULL; 2678 qp->happy_meals[i] = NULL;
2679 2679
2680 qp->quattro_dev = bdev; 2680 qp->quattro_dev = bdev;
2681 qp->next = qfe_pci_list; 2681 qp->next = qfe_pci_list;
2682 qfe_pci_list = qp; 2682 qfe_pci_list = qp;
2683 2683
2684 /* No range tricks necessary on PCI. */ 2684 /* No range tricks necessary on PCI. */
2685 qp->nranges = 0; 2685 qp->nranges = 0;
2686 } 2686 }
2687 return qp; 2687 return qp;
2688 } 2688 }
2689 #endif /* CONFIG_PCI */ 2689 #endif /* CONFIG_PCI */
2690 2690
2691 #ifdef CONFIG_SBUS 2691 #ifdef CONFIG_SBUS
/* Probe and register one SBUS Happy Meal port (or one port of a
 * Quattro QFE card when is_qfe is set).
 *
 * Returns 0 on success, or a negative errno (-ENODEV/-ENOMEM) with
 * all partially acquired resources released on failure.
 */
static int __init happy_meal_sbus_init(struct sbus_dev *sdev, int is_qfe)
{
	struct quattro *qp = NULL;
	struct happy_meal *hp;
	struct net_device *dev;
	int i, qfe_slot = -1;
	int err = -ENODEV;

	/* For a QFE port, find (or create) the shared quattro tracking
	 * structure and claim the first free of its four slots.
	 */
	if (is_qfe) {
		qp = quattro_sbus_find(sdev);
		if (qp == NULL)
			goto err_out;
		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
			if (qp->happy_meals[qfe_slot] == NULL)
				break;
		if (qfe_slot == 4)
			goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(struct happy_meal));
	if (!dev)
		goto err_out;
	SET_MODULE_OWNER(dev);

	/* Print the driver banner once, on the first probed port. */
	if (hme_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* If user did not specify a MAC address specifically, use
	 * the Quattro local-mac-address property...
	 */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i < 6) { /* a mac address was given */
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = macaddr[i];
		/* Bump the parameter so the next probed port gets the
		 * following address.
		 */
		macaddr[5]++;
	} else if (qfe_slot != -1 &&
		   prom_getproplen(sdev->prom_node,
				   "local-mac-address") == 6) {
		prom_getproperty(sdev->prom_node, "local-mac-address",
				 dev->dev_addr, 6);
	} else {
		/* Fall back to the system-wide address from the IDPROM. */
		memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	}

	hp = dev->priv;

	hp->happy_dev = sdev;

	spin_lock_init(&hp->happy_lock);

	/* A Happy Meal exports exactly five register banks; anything
	 * else means this is not the device we think it is.
	 */
	err = -ENODEV;
	if (sdev->num_registers != 5) {
		printk(KERN_ERR "happymeal: Device does not have 5 regs, it has %d.\n",
		       sdev->num_registers);
		printk(KERN_ERR "happymeal: Would you like that for here or to go?\n");
		goto err_out_free_netdev;
	}

	/* Link this port into its quattro and translate its register
	 * addresses through the parent's ranges.
	 */
	if (qp != NULL) {
		hp->qfe_parent = qp;
		hp->qfe_ent = qfe_slot;
		qp->happy_meals[qfe_slot] = dev;
		quattro_apply_ranges(qp, hp);
	}

	/* Map the five register banks: global, TX, RX, BigMAC,
	 * transceiver (in that resource order).
	 */
	hp->gregs = sbus_ioremap(&sdev->resource[0], 0,
				 GREG_REG_SIZE, "HME Global Regs");
	if (!hp->gregs) {
		printk(KERN_ERR "happymeal: Cannot map Happy Meal global registers.\n");
		goto err_out_free_netdev;
	}

	hp->etxregs = sbus_ioremap(&sdev->resource[1], 0,
				   ETX_REG_SIZE, "HME TX Regs");
	if (!hp->etxregs) {
		printk(KERN_ERR "happymeal: Cannot map Happy Meal MAC Transmit registers.\n");
		goto err_out_iounmap;
	}

	hp->erxregs = sbus_ioremap(&sdev->resource[2], 0,
				   ERX_REG_SIZE, "HME RX Regs");
	if (!hp->erxregs) {
		printk(KERN_ERR "happymeal: Cannot map Happy Meal MAC Receive registers.\n");
		goto err_out_iounmap;
	}

	hp->bigmacregs = sbus_ioremap(&sdev->resource[3], 0,
				      BMAC_REG_SIZE, "HME BIGMAC Regs");
	if (!hp->bigmacregs) {
		printk(KERN_ERR "happymeal: Cannot map Happy Meal BIGMAC registers.\n");
		goto err_out_iounmap;
	}

	hp->tcvregs = sbus_ioremap(&sdev->resource[4], 0,
				   TCVR_REG_SIZE, "HME Tranceiver Regs");
	if (!hp->tcvregs) {
		printk(KERN_ERR "happymeal: Cannot map Happy Meal Tranceiver registers.\n");
		goto err_out_iounmap;
	}

	/* 0xff means the "hm-rev" property is absent; assume rev A0. */
	hp->hm_revision = prom_getintdefault(sdev->prom_node, "hm-rev", 0xff);
	if (hp->hm_revision == 0xff)
		hp->hm_revision = 0xa0;

	/* Now enable the feature flags we can. */
	if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
		hp->happy_flags = HFLAG_20_21;
	else if (hp->hm_revision != 0xa0)
		hp->happy_flags = HFLAG_NOT_A0;

	if (qp != NULL)
		hp->happy_flags |= HFLAG_QUATTRO;

	/* Get the supported DVMA burst sizes from our Happy SBUS. */
	hp->happy_bursts = prom_getintdefault(sdev->bus->prom_node,
					      "burst-sizes", 0x00);

	/* One page of DMA-consistent memory holds the descriptor rings. */
	hp->happy_block = sbus_alloc_consistent(hp->happy_dev,
						PAGE_SIZE,
						&hp->hblock_dvma);
	err = -ENOMEM;
	if (!hp->happy_block) {
		printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
		goto err_out_iounmap;
	}

	/* Force check of the link first time we are brought up. */
	hp->linkcheck = 0;

	/* Force timer state to 'asleep' with count of zero. */
	hp->timer_state = asleep;
	hp->timer_ticks = 0;

	init_timer(&hp->happy_timer);

	hp->dev = dev;
	dev->open = &happy_meal_open;
	dev->stop = &happy_meal_close;
	dev->hard_start_xmit = &happy_meal_start_xmit;
	dev->get_stats = &happy_meal_get_stats;
	dev->set_multicast_list = &happy_meal_set_multicast;
	dev->tx_timeout = &happy_meal_tx_timeout;
	dev->watchdog_timeo = 5*HZ;
	dev->ethtool_ops = &hme_ethtool_ops;

	/* Happy Meal can do it all... except VLAN. */
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_VLAN_CHALLENGED;

	dev->irq = sdev->irqs[0];

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up SBUS register/dma accessors; indirection is only
	 * needed when both bus types are compiled in.
	 */
	hp->read_desc32 = sbus_hme_read_desc32;
	hp->write_txd = sbus_hme_write_txd;
	hp->write_rxd = sbus_hme_write_rxd;
	hp->dma_map = (u32 (*)(void *, void *, long, int))sbus_map_single;
	hp->dma_unmap = (void (*)(void *, u32, long, int))sbus_unmap_single;
	hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int))
		sbus_dma_sync_single_for_cpu;
	hp->dma_sync_for_device = (void (*)(void *, u32, long, int))
		sbus_dma_sync_single_for_device;
	hp->read32 = sbus_hme_read32;
	hp->write32 = sbus_hme_write32;
#endif

	/* Grrr, Happy Meal comes up by default not advertising
	 * full duplex 100baseT capabilities, fix this.
	 */
	spin_lock_irq(&hp->happy_lock);
	happy_meal_set_initial_advertisement(hp);
	spin_unlock_irq(&hp->happy_lock);

	if (register_netdev(hp->dev)) {
		printk(KERN_ERR "happymeal: Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_consistent;
	}

	if (qfe_slot != -1)
		printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
		       dev->name, qfe_slot);
	else
		printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
		       dev->name);

	for (i = 0; i < 6; i++)
		printk("%2.2x%c",
		       dev->dev_addr[i], i == 5 ? ' ' : ':');
	printk("\n");

	/* We are home free at this point, link us in to the happy
	 * device list.
	 */
	hp->next_module = root_happy_dev;
	root_happy_dev = hp;

	return 0;

err_out_free_consistent:
	sbus_free_consistent(hp->happy_dev,
			     PAGE_SIZE,
			     hp->happy_block,
			     hp->hblock_dvma);

err_out_iounmap:
	/* Only unmap the register banks that were successfully mapped. */
	if (hp->gregs)
		sbus_iounmap(hp->gregs, GREG_REG_SIZE);
	if (hp->etxregs)
		sbus_iounmap(hp->etxregs, ETX_REG_SIZE);
	if (hp->erxregs)
		sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
	if (hp->bigmacregs)
		sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
	if (hp->tcvregs)
		sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);

err_out_free_netdev:
	free_netdev(dev);

err_out:
	return err;
}
2918 #endif 2918 #endif
2919 2919
2920 #ifdef CONFIG_PCI 2920 #ifdef CONFIG_PCI
2921 #ifndef __sparc__ 2921 #ifndef __sparc__
2922 static int is_quattro_p(struct pci_dev *pdev) 2922 static int is_quattro_p(struct pci_dev *pdev)
2923 { 2923 {
2924 struct pci_dev *busdev = pdev->bus->self; 2924 struct pci_dev *busdev = pdev->bus->self;
2925 struct list_head *tmp; 2925 struct list_head *tmp;
2926 int n_hmes; 2926 int n_hmes;
2927 2927
2928 if (busdev == NULL || 2928 if (busdev == NULL ||
2929 busdev->vendor != PCI_VENDOR_ID_DEC || 2929 busdev->vendor != PCI_VENDOR_ID_DEC ||
2930 busdev->device != PCI_DEVICE_ID_DEC_21153) 2930 busdev->device != PCI_DEVICE_ID_DEC_21153)
2931 return 0; 2931 return 0;
2932 2932
2933 n_hmes = 0; 2933 n_hmes = 0;
2934 tmp = pdev->bus->devices.next; 2934 tmp = pdev->bus->devices.next;
2935 while (tmp != &pdev->bus->devices) { 2935 while (tmp != &pdev->bus->devices) {
2936 struct pci_dev *this_pdev = pci_dev_b(tmp); 2936 struct pci_dev *this_pdev = pci_dev_b(tmp);
2937 2937
2938 if (this_pdev->vendor == PCI_VENDOR_ID_SUN && 2938 if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2939 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL) 2939 this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2940 n_hmes++; 2940 n_hmes++;
2941 2941
2942 tmp = tmp->next; 2942 tmp = tmp->next;
2943 } 2943 }
2944 2944
2945 if (n_hmes != 4) 2945 if (n_hmes != 4)
2946 return 0; 2946 return 0;
2947 2947
2948 return 1; 2948 return 1;
2949 } 2949 }
2950 2950
2951 /* Fetch MAC address from vital product data of PCI ROM. */ 2951 /* Fetch MAC address from vital product data of PCI ROM. */
2952 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr) 2952 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2953 { 2953 {
2954 int this_offset; 2954 int this_offset;
2955 2955
2956 for (this_offset = 0x20; this_offset < len; this_offset++) { 2956 for (this_offset = 0x20; this_offset < len; this_offset++) {
2957 void __iomem *p = rom_base + this_offset; 2957 void __iomem *p = rom_base + this_offset;
2958 2958
2959 if (readb(p + 0) != 0x90 || 2959 if (readb(p + 0) != 0x90 ||
2960 readb(p + 1) != 0x00 || 2960 readb(p + 1) != 0x00 ||
2961 readb(p + 2) != 0x09 || 2961 readb(p + 2) != 0x09 ||
2962 readb(p + 3) != 0x4e || 2962 readb(p + 3) != 0x4e ||
2963 readb(p + 4) != 0x41 || 2963 readb(p + 4) != 0x41 ||
2964 readb(p + 5) != 0x06) 2964 readb(p + 5) != 0x06)
2965 continue; 2965 continue;
2966 2966
2967 this_offset += 6; 2967 this_offset += 6;
2968 p += 6; 2968 p += 6;
2969 2969
2970 if (index == 0) { 2970 if (index == 0) {
2971 int i; 2971 int i;
2972 2972
2973 for (i = 0; i < 6; i++) 2973 for (i = 0; i < 6; i++)
2974 dev_addr[i] = readb(p + i); 2974 dev_addr[i] = readb(p + i);
2975 return 1; 2975 return 1;
2976 } 2976 }
2977 index--; 2977 index--;
2978 } 2978 }
2979 return 0; 2979 return 0;
2980 } 2980 }
2981 2981
2982 static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr) 2982 static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2983 { 2983 {
2984 size_t size; 2984 size_t size;
2985 void __iomem *p = pci_map_rom(pdev, &size); 2985 void __iomem *p = pci_map_rom(pdev, &size);
2986 2986
2987 if (p) { 2987 if (p) {
2988 int index = 0; 2988 int index = 0;
2989 int found; 2989 int found;
2990 2990
2991 if (is_quattro_p(pdev)) 2991 if (is_quattro_p(pdev))
2992 index = PCI_SLOT(pdev->devfn); 2992 index = PCI_SLOT(pdev->devfn);
2993 2993
2994 found = readb(p) == 0x55 && 2994 found = readb(p) == 0x55 &&
2995 readb(p + 1) == 0xaa && 2995 readb(p + 1) == 0xaa &&
2996 find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr); 2996 find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
2997 pci_unmap_rom(pdev, p); 2997 pci_unmap_rom(pdev, p);
2998 if (found) 2998 if (found)
2999 return; 2999 return;
3000 } 3000 }
3001 3001
3002 /* Sun MAC prefix then 3 random bytes. */ 3002 /* Sun MAC prefix then 3 random bytes. */
3003 dev_addr[0] = 0x08; 3003 dev_addr[0] = 0x08;
3004 dev_addr[1] = 0x00; 3004 dev_addr[1] = 0x00;
3005 dev_addr[2] = 0x20; 3005 dev_addr[2] = 0x20;
3006 get_random_bytes(&dev_addr[3], 3); 3006 get_random_bytes(&dev_addr[3], 3);
3007 return; 3007 return;
3008 } 3008 }
3009 #endif /* !(__sparc__) */ 3009 #endif /* !(__sparc__) */
3010 3010
3011 static int __init happy_meal_pci_init(struct pci_dev *pdev) 3011 static int __init happy_meal_pci_init(struct pci_dev *pdev)
3012 { 3012 {
3013 struct quattro *qp = NULL; 3013 struct quattro *qp = NULL;
3014 #ifdef __sparc__ 3014 #ifdef __sparc__
3015 struct pcidev_cookie *pcp; 3015 struct pcidev_cookie *pcp;
3016 int node;
3017 #endif 3016 #endif
3018 struct happy_meal *hp; 3017 struct happy_meal *hp;
3019 struct net_device *dev; 3018 struct net_device *dev;
3020 void __iomem *hpreg_base; 3019 void __iomem *hpreg_base;
3021 unsigned long hpreg_res; 3020 unsigned long hpreg_res;
3022 int i, qfe_slot = -1; 3021 int i, qfe_slot = -1;
3023 char prom_name[64]; 3022 char prom_name[64];
3024 int err; 3023 int err;
3025 3024
3026 /* Now make sure pci_dev cookie is there. */ 3025 /* Now make sure pci_dev cookie is there. */
3027 #ifdef __sparc__ 3026 #ifdef __sparc__
3028 pcp = pdev->sysdata; 3027 pcp = pdev->sysdata;
3029 if (pcp == NULL || pcp->prom_node == -1) { 3028 if (pcp == NULL) {
3030 printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n"); 3029 printk(KERN_ERR "happymeal(PCI): Some PCI device info missing\n");
3031 return -ENODEV; 3030 return -ENODEV;
3032 } 3031 }
3033 node = pcp->prom_node;
3034 3032
3035 prom_getstring(node, "name", prom_name, sizeof(prom_name)); 3033 strcpy(prom_name, pcp->prom_node->name);
3036 #else 3034 #else
3037 if (is_quattro_p(pdev)) 3035 if (is_quattro_p(pdev))
3038 strcpy(prom_name, "SUNW,qfe"); 3036 strcpy(prom_name, "SUNW,qfe");
3039 else 3037 else
3040 strcpy(prom_name, "SUNW,hme"); 3038 strcpy(prom_name, "SUNW,hme");
3041 #endif 3039 #endif
3042 3040
3043 err = -ENODEV; 3041 err = -ENODEV;
3044 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) { 3042 if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
3045 qp = quattro_pci_find(pdev); 3043 qp = quattro_pci_find(pdev);
3046 if (qp == NULL) 3044 if (qp == NULL)
3047 goto err_out; 3045 goto err_out;
3048 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) 3046 for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
3049 if (qp->happy_meals[qfe_slot] == NULL) 3047 if (qp->happy_meals[qfe_slot] == NULL)
3050 break; 3048 break;
3051 if (qfe_slot == 4) 3049 if (qfe_slot == 4)
3052 goto err_out; 3050 goto err_out;
3053 } 3051 }
3054 3052
3055 dev = alloc_etherdev(sizeof(struct happy_meal)); 3053 dev = alloc_etherdev(sizeof(struct happy_meal));
3056 err = -ENOMEM; 3054 err = -ENOMEM;
3057 if (!dev) 3055 if (!dev)
3058 goto err_out; 3056 goto err_out;
3059 SET_MODULE_OWNER(dev); 3057 SET_MODULE_OWNER(dev);
3060 SET_NETDEV_DEV(dev, &pdev->dev); 3058 SET_NETDEV_DEV(dev, &pdev->dev);
3061 3059
3062 if (hme_version_printed++ == 0) 3060 if (hme_version_printed++ == 0)
3063 printk(KERN_INFO "%s", version); 3061 printk(KERN_INFO "%s", version);
3064 3062
3065 dev->base_addr = (long) pdev; 3063 dev->base_addr = (long) pdev;
3066 3064
3067 hp = (struct happy_meal *)dev->priv; 3065 hp = (struct happy_meal *)dev->priv;
3068 memset(hp, 0, sizeof(*hp)); 3066 memset(hp, 0, sizeof(*hp));
3069 3067
3070 hp->happy_dev = pdev; 3068 hp->happy_dev = pdev;
3071 3069
3072 spin_lock_init(&hp->happy_lock); 3070 spin_lock_init(&hp->happy_lock);
3073 3071
3074 if (qp != NULL) { 3072 if (qp != NULL) {
3075 hp->qfe_parent = qp; 3073 hp->qfe_parent = qp;
3076 hp->qfe_ent = qfe_slot; 3074 hp->qfe_ent = qfe_slot;
3077 qp->happy_meals[qfe_slot] = dev; 3075 qp->happy_meals[qfe_slot] = dev;
3078 } 3076 }
3079 3077
3080 hpreg_res = pci_resource_start(pdev, 0); 3078 hpreg_res = pci_resource_start(pdev, 0);
3081 err = -ENODEV; 3079 err = -ENODEV;
3082 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3080 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3083 printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n"); 3081 printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
3084 goto err_out_clear_quattro; 3082 goto err_out_clear_quattro;
3085 } 3083 }
3086 if (pci_request_regions(pdev, DRV_NAME)) { 3084 if (pci_request_regions(pdev, DRV_NAME)) {
3087 printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, " 3085 printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
3088 "aborting.\n"); 3086 "aborting.\n");
3089 goto err_out_clear_quattro; 3087 goto err_out_clear_quattro;
3090 } 3088 }
3091 3089
3092 if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == 0) { 3090 if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == 0) {
3093 printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n"); 3091 printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
3094 goto err_out_free_res; 3092 goto err_out_free_res;
3095 } 3093 }
3096 3094
3097 for (i = 0; i < 6; i++) { 3095 for (i = 0; i < 6; i++) {
3098 if (macaddr[i] != 0) 3096 if (macaddr[i] != 0)
3099 break; 3097 break;
3100 } 3098 }
3101 if (i < 6) { /* a mac address was given */ 3099 if (i < 6) { /* a mac address was given */
3102 for (i = 0; i < 6; i++) 3100 for (i = 0; i < 6; i++)
3103 dev->dev_addr[i] = macaddr[i]; 3101 dev->dev_addr[i] = macaddr[i];
3104 macaddr[5]++; 3102 macaddr[5]++;
3105 } else { 3103 } else {
3106 #ifdef __sparc__ 3104 #ifdef __sparc__
3105 unsigned char *addr;
3106 int len;
3107
3107 if (qfe_slot != -1 && 3108 if (qfe_slot != -1 &&
3108 prom_getproplen(node, "local-mac-address") == 6) { 3109 (addr = of_get_property(pcp->prom_node,
3109 prom_getproperty(node, "local-mac-address", 3110 "local-mac-address", &len)) != NULL
3110 dev->dev_addr, 6); 3111 && len == 6) {
3112 memcpy(dev->dev_addr, addr, 6);
3111 } else { 3113 } else {
3112 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 3114 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
3113 } 3115 }
3114 #else 3116 #else
3115 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); 3117 get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
3116 #endif 3118 #endif
3117 } 3119 }
3118 3120
3119 /* Layout registers. */ 3121 /* Layout registers. */
3120 hp->gregs = (hpreg_base + 0x0000UL); 3122 hp->gregs = (hpreg_base + 0x0000UL);
3121 hp->etxregs = (hpreg_base + 0x2000UL); 3123 hp->etxregs = (hpreg_base + 0x2000UL);
3122 hp->erxregs = (hpreg_base + 0x4000UL); 3124 hp->erxregs = (hpreg_base + 0x4000UL);
3123 hp->bigmacregs = (hpreg_base + 0x6000UL); 3125 hp->bigmacregs = (hpreg_base + 0x6000UL);
3124 hp->tcvregs = (hpreg_base + 0x7000UL); 3126 hp->tcvregs = (hpreg_base + 0x7000UL);
3125 3127
3126 #ifdef __sparc__ 3128 #ifdef __sparc__
3127 hp->hm_revision = prom_getintdefault(node, "hm-rev", 0xff); 3129 hp->hm_revision = of_getintprop_default(pcp->prom_node, "hm-rev", 0xff);
3128 if (hp->hm_revision == 0xff) { 3130 if (hp->hm_revision == 0xff) {
3129 unsigned char prev; 3131 unsigned char prev;
3130 3132
3131 pci_read_config_byte(pdev, PCI_REVISION_ID, &prev); 3133 pci_read_config_byte(pdev, PCI_REVISION_ID, &prev);
3132 hp->hm_revision = 0xc0 | (prev & 0x0f); 3134 hp->hm_revision = 0xc0 | (prev & 0x0f);
3133 } 3135 }
3134 #else 3136 #else
3135 /* works with this on non-sparc hosts */ 3137 /* works with this on non-sparc hosts */
3136 hp->hm_revision = 0x20; 3138 hp->hm_revision = 0x20;
3137 #endif 3139 #endif
3138 3140
3139 /* Now enable the feature flags we can. */ 3141 /* Now enable the feature flags we can. */
3140 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) 3142 if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
3141 hp->happy_flags = HFLAG_20_21; 3143 hp->happy_flags = HFLAG_20_21;
3142 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0) 3144 else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
3143 hp->happy_flags = HFLAG_NOT_A0; 3145 hp->happy_flags = HFLAG_NOT_A0;
3144 3146
3145 if (qp != NULL) 3147 if (qp != NULL)
3146 hp->happy_flags |= HFLAG_QUATTRO; 3148 hp->happy_flags |= HFLAG_QUATTRO;
3147 3149
3148 /* And of course, indicate this is PCI. */ 3150 /* And of course, indicate this is PCI. */
3149 hp->happy_flags |= HFLAG_PCI; 3151 hp->happy_flags |= HFLAG_PCI;
3150 3152
3151 #ifdef __sparc__ 3153 #ifdef __sparc__
3152 /* Assume PCI happy meals can handle all burst sizes. */ 3154 /* Assume PCI happy meals can handle all burst sizes. */
3153 hp->happy_bursts = DMA_BURSTBITS; 3155 hp->happy_bursts = DMA_BURSTBITS;
3154 #endif 3156 #endif
3155 3157
3156 hp->happy_block = (struct hmeal_init_block *) 3158 hp->happy_block = (struct hmeal_init_block *)
3157 pci_alloc_consistent(pdev, PAGE_SIZE, &hp->hblock_dvma); 3159 pci_alloc_consistent(pdev, PAGE_SIZE, &hp->hblock_dvma);
3158 3160
3159 err = -ENODEV; 3161 err = -ENODEV;
3160 if (!hp->happy_block) { 3162 if (!hp->happy_block) {
3161 printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n"); 3163 printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
3162 goto err_out_iounmap; 3164 goto err_out_iounmap;
3163 } 3165 }
3164 3166
3165 hp->linkcheck = 0; 3167 hp->linkcheck = 0;
3166 hp->timer_state = asleep; 3168 hp->timer_state = asleep;
3167 hp->timer_ticks = 0; 3169 hp->timer_ticks = 0;
3168 3170
3169 init_timer(&hp->happy_timer); 3171 init_timer(&hp->happy_timer);
3170 3172
3171 hp->dev = dev; 3173 hp->dev = dev;
3172 dev->open = &happy_meal_open; 3174 dev->open = &happy_meal_open;
3173 dev->stop = &happy_meal_close; 3175 dev->stop = &happy_meal_close;
3174 dev->hard_start_xmit = &happy_meal_start_xmit; 3176 dev->hard_start_xmit = &happy_meal_start_xmit;
3175 dev->get_stats = &happy_meal_get_stats; 3177 dev->get_stats = &happy_meal_get_stats;
3176 dev->set_multicast_list = &happy_meal_set_multicast; 3178 dev->set_multicast_list = &happy_meal_set_multicast;
3177 dev->tx_timeout = &happy_meal_tx_timeout; 3179 dev->tx_timeout = &happy_meal_tx_timeout;
3178 dev->watchdog_timeo = 5*HZ; 3180 dev->watchdog_timeo = 5*HZ;
3179 dev->ethtool_ops = &hme_ethtool_ops; 3181 dev->ethtool_ops = &hme_ethtool_ops;
3180 dev->irq = pdev->irq; 3182 dev->irq = pdev->irq;
3181 dev->dma = 0; 3183 dev->dma = 0;
3182 3184
3183 /* Happy Meal can do it all... */ 3185 /* Happy Meal can do it all... */
3184 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 3186 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
3185 3187
3186 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 3188 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
3187 /* Hook up PCI register/dma accessors. */ 3189 /* Hook up PCI register/dma accessors. */
3188 hp->read_desc32 = pci_hme_read_desc32; 3190 hp->read_desc32 = pci_hme_read_desc32;
3189 hp->write_txd = pci_hme_write_txd; 3191 hp->write_txd = pci_hme_write_txd;
3190 hp->write_rxd = pci_hme_write_rxd; 3192 hp->write_rxd = pci_hme_write_rxd;
3191 hp->dma_map = (u32 (*)(void *, void *, long, int))pci_map_single; 3193 hp->dma_map = (u32 (*)(void *, void *, long, int))pci_map_single;
3192 hp->dma_unmap = (void (*)(void *, u32, long, int))pci_unmap_single; 3194 hp->dma_unmap = (void (*)(void *, u32, long, int))pci_unmap_single;
3193 hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int)) 3195 hp->dma_sync_for_cpu = (void (*)(void *, u32, long, int))
3194 pci_dma_sync_single_for_cpu; 3196 pci_dma_sync_single_for_cpu;
3195 hp->dma_sync_for_device = (void (*)(void *, u32, long, int)) 3197 hp->dma_sync_for_device = (void (*)(void *, u32, long, int))
3196 pci_dma_sync_single_for_device; 3198 pci_dma_sync_single_for_device;
3197 hp->read32 = pci_hme_read32; 3199 hp->read32 = pci_hme_read32;
3198 hp->write32 = pci_hme_write32; 3200 hp->write32 = pci_hme_write32;
3199 #endif 3201 #endif
3200 3202
3201 /* Grrr, Happy Meal comes up by default not advertising 3203 /* Grrr, Happy Meal comes up by default not advertising
3202 * full duplex 100baseT capabilities, fix this. 3204 * full duplex 100baseT capabilities, fix this.
3203 */ 3205 */
3204 spin_lock_irq(&hp->happy_lock); 3206 spin_lock_irq(&hp->happy_lock);
3205 happy_meal_set_initial_advertisement(hp); 3207 happy_meal_set_initial_advertisement(hp);
3206 spin_unlock_irq(&hp->happy_lock); 3208 spin_unlock_irq(&hp->happy_lock);
3207 3209
3208 if (register_netdev(hp->dev)) { 3210 if (register_netdev(hp->dev)) {
3209 printk(KERN_ERR "happymeal(PCI): Cannot register net device, " 3211 printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
3210 "aborting.\n"); 3212 "aborting.\n");
3211 goto err_out_iounmap; 3213 goto err_out_iounmap;
3212 } 3214 }
3213 3215
3214 if (!qfe_slot) { 3216 if (!qfe_slot) {
3215 struct pci_dev *qpdev = qp->quattro_dev; 3217 struct pci_dev *qpdev = qp->quattro_dev;
3216 3218
3217 prom_name[0] = 0; 3219 prom_name[0] = 0;
3218 if (!strncmp(dev->name, "eth", 3)) { 3220 if (!strncmp(dev->name, "eth", 3)) {
3219 int i = simple_strtoul(dev->name + 3, NULL, 10); 3221 int i = simple_strtoul(dev->name + 3, NULL, 10);
3220 sprintf(prom_name, "-%d", i + 3); 3222 sprintf(prom_name, "-%d", i + 3);
3221 } 3223 }
3222 printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name); 3224 printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
3223 if (qpdev->vendor == PCI_VENDOR_ID_DEC && 3225 if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
3224 qpdev->device == PCI_DEVICE_ID_DEC_21153) 3226 qpdev->device == PCI_DEVICE_ID_DEC_21153)
3225 printk("DEC 21153 PCI Bridge\n"); 3227 printk("DEC 21153 PCI Bridge\n");
3226 else 3228 else
3227 printk("unknown bridge %04x.%04x\n", 3229 printk("unknown bridge %04x.%04x\n",
3228 qpdev->vendor, qpdev->device); 3230 qpdev->vendor, qpdev->device);
3229 } 3231 }
3230 3232
3231 if (qfe_slot != -1) 3233 if (qfe_slot != -1)
3232 printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ", 3234 printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
3233 dev->name, qfe_slot); 3235 dev->name, qfe_slot);
3234 else 3236 else
3235 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ", 3237 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
3236 dev->name); 3238 dev->name);
3237 3239
3238 for (i = 0; i < 6; i++) 3240 for (i = 0; i < 6; i++)
3239 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? ' ' : ':'); 3241 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? ' ' : ':');
3240 3242
3241 printk("\n"); 3243 printk("\n");
3242 3244
3243 /* We are home free at this point, link us in to the happy 3245 /* We are home free at this point, link us in to the happy
3244 * device list. 3246 * device list.
3245 */ 3247 */
3246 hp->next_module = root_happy_dev; 3248 hp->next_module = root_happy_dev;
3247 root_happy_dev = hp; 3249 root_happy_dev = hp;
3248 3250
3249 return 0; 3251 return 0;
3250 3252
3251 err_out_iounmap: 3253 err_out_iounmap:
3252 iounmap(hp->gregs); 3254 iounmap(hp->gregs);
3253 3255
3254 err_out_free_res: 3256 err_out_free_res:
3255 pci_release_regions(pdev); 3257 pci_release_regions(pdev);
3256 3258
3257 err_out_clear_quattro: 3259 err_out_clear_quattro:
3258 if (qp != NULL) 3260 if (qp != NULL)
3259 qp->happy_meals[qfe_slot] = NULL; 3261 qp->happy_meals[qfe_slot] = NULL;
3260 3262
3261 free_netdev(dev); 3263 free_netdev(dev);
3262 3264
3263 err_out: 3265 err_out:
3264 return err; 3266 return err;
3265 } 3267 }
3266 #endif 3268 #endif
3267 3269
3268 #ifdef CONFIG_SBUS 3270 #ifdef CONFIG_SBUS
3269 static int __init happy_meal_sbus_probe(void) 3271 static int __init happy_meal_sbus_probe(void)
3270 { 3272 {
3271 struct sbus_bus *sbus; 3273 struct sbus_bus *sbus;
3272 struct sbus_dev *sdev; 3274 struct sbus_dev *sdev;
3273 int cards = 0; 3275 int cards = 0;
3274 char model[128]; 3276 char model[128];
3275 3277
3276 for_each_sbus(sbus) { 3278 for_each_sbus(sbus) {
3277 for_each_sbusdev(sdev, sbus) { 3279 for_each_sbusdev(sdev, sbus) {
3278 char *name = sdev->prom_name; 3280 char *name = sdev->prom_name;
3279 3281
3280 if (!strcmp(name, "SUNW,hme")) { 3282 if (!strcmp(name, "SUNW,hme")) {
3281 cards++; 3283 cards++;
3282 prom_getstring(sdev->prom_node, "model", 3284 prom_getstring(sdev->prom_node, "model",
3283 model, sizeof(model)); 3285 model, sizeof(model));
3284 if (!strcmp(model, "SUNW,sbus-qfe")) 3286 if (!strcmp(model, "SUNW,sbus-qfe"))
3285 happy_meal_sbus_init(sdev, 1); 3287 happy_meal_sbus_init(sdev, 1);
3286 else 3288 else
3287 happy_meal_sbus_init(sdev, 0); 3289 happy_meal_sbus_init(sdev, 0);
3288 } else if (!strcmp(name, "qfe") || 3290 } else if (!strcmp(name, "qfe") ||
3289 !strcmp(name, "SUNW,qfe")) { 3291 !strcmp(name, "SUNW,qfe")) {
3290 cards++; 3292 cards++;
3291 happy_meal_sbus_init(sdev, 1); 3293 happy_meal_sbus_init(sdev, 1);
3292 } 3294 }
3293 } 3295 }
3294 } 3296 }
3295 if (cards != 0) 3297 if (cards != 0)
3296 quattro_sbus_register_irqs(); 3298 quattro_sbus_register_irqs();
3297 return cards; 3299 return cards;
3298 } 3300 }
3299 #endif 3301 #endif
3300 3302
3301 #ifdef CONFIG_PCI 3303 #ifdef CONFIG_PCI
3302 static int __init happy_meal_pci_probe(void) 3304 static int __init happy_meal_pci_probe(void)
3303 { 3305 {
3304 struct pci_dev *pdev = NULL; 3306 struct pci_dev *pdev = NULL;
3305 int cards = 0; 3307 int cards = 0;
3306 3308
3307 while ((pdev = pci_find_device(PCI_VENDOR_ID_SUN, 3309 while ((pdev = pci_find_device(PCI_VENDOR_ID_SUN,
3308 PCI_DEVICE_ID_SUN_HAPPYMEAL, pdev)) != NULL) { 3310 PCI_DEVICE_ID_SUN_HAPPYMEAL, pdev)) != NULL) {
3309 if (pci_enable_device(pdev)) 3311 if (pci_enable_device(pdev))
3310 continue; 3312 continue;
3311 pci_set_master(pdev); 3313 pci_set_master(pdev);
3312 cards++; 3314 cards++;
3313 happy_meal_pci_init(pdev); 3315 happy_meal_pci_init(pdev);
3314 } 3316 }
3315 return cards; 3317 return cards;
3316 } 3318 }
3317 #endif 3319 #endif
3318 3320
3319 static int __init happy_meal_probe(void) 3321 static int __init happy_meal_probe(void)
3320 { 3322 {
3321 static int called = 0; 3323 static int called = 0;
3322 int cards; 3324 int cards;
3323 3325
3324 root_happy_dev = NULL; 3326 root_happy_dev = NULL;
3325 3327
3326 if (called) 3328 if (called)
3327 return -ENODEV; 3329 return -ENODEV;
3328 called++; 3330 called++;
3329 3331
3330 cards = 0; 3332 cards = 0;
3331 #ifdef CONFIG_SBUS 3333 #ifdef CONFIG_SBUS
3332 cards += happy_meal_sbus_probe(); 3334 cards += happy_meal_sbus_probe();
3333 #endif 3335 #endif
3334 #ifdef CONFIG_PCI 3336 #ifdef CONFIG_PCI
3335 cards += happy_meal_pci_probe(); 3337 cards += happy_meal_pci_probe();
3336 #endif 3338 #endif
3337 if (!cards) 3339 if (!cards)
3338 return -ENODEV; 3340 return -ENODEV;
3339 return 0; 3341 return 0;
3340 } 3342 }
3341 3343
3342 3344
/* Module unload: tear down every probed Happy Meal device, then free the
 * per-board Quattro bookkeeping structures.
 */
static void __exit happy_meal_cleanup_module(void)
{
#ifdef CONFIG_SBUS
	/* Quattro ports share a parent; track the last parent whose IRQ we
	 * freed so that free_irq() runs only once per distinct qfe_parent.
	 */
	struct quattro *last_seen_qfe = NULL;
#endif

	/* Walk the driver-private singly linked list of all devices. */
	while (root_happy_dev) {
		struct happy_meal *hp = root_happy_dev;
		struct happy_meal *next = root_happy_dev->next_module;
		struct net_device *dev = hp->dev;

		/* Unregister netdev before unmapping registers as this
		 * call can end up trying to access those registers.
		 */
		unregister_netdev(dev);

#ifdef CONFIG_SBUS
		if (!(hp->happy_flags & HFLAG_PCI)) {
			if (hp->happy_flags & HFLAG_QUATTRO) {
				if (hp->qfe_parent != last_seen_qfe) {
					free_irq(dev->irq, hp->qfe_parent);
					last_seen_qfe = hp->qfe_parent;
				}
			}

			/* Unmap every SBUS register bank, then release the
			 * DMA-consistent init block allocated at probe time.
			 */
			sbus_iounmap(hp->gregs, GREG_REG_SIZE);
			sbus_iounmap(hp->etxregs, ETX_REG_SIZE);
			sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
			sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
			sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);
			sbus_free_consistent(hp->happy_dev,
					     PAGE_SIZE,
					     hp->happy_block,
					     hp->hblock_dvma);
		}
#endif
#ifdef CONFIG_PCI
		if ((hp->happy_flags & HFLAG_PCI)) {
			/* PCI variant: free the DMA init block, unmap the
			 * single register window, release the BAR regions.
			 */
			pci_free_consistent(hp->happy_dev,
					    PAGE_SIZE,
					    hp->happy_block,
					    hp->hblock_dvma);
			iounmap(hp->gregs);
			pci_release_regions(hp->happy_dev);
		}
#endif
		free_netdev(dev);

		root_happy_dev = next;
	}

	/* Now cleanup the quattro lists. */
#ifdef CONFIG_SBUS
	while (qfe_sbus_list) {
		struct quattro *qfe = qfe_sbus_list;
		struct quattro *next = qfe->next;

		kfree(qfe);

		qfe_sbus_list = next;
	}
#endif
#ifdef CONFIG_PCI
	while (qfe_pci_list) {
		struct quattro *qfe = qfe_pci_list;
		struct quattro *next = qfe->next;

		kfree(qfe);

		qfe_pci_list = next;
	}
#endif
}
3416 3418
3417 module_init(happy_meal_probe); 3419 module_init(happy_meal_probe);
1 /* 1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver. 2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 * 3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation. 7 * Copyright (C) 2005 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation. 11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 * 12 *
13 * Permission is hereby granted for the distribution of this firmware 13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright 14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it. 15 * notice is accompanying it.
16 */ 16 */
17 17
18 #include <linux/config.h> 18 #include <linux/config.h>
19 19
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/moduleparam.h> 21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h> 22 #include <linux/kernel.h>
23 #include <linux/types.h> 23 #include <linux/types.h>
24 #include <linux/compiler.h> 24 #include <linux/compiler.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/delay.h> 26 #include <linux/delay.h>
27 #include <linux/in.h> 27 #include <linux/in.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 #include <linux/ioport.h> 29 #include <linux/ioport.h>
30 #include <linux/pci.h> 30 #include <linux/pci.h>
31 #include <linux/netdevice.h> 31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h> 32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h> 33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h> 34 #include <linux/ethtool.h>
35 #include <linux/mii.h> 35 #include <linux/mii.h>
36 #include <linux/if_vlan.h> 36 #include <linux/if_vlan.h>
37 #include <linux/ip.h> 37 #include <linux/ip.h>
38 #include <linux/tcp.h> 38 #include <linux/tcp.h>
39 #include <linux/workqueue.h> 39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h> 40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h> 41 #include <linux/dma-mapping.h>
42 42
43 #include <net/checksum.h> 43 #include <net/checksum.h>
44 44
45 #include <asm/system.h> 45 #include <asm/system.h>
46 #include <asm/io.h> 46 #include <asm/io.h>
47 #include <asm/byteorder.h> 47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h> 48 #include <asm/uaccess.h>
49 49
50 #ifdef CONFIG_SPARC64 50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h> 51 #include <asm/idprom.h>
52 #include <asm/oplib.h> 52 #include <asm/oplib.h>
53 #include <asm/pbm.h> 53 #include <asm/pbm.h>
54 #endif 54 #endif
55 55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1 57 #define TG3_VLAN_TAG_USED 1
58 #else 58 #else
59 #define TG3_VLAN_TAG_USED 0 59 #define TG3_VLAN_TAG_USED 0
60 #endif 60 #endif
61 61
62 #ifdef NETIF_F_TSO 62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1 63 #define TG3_TSO_SUPPORT 1
64 #else 64 #else
65 #define TG3_TSO_SUPPORT 0 65 #define TG3_TSO_SUPPORT 0
66 #endif 66 #endif
67 67
68 #include "tg3.h" 68 #include "tg3.h"
69 69
70 #define DRV_MODULE_NAME "tg3" 70 #define DRV_MODULE_NAME "tg3"
71 #define PFX DRV_MODULE_NAME ": " 71 #define PFX DRV_MODULE_NAME ": "
72 #define DRV_MODULE_VERSION "3.60" 72 #define DRV_MODULE_VERSION "3.60"
73 #define DRV_MODULE_RELDATE "June 17, 2006" 73 #define DRV_MODULE_RELDATE "June 17, 2006"
74 74
75 #define TG3_DEF_MAC_MODE 0 75 #define TG3_DEF_MAC_MODE 0
76 #define TG3_DEF_RX_MODE 0 76 #define TG3_DEF_RX_MODE 0
77 #define TG3_DEF_TX_MODE 0 77 #define TG3_DEF_TX_MODE 0
78 #define TG3_DEF_MSG_ENABLE \ 78 #define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \ 79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \ 80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \ 81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \ 82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \ 83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \ 84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \ 85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR) 86 NETIF_MSG_TX_ERR)
87 87
88 /* length of time before we decide the hardware is borked, 88 /* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem 89 * and dev->tx_timeout() should be called to fix the problem
90 */ 90 */
91 #define TG3_TX_TIMEOUT (5 * HZ) 91 #define TG3_TX_TIMEOUT (5 * HZ)
92 92
93 /* hardware minimum and maximum for a single frame's data payload */ 93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU 60 94 #define TG3_MIN_MTU 60
95 #define TG3_MAX_MTU(tp) \ 95 #define TG3_MAX_MTU(tp) \
96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500) 96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97 97
98 /* These numbers seem to be hard coded in the NIC firmware somehow. 98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place 99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory. 100 * them in the NIC onboard memory.
101 */ 101 */
102 #define TG3_RX_RING_SIZE 512 102 #define TG3_RX_RING_SIZE 512
103 #define TG3_DEF_RX_RING_PENDING 200 103 #define TG3_DEF_RX_RING_PENDING 200
104 #define TG3_RX_JUMBO_RING_SIZE 256 104 #define TG3_RX_JUMBO_RING_SIZE 256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING 100 105 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
106 106
107 /* Do not place this n-ring entries value into the tp struct itself, 107 /* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et 108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with 109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to 110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'. 111 * replace things like '% foo' with '& (foo - 1)'.
112 */ 112 */
113 #define TG3_RX_RCB_RING_SIZE(tp) \ 113 #define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024) 114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115 115
116 #define TG3_TX_RING_SIZE 512 116 #define TG3_TX_RING_SIZE 512
117 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) 117 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118 118
119 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ 119 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE) 120 TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ 121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE) 122 TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \ 123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp)) 124 TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ 125 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE) 126 TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP) \ 127 #define TX_BUFFS_AVAIL(TP) \
128 ((TP)->tx_pending - \ 128 ((TP)->tx_pending - \
129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1))) 129 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 130 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131 131
132 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) 132 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64) 133 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
134 134
135 /* minimum number of free TX descriptors required to wake up TX process */ 135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4) 136 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
137 137
138 /* number of ETHTOOL_GSTATS u64's */ 138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) 139 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140 140
141 #define TG3_NUM_TEST 6 141 #define TG3_NUM_TEST 6
142 142
143 static char version[] __devinitdata = 143 static char version[] __devinitdata =
144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 144 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145 145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); 146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); 147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL"); 148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION); 149 MODULE_VERSION(DRV_MODULE_VERSION);
150 150
151 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ 151 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0); 152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); 153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154 154
/* PCI IDs claimed by this driver.  Every entry matches on vendor/device
 * only; subvendor and subdevice are wildcarded with PCI_ANY_ID.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }	/* terminator */
};
268 268
MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

/* Statistic names reported via ETHTOOL_GSTRINGS.  The order of these
 * entries must match the counter layout returned by the driver's
 * get_ethtool_stats handler (TG3_NUM_STATS entries total).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
351 351
/* Self-test names reported via ETHTOOL_GSTRINGS for ETHTOOL_TEST.
 * Order must match the result slots filled in by the driver's
 * self-test handler (TG3_NUM_TEST entries).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
362 362
363 static void tg3_write32(struct tg3 *tp, u32 off, u32 val) 363 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
364 { 364 {
365 writel(val, tp->regs + off); 365 writel(val, tp->regs + off);
366 } 366 }
367 367
368 static u32 tg3_read32(struct tg3 *tp, u32 off) 368 static u32 tg3_read32(struct tg3 *tp, u32 off)
369 { 369 {
370 return (readl(tp->regs + off)); 370 return (readl(tp->regs + off));
371 } 371 }
372 372
/* Write a chip register indirectly through the PCI config space
 * window (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA
 * carries the value).  indirect_lock serializes users of the shared
 * window; IRQs are disabled so an interrupt cannot interleave.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
382 382
/* MMIO register write followed by a read-back of the same register,
 * which flushes the posted write out to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
388 388
/* Read a chip register indirectly through the PCI config space
 * window; counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
400 400
/* Write a mailbox register indirectly via PCI config space.  Two
 * mailboxes have dedicated config-space aliases and bypass the
 * windowed path; everything else goes through the register window at
 * mailbox offset + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
430 430
/* Read a mailbox register indirectly through the register window
 * (mailbox offset + 0x5600), serialized by indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
442 442
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 already guarantees the
		 * write reached the chip, so no flush read is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: write, optionally wait, then read back
		 * to flush the posted write.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
467 467
/* Mailbox write with flush read-back, skipped when write reordering is
 * not a concern or when the ICH workaround forbids the read.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
475 475
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips that may reorder mailbox writes need a
 * flush read afterwards.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
485 485
/* Register access shorthands.  All dispatch through the per-chip
 * function pointers selected at probe time; the *_f variants flush the
 * posted write, and tw32_wait_f additionally delays "us" microseconds.
 * Each macro expects a local "tp" (struct tg3 *) in scope.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
496 496
/* Write a word of NIC-local SRAM through the memory window, either via
 * PCI config space (SRAM_USE_CONFIG chips) or via MMIO.  The window
 * base is always restored to zero afterwards, as other code depends on
 * that invariant.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
517 517
/* Read a word of NIC-local SRAM through the memory window; counterpart
 * of tg3_write_mem().  The window base is restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
538 538
/* Mask the PCI interrupt line and write 1 to the interrupt mailbox,
 * which tells the chip the host is not ready for interrupts.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
545 545
/* Force an interrupt (via GRC_LCLCTRL_SETINT) if a status block update
 * is pending and the chip is not using tagged status mode.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
552 552
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * other CPUs see it before the unmask), unmask the PCI interrupt,
 * acknowledge the last status tag, and fire a forced interrupt if work
 * was already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI chips need the tag written a second time.  */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
567 567
568 static inline unsigned int tg3_has_work(struct tg3 *tp) 568 static inline unsigned int tg3_has_work(struct tg3 *tp)
569 { 569 {
570 struct tg3_hw_status *sblk = tp->hw_status; 570 struct tg3_hw_status *sblk = tp->hw_status;
571 unsigned int work_exists = 0; 571 unsigned int work_exists = 0;
572 572
573 /* check for phy events */ 573 /* check for phy events */
574 if (!(tp->tg3_flags & 574 if (!(tp->tg3_flags &
575 (TG3_FLAG_USE_LINKCHG_REG | 575 (TG3_FLAG_USE_LINKCHG_REG |
576 TG3_FLAG_POLL_SERDES))) { 576 TG3_FLAG_POLL_SERDES))) {
577 if (sblk->status & SD_STATUS_LINK_CHG) 577 if (sblk->status & SD_STATUS_LINK_CHG)
578 work_exists = 1; 578 work_exists = 1;
579 } 579 }
580 /* check for RX/TX work to do */ 580 /* check for RX/TX work to do */
581 if (sblk->idx[0].tx_consumer != tp->tx_cons || 581 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
582 sblk->idx[0].rx_producer != tp->rx_rcb_ptr) 582 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
583 work_exists = 1; 583 work_exists = 1;
584 584
585 return work_exists; 585 return work_exists;
586 } 586 }
587 587
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	/* Order the mailbox write before any later MMIO from other CPUs. */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
608 608
/* Quiesce the network interface: stop polling and TX while keeping the
 * watchdog from firing a spurious TX timeout.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
615 615
/* Resume the network interface after tg3_netif_stop(): wake the TX
 * queue, re-enable polling, and re-arm interrupts (forcing a status
 * update so no pending work is lost).
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
627 627
/* Step the chip core clock back to its normal rate.  5780-class chips
 * manage their own clocking and are skipped.  The intermediate writes
 * (with 40us settle delays) follow the chip-required sequence; do not
 * reorder them.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN controls and low alt-clock bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
658 658
/* Maximum number of 10us polls of MI_COM_BUSY before giving up. */
#define PHY_BUSY_LOOPS	5000

/* Read PHY register @reg over the MDIO (MI) interface into *val.
 * Autopolling is paused for the duration so the MI port is not shared
 * with the hardware poller.  Returns 0 on success, -EBUSY on timeout
 * (in which case *val is 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read opcode. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
709 709
/* Write @val to PHY register @reg over the MDIO (MI) interface.
 * Autopolling is paused for the duration.  Returns 0 on success,
 * -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI command frame: PHY address, register, data, write. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
754 754
/* Enable the PHY's ethernet@wirespeed feature via the vendor-specific
 * AUX_CTRL shadow register (0x7007 selects the shadow; bits 15 and 4
 * turn the feature on), unless the chip is flagged as not supporting it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
767 767
768 static int tg3_bmcr_reset(struct tg3 *tp) 768 static int tg3_bmcr_reset(struct tg3 *tp)
769 { 769 {
770 u32 phy_control; 770 u32 phy_control;
771 int limit, err; 771 int limit, err;
772 772
773 /* OK, reset it, and poll the BMCR_RESET bit until it 773 /* OK, reset it, and poll the BMCR_RESET bit until it
774 * clears or we time out. 774 * clears or we time out.
775 */ 775 */
776 phy_control = BMCR_RESET; 776 phy_control = BMCR_RESET;
777 err = tg3_writephy(tp, MII_BMCR, phy_control); 777 err = tg3_writephy(tp, MII_BMCR, phy_control);
778 if (err != 0) 778 if (err != 0)
779 return -EBUSY; 779 return -EBUSY;
780 780
781 limit = 5000; 781 limit = 5000;
782 while (limit--) { 782 while (limit--) {
783 err = tg3_readphy(tp, MII_BMCR, &phy_control); 783 err = tg3_readphy(tp, MII_BMCR, &phy_control);
784 if (err != 0) 784 if (err != 0)
785 return -EBUSY; 785 return -EBUSY;
786 786
787 if ((phy_control & BMCR_RESET) == 0) { 787 if ((phy_control & BMCR_RESET) == 0) {
788 udelay(40); 788 udelay(40);
789 break; 789 break;
790 } 790 }
791 udelay(10); 791 udelay(10);
792 } 792 }
793 if (limit <= 0) 793 if (limit <= 0)
794 return -EBUSY; 794 return -EBUSY;
795 795
796 return 0; 796 return 0;
797 } 797 }
798 798
799 static int tg3_wait_macro_done(struct tg3 *tp) 799 static int tg3_wait_macro_done(struct tg3 *tp)
800 { 800 {
801 int limit = 100; 801 int limit = 100;
802 802
803 while (limit--) { 803 while (limit--) {
804 u32 tmp32; 804 u32 tmp32;
805 805
806 if (!tg3_readphy(tp, 0x16, &tmp32)) { 806 if (!tg3_readphy(tp, 0x16, &tmp32)) {
807 if ((tmp32 & 0x1000) == 0) 807 if ((tmp32 & 0x1000) == 0)
808 break; 808 break;
809 } 809 }
810 } 810 }
811 if (limit <= 0) 811 if (limit <= 0)
812 return -EBUSY; 812 return -EBUSY;
813 813
814 return 0; 814 return 0;
815 } 815 }
816 816
/* Write a known test pattern into each of the PHY's four DSP
 * channels and read it back to verify the DSP stores data
 * correctly.  The writes through register 0x16 (0x0002 / 0x0202 /
 * 0x0082 / 0x0802) appear to step the DSP through write, commit
 * and read-back phases -- undocumented Broadcom magic, exact
 * semantics unverified.  On any mismatch or timeout *resetp is
 * set so the caller retries after another PHY reset.
 *
 * Returns 0 on success, -EBUSY on mismatch or timeout.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		/* Write all six pattern words for this channel. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the pattern and wait for the DSP to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back as low/high word pairs; the
		 * hardware only retains 15 bits of the low word and
		 * 4 bits of the high word, hence the masks.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the DSP back into a
				 * known state before reporting failure
				 * (magic values, semantics unverified).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
882 882
883 static int tg3_phy_reset_chanpat(struct tg3 *tp) 883 static int tg3_phy_reset_chanpat(struct tg3 *tp)
884 { 884 {
885 int chan; 885 int chan;
886 886
887 for (chan = 0; chan < 4; chan++) { 887 for (chan = 0; chan < 4; chan++) {
888 int i; 888 int i;
889 889
890 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 890 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
891 (chan * 0x2000) | 0x0200); 891 (chan * 0x2000) | 0x0200);
892 tg3_writephy(tp, 0x16, 0x0002); 892 tg3_writephy(tp, 0x16, 0x0002);
893 for (i = 0; i < 6; i++) 893 for (i = 0; i < 6; i++)
894 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); 894 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
895 tg3_writephy(tp, 0x16, 0x0202); 895 tg3_writephy(tp, 0x16, 0x0202);
896 if (tg3_wait_macro_done(tp)) 896 if (tg3_wait_macro_done(tp))
897 return -EBUSY; 897 return -EBUSY;
898 } 898 }
899 899
900 return 0; 900 return 0;
901 } 901 }
902 902
903 static int tg3_phy_reset_5703_4_5(struct tg3 *tp) 903 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
904 { 904 {
905 u32 reg32, phy9_orig; 905 u32 reg32, phy9_orig;
906 int retries, do_phy_reset, err; 906 int retries, do_phy_reset, err;
907 907
908 retries = 10; 908 retries = 10;
909 do_phy_reset = 1; 909 do_phy_reset = 1;
910 do { 910 do {
911 if (do_phy_reset) { 911 if (do_phy_reset) {
912 err = tg3_bmcr_reset(tp); 912 err = tg3_bmcr_reset(tp);
913 if (err) 913 if (err)
914 return err; 914 return err;
915 do_phy_reset = 0; 915 do_phy_reset = 0;
916 } 916 }
917 917
918 /* Disable transmitter and interrupt. */ 918 /* Disable transmitter and interrupt. */
919 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) 919 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
920 continue; 920 continue;
921 921
922 reg32 |= 0x3000; 922 reg32 |= 0x3000;
923 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 923 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
924 924
925 /* Set full-duplex, 1000 mbps. */ 925 /* Set full-duplex, 1000 mbps. */
926 tg3_writephy(tp, MII_BMCR, 926 tg3_writephy(tp, MII_BMCR,
927 BMCR_FULLDPLX | TG3_BMCR_SPEED1000); 927 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
928 928
929 /* Set to master mode. */ 929 /* Set to master mode. */
930 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig)) 930 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
931 continue; 931 continue;
932 932
933 tg3_writephy(tp, MII_TG3_CTRL, 933 tg3_writephy(tp, MII_TG3_CTRL,
934 (MII_TG3_CTRL_AS_MASTER | 934 (MII_TG3_CTRL_AS_MASTER |
935 MII_TG3_CTRL_ENABLE_AS_MASTER)); 935 MII_TG3_CTRL_ENABLE_AS_MASTER));
936 936
937 /* Enable SM_DSP_CLOCK and 6dB. */ 937 /* Enable SM_DSP_CLOCK and 6dB. */
938 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 938 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
939 939
940 /* Block the PHY control access. */ 940 /* Block the PHY control access. */
941 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); 941 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
942 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800); 942 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
943 943
944 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); 944 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
945 if (!err) 945 if (!err)
946 break; 946 break;
947 } while (--retries); 947 } while (--retries);
948 948
949 err = tg3_phy_reset_chanpat(tp); 949 err = tg3_phy_reset_chanpat(tp);
950 if (err) 950 if (err)
951 return err; 951 return err;
952 952
953 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); 953 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
954 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000); 954 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
955 955
956 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 956 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
957 tg3_writephy(tp, 0x16, 0x0000); 957 tg3_writephy(tp, 0x16, 0x0000);
958 958
959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
961 /* Set Extended packet length bit for jumbo frames */ 961 /* Set Extended packet length bit for jumbo frames */
962 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); 962 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
963 } 963 }
964 else { 964 else {
965 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 965 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
966 } 966 }
967 967
968 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); 968 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
969 969
970 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { 970 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
971 reg32 &= ~0x3000; 971 reg32 &= ~0x3000;
972 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); 972 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
973 } else if (!err) 973 } else if (!err)
974 err = -EBUSY; 974 err = -EBUSY;
975 975
976 return err; 976 return err;
977 } 977 }
978 978
979 static void tg3_link_report(struct tg3 *); 979 static void tg3_link_report(struct tg3 *);
980 980
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Dispatches to the heavyweight 5703/4/5 erratum sequence when
 * needed, then applies per-chip post-reset PHY bug workarounds.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR latches link-down events; read it twice so the second
	 * read reflects the current state.
	 */
	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Resetting the PHY drops the link; report it if it was up. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the extended DSP-verify reset sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset PHY erratum workarounds.  The DSP address/data
	 * pairs below are undocumented values from Broadcom; do not
	 * reorder them.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Double write is intentional. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1069 1069
/* Drive the GPIO-controlled auxiliary power switching: keep
 * standby (Vaux) power available when WOL or ASF needs it on
 * either port, or switch it off when neither does.  On 5704/5714
 * the two ports share this logic, so the peer device's flags are
 * consulted too.  NOTE(review): the GRC GPIO sequences below are
 * board-specific power-switch controls -- exact electrical meaning
 * not verifiable from this file.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Leave the GPIOs alone on write-protected (LOM) configs;
	 * see the "Switch out of Vaux if it is not a LOM" handling
	 * in tg3_set_power_state().
	 */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	/* Dual-port chips: look up the other port's private data. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Some port needs aux power: assert the GPIOs. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Once the peer port has finished init, it owns
			 * the shared power logic; don't touch it.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Apply the GPIO state in three steps with
			 * settle time; the ordering matters.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Neither port needs aux power: pulse GPIO1
			 * (assert / release / assert) -- presumably the
			 * power-off handshake; confirm against hw docs.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1165 1165
1166 static int tg3_setup_phy(struct tg3 *, int); 1166 static int tg3_setup_phy(struct tg3 *, int);
1167 1167
1168 #define RESET_KIND_SHUTDOWN 0 1168 #define RESET_KIND_SHUTDOWN 0
1169 #define RESET_KIND_INIT 1 1169 #define RESET_KIND_INIT 1
1170 #define RESET_KIND_SUSPEND 2 1170 #define RESET_KIND_SUSPEND 2
1171 1171
1172 static void tg3_write_sig_post_reset(struct tg3 *, int); 1172 static void tg3_write_sig_post_reset(struct tg3 *, int);
1173 static int tg3_halt_cpu(struct tg3 *, u32); 1173 static int tg3_halt_cpu(struct tg3 *, u32);
1174 static int tg3_nvram_lock(struct tg3 *); 1174 static int tg3_nvram_lock(struct tg3 *);
1175 static void tg3_nvram_unlock(struct tg3 *); 1175 static void tg3_nvram_unlock(struct tg3 *);
1176 1176
1177 static void tg3_power_down_phy(struct tg3 *tp) 1177 static void tg3_power_down_phy(struct tg3 *tp)
1178 { 1178 {
1179 /* The PHY should not be powered down on some chips because 1179 /* The PHY should not be powered down on some chips because
1180 * of bugs. 1180 * of bugs.
1181 */ 1181 */
1182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 1182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 1183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1184 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && 1184 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1185 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 1185 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1186 return; 1186 return;
1187 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 1187 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1188 } 1188 }
1189 1189
/* Transition the chip into the requested PCI power state.
 *
 * For PCI_D0 this just clears PME status, writes the PMCSR and
 * switches away from aux power on non-LOM boards.  For D1/D2/D3hot
 * it saves the link config, drops copper links to 10/half, arms
 * WOL if enabled, gates core clocks per chip family, powers down
 * the PHY when nothing needs it, frobs the aux-power GPIOs and
 * finally writes the new state to the PMCSR.
 *
 * Returns 0 on success, -EINVAL for an unrecognized state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* PME_STATUS is write-one-to-clear; clear it and the state
	 * field before merging in the new power state below.
	 */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};	/* NOTE(review): stray ';' after switch -- harmless */

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while in a low-power state. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper links drop to 10/half autoneg to reduce power draw. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, give the firmware up to ~200ms to post its
	 * completion magic in the status mailbox before continuing.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware we are shutting down with WOL armed. */
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
		      WOL_DRV_STATE_SHUTDOWN |
		      WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detect only if the capability
		 * advertises PME from D3cold.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver running so WOL frames are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: strategy depends on the chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply in two steps with settle delays in between. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1400 1400
1401 static void tg3_link_report(struct tg3 *tp) 1401 static void tg3_link_report(struct tg3 *tp)
1402 { 1402 {
1403 if (!netif_carrier_ok(tp->dev)) { 1403 if (!netif_carrier_ok(tp->dev)) {
1404 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name); 1404 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1405 } else { 1405 } else {
1406 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", 1406 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1407 tp->dev->name, 1407 tp->dev->name,
1408 (tp->link_config.active_speed == SPEED_1000 ? 1408 (tp->link_config.active_speed == SPEED_1000 ?
1409 1000 : 1409 1000 :
1410 (tp->link_config.active_speed == SPEED_100 ? 1410 (tp->link_config.active_speed == SPEED_100 ?
1411 100 : 10)), 1411 100 : 10)),
1412 (tp->link_config.active_duplex == DUPLEX_FULL ? 1412 (tp->link_config.active_duplex == DUPLEX_FULL ?
1413 "full" : "half")); 1413 "full" : "half"));
1414 1414
1415 printk(KERN_INFO PFX "%s: Flow control is %s for TX and " 1415 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1416 "%s for RX.\n", 1416 "%s for RX.\n",
1417 tp->dev->name, 1417 tp->dev->name,
1418 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off", 1418 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1419 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off"); 1419 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1420 } 1420 }
1421 } 1421 }
1422 1422
1423 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv) 1423 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1424 { 1424 {
1425 u32 new_tg3_flags = 0; 1425 u32 new_tg3_flags = 0;
1426 u32 old_rx_mode = tp->rx_mode; 1426 u32 old_rx_mode = tp->rx_mode;
1427 u32 old_tx_mode = tp->tx_mode; 1427 u32 old_tx_mode = tp->tx_mode;
1428 1428
1429 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) { 1429 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1430 1430
1431 /* Convert 1000BaseX flow control bits to 1000BaseT 1431 /* Convert 1000BaseX flow control bits to 1000BaseT
1432 * bits before resolving flow control. 1432 * bits before resolving flow control.
1433 */ 1433 */
1434 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 1434 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1435 local_adv &= ~(ADVERTISE_PAUSE_CAP | 1435 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1436 ADVERTISE_PAUSE_ASYM); 1436 ADVERTISE_PAUSE_ASYM);
1437 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM); 1437 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1438 1438
1439 if (local_adv & ADVERTISE_1000XPAUSE) 1439 if (local_adv & ADVERTISE_1000XPAUSE)
1440 local_adv |= ADVERTISE_PAUSE_CAP; 1440 local_adv |= ADVERTISE_PAUSE_CAP;
1441 if (local_adv & ADVERTISE_1000XPSE_ASYM) 1441 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1442 local_adv |= ADVERTISE_PAUSE_ASYM; 1442 local_adv |= ADVERTISE_PAUSE_ASYM;
1443 if (remote_adv & LPA_1000XPAUSE) 1443 if (remote_adv & LPA_1000XPAUSE)
1444 remote_adv |= LPA_PAUSE_CAP; 1444 remote_adv |= LPA_PAUSE_CAP;
1445 if (remote_adv & LPA_1000XPAUSE_ASYM) 1445 if (remote_adv & LPA_1000XPAUSE_ASYM)
1446 remote_adv |= LPA_PAUSE_ASYM; 1446 remote_adv |= LPA_PAUSE_ASYM;
1447 } 1447 }
1448 1448
1449 if (local_adv & ADVERTISE_PAUSE_CAP) { 1449 if (local_adv & ADVERTISE_PAUSE_CAP) {
1450 if (local_adv & ADVERTISE_PAUSE_ASYM) { 1450 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1451 if (remote_adv & LPA_PAUSE_CAP) 1451 if (remote_adv & LPA_PAUSE_CAP)
1452 new_tg3_flags |= 1452 new_tg3_flags |=
1453 (TG3_FLAG_RX_PAUSE | 1453 (TG3_FLAG_RX_PAUSE |
1454 TG3_FLAG_TX_PAUSE); 1454 TG3_FLAG_TX_PAUSE);
1455 else if (remote_adv & LPA_PAUSE_ASYM) 1455 else if (remote_adv & LPA_PAUSE_ASYM)
1456 new_tg3_flags |= 1456 new_tg3_flags |=
1457 (TG3_FLAG_RX_PAUSE); 1457 (TG3_FLAG_RX_PAUSE);
1458 } else { 1458 } else {
1459 if (remote_adv & LPA_PAUSE_CAP) 1459 if (remote_adv & LPA_PAUSE_CAP)
1460 new_tg3_flags |= 1460 new_tg3_flags |=
1461 (TG3_FLAG_RX_PAUSE | 1461 (TG3_FLAG_RX_PAUSE |
1462 TG3_FLAG_TX_PAUSE); 1462 TG3_FLAG_TX_PAUSE);
1463 } 1463 }
1464 } else if (local_adv & ADVERTISE_PAUSE_ASYM) { 1464 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1465 if ((remote_adv & LPA_PAUSE_CAP) && 1465 if ((remote_adv & LPA_PAUSE_CAP) &&
1466 (remote_adv & LPA_PAUSE_ASYM)) 1466 (remote_adv & LPA_PAUSE_ASYM))
1467 new_tg3_flags |= TG3_FLAG_TX_PAUSE; 1467 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1468 } 1468 }
1469 1469
1470 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE); 1470 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1471 tp->tg3_flags |= new_tg3_flags; 1471 tp->tg3_flags |= new_tg3_flags;
1472 } else { 1472 } else {
1473 new_tg3_flags = tp->tg3_flags; 1473 new_tg3_flags = tp->tg3_flags;
1474 } 1474 }
1475 1475
1476 if (new_tg3_flags & TG3_FLAG_RX_PAUSE) 1476 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1477 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1477 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1478 else 1478 else
1479 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1479 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1480 1480
1481 if (old_rx_mode != tp->rx_mode) { 1481 if (old_rx_mode != tp->rx_mode) {
1482 tw32_f(MAC_RX_MODE, tp->rx_mode); 1482 tw32_f(MAC_RX_MODE, tp->rx_mode);
1483 } 1483 }
1484 1484
1485 if (new_tg3_flags & TG3_FLAG_TX_PAUSE) 1485 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1486 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1486 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1487 else 1487 else
1488 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1488 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1489 1489
1490 if (old_tx_mode != tp->tx_mode) { 1490 if (old_tx_mode != tp->tx_mode) {
1491 tw32_f(MAC_TX_MODE, tp->tx_mode); 1491 tw32_f(MAC_TX_MODE, tp->tx_mode);
1492 } 1492 }
1493 } 1493 }
1494 1494
1495 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) 1495 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1496 { 1496 {
1497 switch (val & MII_TG3_AUX_STAT_SPDMASK) { 1497 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1498 case MII_TG3_AUX_STAT_10HALF: 1498 case MII_TG3_AUX_STAT_10HALF:
1499 *speed = SPEED_10; 1499 *speed = SPEED_10;
1500 *duplex = DUPLEX_HALF; 1500 *duplex = DUPLEX_HALF;
1501 break; 1501 break;
1502 1502
1503 case MII_TG3_AUX_STAT_10FULL: 1503 case MII_TG3_AUX_STAT_10FULL:
1504 *speed = SPEED_10; 1504 *speed = SPEED_10;
1505 *duplex = DUPLEX_FULL; 1505 *duplex = DUPLEX_FULL;
1506 break; 1506 break;
1507 1507
1508 case MII_TG3_AUX_STAT_100HALF: 1508 case MII_TG3_AUX_STAT_100HALF:
1509 *speed = SPEED_100; 1509 *speed = SPEED_100;
1510 *duplex = DUPLEX_HALF; 1510 *duplex = DUPLEX_HALF;
1511 break; 1511 break;
1512 1512
1513 case MII_TG3_AUX_STAT_100FULL: 1513 case MII_TG3_AUX_STAT_100FULL:
1514 *speed = SPEED_100; 1514 *speed = SPEED_100;
1515 *duplex = DUPLEX_FULL; 1515 *duplex = DUPLEX_FULL;
1516 break; 1516 break;
1517 1517
1518 case MII_TG3_AUX_STAT_1000HALF: 1518 case MII_TG3_AUX_STAT_1000HALF:
1519 *speed = SPEED_1000; 1519 *speed = SPEED_1000;
1520 *duplex = DUPLEX_HALF; 1520 *duplex = DUPLEX_HALF;
1521 break; 1521 break;
1522 1522
1523 case MII_TG3_AUX_STAT_1000FULL: 1523 case MII_TG3_AUX_STAT_1000FULL:
1524 *speed = SPEED_1000; 1524 *speed = SPEED_1000;
1525 *duplex = DUPLEX_FULL; 1525 *duplex = DUPLEX_FULL;
1526 break; 1526 break;
1527 1527
1528 default: 1528 default:
1529 *speed = SPEED_INVALID; 1529 *speed = SPEED_INVALID;
1530 *duplex = DUPLEX_INVALID; 1530 *duplex = DUPLEX_INVALID;
1531 break; 1531 break;
1532 }; 1532 };
1533 } 1533 }
1534 1534
/* Program a copper PHY's advertisement and control registers according
 * to tp->link_config.  Three cases:
 *   - phy_is_low_power: advertise only 10baseT (plus 100baseT when WOL
 *     at 100Mb is enabled) and clear gigabit advertisement;
 *   - speed == SPEED_INVALID: advertise every supported mode (masking
 *     gigabit when the 10/100-only flag is set) for autonegotiation;
 *   - otherwise: advertise exactly the requested speed/duplex.
 * Finally, if autoneg is disabled with a valid speed, force the mode via
 * BMCR; otherwise restart autonegotiation.  The PHY register write order
 * below is deliberate — do not reorder.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific mode requested: advertise everything. */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		/* Translate the ADVERTISED_* bits into the MII
		 * advertisement register encoding.
		 */
		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 silicon additionally forces master
			 * mode here unless restricted to 10/100.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* 10/100: clear gigabit advertisement first. */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only rewrite BMCR if it needs to change.  First drop the
		 * link by switching to loopback and poll (up to ~15ms,
		 * 1500 * 10us) for link-down before forcing the new mode.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice: link-status is a
				 * latched bit, the second read returns
				 * the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: (re)start autonegotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1673 1673
/* Load the BCM5401 PHY DSP with a fixed set of coefficient writes
 * (values are vendor-specified magic — presumably a Broadcom errata
 * workaround; TODO confirm against Broadcom docs).  Each DSP access is
 * a two-step sequence: write the coefficient address, then its value.
 * Returns nonzero if any PHY write failed (individual write results
 * are OR-ed together).
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	/* Let the last write settle before the caller proceeds. */
	udelay(40);

	return err;
}
1701 1701
1702 static int tg3_copper_is_advertising_all(struct tg3 *tp) 1702 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1703 { 1703 {
1704 u32 adv_reg, all_mask; 1704 u32 adv_reg, all_mask;
1705 1705
1706 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) 1706 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1707 return 0; 1707 return 0;
1708 1708
1709 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL | 1709 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1710 ADVERTISE_100HALF | ADVERTISE_100FULL); 1710 ADVERTISE_100HALF | ADVERTISE_100FULL);
1711 if ((adv_reg & all_mask) != all_mask) 1711 if ((adv_reg & all_mask) != all_mask)
1712 return 0; 1712 return 0;
1713 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 1713 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1714 u32 tg3_ctrl; 1714 u32 tg3_ctrl;
1715 1715
1716 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl)) 1716 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1717 return 0; 1717 return 0;
1718 1718
1719 all_mask = (MII_TG3_CTRL_ADV_1000_HALF | 1719 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1720 MII_TG3_CTRL_ADV_1000_FULL); 1720 MII_TG3_CTRL_ADV_1000_FULL);
1721 if ((tg3_ctrl & all_mask) != all_mask) 1721 if ((tg3_ctrl & all_mask) != all_mask)
1722 return 0; 1722 return 0;
1723 } 1723 }
1724 return 1; 1724 return 1;
1725 } 1725 }
1726 1726
1727 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) 1727 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1728 { 1728 {
1729 int current_link_up; 1729 int current_link_up;
1730 u32 bmsr, dummy; 1730 u32 bmsr, dummy;
1731 u16 current_speed; 1731 u16 current_speed;
1732 u8 current_duplex; 1732 u8 current_duplex;
1733 int i, err; 1733 int i, err;
1734 1734
1735 tw32(MAC_EVENT, 0); 1735 tw32(MAC_EVENT, 0);
1736 1736
1737 tw32_f(MAC_STATUS, 1737 tw32_f(MAC_STATUS,
1738 (MAC_STATUS_SYNC_CHANGED | 1738 (MAC_STATUS_SYNC_CHANGED |
1739 MAC_STATUS_CFG_CHANGED | 1739 MAC_STATUS_CFG_CHANGED |
1740 MAC_STATUS_MI_COMPLETION | 1740 MAC_STATUS_MI_COMPLETION |
1741 MAC_STATUS_LNKSTATE_CHANGED)); 1741 MAC_STATUS_LNKSTATE_CHANGED));
1742 udelay(40); 1742 udelay(40);
1743 1743
1744 tp->mi_mode = MAC_MI_MODE_BASE; 1744 tp->mi_mode = MAC_MI_MODE_BASE;
1745 tw32_f(MAC_MI_MODE, tp->mi_mode); 1745 tw32_f(MAC_MI_MODE, tp->mi_mode);
1746 udelay(80); 1746 udelay(80);
1747 1747
1748 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); 1748 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1749 1749
1750 /* Some third-party PHYs need to be reset on link going 1750 /* Some third-party PHYs need to be reset on link going
1751 * down. 1751 * down.
1752 */ 1752 */
1753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 1753 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && 1755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1756 netif_carrier_ok(tp->dev)) { 1756 netif_carrier_ok(tp->dev)) {
1757 tg3_readphy(tp, MII_BMSR, &bmsr); 1757 tg3_readphy(tp, MII_BMSR, &bmsr);
1758 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 1758 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1759 !(bmsr & BMSR_LSTATUS)) 1759 !(bmsr & BMSR_LSTATUS))
1760 force_reset = 1; 1760 force_reset = 1;
1761 } 1761 }
1762 if (force_reset) 1762 if (force_reset)
1763 tg3_phy_reset(tp); 1763 tg3_phy_reset(tp);
1764 1764
1765 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 1765 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1766 tg3_readphy(tp, MII_BMSR, &bmsr); 1766 tg3_readphy(tp, MII_BMSR, &bmsr);
1767 if (tg3_readphy(tp, MII_BMSR, &bmsr) || 1767 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1768 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) 1768 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1769 bmsr = 0; 1769 bmsr = 0;
1770 1770
1771 if (!(bmsr & BMSR_LSTATUS)) { 1771 if (!(bmsr & BMSR_LSTATUS)) {
1772 err = tg3_init_5401phy_dsp(tp); 1772 err = tg3_init_5401phy_dsp(tp);
1773 if (err) 1773 if (err)
1774 return err; 1774 return err;
1775 1775
1776 tg3_readphy(tp, MII_BMSR, &bmsr); 1776 tg3_readphy(tp, MII_BMSR, &bmsr);
1777 for (i = 0; i < 1000; i++) { 1777 for (i = 0; i < 1000; i++) {
1778 udelay(10); 1778 udelay(10);
1779 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 1779 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1780 (bmsr & BMSR_LSTATUS)) { 1780 (bmsr & BMSR_LSTATUS)) {
1781 udelay(40); 1781 udelay(40);
1782 break; 1782 break;
1783 } 1783 }
1784 } 1784 }
1785 1785
1786 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 && 1786 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1787 !(bmsr & BMSR_LSTATUS) && 1787 !(bmsr & BMSR_LSTATUS) &&
1788 tp->link_config.active_speed == SPEED_1000) { 1788 tp->link_config.active_speed == SPEED_1000) {
1789 err = tg3_phy_reset(tp); 1789 err = tg3_phy_reset(tp);
1790 if (!err) 1790 if (!err)
1791 err = tg3_init_5401phy_dsp(tp); 1791 err = tg3_init_5401phy_dsp(tp);
1792 if (err) 1792 if (err)
1793 return err; 1793 return err;
1794 } 1794 }
1795 } 1795 }
1796 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 1796 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1797 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { 1797 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1798 /* 5701 {A0,B0} CRC bug workaround */ 1798 /* 5701 {A0,B0} CRC bug workaround */
1799 tg3_writephy(tp, 0x15, 0x0a75); 1799 tg3_writephy(tp, 0x15, 0x0a75);
1800 tg3_writephy(tp, 0x1c, 0x8c68); 1800 tg3_writephy(tp, 0x1c, 0x8c68);
1801 tg3_writephy(tp, 0x1c, 0x8d68); 1801 tg3_writephy(tp, 0x1c, 0x8d68);
1802 tg3_writephy(tp, 0x1c, 0x8c68); 1802 tg3_writephy(tp, 0x1c, 0x8c68);
1803 } 1803 }
1804 1804
1805 /* Clear pending interrupts... */ 1805 /* Clear pending interrupts... */
1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 1807 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1808 1808
1809 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) 1809 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1810 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 1810 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1811 else 1811 else
1812 tg3_writephy(tp, MII_TG3_IMASK, ~0); 1812 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1813 1813
1814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 1814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { 1815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1816 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) 1816 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL, 1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 1818 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1819 else 1819 else
1820 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); 1820 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1821 } 1821 }
1822 1822
1823 current_link_up = 0; 1823 current_link_up = 0;
1824 current_speed = SPEED_INVALID; 1824 current_speed = SPEED_INVALID;
1825 current_duplex = DUPLEX_INVALID; 1825 current_duplex = DUPLEX_INVALID;
1826 1826
1827 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { 1827 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1828 u32 val; 1828 u32 val;
1829 1829
1830 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); 1830 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1831 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); 1831 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1832 if (!(val & (1 << 10))) { 1832 if (!(val & (1 << 10))) {
1833 val |= (1 << 10); 1833 val |= (1 << 10);
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, val); 1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1835 goto relink; 1835 goto relink;
1836 } 1836 }
1837 } 1837 }
1838 1838
1839 bmsr = 0; 1839 bmsr = 0;
1840 for (i = 0; i < 100; i++) { 1840 for (i = 0; i < 100; i++) {
1841 tg3_readphy(tp, MII_BMSR, &bmsr); 1841 tg3_readphy(tp, MII_BMSR, &bmsr);
1842 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 1842 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1843 (bmsr & BMSR_LSTATUS)) 1843 (bmsr & BMSR_LSTATUS))
1844 break; 1844 break;
1845 udelay(40); 1845 udelay(40);
1846 } 1846 }
1847 1847
1848 if (bmsr & BMSR_LSTATUS) { 1848 if (bmsr & BMSR_LSTATUS) {
1849 u32 aux_stat, bmcr; 1849 u32 aux_stat, bmcr;
1850 1850
1851 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); 1851 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1852 for (i = 0; i < 2000; i++) { 1852 for (i = 0; i < 2000; i++) {
1853 udelay(10); 1853 udelay(10);
1854 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && 1854 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1855 aux_stat) 1855 aux_stat)
1856 break; 1856 break;
1857 } 1857 }
1858 1858
1859 tg3_aux_stat_to_speed_duplex(tp, aux_stat, 1859 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1860 &current_speed, 1860 &current_speed,
1861 &current_duplex); 1861 &current_duplex);
1862 1862
1863 bmcr = 0; 1863 bmcr = 0;
1864 for (i = 0; i < 200; i++) { 1864 for (i = 0; i < 200; i++) {
1865 tg3_readphy(tp, MII_BMCR, &bmcr); 1865 tg3_readphy(tp, MII_BMCR, &bmcr);
1866 if (tg3_readphy(tp, MII_BMCR, &bmcr)) 1866 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1867 continue; 1867 continue;
1868 if (bmcr && bmcr != 0x7fff) 1868 if (bmcr && bmcr != 0x7fff)
1869 break; 1869 break;
1870 udelay(10); 1870 udelay(10);
1871 } 1871 }
1872 1872
1873 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 1873 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1874 if (bmcr & BMCR_ANENABLE) { 1874 if (bmcr & BMCR_ANENABLE) {
1875 current_link_up = 1; 1875 current_link_up = 1;
1876 1876
1877 /* Force autoneg restart if we are exiting 1877 /* Force autoneg restart if we are exiting
1878 * low power mode. 1878 * low power mode.
1879 */ 1879 */
1880 if (!tg3_copper_is_advertising_all(tp)) 1880 if (!tg3_copper_is_advertising_all(tp))
1881 current_link_up = 0; 1881 current_link_up = 0;
1882 } else { 1882 } else {
1883 current_link_up = 0; 1883 current_link_up = 0;
1884 } 1884 }
1885 } else { 1885 } else {
1886 if (!(bmcr & BMCR_ANENABLE) && 1886 if (!(bmcr & BMCR_ANENABLE) &&
1887 tp->link_config.speed == current_speed && 1887 tp->link_config.speed == current_speed &&
1888 tp->link_config.duplex == current_duplex) { 1888 tp->link_config.duplex == current_duplex) {
1889 current_link_up = 1; 1889 current_link_up = 1;
1890 } else { 1890 } else {
1891 current_link_up = 0; 1891 current_link_up = 0;
1892 } 1892 }
1893 } 1893 }
1894 1894
1895 tp->link_config.active_speed = current_speed; 1895 tp->link_config.active_speed = current_speed;
1896 tp->link_config.active_duplex = current_duplex; 1896 tp->link_config.active_duplex = current_duplex;
1897 } 1897 }
1898 1898
1899 if (current_link_up == 1 && 1899 if (current_link_up == 1 &&
1900 (tp->link_config.active_duplex == DUPLEX_FULL) && 1900 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1901 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 1901 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1902 u32 local_adv, remote_adv; 1902 u32 local_adv, remote_adv;
1903 1903
1904 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv)) 1904 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1905 local_adv = 0; 1905 local_adv = 0;
1906 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 1906 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1907 1907
1908 if (tg3_readphy(tp, MII_LPA, &remote_adv)) 1908 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1909 remote_adv = 0; 1909 remote_adv = 0;
1910 1910
1911 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); 1911 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1912 1912
1913 /* If we are not advertising full pause capability, 1913 /* If we are not advertising full pause capability,
1914 * something is wrong. Bring the link down and reconfigure. 1914 * something is wrong. Bring the link down and reconfigure.
1915 */ 1915 */
1916 if (local_adv != ADVERTISE_PAUSE_CAP) { 1916 if (local_adv != ADVERTISE_PAUSE_CAP) {
1917 current_link_up = 0; 1917 current_link_up = 0;
1918 } else { 1918 } else {
1919 tg3_setup_flow_control(tp, local_adv, remote_adv); 1919 tg3_setup_flow_control(tp, local_adv, remote_adv);
1920 } 1920 }
1921 } 1921 }
1922 relink: 1922 relink:
1923 if (current_link_up == 0 || tp->link_config.phy_is_low_power) { 1923 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1924 u32 tmp; 1924 u32 tmp;
1925 1925
1926 tg3_phy_copper_begin(tp); 1926 tg3_phy_copper_begin(tp);
1927 1927
1928 tg3_readphy(tp, MII_BMSR, &tmp); 1928 tg3_readphy(tp, MII_BMSR, &tmp);
1929 if (!tg3_readphy(tp, MII_BMSR, &tmp) && 1929 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1930 (tmp & BMSR_LSTATUS)) 1930 (tmp & BMSR_LSTATUS))
1931 current_link_up = 1; 1931 current_link_up = 1;
1932 } 1932 }
1933 1933
1934 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; 1934 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1935 if (current_link_up == 1) { 1935 if (current_link_up == 1) {
1936 if (tp->link_config.active_speed == SPEED_100 || 1936 if (tp->link_config.active_speed == SPEED_100 ||
1937 tp->link_config.active_speed == SPEED_10) 1937 tp->link_config.active_speed == SPEED_10)
1938 tp->mac_mode |= MAC_MODE_PORT_MODE_MII; 1938 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1939 else 1939 else
1940 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 1940 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1941 } else 1941 } else
1942 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; 1942 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1943 1943
1944 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; 1944 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1945 if (tp->link_config.active_duplex == DUPLEX_HALF) 1945 if (tp->link_config.active_duplex == DUPLEX_HALF)
1946 tp->mac_mode |= MAC_MODE_HALF_DUPLEX; 1946 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1947 1947
1948 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; 1948 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { 1949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1950 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) || 1950 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1951 (current_link_up == 1 && 1951 (current_link_up == 1 &&
1952 tp->link_config.active_speed == SPEED_10)) 1952 tp->link_config.active_speed == SPEED_10))
1953 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 1953 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1954 } else { 1954 } else {
1955 if (current_link_up == 1) 1955 if (current_link_up == 1)
1956 tp->mac_mode |= MAC_MODE_LINK_POLARITY; 1956 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1957 } 1957 }
1958 1958
1959 /* ??? Without this setting Netgear GA302T PHY does not 1959 /* ??? Without this setting Netgear GA302T PHY does not
1960 * ??? send/receive packets... 1960 * ??? send/receive packets...
1961 */ 1961 */
1962 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 && 1962 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1963 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { 1963 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1964 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; 1964 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1965 tw32_f(MAC_MI_MODE, tp->mi_mode); 1965 tw32_f(MAC_MI_MODE, tp->mi_mode);
1966 udelay(80); 1966 udelay(80);
1967 } 1967 }
1968 1968
1969 tw32_f(MAC_MODE, tp->mac_mode); 1969 tw32_f(MAC_MODE, tp->mac_mode);
1970 udelay(40); 1970 udelay(40);
1971 1971
1972 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 1972 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1973 /* Polled via timer. */ 1973 /* Polled via timer. */
1974 tw32_f(MAC_EVENT, 0); 1974 tw32_f(MAC_EVENT, 0);
1975 } else { 1975 } else {
1976 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); 1976 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1977 } 1977 }
1978 udelay(40); 1978 udelay(40);
1979 1979
1980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && 1980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1981 current_link_up == 1 && 1981 current_link_up == 1 &&
1982 tp->link_config.active_speed == SPEED_1000 && 1982 tp->link_config.active_speed == SPEED_1000 &&
1983 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || 1983 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1984 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { 1984 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1985 udelay(120); 1985 udelay(120);
1986 tw32_f(MAC_STATUS, 1986 tw32_f(MAC_STATUS,
1987 (MAC_STATUS_SYNC_CHANGED | 1987 (MAC_STATUS_SYNC_CHANGED |
1988 MAC_STATUS_CFG_CHANGED)); 1988 MAC_STATUS_CFG_CHANGED));
1989 udelay(40); 1989 udelay(40);
1990 tg3_write_mem(tp, 1990 tg3_write_mem(tp,
1991 NIC_SRAM_FIRMWARE_MBOX, 1991 NIC_SRAM_FIRMWARE_MBOX,
1992 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 1992 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1993 } 1993 }
1994 1994
1995 if (current_link_up != netif_carrier_ok(tp->dev)) { 1995 if (current_link_up != netif_carrier_ok(tp->dev)) {
1996 if (current_link_up) 1996 if (current_link_up)
1997 netif_carrier_on(tp->dev); 1997 netif_carrier_on(tp->dev);
1998 else 1998 else
1999 netif_carrier_off(tp->dev); 1999 netif_carrier_off(tp->dev);
2000 tg3_link_report(tp); 2000 tg3_link_report(tp);
2001 } 2001 }
2002 2002
2003 return 0; 2003 return 0;
2004 } 2004 }
2005 2005
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine).  The bit layout of txconfig/rxconfig
 * appears to follow the 1000BASE-X config word exchange — TODO
 * confirm against IEEE 802.3 Clause 37.
 */
struct tg3_fiber_aneginfo {
	/* Current state of the negotiation state machine. */
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	/* Control inputs (MR_AN_ENABLE, MR_RESTART_AN) and result
	 * outputs (MR_AN_COMPLETE, MR_LINK_OK, MR_LP_ADV_*) of the
	 * state machine, mixed in one bitmask.
	 */
	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters, incremented once per smachine invocation;
	 * link_time records when the current state was entered.
	 */
	unsigned long link_time, cur_time;

	/* Last config word seen on the wire and how many consecutive
	 * times it has repeated (two repeats => ability_match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived each tick from the received config word. */
	char ability_match, idle_match, ack_match;

	/* Config words we transmit / last received. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
2063 #define ANEG_OK 0 2063 #define ANEG_OK 0
2064 #define ANEG_DONE 1 2064 #define ANEG_DONE 1
2065 #define ANEG_TIMER_ENAB 2 2065 #define ANEG_TIMER_ENAB 2
2066 #define ANEG_FAILED -1 2066 #define ANEG_FAILED -1
2067 2067
2068 #define ANEG_STATE_SETTLE_TIME 10000 2068 #define ANEG_STATE_SETTLE_TIME 10000
2069 2069
/* Advance the software fiber autonegotiation state machine by one
 * tick.  Reads the received config word from the MAC, updates the
 * match tracking in @ap, then dispatches on ap->state.
 *
 * Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when the caller
 * should keep polling on a timer, ANEG_DONE when negotiation has
 * finished, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick ever: zero all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match is set once the same non-idle config
		 * word has been seen more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: the link partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Let the restart settle before probing abilities. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until the partner repeats a non-zero config word. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's abilities with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Restart if the abilities changed under us. */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertisement into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Neither side has a next page to send. */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2317 2317
2318 static int fiber_autoneg(struct tg3 *tp, u32 *flags) 2318 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2319 { 2319 {
2320 int res = 0; 2320 int res = 0;
2321 struct tg3_fiber_aneginfo aninfo; 2321 struct tg3_fiber_aneginfo aninfo;
2322 int status = ANEG_FAILED; 2322 int status = ANEG_FAILED;
2323 unsigned int tick; 2323 unsigned int tick;
2324 u32 tmp; 2324 u32 tmp;
2325 2325
2326 tw32_f(MAC_TX_AUTO_NEG, 0); 2326 tw32_f(MAC_TX_AUTO_NEG, 0);
2327 2327
2328 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 2328 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2329 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); 2329 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2330 udelay(40); 2330 udelay(40);
2331 2331
2332 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); 2332 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2333 udelay(40); 2333 udelay(40);
2334 2334
2335 memset(&aninfo, 0, sizeof(aninfo)); 2335 memset(&aninfo, 0, sizeof(aninfo));
2336 aninfo.flags |= MR_AN_ENABLE; 2336 aninfo.flags |= MR_AN_ENABLE;
2337 aninfo.state = ANEG_STATE_UNKNOWN; 2337 aninfo.state = ANEG_STATE_UNKNOWN;
2338 aninfo.cur_time = 0; 2338 aninfo.cur_time = 0;
2339 tick = 0; 2339 tick = 0;
2340 while (++tick < 195000) { 2340 while (++tick < 195000) {
2341 status = tg3_fiber_aneg_smachine(tp, &aninfo); 2341 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2342 if (status == ANEG_DONE || status == ANEG_FAILED) 2342 if (status == ANEG_DONE || status == ANEG_FAILED)
2343 break; 2343 break;
2344 2344
2345 udelay(1); 2345 udelay(1);
2346 } 2346 }
2347 2347
2348 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; 2348 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2349 tw32_f(MAC_MODE, tp->mac_mode); 2349 tw32_f(MAC_MODE, tp->mac_mode);
2350 udelay(40); 2350 udelay(40);
2351 2351
2352 *flags = aninfo.flags; 2352 *flags = aninfo.flags;
2353 2353
2354 if (status == ANEG_DONE && 2354 if (status == ANEG_DONE &&
2355 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | 2355 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2356 MR_LP_ADV_FULL_DUPLEX))) 2356 MR_LP_ADV_FULL_DUPLEX)))
2357 res = 1; 2357 res = 1;
2358 2358
2359 return res; 2359 return res;
2360 } 2360 }
2361 2361
2362 static void tg3_init_bcm8002(struct tg3 *tp) 2362 static void tg3_init_bcm8002(struct tg3 *tp)
2363 { 2363 {
2364 u32 mac_status = tr32(MAC_STATUS); 2364 u32 mac_status = tr32(MAC_STATUS);
2365 int i; 2365 int i;
2366 2366
2367 /* Reset when initting first time or we have a link. */ 2367 /* Reset when initting first time or we have a link. */
2368 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && 2368 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2369 !(mac_status & MAC_STATUS_PCS_SYNCED)) 2369 !(mac_status & MAC_STATUS_PCS_SYNCED))
2370 return; 2370 return;
2371 2371
2372 /* Set PLL lock range. */ 2372 /* Set PLL lock range. */
2373 tg3_writephy(tp, 0x16, 0x8007); 2373 tg3_writephy(tp, 0x16, 0x8007);
2374 2374
2375 /* SW reset */ 2375 /* SW reset */
2376 tg3_writephy(tp, MII_BMCR, BMCR_RESET); 2376 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2377 2377
2378 /* Wait for reset to complete. */ 2378 /* Wait for reset to complete. */
2379 /* XXX schedule_timeout() ... */ 2379 /* XXX schedule_timeout() ... */
2380 for (i = 0; i < 500; i++) 2380 for (i = 0; i < 500; i++)
2381 udelay(10); 2381 udelay(10);
2382 2382
2383 /* Config mode; select PMA/Ch 1 regs. */ 2383 /* Config mode; select PMA/Ch 1 regs. */
2384 tg3_writephy(tp, 0x10, 0x8411); 2384 tg3_writephy(tp, 0x10, 0x8411);
2385 2385
2386 /* Enable auto-lock and comdet, select txclk for tx. */ 2386 /* Enable auto-lock and comdet, select txclk for tx. */
2387 tg3_writephy(tp, 0x11, 0x0a10); 2387 tg3_writephy(tp, 0x11, 0x0a10);
2388 2388
2389 tg3_writephy(tp, 0x18, 0x00a0); 2389 tg3_writephy(tp, 0x18, 0x00a0);
2390 tg3_writephy(tp, 0x16, 0x41ff); 2390 tg3_writephy(tp, 0x16, 0x41ff);
2391 2391
2392 /* Assert and deassert POR. */ 2392 /* Assert and deassert POR. */
2393 tg3_writephy(tp, 0x13, 0x0400); 2393 tg3_writephy(tp, 0x13, 0x0400);
2394 udelay(40); 2394 udelay(40);
2395 tg3_writephy(tp, 0x13, 0x0000); 2395 tg3_writephy(tp, 0x13, 0x0000);
2396 2396
2397 tg3_writephy(tp, 0x11, 0x0a50); 2397 tg3_writephy(tp, 0x11, 0x0a50);
2398 udelay(40); 2398 udelay(40);
2399 tg3_writephy(tp, 0x11, 0x0a10); 2399 tg3_writephy(tp, 0x11, 0x0a10);
2400 2400
2401 /* Wait for signal to stabilize */ 2401 /* Wait for signal to stabilize */
2402 /* XXX schedule_timeout() ... */ 2402 /* XXX schedule_timeout() ... */
2403 for (i = 0; i < 15000; i++) 2403 for (i = 0; i < 15000; i++)
2404 udelay(10); 2404 udelay(10);
2405 2405
2406 /* Deselect the channel register so we can read the PHYID 2406 /* Deselect the channel register so we can read the PHYID
2407 * later. 2407 * later.
2408 */ 2408 */
2409 tg3_writephy(tp, 0x10, 0x8011); 2409 tg3_writephy(tp, 0x10, 0x8011);
2410 } 2410 }
2411 2411
/* Configure fiber link using the hardware SG_DIG autonegotiation
 * block, honoring tp->link_config.autoneg.
 *
 * On 5704 A0/A1 (note: the rev check below looks inverted relative to
 * the comment intent — it triggers on every chip EXCEPT those revs;
 * TODO confirm which chips need the SERDES workaround) a SERDES_CFG
 * workaround value is written alongside the SG_DIG programming.
 *
 * Returns nonzero when the link came up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg (bit 31) is on,
		 * turn it off and restore the non-autoneg control value.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse the soft-reset
		 * bit (bit 30) around the desired control value.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: pick up the partner's
			 * pause bits and bring the link up.
			 */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  The first failure
			 * after init is forgiven; otherwise fall back
			 * to parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2536 2536
/* Fallback fiber link bring-up used when the chip lacks hardware
 * autonegotiation (TG3_FLG2_HW_AUTONEG clear).  Runs the software
 * 1000BASE-X autoneg state machine (fiber_autoneg) or forces 1000FD,
 * and programs flow control from the negotiated ability words.
 *
 * Returns 1 if the link came up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no link.  Clear the remembered flow-control
	 * handshake so it is redone on the next link-up.
	 */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the link-partner ability bits into
			 * MII-style pause advertisement words for the
			 * common flow-control resolution helper.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack latched SYNC/CFG change events until the status
		 * stays quiet (bounded to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: PCS synced and the partner is not
		 * sending config code words, so treat the link as up even
		 * though autoneg did not complete.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2594 2594
/* Bring up / re-check the link on a TBI (1000BASE-X fiber) port.
 * Called from tg3_setup_phy() when TG3_FLG2_PHY_SERDES is set.
 * Delegates the actual negotiation to tg3_setup_fiber_hw_autoneg()
 * or tg3_setup_fiber_by_hand(), then updates carrier state, LEDs
 * and issues a link report if anything changed.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we can tell at the end
	 * whether a link report is needed.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done.
	 * If the MAC still shows a clean synced link with no pending
	 * config words, just ack the change bits and keep the current
	 * link state.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode before touching the PHY. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Drop any stale link-change indication from the status block
	 * while keeping SD_STATUS_UPDATED set.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched sync/config change bits until they stay clear
	 * (bounded to 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Pulse SEND_CONFIGS to restart negotiation with
			 * the link partner.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000/full when up; drive the link LED by hand. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report the link if carrier flipped, or if it stayed up but
	 * speed/duplex/pause changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2708 2708
/* Link setup for fiber ports driven through an MII-accessible serdes
 * (TG3_FLG2_MII_SERDES, e.g. 5714S).  Handles three cases: waiting on
 * an earlier parallel-detect, (re)starting 1000BASE-X autoneg, and
 * forcing speed/duplex.  Updates carrier state and MAC duplex mode.
 *
 * Returns the OR of all tg3_readphy() error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all latched MAC status change bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: link status is latched-low, so the second
	 * read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC's TX status is the authoritative link
		 * indication; override the PHY's BMSR bit with it.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured advertising mask.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg only if the advertisement changed or
		 * autoneg is not currently enabled; otherwise fall through
		 * and just evaluate the existing link.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			/* Early return: give autoneg time to complete;
			 * link state is evaluated on a later pass.
			 */
			return err;
		}
	} else {
		/* Forced mode: program duplex by hand. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the 1000X advertisement and
				 * restart autoneg so the partner drops
				 * the link before we force parameters.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
					     BMCR_ANRESTART |
					     BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* BMSR is latched-low: read twice for current state. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	/* Evaluate the resulting link state. */
	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * common subset of both advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	/* NOTE(review): this tests the previous active_duplex, which is
	 * only updated to current_duplex a few lines below -- the MAC
	 * duplex bit appears to lag the just-negotiated duplex by one
	 * pass; confirm against later tg3 versions before changing.
	 */
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2875 2875
/* Periodic parallel-detection check for MII-serdes fiber ports.
 *
 * If autoneg is enabled but carrier is down, and the PHY reports
 * signal detect without incoming config code words, the link partner
 * is not autonegotiating: force 1000FD and flag parallel-detect mode.
 * Conversely, if we are in parallel-detect mode and config words start
 * arriving, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
		/* Give autoneg time to complete. */
		tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Double read -- presumably the first read returns
			 * latched state; TODO confirm against the PHY
			 * datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2933 2933
2934 static int tg3_setup_phy(struct tg3 *tp, int force_reset) 2934 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2935 { 2935 {
2936 int err; 2936 int err;
2937 2937
2938 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 2938 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2939 err = tg3_setup_fiber_phy(tp, force_reset); 2939 err = tg3_setup_fiber_phy(tp, force_reset);
2940 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 2940 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2941 err = tg3_setup_fiber_mii_phy(tp, force_reset); 2941 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2942 } else { 2942 } else {
2943 err = tg3_setup_copper_phy(tp, force_reset); 2943 err = tg3_setup_copper_phy(tp, force_reset);
2944 } 2944 }
2945 2945
2946 if (tp->link_config.active_speed == SPEED_1000 && 2946 if (tp->link_config.active_speed == SPEED_1000 &&
2947 tp->link_config.active_duplex == DUPLEX_HALF) 2947 tp->link_config.active_duplex == DUPLEX_HALF)
2948 tw32(MAC_TX_LENGTHS, 2948 tw32(MAC_TX_LENGTHS,
2949 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2949 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2950 (6 << TX_LENGTHS_IPG_SHIFT) | 2950 (6 << TX_LENGTHS_IPG_SHIFT) |
2951 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); 2951 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2952 else 2952 else
2953 tw32(MAC_TX_LENGTHS, 2953 tw32(MAC_TX_LENGTHS,
2954 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 2954 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2955 (6 << TX_LENGTHS_IPG_SHIFT) | 2955 (6 << TX_LENGTHS_IPG_SHIFT) |
2956 (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); 2956 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2957 2957
2958 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 2958 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2959 if (netif_carrier_ok(tp->dev)) { 2959 if (netif_carrier_ok(tp->dev)) {
2960 tw32(HOSTCC_STAT_COAL_TICKS, 2960 tw32(HOSTCC_STAT_COAL_TICKS,
2961 tp->coal.stats_block_coalesce_usecs); 2961 tp->coal.stats_block_coalesce_usecs);
2962 } else { 2962 } else {
2963 tw32(HOSTCC_STAT_COAL_TICKS, 0); 2963 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2964 } 2964 }
2965 } 2965 }
2966 2966
2967 return err; 2967 return err;
2968 } 2968 }
2969 2969
2970 /* This is called whenever we suspect that the system chipset is re- 2970 /* This is called whenever we suspect that the system chipset is re-
2971 * ordering the sequence of MMIO to the tx send mailbox. The symptom 2971 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2972 * is bogus tx completions. We try to recover by setting the 2972 * is bogus tx completions. We try to recover by setting the
2973 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later 2973 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2974 * in the workqueue. 2974 * in the workqueue.
2975 */ 2975 */
2976 static void tg3_tx_recover(struct tg3 *tp) 2976 static void tg3_tx_recover(struct tg3 *tp)
2977 { 2977 {
2978 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || 2978 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2979 tp->write32_tx_mbox == tg3_write_indirect_mbox); 2979 tp->write32_tx_mbox == tg3_write_indirect_mbox);
2980 2980
2981 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-" 2981 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2982 "mapped I/O cycles to the network device, attempting to " 2982 "mapped I/O cycles to the network device, attempting to "
2983 "recover. Please report the problem to the driver maintainer " 2983 "recover. Please report the problem to the driver maintainer "
2984 "and include system chipset information.\n", tp->dev->name); 2984 "and include system chipset information.\n", tp->dev->name);
2985 2985
2986 spin_lock(&tp->lock); 2986 spin_lock(&tp->lock);
2987 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; 2987 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2988 spin_unlock(&tp->lock); 2988 spin_unlock(&tp->lock);
2989 } 2989 }
2990 2990
2991 /* Tigon3 never reports partial packet sends. So we do not 2991 /* Tigon3 never reports partial packet sends. So we do not
2992 * need special logic to handle SKBs that have not had all 2992 * need special logic to handle SKBs that have not had all
2993 * of their frags sent yet, like SunGEM does. 2993 * of their frags sent yet, like SunGEM does.
2994 */ 2994 */
/* Reclaim TX descriptors the hardware has finished with: walk the
 * software consumer index up to the hardware consumer index from the
 * status block, unmapping each skb's head and fragments, freeing the
 * skb, and waking the queue if it was stopped and space opened up.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for a slot with no skb means the chipset
		 * re-ordered our mailbox writes; trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each page fragment occupies its own descriptor slot. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Fragment slots must be empty and must not run
			 * past the hardware index; otherwise flag a bug
			 * (but finish unmapping before recovering).
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Re-check under tx_lock to avoid racing with the transmit path
	 * stopping the queue after our first check.
	 */
	if (unlikely(netif_queue_stopped(tp->dev))) {
		spin_lock(&tp->tx_lock);
		if (netif_queue_stopped(tp->dev) &&
		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}
}
3050 3050
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
3062 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, 3062 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3063 int src_idx, u32 dest_idx_unmasked) 3063 int src_idx, u32 dest_idx_unmasked)
3064 { 3064 {
3065 struct tg3_rx_buffer_desc *desc; 3065 struct tg3_rx_buffer_desc *desc;
3066 struct ring_info *map, *src_map; 3066 struct ring_info *map, *src_map;
3067 struct sk_buff *skb; 3067 struct sk_buff *skb;
3068 dma_addr_t mapping; 3068 dma_addr_t mapping;
3069 int skb_size, dest_idx; 3069 int skb_size, dest_idx;
3070 3070
3071 src_map = NULL; 3071 src_map = NULL;
3072 switch (opaque_key) { 3072 switch (opaque_key) {
3073 case RXD_OPAQUE_RING_STD: 3073 case RXD_OPAQUE_RING_STD:
3074 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 3074 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3075 desc = &tp->rx_std[dest_idx]; 3075 desc = &tp->rx_std[dest_idx];
3076 map = &tp->rx_std_buffers[dest_idx]; 3076 map = &tp->rx_std_buffers[dest_idx];
3077 if (src_idx >= 0) 3077 if (src_idx >= 0)
3078 src_map = &tp->rx_std_buffers[src_idx]; 3078 src_map = &tp->rx_std_buffers[src_idx];
3079 skb_size = tp->rx_pkt_buf_sz; 3079 skb_size = tp->rx_pkt_buf_sz;
3080 break; 3080 break;
3081 3081
3082 case RXD_OPAQUE_RING_JUMBO: 3082 case RXD_OPAQUE_RING_JUMBO:
3083 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 3083 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3084 desc = &tp->rx_jumbo[dest_idx]; 3084 desc = &tp->rx_jumbo[dest_idx];
3085 map = &tp->rx_jumbo_buffers[dest_idx]; 3085 map = &tp->rx_jumbo_buffers[dest_idx];
3086 if (src_idx >= 0) 3086 if (src_idx >= 0)
3087 src_map = &tp->rx_jumbo_buffers[src_idx]; 3087 src_map = &tp->rx_jumbo_buffers[src_idx];
3088 skb_size = RX_JUMBO_PKT_BUF_SZ; 3088 skb_size = RX_JUMBO_PKT_BUF_SZ;
3089 break; 3089 break;
3090 3090
3091 default: 3091 default:
3092 return -EINVAL; 3092 return -EINVAL;
3093 }; 3093 };
3094 3094
3095 /* Do not overwrite any of the map or rp information 3095 /* Do not overwrite any of the map or rp information
3096 * until we are sure we can commit to a new buffer. 3096 * until we are sure we can commit to a new buffer.
3097 * 3097 *
3098 * Callers depend upon this behavior and assume that 3098 * Callers depend upon this behavior and assume that
3099 * we leave everything unchanged if we fail. 3099 * we leave everything unchanged if we fail.
3100 */ 3100 */
3101 skb = dev_alloc_skb(skb_size); 3101 skb = dev_alloc_skb(skb_size);
3102 if (skb == NULL) 3102 if (skb == NULL)
3103 return -ENOMEM; 3103 return -ENOMEM;
3104 3104
3105 skb->dev = tp->dev; 3105 skb->dev = tp->dev;
3106 skb_reserve(skb, tp->rx_offset); 3106 skb_reserve(skb, tp->rx_offset);
3107 3107
3108 mapping = pci_map_single(tp->pdev, skb->data, 3108 mapping = pci_map_single(tp->pdev, skb->data,
3109 skb_size - tp->rx_offset, 3109 skb_size - tp->rx_offset,
3110 PCI_DMA_FROMDEVICE); 3110 PCI_DMA_FROMDEVICE);
3111 3111
3112 map->skb = skb; 3112 map->skb = skb;
3113 pci_unmap_addr_set(map, mapping, mapping); 3113 pci_unmap_addr_set(map, mapping, mapping);
3114 3114
3115 if (src_map != NULL) 3115 if (src_map != NULL)
3116 src_map->skb = NULL; 3116 src_map->skb = NULL;
3117 3117
3118 desc->addr_hi = ((u64)mapping >> 32); 3118 desc->addr_hi = ((u64)mapping >> 32);
3119 desc->addr_lo = ((u64)mapping & 0xffffffff); 3119 desc->addr_lo = ((u64)mapping & 0xffffffff);
3120 3120
3121 return skb_size; 3121 return skb_size;
3122 } 3122 }
3123 3123
3124 /* We only need to move over in the address because the other 3124 /* We only need to move over in the address because the other
3125 * members of the RX descriptor are invariant. See notes above 3125 * members of the RX descriptor are invariant. See notes above
3126 * tg3_alloc_rx_skb for full details. 3126 * tg3_alloc_rx_skb for full details.
3127 */ 3127 */
3128 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, 3128 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3129 int src_idx, u32 dest_idx_unmasked) 3129 int src_idx, u32 dest_idx_unmasked)
3130 { 3130 {
3131 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 3131 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3132 struct ring_info *src_map, *dest_map; 3132 struct ring_info *src_map, *dest_map;
3133 int dest_idx; 3133 int dest_idx;
3134 3134
3135 switch (opaque_key) { 3135 switch (opaque_key) {
3136 case RXD_OPAQUE_RING_STD: 3136 case RXD_OPAQUE_RING_STD:
3137 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 3137 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3138 dest_desc = &tp->rx_std[dest_idx]; 3138 dest_desc = &tp->rx_std[dest_idx];
3139 dest_map = &tp->rx_std_buffers[dest_idx]; 3139 dest_map = &tp->rx_std_buffers[dest_idx];
3140 src_desc = &tp->rx_std[src_idx]; 3140 src_desc = &tp->rx_std[src_idx];
3141 src_map = &tp->rx_std_buffers[src_idx]; 3141 src_map = &tp->rx_std_buffers[src_idx];
3142 break; 3142 break;
3143 3143
3144 case RXD_OPAQUE_RING_JUMBO: 3144 case RXD_OPAQUE_RING_JUMBO:
3145 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 3145 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3146 dest_desc = &tp->rx_jumbo[dest_idx]; 3146 dest_desc = &tp->rx_jumbo[dest_idx];
3147 dest_map = &tp->rx_jumbo_buffers[dest_idx]; 3147 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3148 src_desc = &tp->rx_jumbo[src_idx]; 3148 src_desc = &tp->rx_jumbo[src_idx];
3149 src_map = &tp->rx_jumbo_buffers[src_idx]; 3149 src_map = &tp->rx_jumbo_buffers[src_idx];
3150 break; 3150 break;
3151 3151
3152 default: 3152 default:
3153 return; 3153 return;
3154 }; 3154 };
3155 3155
3156 dest_map->skb = src_map->skb; 3156 dest_map->skb = src_map->skb;
3157 pci_unmap_addr_set(dest_map, mapping, 3157 pci_unmap_addr_set(dest_map, mapping,
3158 pci_unmap_addr(src_map, mapping)); 3158 pci_unmap_addr(src_map, mapping));
3159 dest_desc->addr_hi = src_desc->addr_hi; 3159 dest_desc->addr_hi = src_desc->addr_hi;
3160 dest_desc->addr_lo = src_desc->addr_lo; 3160 dest_desc->addr_lo = src_desc->addr_lo;
3161 3161
3162 src_map->skb = NULL; 3162 src_map->skb = NULL;
3163 } 3163 }
3164 3164
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3171 3171
3172 /* The RX ring scheme is composed of multiple rings which post fresh 3172 /* The RX ring scheme is composed of multiple rings which post fresh
3173 * buffers to the chip, and one special ring the chip uses to report 3173 * buffers to the chip, and one special ring the chip uses to report
3174 * status back to the host. 3174 * status back to the host.
3175 * 3175 *
3176 * The special ring reports the status of received packets to the 3176 * The special ring reports the status of received packets to the
3177 * host. The chip does not write into the original descriptor the 3177 * host. The chip does not write into the original descriptor the
3178 * RX buffer was obtained from. The chip simply takes the original 3178 * RX buffer was obtained from. The chip simply takes the original
3179 * descriptor as provided by the host, updates the status and length 3179 * descriptor as provided by the host, updates the status and length
3180 * field, then writes this into the next status ring entry. 3180 * field, then writes this into the next status ring entry.
3181 * 3181 *
3182 * Each ring the host uses to post buffers to the chip is described 3182 * Each ring the host uses to post buffers to the chip is described
3183 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives, 3183 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3184 * it is first placed into the on-chip ram. When the packet's length 3184 * it is first placed into the on-chip ram. When the packet's length
3185 * is known, it walks down the TG3_BDINFO entries to select the ring. 3185 * is known, it walks down the TG3_BDINFO entries to select the ring.
3186 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO 3186 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3187 * which is within the range of the new packet's length is chosen. 3187 * which is within the range of the new packet's length is chosen.
3188 * 3188 *
3189 * The "separate ring for rx status" scheme may sound queer, but it makes 3189 * The "separate ring for rx status" scheme may sound queer, but it makes
3190 * sense from a cache coherency perspective. If only the host writes 3190 * sense from a cache coherency perspective. If only the host writes
3191 * to the buffer post rings, and only the chip writes to the rx status 3191 * to the buffer post rings, and only the chip writes to the rx status
3192 * rings, then cache lines never move beyond shared-modified state. 3192 * rings, then cache lines never move beyond shared-modified state.
3193 * If both the host and chip were to write into the same ring, cache line 3193 * If both the host and chip were to write into the same ring, cache line
3194 * eviction could occur since both entities want it in an exclusive state. 3194 * eviction could occur since both entities want it in an exclusive state.
3195 */ 3195 */
3196 static int tg3_rx(struct tg3 *tp, int budget) 3196 static int tg3_rx(struct tg3 *tp, int budget)
3197 { 3197 {
3198 u32 work_mask; 3198 u32 work_mask;
3199 u32 sw_idx = tp->rx_rcb_ptr; 3199 u32 sw_idx = tp->rx_rcb_ptr;
3200 u16 hw_idx; 3200 u16 hw_idx;
3201 int received; 3201 int received;
3202 3202
3203 hw_idx = tp->hw_status->idx[0].rx_producer; 3203 hw_idx = tp->hw_status->idx[0].rx_producer;
3204 /* 3204 /*
3205 * We need to order the read of hw_idx and the read of 3205 * We need to order the read of hw_idx and the read of
3206 * the opaque cookie. 3206 * the opaque cookie.
3207 */ 3207 */
3208 rmb(); 3208 rmb();
3209 work_mask = 0; 3209 work_mask = 0;
3210 received = 0; 3210 received = 0;
3211 while (sw_idx != hw_idx && budget > 0) { 3211 while (sw_idx != hw_idx && budget > 0) {
3212 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx]; 3212 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3213 unsigned int len; 3213 unsigned int len;
3214 struct sk_buff *skb; 3214 struct sk_buff *skb;
3215 dma_addr_t dma_addr; 3215 dma_addr_t dma_addr;
3216 u32 opaque_key, desc_idx, *post_ptr; 3216 u32 opaque_key, desc_idx, *post_ptr;
3217 3217
3218 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 3218 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3219 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 3219 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3220 if (opaque_key == RXD_OPAQUE_RING_STD) { 3220 if (opaque_key == RXD_OPAQUE_RING_STD) {
3221 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], 3221 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3222 mapping); 3222 mapping);
3223 skb = tp->rx_std_buffers[desc_idx].skb; 3223 skb = tp->rx_std_buffers[desc_idx].skb;
3224 post_ptr = &tp->rx_std_ptr; 3224 post_ptr = &tp->rx_std_ptr;
3225 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 3225 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3226 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx], 3226 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3227 mapping); 3227 mapping);
3228 skb = tp->rx_jumbo_buffers[desc_idx].skb; 3228 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3229 post_ptr = &tp->rx_jumbo_ptr; 3229 post_ptr = &tp->rx_jumbo_ptr;
3230 } 3230 }
3231 else { 3231 else {
3232 goto next_pkt_nopost; 3232 goto next_pkt_nopost;
3233 } 3233 }
3234 3234
3235 work_mask |= opaque_key; 3235 work_mask |= opaque_key;
3236 3236
3237 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 3237 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3238 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { 3238 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3239 drop_it: 3239 drop_it:
3240 tg3_recycle_rx(tp, opaque_key, 3240 tg3_recycle_rx(tp, opaque_key,
3241 desc_idx, *post_ptr); 3241 desc_idx, *post_ptr);
3242 drop_it_no_recycle: 3242 drop_it_no_recycle:
3243 /* Other statistics kept track of by card. */ 3243 /* Other statistics kept track of by card. */
3244 tp->net_stats.rx_dropped++; 3244 tp->net_stats.rx_dropped++;
3245 goto next_pkt; 3245 goto next_pkt;
3246 } 3246 }
3247 3247
3248 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */ 3248 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3249 3249
3250 if (len > RX_COPY_THRESHOLD 3250 if (len > RX_COPY_THRESHOLD
3251 && tp->rx_offset == 2 3251 && tp->rx_offset == 2
3252 /* rx_offset != 2 iff this is a 5701 card running 3252 /* rx_offset != 2 iff this is a 5701 card running
3253 * in PCI-X mode [see tg3_get_invariants()] */ 3253 * in PCI-X mode [see tg3_get_invariants()] */
3254 ) { 3254 ) {
3255 int skb_size; 3255 int skb_size;
3256 3256
3257 skb_size = tg3_alloc_rx_skb(tp, opaque_key, 3257 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3258 desc_idx, *post_ptr); 3258 desc_idx, *post_ptr);
3259 if (skb_size < 0) 3259 if (skb_size < 0)
3260 goto drop_it; 3260 goto drop_it;
3261 3261
3262 pci_unmap_single(tp->pdev, dma_addr, 3262 pci_unmap_single(tp->pdev, dma_addr,
3263 skb_size - tp->rx_offset, 3263 skb_size - tp->rx_offset,
3264 PCI_DMA_FROMDEVICE); 3264 PCI_DMA_FROMDEVICE);
3265 3265
3266 skb_put(skb, len); 3266 skb_put(skb, len);
3267 } else { 3267 } else {
3268 struct sk_buff *copy_skb; 3268 struct sk_buff *copy_skb;
3269 3269
3270 tg3_recycle_rx(tp, opaque_key, 3270 tg3_recycle_rx(tp, opaque_key,
3271 desc_idx, *post_ptr); 3271 desc_idx, *post_ptr);
3272 3272
3273 copy_skb = dev_alloc_skb(len + 2); 3273 copy_skb = dev_alloc_skb(len + 2);
3274 if (copy_skb == NULL) 3274 if (copy_skb == NULL)
3275 goto drop_it_no_recycle; 3275 goto drop_it_no_recycle;
3276 3276
3277 copy_skb->dev = tp->dev; 3277 copy_skb->dev = tp->dev;
3278 skb_reserve(copy_skb, 2); 3278 skb_reserve(copy_skb, 2);
3279 skb_put(copy_skb, len); 3279 skb_put(copy_skb, len);
3280 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 3280 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3281 memcpy(copy_skb->data, skb->data, len); 3281 memcpy(copy_skb->data, skb->data, len);
3282 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 3282 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3283 3283
3284 /* We'll reuse the original ring buffer. */ 3284 /* We'll reuse the original ring buffer. */
3285 skb = copy_skb; 3285 skb = copy_skb;
3286 } 3286 }
3287 3287
3288 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && 3288 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3289 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 3289 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3290 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 3290 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3291 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 3291 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3292 skb->ip_summed = CHECKSUM_UNNECESSARY; 3292 skb->ip_summed = CHECKSUM_UNNECESSARY;
3293 else 3293 else
3294 skb->ip_summed = CHECKSUM_NONE; 3294 skb->ip_summed = CHECKSUM_NONE;
3295 3295
3296 skb->protocol = eth_type_trans(skb, tp->dev); 3296 skb->protocol = eth_type_trans(skb, tp->dev);
3297 #if TG3_VLAN_TAG_USED 3297 #if TG3_VLAN_TAG_USED
3298 if (tp->vlgrp != NULL && 3298 if (tp->vlgrp != NULL &&
3299 desc->type_flags & RXD_FLAG_VLAN) { 3299 desc->type_flags & RXD_FLAG_VLAN) {
3300 tg3_vlan_rx(tp, skb, 3300 tg3_vlan_rx(tp, skb,
3301 desc->err_vlan & RXD_VLAN_MASK); 3301 desc->err_vlan & RXD_VLAN_MASK);
3302 } else 3302 } else
3303 #endif 3303 #endif
3304 netif_receive_skb(skb); 3304 netif_receive_skb(skb);
3305 3305
3306 tp->dev->last_rx = jiffies; 3306 tp->dev->last_rx = jiffies;
3307 received++; 3307 received++;
3308 budget--; 3308 budget--;
3309 3309
3310 next_pkt: 3310 next_pkt:
3311 (*post_ptr)++; 3311 (*post_ptr)++;
3312 next_pkt_nopost: 3312 next_pkt_nopost:
3313 sw_idx++; 3313 sw_idx++;
3314 sw_idx %= TG3_RX_RCB_RING_SIZE(tp); 3314 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3315 3315
3316 /* Refresh hw_idx to see if there is new work */ 3316 /* Refresh hw_idx to see if there is new work */
3317 if (sw_idx == hw_idx) { 3317 if (sw_idx == hw_idx) {
3318 hw_idx = tp->hw_status->idx[0].rx_producer; 3318 hw_idx = tp->hw_status->idx[0].rx_producer;
3319 rmb(); 3319 rmb();
3320 } 3320 }
3321 } 3321 }
3322 3322
3323 /* ACK the status ring. */ 3323 /* ACK the status ring. */
3324 tp->rx_rcb_ptr = sw_idx; 3324 tp->rx_rcb_ptr = sw_idx;
3325 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx); 3325 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3326 3326
3327 /* Refill RX ring(s). */ 3327 /* Refill RX ring(s). */
3328 if (work_mask & RXD_OPAQUE_RING_STD) { 3328 if (work_mask & RXD_OPAQUE_RING_STD) {
3329 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE; 3329 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3330 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 3330 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3331 sw_idx); 3331 sw_idx);
3332 } 3332 }
3333 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 3333 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3334 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE; 3334 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3335 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 3335 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3336 sw_idx); 3336 sw_idx);
3337 } 3337 }
3338 mmiowb(); 3338 mmiowb();
3339 3339
3340 return received; 3340 return received;
3341 } 3341 }
3342 3342
3343 static int tg3_poll(struct net_device *netdev, int *budget) 3343 static int tg3_poll(struct net_device *netdev, int *budget)
3344 { 3344 {
3345 struct tg3 *tp = netdev_priv(netdev); 3345 struct tg3 *tp = netdev_priv(netdev);
3346 struct tg3_hw_status *sblk = tp->hw_status; 3346 struct tg3_hw_status *sblk = tp->hw_status;
3347 int done; 3347 int done;
3348 3348
3349 /* handle link change and other phy events */ 3349 /* handle link change and other phy events */
3350 if (!(tp->tg3_flags & 3350 if (!(tp->tg3_flags &
3351 (TG3_FLAG_USE_LINKCHG_REG | 3351 (TG3_FLAG_USE_LINKCHG_REG |
3352 TG3_FLAG_POLL_SERDES))) { 3352 TG3_FLAG_POLL_SERDES))) {
3353 if (sblk->status & SD_STATUS_LINK_CHG) { 3353 if (sblk->status & SD_STATUS_LINK_CHG) {
3354 sblk->status = SD_STATUS_UPDATED | 3354 sblk->status = SD_STATUS_UPDATED |
3355 (sblk->status & ~SD_STATUS_LINK_CHG); 3355 (sblk->status & ~SD_STATUS_LINK_CHG);
3356 spin_lock(&tp->lock); 3356 spin_lock(&tp->lock);
3357 tg3_setup_phy(tp, 0); 3357 tg3_setup_phy(tp, 0);
3358 spin_unlock(&tp->lock); 3358 spin_unlock(&tp->lock);
3359 } 3359 }
3360 } 3360 }
3361 3361
3362 /* run TX completion thread */ 3362 /* run TX completion thread */
3363 if (sblk->idx[0].tx_consumer != tp->tx_cons) { 3363 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3364 tg3_tx(tp); 3364 tg3_tx(tp);
3365 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { 3365 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3366 netif_rx_complete(netdev); 3366 netif_rx_complete(netdev);
3367 schedule_work(&tp->reset_task); 3367 schedule_work(&tp->reset_task);
3368 return 0; 3368 return 0;
3369 } 3369 }
3370 } 3370 }
3371 3371
3372 /* run RX thread, within the bounds set by NAPI. 3372 /* run RX thread, within the bounds set by NAPI.
3373 * All RX "locking" is done by ensuring outside 3373 * All RX "locking" is done by ensuring outside
3374 * code synchronizes with dev->poll() 3374 * code synchronizes with dev->poll()
3375 */ 3375 */
3376 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { 3376 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3377 int orig_budget = *budget; 3377 int orig_budget = *budget;
3378 int work_done; 3378 int work_done;
3379 3379
3380 if (orig_budget > netdev->quota) 3380 if (orig_budget > netdev->quota)
3381 orig_budget = netdev->quota; 3381 orig_budget = netdev->quota;
3382 3382
3383 work_done = tg3_rx(tp, orig_budget); 3383 work_done = tg3_rx(tp, orig_budget);
3384 3384
3385 *budget -= work_done; 3385 *budget -= work_done;
3386 netdev->quota -= work_done; 3386 netdev->quota -= work_done;
3387 } 3387 }
3388 3388
3389 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { 3389 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3390 tp->last_tag = sblk->status_tag; 3390 tp->last_tag = sblk->status_tag;
3391 rmb(); 3391 rmb();
3392 } else 3392 } else
3393 sblk->status &= ~SD_STATUS_UPDATED; 3393 sblk->status &= ~SD_STATUS_UPDATED;
3394 3394
3395 /* if no more work, tell net stack and NIC we're done */ 3395 /* if no more work, tell net stack and NIC we're done */
3396 done = !tg3_has_work(tp); 3396 done = !tg3_has_work(tp);
3397 if (done) { 3397 if (done) {
3398 netif_rx_complete(netdev); 3398 netif_rx_complete(netdev);
3399 tg3_restart_ints(tp); 3399 tg3_restart_ints(tp);
3400 } 3400 }
3401 3401
3402 return (done ? 0 : 1); 3402 return (done ? 0 : 1);
3403 } 3403 }
3404 3404
3405 static void tg3_irq_quiesce(struct tg3 *tp) 3405 static void tg3_irq_quiesce(struct tg3 *tp)
3406 { 3406 {
3407 BUG_ON(tp->irq_sync); 3407 BUG_ON(tp->irq_sync);
3408 3408
3409 tp->irq_sync = 1; 3409 tp->irq_sync = 1;
3410 smp_mb(); 3410 smp_mb();
3411 3411
3412 synchronize_irq(tp->pdev->irq); 3412 synchronize_irq(tp->pdev->irq);
3413 } 3413 }
3414 3414
3415 static inline int tg3_irq_sync(struct tg3 *tp) 3415 static inline int tg3_irq_sync(struct tg3 *tp)
3416 { 3416 {
3417 return tp->irq_sync; 3417 return tp->irq_sync;
3418 } 3418 }
3419 3419
3420 /* Fully shutdown all tg3 driver activity elsewhere in the system. 3420 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3421 * If irq_sync is non-zero, then the IRQ handler must be synchronized 3421 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3422 * with as well. Most of the time, this is not necessary except when 3422 * with as well. Most of the time, this is not necessary except when
3423 * shutting down the device. 3423 * shutting down the device.
3424 */ 3424 */
3425 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) 3425 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3426 { 3426 {
3427 if (irq_sync) 3427 if (irq_sync)
3428 tg3_irq_quiesce(tp); 3428 tg3_irq_quiesce(tp);
3429 spin_lock_bh(&tp->lock); 3429 spin_lock_bh(&tp->lock);
3430 } 3430 }
3431 3431
3432 static inline void tg3_full_unlock(struct tg3 *tp) 3432 static inline void tg3_full_unlock(struct tg3 *tp)
3433 { 3433 {
3434 spin_unlock_bh(&tp->lock); 3434 spin_unlock_bh(&tp->lock);
3435 } 3435 }
3436 3436
3437 /* One-shot MSI handler - Chip automatically disables interrupt 3437 /* One-shot MSI handler - Chip automatically disables interrupt
3438 * after sending MSI so driver doesn't have to do it. 3438 * after sending MSI so driver doesn't have to do it.
3439 */ 3439 */
3440 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs) 3440 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3441 { 3441 {
3442 struct net_device *dev = dev_id; 3442 struct net_device *dev = dev_id;
3443 struct tg3 *tp = netdev_priv(dev); 3443 struct tg3 *tp = netdev_priv(dev);
3444 3444
3445 prefetch(tp->hw_status); 3445 prefetch(tp->hw_status);
3446 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 3446 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3447 3447
3448 if (likely(!tg3_irq_sync(tp))) 3448 if (likely(!tg3_irq_sync(tp)))
3449 netif_rx_schedule(dev); /* schedule NAPI poll */ 3449 netif_rx_schedule(dev); /* schedule NAPI poll */
3450 3450
3451 return IRQ_HANDLED; 3451 return IRQ_HANDLED;
3452 } 3452 }
3453 3453
3454 /* MSI ISR - No need to check for interrupt sharing and no need to 3454 /* MSI ISR - No need to check for interrupt sharing and no need to
3455 * flush status block and interrupt mailbox. PCI ordering rules 3455 * flush status block and interrupt mailbox. PCI ordering rules
3456 * guarantee that MSI will arrive after the status block. 3456 * guarantee that MSI will arrive after the status block.
3457 */ 3457 */
3458 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs) 3458 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3459 { 3459 {
3460 struct net_device *dev = dev_id; 3460 struct net_device *dev = dev_id;
3461 struct tg3 *tp = netdev_priv(dev); 3461 struct tg3 *tp = netdev_priv(dev);
3462 3462
3463 prefetch(tp->hw_status); 3463 prefetch(tp->hw_status);
3464 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 3464 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3465 /* 3465 /*
3466 * Writing any value to intr-mbox-0 clears PCI INTA# and 3466 * Writing any value to intr-mbox-0 clears PCI INTA# and
3467 * chip-internal interrupt pending events. 3467 * chip-internal interrupt pending events.
3468 * Writing non-zero to intr-mbox-0 additional tells the 3468 * Writing non-zero to intr-mbox-0 additional tells the
3469 * NIC to stop sending us irqs, engaging "in-intr-handler" 3469 * NIC to stop sending us irqs, engaging "in-intr-handler"
3470 * event coalescing. 3470 * event coalescing.
3471 */ 3471 */
3472 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 3472 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3473 if (likely(!tg3_irq_sync(tp))) 3473 if (likely(!tg3_irq_sync(tp)))
3474 netif_rx_schedule(dev); /* schedule NAPI poll */ 3474 netif_rx_schedule(dev); /* schedule NAPI poll */
3475 3475
3476 return IRQ_RETVAL(1); 3476 return IRQ_RETVAL(1);
3477 } 3477 }
3478 3478
3479 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) 3479 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3480 { 3480 {
3481 struct net_device *dev = dev_id; 3481 struct net_device *dev = dev_id;
3482 struct tg3 *tp = netdev_priv(dev); 3482 struct tg3 *tp = netdev_priv(dev);
3483 struct tg3_hw_status *sblk = tp->hw_status; 3483 struct tg3_hw_status *sblk = tp->hw_status;
3484 unsigned int handled = 1; 3484 unsigned int handled = 1;
3485 3485
3486 /* In INTx mode, it is possible for the interrupt to arrive at 3486 /* In INTx mode, it is possible for the interrupt to arrive at
3487 * the CPU before the status block posted prior to the interrupt. 3487 * the CPU before the status block posted prior to the interrupt.
3488 * Reading the PCI State register will confirm whether the 3488 * Reading the PCI State register will confirm whether the
3489 * interrupt is ours and will flush the status block. 3489 * interrupt is ours and will flush the status block.
3490 */ 3490 */
3491 if ((sblk->status & SD_STATUS_UPDATED) || 3491 if ((sblk->status & SD_STATUS_UPDATED) ||
3492 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 3492 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3493 /* 3493 /*
3494 * Writing any value to intr-mbox-0 clears PCI INTA# and 3494 * Writing any value to intr-mbox-0 clears PCI INTA# and
3495 * chip-internal interrupt pending events. 3495 * chip-internal interrupt pending events.
3496 * Writing non-zero to intr-mbox-0 additional tells the 3496 * Writing non-zero to intr-mbox-0 additional tells the
3497 * NIC to stop sending us irqs, engaging "in-intr-handler" 3497 * NIC to stop sending us irqs, engaging "in-intr-handler"
3498 * event coalescing. 3498 * event coalescing.
3499 */ 3499 */
3500 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3500 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3501 0x00000001); 3501 0x00000001);
3502 if (tg3_irq_sync(tp)) 3502 if (tg3_irq_sync(tp))
3503 goto out; 3503 goto out;
3504 sblk->status &= ~SD_STATUS_UPDATED; 3504 sblk->status &= ~SD_STATUS_UPDATED;
3505 if (likely(tg3_has_work(tp))) { 3505 if (likely(tg3_has_work(tp))) {
3506 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 3506 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3507 netif_rx_schedule(dev); /* schedule NAPI poll */ 3507 netif_rx_schedule(dev); /* schedule NAPI poll */
3508 } else { 3508 } else {
3509 /* No work, shared interrupt perhaps? re-enable 3509 /* No work, shared interrupt perhaps? re-enable
3510 * interrupts, and flush that PCI write 3510 * interrupts, and flush that PCI write
3511 */ 3511 */
3512 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3512 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3513 0x00000000); 3513 0x00000000);
3514 } 3514 }
3515 } else { /* shared interrupt */ 3515 } else { /* shared interrupt */
3516 handled = 0; 3516 handled = 0;
3517 } 3517 }
3518 out: 3518 out:
3519 return IRQ_RETVAL(handled); 3519 return IRQ_RETVAL(handled);
3520 } 3520 }
3521 3521
3522 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs) 3522 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3523 { 3523 {
3524 struct net_device *dev = dev_id; 3524 struct net_device *dev = dev_id;
3525 struct tg3 *tp = netdev_priv(dev); 3525 struct tg3 *tp = netdev_priv(dev);
3526 struct tg3_hw_status *sblk = tp->hw_status; 3526 struct tg3_hw_status *sblk = tp->hw_status;
3527 unsigned int handled = 1; 3527 unsigned int handled = 1;
3528 3528
3529 /* In INTx mode, it is possible for the interrupt to arrive at 3529 /* In INTx mode, it is possible for the interrupt to arrive at
3530 * the CPU before the status block posted prior to the interrupt. 3530 * the CPU before the status block posted prior to the interrupt.
3531 * Reading the PCI State register will confirm whether the 3531 * Reading the PCI State register will confirm whether the
3532 * interrupt is ours and will flush the status block. 3532 * interrupt is ours and will flush the status block.
3533 */ 3533 */
3534 if ((sblk->status_tag != tp->last_tag) || 3534 if ((sblk->status_tag != tp->last_tag) ||
3535 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { 3535 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3536 /* 3536 /*
3537 * writing any value to intr-mbox-0 clears PCI INTA# and 3537 * writing any value to intr-mbox-0 clears PCI INTA# and
3538 * chip-internal interrupt pending events. 3538 * chip-internal interrupt pending events.
3539 * writing non-zero to intr-mbox-0 additional tells the 3539 * writing non-zero to intr-mbox-0 additional tells the
3540 * NIC to stop sending us irqs, engaging "in-intr-handler" 3540 * NIC to stop sending us irqs, engaging "in-intr-handler"
3541 * event coalescing. 3541 * event coalescing.
3542 */ 3542 */
3543 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3543 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3544 0x00000001); 3544 0x00000001);
3545 if (tg3_irq_sync(tp)) 3545 if (tg3_irq_sync(tp))
3546 goto out; 3546 goto out;
3547 if (netif_rx_schedule_prep(dev)) { 3547 if (netif_rx_schedule_prep(dev)) {
3548 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 3548 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3549 /* Update last_tag to mark that this status has been 3549 /* Update last_tag to mark that this status has been
3550 * seen. Because interrupt may be shared, we may be 3550 * seen. Because interrupt may be shared, we may be
3551 * racing with tg3_poll(), so only update last_tag 3551 * racing with tg3_poll(), so only update last_tag
3552 * if tg3_poll() is not scheduled. 3552 * if tg3_poll() is not scheduled.
3553 */ 3553 */
3554 tp->last_tag = sblk->status_tag; 3554 tp->last_tag = sblk->status_tag;
3555 __netif_rx_schedule(dev); 3555 __netif_rx_schedule(dev);
3556 } 3556 }
3557 } else { /* shared interrupt */ 3557 } else { /* shared interrupt */
3558 handled = 0; 3558 handled = 0;
3559 } 3559 }
3560 out: 3560 out:
3561 return IRQ_RETVAL(handled); 3561 return IRQ_RETVAL(handled);
3562 } 3562 }
3563 3563
/* ISR for interrupt test.
 *
 * Minimal interrupt handler installed only while the driver probes
 * whether interrupt delivery works.  It claims the interrupt when
 * either the status block says there is an update pending or the PCI
 * state register shows the INTx line asserted, and acknowledges by
 * writing 1 to the interrupt mailbox; otherwise it reports "not ours".
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id,
		struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/* Ack the interrupt so the line is deasserted. */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3580 3580
3581 static int tg3_init_hw(struct tg3 *, int); 3581 static int tg3_init_hw(struct tg3 *, int);
3582 static int tg3_halt(struct tg3 *, int, int); 3582 static int tg3_halt(struct tg3 *, int, int);
3583 3583
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: drive the normal interrupt handler by hand so that
 * netconsole et al. can make progress when interrupts may not be
 * delivered.  Passes a NULL pt_regs since none are available here.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3592 3592
/* Workqueue task (scheduled from tg3_tx_timeout and friends) that
 * performs a full chip reset in process context: stop the netif paths,
 * halt the chip, re-initialize the hardware and restart the interface.
 *
 * TG3_FLAG_IN_RESET_TASK is set for the duration so other paths can
 * tell a reset is in flight.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* Device went down between scheduling and execution: nothing
	 * to reset, just clear the in-progress flag and bail.
	 */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Quiesce NAPI/TX outside the lock, then retake it with IRQs
	 * disabled (second argument 1) for the actual reset.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Sample and clear the restart-timer request atomically with
	 * respect to the full lock.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* TX recovery pending: fall back to the flushing mailbox write
	 * methods and re-enable write-reorder workaround before the
	 * re-init.  NOTE(review): presumably undoing an optimization
	 * that was implicated in the TX hang — confirm against the
	 * code that sets TG3_FLAG_TX_RECOVERY_PENDING.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_init_hw(tp, 1);

	tg3_netif_start(tp);

	/* Re-arm the driver timer immediately if someone asked for it
	 * while the reset was pending.
	 */
	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3635 3635
3636 static void tg3_tx_timeout(struct net_device *dev) 3636 static void tg3_tx_timeout(struct net_device *dev)
3637 { 3637 {
3638 struct tg3 *tp = netdev_priv(dev); 3638 struct tg3 *tp = netdev_priv(dev);
3639 3639
3640 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", 3640 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3641 dev->name); 3641 dev->name);
3642 3642
3643 schedule_work(&tp->reset_task); 3643 schedule_work(&tp->reset_task);
3644 } 3644 }
3645 3645
3646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 3646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) 3647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3648 { 3648 {
3649 u32 base = (u32) mapping & 0xffffffff; 3649 u32 base = (u32) mapping & 0xffffffff;
3650 3650
3651 return ((base > 0xffffdcc0) && 3651 return ((base > 0xffffdcc0) &&
3652 (base + len + 8 < base)); 3652 (base + len + 8 < base));
3653 } 3653 }
3654 3654
3655 /* Test for DMA addresses > 40-bit */ 3655 /* Test for DMA addresses > 40-bit */
3656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 3656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3657 int len) 3657 int len)
3658 { 3658 {
3659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 3659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3660 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) 3660 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3661 return (((u64) mapping + len) > DMA_40BIT_MASK); 3661 return (((u64) mapping + len) > DMA_40BIT_MASK);
3662 return 0; 3662 return 0;
3663 #else 3663 #else
3664 return 0; 3664 return 0;
3665 #endif 3665 #endif
3666 } 3666 }
3667 3667
3668 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); 3668 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3669 3669
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize @skb into a freshly allocated copy, map it as a single
 * buffer and re-emit one TX descriptor at *@start, then unmap and
 * release the original multi-descriptor mapping between *@start and
 * @last_plus_one.  Returns 0 on success, -1 when the copy could not
 * be allocated or the replacement mapping still crosses a 4G boundary
 * (in which case the packet is dropped).  The original skb is always
 * freed; on success ownership of the ring slot passes to new_skb.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* Single descriptor: is_end bit set, mss in the
			 * high bits as tg3_set_txd expects.
			 */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  Walk every slot the
	 * original skb occupied: the head (i == 0) was mapped with
	 * pci_map_single over skb_headlen, subsequent slots over the
	 * page fragments.  Slot 0 is repointed at new_skb (or NULL on
	 * failure); the rest are cleared.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* Original skb is no longer referenced by the ring. */
	dev_kfree_skb(skb);

	return ret;
}
3727 3727
3728 static void tg3_set_txd(struct tg3 *tp, int entry, 3728 static void tg3_set_txd(struct tg3 *tp, int entry,
3729 dma_addr_t mapping, int len, u32 flags, 3729 dma_addr_t mapping, int len, u32 flags,
3730 u32 mss_and_is_end) 3730 u32 mss_and_is_end)
3731 { 3731 {
3732 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry]; 3732 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3733 int is_end = (mss_and_is_end & 0x1); 3733 int is_end = (mss_and_is_end & 0x1);
3734 u32 mss = (mss_and_is_end >> 1); 3734 u32 mss = (mss_and_is_end >> 1);
3735 u32 vlan_tag = 0; 3735 u32 vlan_tag = 0;
3736 3736
3737 if (is_end) 3737 if (is_end)
3738 flags |= TXD_FLAG_END; 3738 flags |= TXD_FLAG_END;
3739 if (flags & TXD_FLAG_VLAN) { 3739 if (flags & TXD_FLAG_VLAN) {
3740 vlan_tag = flags >> 16; 3740 vlan_tag = flags >> 16;
3741 flags &= 0xffff; 3741 flags &= 0xffff;
3742 } 3742 }
3743 vlan_tag |= (mss << TXD_MSS_SHIFT); 3743 vlan_tag |= (mss << TXD_MSS_SHIFT);
3744 3744
3745 txd->addr_hi = ((u64) mapping >> 32); 3745 txd->addr_hi = ((u64) mapping >> 32);
3746 txd->addr_lo = ((u64) mapping & 0xffffffff); 3746 txd->addr_lo = ((u64) mapping & 0xffffffff);
3747 txd->len_flags = (len << TXD_LEN_SHIFT) | flags; 3747 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3748 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; 3748 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3749 } 3749 }
3750 3750
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Fast-path transmit: checks ring space, builds checksum/TSO/VLAN
 * flags, maps the head and each page fragment, writes one descriptor
 * per piece, then rings the TX producer mailbox.  Returns
 * NETDEV_TX_OK (the skb is consumed or dropped) or NETDEV_TX_BUSY
 * when the ring is unexpectedly full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	/* gso_size != 0 on an oversized frame means TSO is requested. */
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* Headers are modified below, so they must be private. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Pseudo-header fixups for hardware TSO: zero the IP
		 * checksum, set tot_len to one segment's worth, and
		 * zero the TCP checksum (HW_TSO_2 computes it).
		 */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);

		skb->h.th->check = 0;

		/* Header length is encoded in the upper bits of the
		 * mss value handed to tg3_set_txd.
		 */
		mss |= (ip_tcp_len + tcp_opt_len) << 9;
	}
	else if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
	mss = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
	/* VLAN tag travels in the upper 16 bits of base_flags. */
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only slot 0 owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	/* Ring nearly full: stop the queue, then re-check under
	 * tx_lock in case reclaim freed space meanwhile.
	 */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
		spin_lock(&tp->tx_lock);
		netif_stop_queue(dev);
		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}

out_unlock:
	/* Order the mailbox write before any later MMIO. */
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3872 3872
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Same structure as tg3_start_xmit, plus: every mapping is tested for
 * crossing a 4GB boundary or exceeding 40 bits of DMA address; if any
 * descriptor would trip the hardware bug, the packet is re-emitted as
 * a single linear copy via tigon3_dma_hwbug_workaround.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either. Rejoice!
	 */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	/* gso_size != 0 on an oversized frame means TSO is requested. */
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* Headers are modified below, so they must be private. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		/* Hardware TSO computes the TCP checksum itself;
		 * firmware TSO wants the pseudo-header checksum
		 * pre-seeded and the CSUM flag cleared in HW case only.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* IP/TCP option words are encoded in different fields
		 * depending on chip generation.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	/* VLAN tag travels in the upper 16 bits of base_flags. */
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only slot 0 owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			/* Firmware TSO chips must not see the mss bits
			 * on continuation descriptors.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	/* Ring nearly full: stop the queue, then re-check under
	 * tx_lock in case reclaim freed space meanwhile.
	 */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
		spin_lock(&tp->tx_lock);
		netif_stop_queue(dev);
		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
		spin_unlock(&tp->tx_lock);
	}

out_unlock:
	/* Order the mailbox write before any later MMIO. */
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4050 4050
4051 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 4051 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4052 int new_mtu) 4052 int new_mtu)
4053 { 4053 {
4054 dev->mtu = new_mtu; 4054 dev->mtu = new_mtu;
4055 4055
4056 if (new_mtu > ETH_DATA_LEN) { 4056 if (new_mtu > ETH_DATA_LEN) {
4057 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 4057 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4058 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 4058 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4059 ethtool_op_set_tso(dev, 0); 4059 ethtool_op_set_tso(dev, 0);
4060 } 4060 }
4061 else 4061 else
4062 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 4062 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4063 } else { 4063 } else {
4064 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 4064 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4065 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 4065 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4066 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; 4066 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4067 } 4067 }
4068 } 4068 }
4069 4069
/* net_device change_mtu hook.
 *
 * Validates the requested MTU, then — if the interface is up — stops
 * traffic, halts the chip, records the MTU (tg3_set_mtu) and brings
 * the hardware back up, since ring/TSO configuration depends on the
 * frame size.  Returns 0 on success, -EINVAL for an out-of-range MTU.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	/* Full lock with IRQs disabled for the halt/re-init cycle. */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_init_hw(tp, 0);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
4101 4101
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 *
 * Walks the standard RX ring, the jumbo RX ring, and the TX ring,
 * unmapping each outstanding DMA buffer and releasing its skb.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: each slot was mapped over
	 * rx_pkt_buf_sz - rx_offset bytes.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: fixed RX_JUMBO_PKT_BUF_SZ sized buffers. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: slot holding the skb covers the linear head
	 * (skb_headlen); the following nr_frags slots cover the page
	 * fragments, hence the inner loop advancing i as well.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4173 4173
4174 /* Initialize tx/rx rings for packet processing. 4174 /* Initialize tx/rx rings for packet processing.
4175 * 4175 *
4176 * The chip has been shut down and the driver detached from 4176 * The chip has been shut down and the driver detached from
4177 * the networking, so no interrupts or new tx packets will 4177 * the networking, so no interrupts or new tx packets will
4178 * end up in the driver. tp->{tx,}lock are held and thus 4178 * end up in the driver. tp->{tx,}lock are held and thus
4179 * we may not sleep. 4179 * we may not sleep.
4180 */ 4180 */
4181 static void tg3_init_rings(struct tg3 *tp) 4181 static void tg3_init_rings(struct tg3 *tp)
4182 { 4182 {
4183 u32 i; 4183 u32 i;
4184 4184
4185 /* Free up all the SKBs. */ 4185 /* Free up all the SKBs. */
4186 tg3_free_rings(tp); 4186 tg3_free_rings(tp);
4187 4187
4188 /* Zero out all descriptors. */ 4188 /* Zero out all descriptors. */
4189 memset(tp->rx_std, 0, TG3_RX_RING_BYTES); 4189 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4190 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES); 4190 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4191 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 4191 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4192 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); 4192 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4193 4193
4194 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; 4194 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4195 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && 4195 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4196 (tp->dev->mtu > ETH_DATA_LEN)) 4196 (tp->dev->mtu > ETH_DATA_LEN))
4197 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; 4197 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4198 4198
4199 /* Initialize invariants of the rings, we only set this 4199 /* Initialize invariants of the rings, we only set this
4200 * stuff once. This works because the card does not 4200 * stuff once. This works because the card does not
4201 * write into the rx buffer posting rings. 4201 * write into the rx buffer posting rings.
4202 */ 4202 */
4203 for (i = 0; i < TG3_RX_RING_SIZE; i++) { 4203 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4204 struct tg3_rx_buffer_desc *rxd; 4204 struct tg3_rx_buffer_desc *rxd;
4205 4205
4206 rxd = &tp->rx_std[i]; 4206 rxd = &tp->rx_std[i];
4207 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64) 4207 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4208 << RXD_LEN_SHIFT; 4208 << RXD_LEN_SHIFT;
4209 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); 4209 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4210 rxd->opaque = (RXD_OPAQUE_RING_STD | 4210 rxd->opaque = (RXD_OPAQUE_RING_STD |
4211 (i << RXD_OPAQUE_INDEX_SHIFT)); 4211 (i << RXD_OPAQUE_INDEX_SHIFT));
4212 } 4212 }
4213 4213
4214 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 4214 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4215 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 4215 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4216 struct tg3_rx_buffer_desc *rxd; 4216 struct tg3_rx_buffer_desc *rxd;
4217 4217
4218 rxd = &tp->rx_jumbo[i]; 4218 rxd = &tp->rx_jumbo[i];
4219 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64) 4219 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4220 << RXD_LEN_SHIFT; 4220 << RXD_LEN_SHIFT;
4221 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | 4221 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4222 RXD_FLAG_JUMBO; 4222 RXD_FLAG_JUMBO;
4223 rxd->opaque = (RXD_OPAQUE_RING_JUMBO | 4223 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4224 (i << RXD_OPAQUE_INDEX_SHIFT)); 4224 (i << RXD_OPAQUE_INDEX_SHIFT));
4225 } 4225 }
4226 } 4226 }
4227 4227
4228 /* Now allocate fresh SKBs for each rx ring. */ 4228 /* Now allocate fresh SKBs for each rx ring. */
4229 for (i = 0; i < tp->rx_pending; i++) { 4229 for (i = 0; i < tp->rx_pending; i++) {
4230 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, 4230 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4231 -1, i) < 0) 4231 -1, i) < 0)
4232 break; 4232 break;
4233 } 4233 }
4234 4234
4235 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 4235 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4236 for (i = 0; i < tp->rx_jumbo_pending; i++) { 4236 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4237 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, 4237 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4238 -1, i) < 0) 4238 -1, i) < 0)
4239 break; 4239 break;
4240 } 4240 }
4241 } 4241 }
4242 } 4242 }
4243 4243
4244 /* 4244 /*
4245 * Must not be invoked with interrupt sources disabled and 4245 * Must not be invoked with interrupt sources disabled and
4246 * the hardware shutdown down. 4246 * the hardware shutdown down.
4247 */ 4247 */
4248 static void tg3_free_consistent(struct tg3 *tp) 4248 static void tg3_free_consistent(struct tg3 *tp)
4249 { 4249 {
4250 kfree(tp->rx_std_buffers); 4250 kfree(tp->rx_std_buffers);
4251 tp->rx_std_buffers = NULL; 4251 tp->rx_std_buffers = NULL;
4252 if (tp->rx_std) { 4252 if (tp->rx_std) {
4253 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 4253 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4254 tp->rx_std, tp->rx_std_mapping); 4254 tp->rx_std, tp->rx_std_mapping);
4255 tp->rx_std = NULL; 4255 tp->rx_std = NULL;
4256 } 4256 }
4257 if (tp->rx_jumbo) { 4257 if (tp->rx_jumbo) {
4258 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, 4258 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4259 tp->rx_jumbo, tp->rx_jumbo_mapping); 4259 tp->rx_jumbo, tp->rx_jumbo_mapping);
4260 tp->rx_jumbo = NULL; 4260 tp->rx_jumbo = NULL;
4261 } 4261 }
4262 if (tp->rx_rcb) { 4262 if (tp->rx_rcb) {
4263 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 4263 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4264 tp->rx_rcb, tp->rx_rcb_mapping); 4264 tp->rx_rcb, tp->rx_rcb_mapping);
4265 tp->rx_rcb = NULL; 4265 tp->rx_rcb = NULL;
4266 } 4266 }
4267 if (tp->tx_ring) { 4267 if (tp->tx_ring) {
4268 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, 4268 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4269 tp->tx_ring, tp->tx_desc_mapping); 4269 tp->tx_ring, tp->tx_desc_mapping);
4270 tp->tx_ring = NULL; 4270 tp->tx_ring = NULL;
4271 } 4271 }
4272 if (tp->hw_status) { 4272 if (tp->hw_status) {
4273 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 4273 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4274 tp->hw_status, tp->status_mapping); 4274 tp->hw_status, tp->status_mapping);
4275 tp->hw_status = NULL; 4275 tp->hw_status = NULL;
4276 } 4276 }
4277 if (tp->hw_stats) { 4277 if (tp->hw_stats) {
4278 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), 4278 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4279 tp->hw_stats, tp->stats_mapping); 4279 tp->hw_stats, tp->stats_mapping);
4280 tp->hw_stats = NULL; 4280 tp->hw_stats = NULL;
4281 } 4281 }
4282 } 4282 }
4283 4283
4284 /* 4284 /*
4285 * Must not be invoked with interrupt sources disabled and 4285 * Must not be invoked with interrupt sources disabled and
4286 * the hardware shutdown down. Can sleep. 4286 * the hardware shutdown down. Can sleep.
4287 */ 4287 */
4288 static int tg3_alloc_consistent(struct tg3 *tp) 4288 static int tg3_alloc_consistent(struct tg3 *tp)
4289 { 4289 {
4290 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) * 4290 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4291 (TG3_RX_RING_SIZE + 4291 (TG3_RX_RING_SIZE +
4292 TG3_RX_JUMBO_RING_SIZE)) + 4292 TG3_RX_JUMBO_RING_SIZE)) +
4293 (sizeof(struct tx_ring_info) * 4293 (sizeof(struct tx_ring_info) *
4294 TG3_TX_RING_SIZE), 4294 TG3_TX_RING_SIZE),
4295 GFP_KERNEL); 4295 GFP_KERNEL);
4296 if (!tp->rx_std_buffers) 4296 if (!tp->rx_std_buffers)
4297 return -ENOMEM; 4297 return -ENOMEM;
4298 4298
4299 memset(tp->rx_std_buffers, 0, 4299 memset(tp->rx_std_buffers, 0,
4300 (sizeof(struct ring_info) * 4300 (sizeof(struct ring_info) *
4301 (TG3_RX_RING_SIZE + 4301 (TG3_RX_RING_SIZE +
4302 TG3_RX_JUMBO_RING_SIZE)) + 4302 TG3_RX_JUMBO_RING_SIZE)) +
4303 (sizeof(struct tx_ring_info) * 4303 (sizeof(struct tx_ring_info) *
4304 TG3_TX_RING_SIZE)); 4304 TG3_TX_RING_SIZE));
4305 4305
4306 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE]; 4306 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4307 tp->tx_buffers = (struct tx_ring_info *) 4307 tp->tx_buffers = (struct tx_ring_info *)
4308 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE]; 4308 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4309 4309
4310 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, 4310 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4311 &tp->rx_std_mapping); 4311 &tp->rx_std_mapping);
4312 if (!tp->rx_std) 4312 if (!tp->rx_std)
4313 goto err_out; 4313 goto err_out;
4314 4314
4315 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, 4315 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4316 &tp->rx_jumbo_mapping); 4316 &tp->rx_jumbo_mapping);
4317 4317
4318 if (!tp->rx_jumbo) 4318 if (!tp->rx_jumbo)
4319 goto err_out; 4319 goto err_out;
4320 4320
4321 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 4321 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4322 &tp->rx_rcb_mapping); 4322 &tp->rx_rcb_mapping);
4323 if (!tp->rx_rcb) 4323 if (!tp->rx_rcb)
4324 goto err_out; 4324 goto err_out;
4325 4325
4326 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES, 4326 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4327 &tp->tx_desc_mapping); 4327 &tp->tx_desc_mapping);
4328 if (!tp->tx_ring) 4328 if (!tp->tx_ring)
4329 goto err_out; 4329 goto err_out;
4330 4330
4331 tp->hw_status = pci_alloc_consistent(tp->pdev, 4331 tp->hw_status = pci_alloc_consistent(tp->pdev,
4332 TG3_HW_STATUS_SIZE, 4332 TG3_HW_STATUS_SIZE,
4333 &tp->status_mapping); 4333 &tp->status_mapping);
4334 if (!tp->hw_status) 4334 if (!tp->hw_status)
4335 goto err_out; 4335 goto err_out;
4336 4336
4337 tp->hw_stats = pci_alloc_consistent(tp->pdev, 4337 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4338 sizeof(struct tg3_hw_stats), 4338 sizeof(struct tg3_hw_stats),
4339 &tp->stats_mapping); 4339 &tp->stats_mapping);
4340 if (!tp->hw_stats) 4340 if (!tp->hw_stats)
4341 goto err_out; 4341 goto err_out;
4342 4342
4343 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); 4343 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4344 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); 4344 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4345 4345
4346 return 0; 4346 return 0;
4347 4347
4348 err_out: 4348 err_out:
4349 tg3_free_consistent(tp); 4349 tg3_free_consistent(tp);
4350 return -ENOMEM; 4350 return -ENOMEM;
4351 } 4351 }
4352 4352
4353 #define MAX_WAIT_CNT 1000 4353 #define MAX_WAIT_CNT 1000
4354 4354
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs:        MMIO offset of the block's mode register.
 * @enable_bit: the ENABLE bit for that block within the register.
 * @silent:     when non-zero, suppress the timeout error message.
 *
 * Returns 0 on success (or for blocks that cannot be individually
 * disabled on 5705-plus chips), -ENODEV if the enable bit did not
 * clear within MAX_WAIT_CNT polls of 100us each.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		};
	}

	/* Clear the block's enable bit... */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* ...and poll until the hardware acknowledges by clearing it. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
4400 4400
/* Quiesce the chip: disable interrupts, stop the receive path, the
 * transmit path, then the host-side blocks, reset the FTQs and clear
 * the status/statistics blocks.  Failures from individual block
 * stops are OR-ed into the return value so the full shutdown
 * sequence always runs.  tp->lock is held.
 *
 * Returns 0 if every block stopped cleanly, a negative value
 * otherwise.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and wait for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and mailbox-free blocks. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the DMA status and statistics blocks, if allocated. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4463 4463
4464 /* tp->lock is held. */ 4464 /* tp->lock is held. */
4465 static int tg3_nvram_lock(struct tg3 *tp) 4465 static int tg3_nvram_lock(struct tg3 *tp)
4466 { 4466 {
4467 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 4467 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4468 int i; 4468 int i;
4469 4469
4470 if (tp->nvram_lock_cnt == 0) { 4470 if (tp->nvram_lock_cnt == 0) {
4471 tw32(NVRAM_SWARB, SWARB_REQ_SET1); 4471 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4472 for (i = 0; i < 8000; i++) { 4472 for (i = 0; i < 8000; i++) {
4473 if (tr32(NVRAM_SWARB) & SWARB_GNT1) 4473 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4474 break; 4474 break;
4475 udelay(20); 4475 udelay(20);
4476 } 4476 }
4477 if (i == 8000) { 4477 if (i == 8000) {
4478 tw32(NVRAM_SWARB, SWARB_REQ_CLR1); 4478 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4479 return -ENODEV; 4479 return -ENODEV;
4480 } 4480 }
4481 } 4481 }
4482 tp->nvram_lock_cnt++; 4482 tp->nvram_lock_cnt++;
4483 } 4483 }
4484 return 0; 4484 return 0;
4485 } 4485 }
4486 4486
4487 /* tp->lock is held. */ 4487 /* tp->lock is held. */
4488 static void tg3_nvram_unlock(struct tg3 *tp) 4488 static void tg3_nvram_unlock(struct tg3 *tp)
4489 { 4489 {
4490 if (tp->tg3_flags & TG3_FLAG_NVRAM) { 4490 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4491 if (tp->nvram_lock_cnt > 0) 4491 if (tp->nvram_lock_cnt > 0)
4492 tp->nvram_lock_cnt--; 4492 tp->nvram_lock_cnt--;
4493 if (tp->nvram_lock_cnt == 0) 4493 if (tp->nvram_lock_cnt == 0)
4494 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); 4494 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4495 } 4495 }
4496 } 4496 }
4497 4497
4498 /* tp->lock is held. */ 4498 /* tp->lock is held. */
4499 static void tg3_enable_nvram_access(struct tg3 *tp) 4499 static void tg3_enable_nvram_access(struct tg3 *tp)
4500 { 4500 {
4501 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 4501 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4502 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 4502 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4503 u32 nvaccess = tr32(NVRAM_ACCESS); 4503 u32 nvaccess = tr32(NVRAM_ACCESS);
4504 4504
4505 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 4505 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4506 } 4506 }
4507 } 4507 }
4508 4508
4509 /* tp->lock is held. */ 4509 /* tp->lock is held. */
4510 static void tg3_disable_nvram_access(struct tg3 *tp) 4510 static void tg3_disable_nvram_access(struct tg3 *tp)
4511 { 4511 {
4512 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 4512 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4513 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 4513 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4514 u32 nvaccess = tr32(NVRAM_ACCESS); 4514 u32 nvaccess = tr32(NVRAM_ACCESS);
4515 4515
4516 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 4516 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4517 } 4517 }
4518 } 4518 }
4519 4519
4520 /* tp->lock is held. */ 4520 /* tp->lock is held. */
4521 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) 4521 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4522 { 4522 {
4523 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, 4523 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4524 NIC_SRAM_FIRMWARE_MBOX_MAGIC1); 4524 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4525 4525
4526 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 4526 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4527 switch (kind) { 4527 switch (kind) {
4528 case RESET_KIND_INIT: 4528 case RESET_KIND_INIT:
4529 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4529 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4530 DRV_STATE_START); 4530 DRV_STATE_START);
4531 break; 4531 break;
4532 4532
4533 case RESET_KIND_SHUTDOWN: 4533 case RESET_KIND_SHUTDOWN:
4534 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4534 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4535 DRV_STATE_UNLOAD); 4535 DRV_STATE_UNLOAD);
4536 break; 4536 break;
4537 4537
4538 case RESET_KIND_SUSPEND: 4538 case RESET_KIND_SUSPEND:
4539 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4539 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4540 DRV_STATE_SUSPEND); 4540 DRV_STATE_SUSPEND);
4541 break; 4541 break;
4542 4542
4543 default: 4543 default:
4544 break; 4544 break;
4545 }; 4545 };
4546 } 4546 }
4547 } 4547 }
4548 4548
4549 /* tp->lock is held. */ 4549 /* tp->lock is held. */
4550 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) 4550 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4551 { 4551 {
4552 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { 4552 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4553 switch (kind) { 4553 switch (kind) {
4554 case RESET_KIND_INIT: 4554 case RESET_KIND_INIT:
4555 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4555 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4556 DRV_STATE_START_DONE); 4556 DRV_STATE_START_DONE);
4557 break; 4557 break;
4558 4558
4559 case RESET_KIND_SHUTDOWN: 4559 case RESET_KIND_SHUTDOWN:
4560 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4560 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4561 DRV_STATE_UNLOAD_DONE); 4561 DRV_STATE_UNLOAD_DONE);
4562 break; 4562 break;
4563 4563
4564 default: 4564 default:
4565 break; 4565 break;
4566 }; 4566 };
4567 } 4567 }
4568 } 4568 }
4569 4569
4570 /* tp->lock is held. */ 4570 /* tp->lock is held. */
4571 static void tg3_write_sig_legacy(struct tg3 *tp, int kind) 4571 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4572 { 4572 {
4573 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 4573 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4574 switch (kind) { 4574 switch (kind) {
4575 case RESET_KIND_INIT: 4575 case RESET_KIND_INIT:
4576 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4576 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4577 DRV_STATE_START); 4577 DRV_STATE_START);
4578 break; 4578 break;
4579 4579
4580 case RESET_KIND_SHUTDOWN: 4580 case RESET_KIND_SHUTDOWN:
4581 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4581 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4582 DRV_STATE_UNLOAD); 4582 DRV_STATE_UNLOAD);
4583 break; 4583 break;
4584 4584
4585 case RESET_KIND_SUSPEND: 4585 case RESET_KIND_SUSPEND:
4586 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, 4586 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4587 DRV_STATE_SUSPEND); 4587 DRV_STATE_SUSPEND);
4588 break; 4588 break;
4589 4589
4590 default: 4590 default:
4591 break; 4591 break;
4592 }; 4592 };
4593 } 4593 }
4594 } 4594 }
4595 4595
4596 static void tg3_stop_fw(struct tg3 *); 4596 static void tg3_stop_fw(struct tg3 *);
4597 4597
4598 /* tp->lock is held. */ 4598 /* tp->lock is held. */
4599 static int tg3_chip_reset(struct tg3 *tp) 4599 static int tg3_chip_reset(struct tg3 *tp)
4600 { 4600 {
4601 u32 val; 4601 u32 val;
4602 void (*write_op)(struct tg3 *, u32, u32); 4602 void (*write_op)(struct tg3 *, u32, u32);
4603 int i; 4603 int i;
4604 4604
4605 tg3_nvram_lock(tp); 4605 tg3_nvram_lock(tp);
4606 4606
4607 /* No matching tg3_nvram_unlock() after this because 4607 /* No matching tg3_nvram_unlock() after this because
4608 * chip reset below will undo the nvram lock. 4608 * chip reset below will undo the nvram lock.
4609 */ 4609 */
4610 tp->nvram_lock_cnt = 0; 4610 tp->nvram_lock_cnt = 0;
4611 4611
4612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 4612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 4613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 4614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4615 tw32(GRC_FASTBOOT_PC, 0); 4615 tw32(GRC_FASTBOOT_PC, 0);
4616 4616
4617 /* 4617 /*
4618 * We must avoid the readl() that normally takes place. 4618 * We must avoid the readl() that normally takes place.
4619 * It locks machines, causes machine checks, and other 4619 * It locks machines, causes machine checks, and other
4620 * fun things. So, temporarily disable the 5701 4620 * fun things. So, temporarily disable the 5701
4621 * hardware workaround, while we do the reset. 4621 * hardware workaround, while we do the reset.
4622 */ 4622 */
4623 write_op = tp->write32; 4623 write_op = tp->write32;
4624 if (write_op == tg3_write_flush_reg32) 4624 if (write_op == tg3_write_flush_reg32)
4625 tp->write32 = tg3_write32; 4625 tp->write32 = tg3_write32;
4626 4626
4627 /* do the reset */ 4627 /* do the reset */
4628 val = GRC_MISC_CFG_CORECLK_RESET; 4628 val = GRC_MISC_CFG_CORECLK_RESET;
4629 4629
4630 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 4630 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4631 if (tr32(0x7e2c) == 0x60) { 4631 if (tr32(0x7e2c) == 0x60) {
4632 tw32(0x7e2c, 0x20); 4632 tw32(0x7e2c, 0x20);
4633 } 4633 }
4634 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { 4634 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4635 tw32(GRC_MISC_CFG, (1 << 29)); 4635 tw32(GRC_MISC_CFG, (1 << 29));
4636 val |= (1 << 29); 4636 val |= (1 << 29);
4637 } 4637 }
4638 } 4638 }
4639 4639
4640 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 4640 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4641 val |= GRC_MISC_CFG_KEEP_GPHY_POWER; 4641 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4642 tw32(GRC_MISC_CFG, val); 4642 tw32(GRC_MISC_CFG, val);
4643 4643
4644 /* restore 5701 hardware bug workaround write method */ 4644 /* restore 5701 hardware bug workaround write method */
4645 tp->write32 = write_op; 4645 tp->write32 = write_op;
4646 4646
4647 /* Unfortunately, we have to delay before the PCI read back. 4647 /* Unfortunately, we have to delay before the PCI read back.
4648 * Some 575X chips even will not respond to a PCI cfg access 4648 * Some 575X chips even will not respond to a PCI cfg access
4649 * when the reset command is given to the chip. 4649 * when the reset command is given to the chip.
4650 * 4650 *
4651 * How do these hardware designers expect things to work 4651 * How do these hardware designers expect things to work
4652 * properly if the PCI write is posted for a long period 4652 * properly if the PCI write is posted for a long period
4653 * of time? It is always necessary to have some method by 4653 * of time? It is always necessary to have some method by
4654 * which a register read back can occur to push the write 4654 * which a register read back can occur to push the write
4655 * out which does the reset. 4655 * out which does the reset.
4656 * 4656 *
4657 * For most tg3 variants the trick below was working. 4657 * For most tg3 variants the trick below was working.
4658 * Ho hum... 4658 * Ho hum...
4659 */ 4659 */
4660 udelay(120); 4660 udelay(120);
4661 4661
4662 /* Flush PCI posted writes. The normal MMIO registers 4662 /* Flush PCI posted writes. The normal MMIO registers
4663 * are inaccessible at this time so this is the only 4663 * are inaccessible at this time so this is the only
4664 * way to make this reliably (actually, this is no longer 4664 * way to make this reliably (actually, this is no longer
4665 * the case, see above). I tried to use indirect 4665 * the case, see above). I tried to use indirect
4666 * register read/write but this upset some 5701 variants. 4666 * register read/write but this upset some 5701 variants.
4667 */ 4667 */
4668 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); 4668 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4669 4669
4670 udelay(120); 4670 udelay(120);
4671 4671
4672 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 4672 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4673 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { 4673 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4674 int i; 4674 int i;
4675 u32 cfg_val; 4675 u32 cfg_val;
4676 4676
4677 /* Wait for link training to complete. */ 4677 /* Wait for link training to complete. */
4678 for (i = 0; i < 5000; i++) 4678 for (i = 0; i < 5000; i++)
4679 udelay(100); 4679 udelay(100);
4680 4680
4681 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); 4681 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4682 pci_write_config_dword(tp->pdev, 0xc4, 4682 pci_write_config_dword(tp->pdev, 0xc4,
4683 cfg_val | (1 << 15)); 4683 cfg_val | (1 << 15));
4684 } 4684 }
4685 /* Set PCIE max payload size and clear error status. */ 4685 /* Set PCIE max payload size and clear error status. */
4686 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000); 4686 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4687 } 4687 }
4688 4688
4689 /* Re-enable indirect register accesses. */ 4689 /* Re-enable indirect register accesses. */
4690 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 4690 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4691 tp->misc_host_ctrl); 4691 tp->misc_host_ctrl);
4692 4692
4693 /* Set MAX PCI retry to zero. */ 4693 /* Set MAX PCI retry to zero. */
4694 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); 4694 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4695 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && 4695 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4696 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) 4696 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4697 val |= PCISTATE_RETRY_SAME_DMA; 4697 val |= PCISTATE_RETRY_SAME_DMA;
4698 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); 4698 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4699 4699
4700 pci_restore_state(tp->pdev); 4700 pci_restore_state(tp->pdev);
4701 4701
4702 /* Make sure PCI-X relaxed ordering bit is clear. */ 4702 /* Make sure PCI-X relaxed ordering bit is clear. */
4703 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val); 4703 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4704 val &= ~PCIX_CAPS_RELAXED_ORDERING; 4704 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4705 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); 4705 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4706 4706
4707 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 4707 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4708 u32 val; 4708 u32 val;
4709 4709
4710 /* Chip reset on 5780 will reset MSI enable bit, 4710 /* Chip reset on 5780 will reset MSI enable bit,
4711 * so need to restore it. 4711 * so need to restore it.
4712 */ 4712 */
4713 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 4713 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4714 u16 ctrl; 4714 u16 ctrl;
4715 4715
4716 pci_read_config_word(tp->pdev, 4716 pci_read_config_word(tp->pdev,
4717 tp->msi_cap + PCI_MSI_FLAGS, 4717 tp->msi_cap + PCI_MSI_FLAGS,
4718 &ctrl); 4718 &ctrl);
4719 pci_write_config_word(tp->pdev, 4719 pci_write_config_word(tp->pdev,
4720 tp->msi_cap + PCI_MSI_FLAGS, 4720 tp->msi_cap + PCI_MSI_FLAGS,
4721 ctrl | PCI_MSI_FLAGS_ENABLE); 4721 ctrl | PCI_MSI_FLAGS_ENABLE);
4722 val = tr32(MSGINT_MODE); 4722 val = tr32(MSGINT_MODE);
4723 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); 4723 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4724 } 4724 }
4725 4725
4726 val = tr32(MEMARB_MODE); 4726 val = tr32(MEMARB_MODE);
4727 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 4727 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4728 4728
4729 } else 4729 } else
4730 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 4730 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4731 4731
4732 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { 4732 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4733 tg3_stop_fw(tp); 4733 tg3_stop_fw(tp);
4734 tw32(0x5000, 0x400); 4734 tw32(0x5000, 0x400);
4735 } 4735 }
4736 4736
4737 tw32(GRC_MODE, tp->grc_mode); 4737 tw32(GRC_MODE, tp->grc_mode);
4738 4738
4739 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { 4739 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4740 u32 val = tr32(0xc4); 4740 u32 val = tr32(0xc4);
4741 4741
4742 tw32(0xc4, val | (1 << 15)); 4742 tw32(0xc4, val | (1 << 15));
4743 } 4743 }
4744 4744
4745 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && 4745 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 4746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4747 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; 4747 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4748 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) 4748 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4749 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; 4749 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4750 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 4750 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4751 } 4751 }
4752 4752
4753 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 4753 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4754 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 4754 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4755 tw32_f(MAC_MODE, tp->mac_mode); 4755 tw32_f(MAC_MODE, tp->mac_mode);
4756 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 4756 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4757 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 4757 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4758 tw32_f(MAC_MODE, tp->mac_mode); 4758 tw32_f(MAC_MODE, tp->mac_mode);
4759 } else 4759 } else
4760 tw32_f(MAC_MODE, 0); 4760 tw32_f(MAC_MODE, 0);
4761 udelay(40); 4761 udelay(40);
4762 4762
4763 /* Wait for firmware initialization to complete. */ 4763 /* Wait for firmware initialization to complete. */
4764 for (i = 0; i < 100000; i++) { 4764 for (i = 0; i < 100000; i++) {
4765 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); 4765 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4766 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 4766 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4767 break; 4767 break;
4768 udelay(10); 4768 udelay(10);
4769 } 4769 }
4770 4770
4771 /* Chip might not be fitted with firmare. Some Sun onboard 4771 /* Chip might not be fitted with firmare. Some Sun onboard
4772 * parts are configured like that. So don't signal the timeout 4772 * parts are configured like that. So don't signal the timeout
4773 * of the above loop as an error, but do report the lack of 4773 * of the above loop as an error, but do report the lack of
4774 * running firmware once. 4774 * running firmware once.
4775 */ 4775 */
4776 if (i >= 100000 && 4776 if (i >= 100000 &&
4777 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { 4777 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4778 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; 4778 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4779 4779
4780 printk(KERN_INFO PFX "%s: No firmware running.\n", 4780 printk(KERN_INFO PFX "%s: No firmware running.\n",
4781 tp->dev->name); 4781 tp->dev->name);
4782 } 4782 }
4783 4783
4784 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 4784 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4785 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { 4785 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4786 u32 val = tr32(0x7c00); 4786 u32 val = tr32(0x7c00);
4787 4787
4788 tw32(0x7c00, val | (1 << 25)); 4788 tw32(0x7c00, val | (1 << 25));
4789 } 4789 }
4790 4790
4791 /* Reprobe ASF enable state. */ 4791 /* Reprobe ASF enable state. */
4792 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; 4792 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4793 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; 4793 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4794 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 4794 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4795 if (val == NIC_SRAM_DATA_SIG_MAGIC) { 4795 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4796 u32 nic_cfg; 4796 u32 nic_cfg;
4797 4797
4798 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); 4798 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4799 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { 4799 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4800 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; 4800 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4801 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 4801 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4802 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 4802 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4803 } 4803 }
4804 } 4804 }
4805 4805
4806 return 0; 4806 return 0;
4807 } 4807 }
4808 4808
4809 /* tp->lock is held. */ 4809 /* tp->lock is held. */
4810 static void tg3_stop_fw(struct tg3 *tp) 4810 static void tg3_stop_fw(struct tg3 *tp)
4811 { 4811 {
4812 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 4812 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4813 u32 val; 4813 u32 val;
4814 int i; 4814 int i;
4815 4815
4816 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); 4816 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4817 val = tr32(GRC_RX_CPU_EVENT); 4817 val = tr32(GRC_RX_CPU_EVENT);
4818 val |= (1 << 14); 4818 val |= (1 << 14);
4819 tw32(GRC_RX_CPU_EVENT, val); 4819 tw32(GRC_RX_CPU_EVENT, val);
4820 4820
4821 /* Wait for RX cpu to ACK the event. */ 4821 /* Wait for RX cpu to ACK the event. */
4822 for (i = 0; i < 100; i++) { 4822 for (i = 0; i < 100; i++) {
4823 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14))) 4823 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4824 break; 4824 break;
4825 udelay(1); 4825 udelay(1);
4826 } 4826 }
4827 } 4827 }
4828 } 4828 }
4829 4829
/* tp->lock is held.
 *
 * Full shutdown sequence: pause firmware, signal the reset, quiesce the
 * hardware, reset the chip and write the post-reset signatures.  Returns
 * the chip-reset result (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* Only the chip reset can fail here. */
	return err;
}
4850 4850
/* Layout of the bundled 5701 A0 workaround firmware image
 * (tg3FwText / tg3FwRodata below).  Addresses are in the chip cpu's
 * address space; lengths are in bytes.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0	/* historical misspelling, kept for compatibility */
#define TG3_FW_RELEASE_MINOR	0x0	/* correctly spelled alias of the above */
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0	/* == TEXT_ADDR + TEXT_LEN */
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
4865 4865
4866 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = { 4866 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4867 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800, 4867 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4868 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000, 4868 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4869 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034, 4869 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4870 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000, 4870 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4871 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105, 4871 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4872 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0, 4872 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4873 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010, 4873 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4874 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01, 4874 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4875 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c, 4875 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4876 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000, 4876 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4877 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400, 4877 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4878 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c, 4878 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4879 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 4879 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4880 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64, 4880 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4881 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000, 4881 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4882 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 4882 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4883 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68, 4883 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4884 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003, 4884 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4885 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800, 4885 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4886 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, 4886 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4887 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60, 4887 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4888 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008, 4888 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4889 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 4889 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4890 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4890 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4891 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4891 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4892 0, 0, 0, 0, 0, 0, 4892 0, 0, 0, 0, 0, 0,
4893 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002, 4893 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4894 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 4894 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4895 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 4895 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4896 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 4896 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4897 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009, 4897 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4898 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b, 4898 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4899 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000, 4899 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4900 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000, 4900 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4901 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 4901 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4902 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 4902 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4903 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014, 4903 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4904 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4904 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4905 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4905 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4906 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4906 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4907 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010, 4907 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4908 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74, 4908 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4909 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c, 4909 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4910 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800, 4910 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4911 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001, 4911 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4912 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028, 4912 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4913 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800, 4913 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4914 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0, 4914 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4915 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 4915 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4916 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001, 4916 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4917 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810, 4917 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4918 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018, 4918 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4919 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec, 4919 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4920 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c, 4920 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4921 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74, 4921 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4922 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000, 4922 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4923 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c, 4923 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4924 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c, 4924 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4925 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df, 4925 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4926 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000, 4926 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4927 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800, 4927 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4928 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402, 4928 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4929 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00, 4929 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4930 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010, 4930 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4931 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df, 4931 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4932 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001, 4932 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4933 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008, 4933 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4934 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021, 4934 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4935 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018, 4935 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4936 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b, 4936 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4937 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000, 4937 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4938 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008, 4938 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4939 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b, 4939 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4940 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001, 4940 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4941 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821, 4941 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4942 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000, 4942 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4943 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000, 4943 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4944 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821, 4944 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4945 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff, 4945 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4946 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008, 4946 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4947 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010, 4947 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4948 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000, 4948 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4949 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428, 4949 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4950 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c, 4950 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4951 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e, 4951 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4952 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010, 4952 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4953 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000, 4953 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4954 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001, 4954 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4955 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000, 4955 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4956 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824, 4956 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4957 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000 4957 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4958 }; 4958 };
4959 4959
4960 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = { 4960 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4961 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430, 4961 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4962 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74, 4962 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4963 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 4963 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4964 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000, 4964 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4965 0x00000000 4965 0x00000000
4966 }; 4966 };
4967 4967
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000
};
#endif
4974 4974
/* On-chip scratch memory windows that receive the downloaded
 * per-cpu firmware images (16KB each).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
4979 4979
4980 /* tp->lock is held. */ 4980 /* tp->lock is held. */
4981 static int tg3_halt_cpu(struct tg3 *tp, u32 offset) 4981 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4982 { 4982 {
4983 int i; 4983 int i;
4984 4984
4985 BUG_ON(offset == TX_CPU_BASE && 4985 BUG_ON(offset == TX_CPU_BASE &&
4986 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); 4986 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
4987 4987
4988 if (offset == RX_CPU_BASE) { 4988 if (offset == RX_CPU_BASE) {
4989 for (i = 0; i < 10000; i++) { 4989 for (i = 0; i < 10000; i++) {
4990 tw32(offset + CPU_STATE, 0xffffffff); 4990 tw32(offset + CPU_STATE, 0xffffffff);
4991 tw32(offset + CPU_MODE, CPU_MODE_HALT); 4991 tw32(offset + CPU_MODE, CPU_MODE_HALT);
4992 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) 4992 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4993 break; 4993 break;
4994 } 4994 }
4995 4995
4996 tw32(offset + CPU_STATE, 0xffffffff); 4996 tw32(offset + CPU_STATE, 0xffffffff);
4997 tw32_f(offset + CPU_MODE, CPU_MODE_HALT); 4997 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
4998 udelay(10); 4998 udelay(10);
4999 } else { 4999 } else {
5000 for (i = 0; i < 10000; i++) { 5000 for (i = 0; i < 10000; i++) {
5001 tw32(offset + CPU_STATE, 0xffffffff); 5001 tw32(offset + CPU_STATE, 0xffffffff);
5002 tw32(offset + CPU_MODE, CPU_MODE_HALT); 5002 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5003 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) 5003 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5004 break; 5004 break;
5005 } 5005 }
5006 } 5006 }
5007 5007
5008 if (i >= 10000) { 5008 if (i >= 10000) {
5009 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, " 5009 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5010 "and %s CPU\n", 5010 "and %s CPU\n",
5011 tp->dev->name, 5011 tp->dev->name,
5012 (offset == RX_CPU_BASE ? "RX" : "TX")); 5012 (offset == RX_CPU_BASE ? "RX" : "TX"));
5013 return -ENODEV; 5013 return -ENODEV;
5014 } 5014 }
5015 5015
5016 /* Clear firmware's nvram arbitration. */ 5016 /* Clear firmware's nvram arbitration. */
5017 if (tp->tg3_flags & TG3_FLAG_NVRAM) 5017 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5018 tw32(NVRAM_SWARB, SWARB_REQ_CLR0); 5018 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5019 return 0; 5019 return 0;
5020 } 5020 }
5021 5021
5022 struct fw_info { 5022 struct fw_info {
5023 unsigned int text_base; 5023 unsigned int text_base;
5024 unsigned int text_len; 5024 unsigned int text_len;
5025 u32 *text_data; 5025 u32 *text_data;
5026 unsigned int rodata_base; 5026 unsigned int rodata_base;
5027 unsigned int rodata_len; 5027 unsigned int rodata_len;
5028 u32 *rodata_data; 5028 u32 *rodata_data;
5029 unsigned int data_base; 5029 unsigned int data_base;
5030 unsigned int data_len; 5030 unsigned int data_len;
5031 u32 *data_data; 5031 u32 *data_data;
5032 }; 5032 };
5033 5033
5034 /* tp->lock is held. */ 5034 /* tp->lock is held. */
5035 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, 5035 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5036 int cpu_scratch_size, struct fw_info *info) 5036 int cpu_scratch_size, struct fw_info *info)
5037 { 5037 {
5038 int err, lock_err, i; 5038 int err, lock_err, i;
5039 void (*write_op)(struct tg3 *, u32, u32); 5039 void (*write_op)(struct tg3 *, u32, u32);
5040 5040
5041 if (cpu_base == TX_CPU_BASE && 5041 if (cpu_base == TX_CPU_BASE &&
5042 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 5042 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5043 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load " 5043 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5044 "TX cpu firmware on %s which is 5705.\n", 5044 "TX cpu firmware on %s which is 5705.\n",
5045 tp->dev->name); 5045 tp->dev->name);
5046 return -EINVAL; 5046 return -EINVAL;
5047 } 5047 }
5048 5048
5049 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 5049 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5050 write_op = tg3_write_mem; 5050 write_op = tg3_write_mem;
5051 else 5051 else
5052 write_op = tg3_write_indirect_reg32; 5052 write_op = tg3_write_indirect_reg32;
5053 5053
5054 /* It is possible that bootcode is still loading at this point. 5054 /* It is possible that bootcode is still loading at this point.
5055 * Get the nvram lock first before halting the cpu. 5055 * Get the nvram lock first before halting the cpu.
5056 */ 5056 */
5057 lock_err = tg3_nvram_lock(tp); 5057 lock_err = tg3_nvram_lock(tp);
5058 err = tg3_halt_cpu(tp, cpu_base); 5058 err = tg3_halt_cpu(tp, cpu_base);
5059 if (!lock_err) 5059 if (!lock_err)
5060 tg3_nvram_unlock(tp); 5060 tg3_nvram_unlock(tp);
5061 if (err) 5061 if (err)
5062 goto out; 5062 goto out;
5063 5063
5064 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 5064 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5065 write_op(tp, cpu_scratch_base + i, 0); 5065 write_op(tp, cpu_scratch_base + i, 0);
5066 tw32(cpu_base + CPU_STATE, 0xffffffff); 5066 tw32(cpu_base + CPU_STATE, 0xffffffff);
5067 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); 5067 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5068 for (i = 0; i < (info->text_len / sizeof(u32)); i++) 5068 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5069 write_op(tp, (cpu_scratch_base + 5069 write_op(tp, (cpu_scratch_base +
5070 (info->text_base & 0xffff) + 5070 (info->text_base & 0xffff) +
5071 (i * sizeof(u32))), 5071 (i * sizeof(u32))),
5072 (info->text_data ? 5072 (info->text_data ?
5073 info->text_data[i] : 0)); 5073 info->text_data[i] : 0));
5074 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++) 5074 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5075 write_op(tp, (cpu_scratch_base + 5075 write_op(tp, (cpu_scratch_base +
5076 (info->rodata_base & 0xffff) + 5076 (info->rodata_base & 0xffff) +
5077 (i * sizeof(u32))), 5077 (i * sizeof(u32))),
5078 (info->rodata_data ? 5078 (info->rodata_data ?
5079 info->rodata_data[i] : 0)); 5079 info->rodata_data[i] : 0));
5080 for (i = 0; i < (info->data_len / sizeof(u32)); i++) 5080 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5081 write_op(tp, (cpu_scratch_base + 5081 write_op(tp, (cpu_scratch_base +
5082 (info->data_base & 0xffff) + 5082 (info->data_base & 0xffff) +
5083 (i * sizeof(u32))), 5083 (i * sizeof(u32))),
5084 (info->data_data ? 5084 (info->data_data ?
5085 info->data_data[i] : 0)); 5085 info->data_data[i] : 0));
5086 5086
5087 err = 0; 5087 err = 0;
5088 5088
5089 out: 5089 out:
5090 return err; 5090 return err;
5091 } 5091 }
5092 5092
5093 /* tp->lock is held. */ 5093 /* tp->lock is held. */
5094 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 5094 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5095 { 5095 {
5096 struct fw_info info; 5096 struct fw_info info;
5097 int err, i; 5097 int err, i;
5098 5098
5099 info.text_base = TG3_FW_TEXT_ADDR; 5099 info.text_base = TG3_FW_TEXT_ADDR;
5100 info.text_len = TG3_FW_TEXT_LEN; 5100 info.text_len = TG3_FW_TEXT_LEN;
5101 info.text_data = &tg3FwText[0]; 5101 info.text_data = &tg3FwText[0];
5102 info.rodata_base = TG3_FW_RODATA_ADDR; 5102 info.rodata_base = TG3_FW_RODATA_ADDR;
5103 info.rodata_len = TG3_FW_RODATA_LEN; 5103 info.rodata_len = TG3_FW_RODATA_LEN;
5104 info.rodata_data = &tg3FwRodata[0]; 5104 info.rodata_data = &tg3FwRodata[0];
5105 info.data_base = TG3_FW_DATA_ADDR; 5105 info.data_base = TG3_FW_DATA_ADDR;
5106 info.data_len = TG3_FW_DATA_LEN; 5106 info.data_len = TG3_FW_DATA_LEN;
5107 info.data_data = NULL; 5107 info.data_data = NULL;
5108 5108
5109 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 5109 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5110 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 5110 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5111 &info); 5111 &info);
5112 if (err) 5112 if (err)
5113 return err; 5113 return err;
5114 5114
5115 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 5115 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5116 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 5116 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5117 &info); 5117 &info);
5118 if (err) 5118 if (err)
5119 return err; 5119 return err;
5120 5120
5121 /* Now startup only the RX cpu. */ 5121 /* Now startup only the RX cpu. */
5122 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 5122 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5123 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); 5123 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5124 5124
5125 for (i = 0; i < 5; i++) { 5125 for (i = 0; i < 5; i++) {
5126 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR) 5126 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5127 break; 5127 break;
5128 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 5128 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5129 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); 5129 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5130 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR); 5130 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5131 udelay(1000); 5131 udelay(1000);
5132 } 5132 }
5133 if (i >= 5) { 5133 if (i >= 5) {
5134 printk(KERN_ERR PFX "tg3_load_firmware fails for %s " 5134 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5135 "to set RX CPU PC, is %08x should be %08x\n", 5135 "to set RX CPU PC, is %08x should be %08x\n",
5136 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC), 5136 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5137 TG3_FW_TEXT_ADDR); 5137 TG3_FW_TEXT_ADDR);
5138 return -ENODEV; 5138 return -ENODEV;
5139 } 5139 }
5140 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 5140 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5141 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); 5141 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5142 5142
5143 return 0; 5143 return 0;
5144 } 5144 }
5145 5145
#if TG3_TSO_SUPPORT != 0

/* TSO (TCP Segmentation Offload) firmware image: version and section
 * layout for the embedded MIPS image below.  Version is 1.6.0 per the
 * three RELEASE macros.  NOTE(review): "RELASE" in the MINOR macro is a
 * historical typo; the name is preserved here because code outside this
 * view may reference it.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
/* Load/entry address of the image. */
#define TG3_TSO_FW_START_ADDR		0x08000000
/* .text starts at the load address; each later section begins where
 * the previous one ends (e.g. rodata at 0x08000000 + 0x1aa0).
 */
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5163 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = { 5163 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5164 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000, 5164 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5165 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800, 5165 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5166 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, 5166 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5167 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800, 5167 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5168 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001, 5168 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5169 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c, 5169 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5170 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001, 5170 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5171 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008, 5171 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5172 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 5172 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5173 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001, 5173 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5174 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000, 5174 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5175 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001, 5175 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5176 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800, 5176 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5177 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c, 5177 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5178 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 5178 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5179 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021, 5179 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5180 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800, 5180 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5181 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c, 5181 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5182 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, 5182 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5183 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800, 5183 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5184 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8, 5184 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5185 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8, 5185 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5186 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90, 5186 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5187 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068, 5187 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5188 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c, 5188 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5189 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021, 5189 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5190 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008, 5190 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5191 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021, 5191 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5192 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b, 5192 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5193 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 5193 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5194 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 5194 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5195 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020, 5195 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5196 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800, 5196 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5197 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98, 5197 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5198 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902, 5198 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5199 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602, 5199 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5200 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001, 5200 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5201 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c, 5201 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5202 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac, 5202 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5203 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4, 5203 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5204 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410, 5204 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5205 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800, 5205 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5206 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4, 5206 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5207 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800, 5207 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5208 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800, 5208 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5209 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800, 5209 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5210 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800, 5210 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5211 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, 5211 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5212 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800, 5212 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5213 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821, 5213 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5214 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800, 5214 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5215 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14, 5215 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5216 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800, 5216 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5217 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 5217 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5218 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002, 5218 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5219 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80, 5219 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5220 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001, 5220 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5221 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003, 5221 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5222 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000, 5222 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5223 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656, 5223 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5224 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078, 5224 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5225 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800, 5225 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5226 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c, 5226 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5227 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c, 5227 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5228 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100, 5228 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5229 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054, 5229 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5230 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c, 5230 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5231 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0, 5231 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5232 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825, 5232 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5233 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff, 5233 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5234 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000, 5234 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5235 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004, 5235 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5236 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021, 5236 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5237 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0, 5237 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5238 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008, 5238 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5239 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c, 5239 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5240 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003, 5240 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5241 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c, 5241 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5242 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b, 5242 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5243 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98, 5243 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5244 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000, 5244 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5245 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, 5245 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5246 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028, 5246 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5247 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff, 5247 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5248 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000, 5248 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5249 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821, 5249 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5250 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90, 5250 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5251 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, 5251 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5252 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014, 5252 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5253 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f, 5253 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5254 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a, 5254 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5255 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400, 5255 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5256 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010, 5256 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5257 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e, 5257 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5258 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800, 5258 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5259 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000, 5259 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5260 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000, 5260 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5261 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246, 5261 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5262 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff, 5262 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5263 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821, 5263 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5264 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000, 5264 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5265 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9, 5265 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5266 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc, 5266 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5267 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000, 5267 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5268 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a, 5268 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5269 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286, 5269 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5270 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023, 5270 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5271 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c, 5271 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5272 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010, 5272 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5273 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400, 5273 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5274 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024, 5274 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5275 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800, 5275 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5276 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800, 5276 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5277 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021, 5277 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5278 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8, 5278 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5279 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021, 5279 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5280 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8, 5280 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5281 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60, 5281 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5282 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 5282 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5283 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000, 5283 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5284 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800, 5284 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5285 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021, 5285 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5286 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021, 5286 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5287 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002, 5287 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5288 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000, 5288 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5289 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800, 5289 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5290 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc, 5290 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5291 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50, 5291 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5292 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025, 5292 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5293 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800, 5293 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5294 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f, 5294 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5295 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40, 5295 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5296 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 5296 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5297 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 5297 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5298 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000, 5298 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5299 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008, 5299 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5300 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02, 5300 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5301 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02, 5301 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5302 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 5302 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5303 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000, 5303 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5304 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000, 5304 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5305 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008, 5305 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5306 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2, 5306 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5307 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402, 5307 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5308 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4, 5308 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5309 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023, 5309 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5310 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a, 5310 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5311 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004, 5311 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5312 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400, 5312 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5313 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4, 5313 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5314 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800, 5314 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5315 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4, 5315 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5316 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800, 5316 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5317 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4, 5317 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5318 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821, 5318 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5319 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800, 5319 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5320 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6, 5320 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5321 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800, 5321 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5322 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021, 5322 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5323 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008, 5323 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5324 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a, 5324 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5325 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, 5325 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5326 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c, 5326 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5327 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb, 5327 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5328 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821, 5328 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5329 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021, 5329 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5330 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006, 5330 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5331 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, 5331 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5332 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02, 5332 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5333 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021, 5333 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5334 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081, 5334 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5335 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800, 5335 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5336 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800, 5336 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5337 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a, 5337 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5338 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02, 5338 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5339 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821, 5339 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5340 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023, 5340 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5341 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff, 5341 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5342 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042, 5342 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5343 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 5343 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5344 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 5344 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5345 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 5345 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5346 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 5346 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5347 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 5347 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5348 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821, 5348 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5349 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800, 5349 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5350 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043, 5350 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5351 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021, 5351 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5352 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 5352 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5353 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800, 5353 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5354 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff, 5354 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5355 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 5355 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5356 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007, 5356 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5357 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402, 5357 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5358 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff, 5358 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5359 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021, 5359 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5360 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff, 5360 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5361 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005, 5361 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5362 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800, 5362 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5363 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4, 5363 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5364 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b, 5364 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5365 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4, 5365 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5366 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800, 5366 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5367 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034, 5367 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5368 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000, 5368 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5369 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac, 5369 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5370 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022, 5370 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5371 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000, 5371 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5372 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0, 5372 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5373 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021, 5373 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5374 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000, 5374 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5375 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc, 5375 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5376 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005, 5376 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5377 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080, 5377 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5378 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800, 5378 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5379 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014, 5379 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5380 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823, 5380 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5381 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021, 5381 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5382 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010, 5382 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5383 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5, 5383 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5384 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, 5384 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5385 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021, 5385 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5386 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c, 5386 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5387 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005, 5387 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5388 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800, 5388 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5389 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500, 5389 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5390 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023, 5390 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5391 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821, 5391 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5392 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000, 5392 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5393 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021, 5393 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5394 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006, 5394 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5395 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0, 5395 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5396 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006, 5396 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5397 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905, 5397 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5398 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860, 5398 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5399 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab, 5399 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5400 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff, 5400 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5401 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, 5401 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5402 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038, 5402 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5403 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020, 5403 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5404 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450, 5404 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5405 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003, 5405 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5406 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff, 5406 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5407 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002, 5407 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5408 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f, 5408 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5409 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000, 5409 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5410 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820, 5410 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5411 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4, 5411 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5412 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 5412 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5413 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 5413 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5414 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 5414 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5415 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002, 5415 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5416 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff, 5416 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5417 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8, 5417 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5418 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438, 5418 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5419 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800, 5419 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5420 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800, 5420 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5421 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000, 5421 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5422 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000, 5422 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5423 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021, 5423 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5424 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 5424 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5425 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 5425 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5426 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b, 5426 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5427 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02, 5427 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5428 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 5428 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5429 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 5429 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5430 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff, 5430 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5431 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 5431 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5432 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651, 5432 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5433 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 5433 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5434 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0, 5434 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5435 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 5435 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5436 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 5436 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5437 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000, 5437 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5438 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800, 5438 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5439 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b, 5439 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5440 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010, 5440 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5441 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001, 5441 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5442 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800, 5442 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5443 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000, 5443 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5444 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008, 5444 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5445 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 5445 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5446 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010, 5446 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5447 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000, 5447 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5448 }; 5448 };
5449 5449
5450 static u32 tg3TsoFwRodata[] = { 5450 static u32 tg3TsoFwRodata[] = {
5451 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, 5451 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5452 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f, 5452 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5453 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000, 5453 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5454 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000, 5454 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5455 0x00000000, 5455 0x00000000,
5456 }; 5456 };
5457 5457
5458 static u32 tg3TsoFwData[] = { 5458 static u32 tg3TsoFwData[] = {
5459 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000, 5459 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5460 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 5460 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5461 0x00000000, 5461 0x00000000,
5462 }; 5462 };
5463 5463
5464 /* 5705 needs a special version of the TSO firmware. */ 5464 /* 5705 needs a special version of the TSO firmware. */
5465 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1 5465 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5466 #define TG3_TSO5_FW_RELASE_MINOR 0x2 5466 #define TG3_TSO5_FW_RELASE_MINOR 0x2
5467 #define TG3_TSO5_FW_RELEASE_FIX 0x0 5467 #define TG3_TSO5_FW_RELEASE_FIX 0x0
5468 #define TG3_TSO5_FW_START_ADDR 0x00010000 5468 #define TG3_TSO5_FW_START_ADDR 0x00010000
5469 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000 5469 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5470 #define TG3_TSO5_FW_TEXT_LEN 0xe90 5470 #define TG3_TSO5_FW_TEXT_LEN 0xe90
5471 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90 5471 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5472 #define TG3_TSO5_FW_RODATA_LEN 0x50 5472 #define TG3_TSO5_FW_RODATA_LEN 0x50
5473 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00 5473 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5474 #define TG3_TSO5_FW_DATA_LEN 0x20 5474 #define TG3_TSO5_FW_DATA_LEN 0x20
5475 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20 5475 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5476 #define TG3_TSO5_FW_SBSS_LEN 0x28 5476 #define TG3_TSO5_FW_SBSS_LEN 0x28
5477 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50 5477 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5478 #define TG3_TSO5_FW_BSS_LEN 0x88 5478 #define TG3_TSO5_FW_BSS_LEN 0x88
5479 5479
5480 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { 5480 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5481 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000, 5481 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5482 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, 5482 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5483 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, 5483 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5484 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001, 5484 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5485 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001, 5485 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5486 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378, 5486 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5487 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 5487 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5488 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014, 5488 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5489 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400, 5489 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5490 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000, 5490 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5491 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200, 5491 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5492 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000, 5492 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5493 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 5493 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5494 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821, 5494 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5495 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 5495 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5496 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 5496 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5497 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60, 5497 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5498 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821, 5498 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5499 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000, 5499 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5500 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028, 5500 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5501 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402, 5501 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5502 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014, 5502 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5503 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff, 5503 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5504 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b, 5504 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5505 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004, 5505 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5506 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8, 5506 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5507 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001, 5507 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5508 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021, 5508 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5509 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2, 5509 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5510 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a, 5510 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5511 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 5511 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5512 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001, 5512 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5513 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001, 5513 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5514 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021, 5514 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5515 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000, 5515 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5516 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c, 5516 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5517 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005, 5517 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5518 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006, 5518 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5519 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c, 5519 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5520 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c, 5520 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5521 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021, 5521 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5522 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001, 5522 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5523 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b, 5523 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5524 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c, 5524 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5525 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76, 5525 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5526 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c, 5526 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5527 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70, 5527 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5528 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c, 5528 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5529 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72, 5529 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5530 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff, 5530 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5531 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78, 5531 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5532 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78, 5532 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5533 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005, 5533 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5534 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d, 5534 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5535 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005, 5535 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5536 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027, 5536 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5537 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d, 5537 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5538 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff, 5538 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5539 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001, 5539 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5540 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000, 5540 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5541 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a, 5541 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5542 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff, 5542 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5543 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001, 5543 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5544 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200, 5544 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5545 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001, 5545 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5546 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021, 5546 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5547 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 5547 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5548 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00, 5548 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5549 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001, 5549 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5550 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000, 5550 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5551 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003, 5551 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5552 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001, 5552 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5553 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56, 5553 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5554 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4, 5554 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5555 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64, 5555 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5556 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088, 5556 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5557 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001, 5557 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5558 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57, 5558 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5559 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001, 5559 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5560 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001, 5560 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5561 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000, 5561 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5562 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001, 5562 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5563 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823, 5563 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5564 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001, 5564 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5565 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001, 5565 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5566 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001, 5566 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5567 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021, 5567 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5568 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 5568 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5569 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 5569 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5570 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001, 5570 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5571 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001, 5571 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5572 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec, 5572 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5573 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000, 5573 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5574 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024, 5574 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5575 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 5575 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5576 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000, 5576 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5577 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 5577 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5578 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 5578 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5579 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001, 5579 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5580 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001, 5580 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5581 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff, 5581 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5582 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c, 5582 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5583 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54, 5583 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5584 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001, 5584 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5585 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 5585 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5586 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624, 5586 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5587 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 5587 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5588 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 5588 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5589 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283, 5589 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5590 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825, 5590 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5591 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003, 5591 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5592 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 5592 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5593 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c, 5593 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5594 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009, 5594 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5595 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025, 5595 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5596 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008, 5596 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5597 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021, 5597 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5598 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 5598 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5599 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 5599 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5600 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014, 5600 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5601 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001, 5601 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5602 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 5602 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5603 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001, 5603 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5604 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020, 5604 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5605 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804, 5605 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5606 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20, 5606 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5607 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315, 5607 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5608 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005, 5608 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5609 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001, 5609 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5610 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001, 5610 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5611 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014, 5611 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5612 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8, 5612 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5613 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000, 5613 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5614 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008, 5614 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5615 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008, 5615 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5616 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b, 5616 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5617 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd, 5617 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5618 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000, 5618 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5619 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025, 5619 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5620 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008, 5620 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5621 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff, 5621 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5622 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008, 5622 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5623 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021, 5623 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5624 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f, 5624 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5625 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600, 5625 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5626 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40, 5626 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5627 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000, 5627 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5628 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 5628 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5629 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44, 5629 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5630 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003, 5630 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5631 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001, 5631 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5632 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001, 5632 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5633 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c, 5633 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5634 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 5634 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5635 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 5635 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5636 0x00000000, 0x00000000, 0x00000000, 5636 0x00000000, 0x00000000, 0x00000000,
5637 }; 5637 };
5638 5638
/* Read-only data segment of the 5705 TSO firmware image.  The words are
 * raw firmware bytes (they decode as short ASCII tags such as "Main",
 * "CpuB", "stkoffld", "fatalErr" — presumably internal task/error names).
 * Sized with one extra element so a partial trailing word still fits.
 * Do not edit: the values must match TG3_TSO5_FW_RODATA_LEN exactly.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5645 5645
/* Initialized data segment of the 5705 TSO firmware image (raw firmware
 * bytes; they decode as the ASCII version tag "stkoffld_v1.2.0").  Sized
 * with one extra element so a partial trailing word still fits.
 * Do not edit: the values must match TG3_TSO5_FW_DATA_LEN exactly.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5650 5650
/* tp->lock is held.
 *
 * Download the TSO (TCP segmentation offload) firmware into the chip's
 * embedded CPU scratch RAM and start the CPU executing it.
 *
 * On 5705-class chips the firmware runs on the RX CPU and shares the
 * mbuf pool SRAM; on all other (non-HW-TSO) chips it runs on the TX CPU
 * using the dedicated TX CPU scratch area.
 *
 * Returns 0 on success (or when the chip does TSO in hardware and needs
 * no firmware), a negative errno from tg3_load_firmware_cpu() on download
 * failure, or -ENODEV if the CPU program counter cannot be set.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO chips need no firmware download at all. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: TSO5 image on the RX CPU, loaded into the mbuf
		 * pool SRAM; scratch size covers text+rodata+data+bss.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* Everything else: full TSO image on the TX CPU. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu: point the PC at the firmware entry and
	 * retry up to 5 times (halting the CPU between attempts) until
	 * the PC reads back as the text base.
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Release the CPU from halt so the firmware starts running. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5722 5722
5723 #endif /* TG3_TSO_SUPPORT != 0 */ 5723 #endif /* TG3_TSO_SUPPORT != 0 */
5724 5724
5725 /* tp->lock is held. */ 5725 /* tp->lock is held. */
5726 static void __tg3_set_mac_addr(struct tg3 *tp) 5726 static void __tg3_set_mac_addr(struct tg3 *tp)
5727 { 5727 {
5728 u32 addr_high, addr_low; 5728 u32 addr_high, addr_low;
5729 int i; 5729 int i;
5730 5730
5731 addr_high = ((tp->dev->dev_addr[0] << 8) | 5731 addr_high = ((tp->dev->dev_addr[0] << 8) |
5732 tp->dev->dev_addr[1]); 5732 tp->dev->dev_addr[1]);
5733 addr_low = ((tp->dev->dev_addr[2] << 24) | 5733 addr_low = ((tp->dev->dev_addr[2] << 24) |
5734 (tp->dev->dev_addr[3] << 16) | 5734 (tp->dev->dev_addr[3] << 16) |
5735 (tp->dev->dev_addr[4] << 8) | 5735 (tp->dev->dev_addr[4] << 8) |
5736 (tp->dev->dev_addr[5] << 0)); 5736 (tp->dev->dev_addr[5] << 0));
5737 for (i = 0; i < 4; i++) { 5737 for (i = 0; i < 4; i++) {
5738 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); 5738 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5739 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); 5739 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5740 } 5740 }
5741 5741
5742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 5742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 5743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5744 for (i = 0; i < 12; i++) { 5744 for (i = 0; i < 12; i++) {
5745 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); 5745 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5746 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); 5746 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5747 } 5747 }
5748 } 5748 }
5749 5749
5750 addr_high = (tp->dev->dev_addr[0] + 5750 addr_high = (tp->dev->dev_addr[0] +
5751 tp->dev->dev_addr[1] + 5751 tp->dev->dev_addr[1] +
5752 tp->dev->dev_addr[2] + 5752 tp->dev->dev_addr[2] +
5753 tp->dev->dev_addr[3] + 5753 tp->dev->dev_addr[3] +
5754 tp->dev->dev_addr[4] + 5754 tp->dev->dev_addr[4] +
5755 tp->dev->dev_addr[5]) & 5755 tp->dev->dev_addr[5]) &
5756 TX_BACKOFF_SEED_MASK; 5756 TX_BACKOFF_SEED_MASK;
5757 tw32(MAC_TX_BACKOFF_SEED, addr_high); 5757 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5758 } 5758 }
5759 5759
5760 static int tg3_set_mac_addr(struct net_device *dev, void *p) 5760 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5761 { 5761 {
5762 struct tg3 *tp = netdev_priv(dev); 5762 struct tg3 *tp = netdev_priv(dev);
5763 struct sockaddr *addr = p; 5763 struct sockaddr *addr = p;
5764 5764
5765 if (!is_valid_ether_addr(addr->sa_data)) 5765 if (!is_valid_ether_addr(addr->sa_data))
5766 return -EINVAL; 5766 return -EINVAL;
5767 5767
5768 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5768 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5769 5769
5770 if (!netif_running(dev)) 5770 if (!netif_running(dev))
5771 return 0; 5771 return 0;
5772 5772
5773 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 5773 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5774 /* Reset chip so that ASF can re-init any MAC addresses it 5774 /* Reset chip so that ASF can re-init any MAC addresses it
5775 * needs. 5775 * needs.
5776 */ 5776 */
5777 tg3_netif_stop(tp); 5777 tg3_netif_stop(tp);
5778 tg3_full_lock(tp, 1); 5778 tg3_full_lock(tp, 1);
5779 5779
5780 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5780 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5781 tg3_init_hw(tp, 0); 5781 tg3_init_hw(tp, 0);
5782 5782
5783 tg3_netif_start(tp); 5783 tg3_netif_start(tp);
5784 tg3_full_unlock(tp); 5784 tg3_full_unlock(tp);
5785 } else { 5785 } else {
5786 spin_lock_bh(&tp->lock); 5786 spin_lock_bh(&tp->lock);
5787 __tg3_set_mac_addr(tp); 5787 __tg3_set_mac_addr(tp);
5788 spin_unlock_bh(&tp->lock); 5788 spin_unlock_bh(&tp->lock);
5789 } 5789 }
5790 5790
5791 return 0; 5791 return 0;
5792 } 5792 }
5793 5793
5794 /* tp->lock is held. */ 5794 /* tp->lock is held. */
5795 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, 5795 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5796 dma_addr_t mapping, u32 maxlen_flags, 5796 dma_addr_t mapping, u32 maxlen_flags,
5797 u32 nic_addr) 5797 u32 nic_addr)
5798 { 5798 {
5799 tg3_write_mem(tp, 5799 tg3_write_mem(tp,
5800 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), 5800 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5801 ((u64) mapping >> 32)); 5801 ((u64) mapping >> 32));
5802 tg3_write_mem(tp, 5802 tg3_write_mem(tp,
5803 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), 5803 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5804 ((u64) mapping & 0xffffffff)); 5804 ((u64) mapping & 0xffffffff));
5805 tg3_write_mem(tp, 5805 tg3_write_mem(tp,
5806 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), 5806 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5807 maxlen_flags); 5807 maxlen_flags);
5808 5808
5809 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 5809 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5810 tg3_write_mem(tp, 5810 tg3_write_mem(tp,
5811 (bdinfo_addr + TG3_BDINFO_NIC_ADDR), 5811 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5812 nic_addr); 5812 nic_addr);
5813 } 5813 }
5814 5814
5815 static void __tg3_set_rx_mode(struct net_device *); 5815 static void __tg3_set_rx_mode(struct net_device *);
5816 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) 5816 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5817 { 5817 {
5818 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); 5818 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5819 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); 5819 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5820 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); 5820 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5821 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); 5821 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5822 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 5822 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5823 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 5823 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5824 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 5824 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5825 } 5825 }
5826 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); 5826 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5827 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); 5827 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5828 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 5828 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5829 u32 val = ec->stats_block_coalesce_usecs; 5829 u32 val = ec->stats_block_coalesce_usecs;
5830 5830
5831 if (!netif_carrier_ok(tp->dev)) 5831 if (!netif_carrier_ok(tp->dev))
5832 val = 0; 5832 val = 0;
5833 5833
5834 tw32(HOSTCC_STAT_COAL_TICKS, val); 5834 tw32(HOSTCC_STAT_COAL_TICKS, val);
5835 } 5835 }
5836 } 5836 }
5837 5837
5838 /* tp->lock is held. */ 5838 /* tp->lock is held. */
5839 static int tg3_reset_hw(struct tg3 *tp, int reset_phy) 5839 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5840 { 5840 {
5841 u32 val, rdmac_mode; 5841 u32 val, rdmac_mode;
5842 int i, err, limit; 5842 int i, err, limit;
5843 5843
5844 tg3_disable_ints(tp); 5844 tg3_disable_ints(tp);
5845 5845
5846 tg3_stop_fw(tp); 5846 tg3_stop_fw(tp);
5847 5847
5848 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 5848 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5849 5849
5850 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { 5850 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5851 tg3_abort_hw(tp, 1); 5851 tg3_abort_hw(tp, 1);
5852 } 5852 }
5853 5853
5854 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy) 5854 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5855 tg3_phy_reset(tp); 5855 tg3_phy_reset(tp);
5856 5856
5857 err = tg3_chip_reset(tp); 5857 err = tg3_chip_reset(tp);
5858 if (err) 5858 if (err)
5859 return err; 5859 return err;
5860 5860
5861 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 5861 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5862 5862
5863 /* This works around an issue with Athlon chipsets on 5863 /* This works around an issue with Athlon chipsets on
5864 * B3 tigon3 silicon. This bit has no effect on any 5864 * B3 tigon3 silicon. This bit has no effect on any
5865 * other revision. But do not set this on PCI Express 5865 * other revision. But do not set this on PCI Express
5866 * chips. 5866 * chips.
5867 */ 5867 */
5868 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 5868 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5869 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; 5869 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5870 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 5870 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5871 5871
5872 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && 5872 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5873 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { 5873 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5874 val = tr32(TG3PCI_PCISTATE); 5874 val = tr32(TG3PCI_PCISTATE);
5875 val |= PCISTATE_RETRY_SAME_DMA; 5875 val |= PCISTATE_RETRY_SAME_DMA;
5876 tw32(TG3PCI_PCISTATE, val); 5876 tw32(TG3PCI_PCISTATE, val);
5877 } 5877 }
5878 5878
5879 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { 5879 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5880 /* Enable some hw fixes. */ 5880 /* Enable some hw fixes. */
5881 val = tr32(TG3PCI_MSI_DATA); 5881 val = tr32(TG3PCI_MSI_DATA);
5882 val |= (1 << 26) | (1 << 28) | (1 << 29); 5882 val |= (1 << 26) | (1 << 28) | (1 << 29);
5883 tw32(TG3PCI_MSI_DATA, val); 5883 tw32(TG3PCI_MSI_DATA, val);
5884 } 5884 }
5885 5885
5886 /* Descriptor ring init may make accesses to the 5886 /* Descriptor ring init may make accesses to the
5887 * NIC SRAM area to setup the TX descriptors, so we 5887 * NIC SRAM area to setup the TX descriptors, so we
5888 * can only do this after the hardware has been 5888 * can only do this after the hardware has been
5889 * successfully reset. 5889 * successfully reset.
5890 */ 5890 */
5891 tg3_init_rings(tp); 5891 tg3_init_rings(tp);
5892 5892
5893 /* This value is determined during the probe time DMA 5893 /* This value is determined during the probe time DMA
5894 * engine test, tg3_test_dma. 5894 * engine test, tg3_test_dma.
5895 */ 5895 */
5896 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 5896 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5897 5897
5898 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | 5898 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5899 GRC_MODE_4X_NIC_SEND_RINGS | 5899 GRC_MODE_4X_NIC_SEND_RINGS |
5900 GRC_MODE_NO_TX_PHDR_CSUM | 5900 GRC_MODE_NO_TX_PHDR_CSUM |
5901 GRC_MODE_NO_RX_PHDR_CSUM); 5901 GRC_MODE_NO_RX_PHDR_CSUM);
5902 tp->grc_mode |= GRC_MODE_HOST_SENDBDS; 5902 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5903 5903
5904 /* Pseudo-header checksum is done by hardware logic and not 5904 /* Pseudo-header checksum is done by hardware logic and not
5905 * the offload processers, so make the chip do the pseudo- 5905 * the offload processers, so make the chip do the pseudo-
5906 * header checksums on receive. For transmit it is more 5906 * header checksums on receive. For transmit it is more
5907 * convenient to do the pseudo-header checksum in software 5907 * convenient to do the pseudo-header checksum in software
5908 * as Linux does that on transmit for us in all cases. 5908 * as Linux does that on transmit for us in all cases.
5909 */ 5909 */
5910 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; 5910 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5911 5911
5912 tw32(GRC_MODE, 5912 tw32(GRC_MODE,
5913 tp->grc_mode | 5913 tp->grc_mode |
5914 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); 5914 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5915 5915
5916 /* Setup the timer prescalar register. Clock is always 66Mhz. */ 5916 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5917 val = tr32(GRC_MISC_CFG); 5917 val = tr32(GRC_MISC_CFG);
5918 val &= ~0xff; 5918 val &= ~0xff;
5919 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); 5919 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5920 tw32(GRC_MISC_CFG, val); 5920 tw32(GRC_MISC_CFG, val);
5921 5921
5922 /* Initialize MBUF/DESC pool. */ 5922 /* Initialize MBUF/DESC pool. */
5923 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 5923 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5924 /* Do nothing. */ 5924 /* Do nothing. */
5925 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { 5925 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5926 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); 5926 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) 5927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5928 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); 5928 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5929 else 5929 else
5930 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 5930 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5931 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 5931 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5932 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 5932 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5933 } 5933 }
5934 #if TG3_TSO_SUPPORT != 0 5934 #if TG3_TSO_SUPPORT != 0
5935 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 5935 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5936 int fw_len; 5936 int fw_len;
5937 5937
5938 fw_len = (TG3_TSO5_FW_TEXT_LEN + 5938 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5939 TG3_TSO5_FW_RODATA_LEN + 5939 TG3_TSO5_FW_RODATA_LEN +
5940 TG3_TSO5_FW_DATA_LEN + 5940 TG3_TSO5_FW_DATA_LEN +
5941 TG3_TSO5_FW_SBSS_LEN + 5941 TG3_TSO5_FW_SBSS_LEN +
5942 TG3_TSO5_FW_BSS_LEN); 5942 TG3_TSO5_FW_BSS_LEN);
5943 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); 5943 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5944 tw32(BUFMGR_MB_POOL_ADDR, 5944 tw32(BUFMGR_MB_POOL_ADDR,
5945 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); 5945 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5946 tw32(BUFMGR_MB_POOL_SIZE, 5946 tw32(BUFMGR_MB_POOL_SIZE,
5947 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 5947 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5948 } 5948 }
5949 #endif 5949 #endif
5950 5950
5951 if (tp->dev->mtu <= ETH_DATA_LEN) { 5951 if (tp->dev->mtu <= ETH_DATA_LEN) {
5952 tw32(BUFMGR_MB_RDMA_LOW_WATER, 5952 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5953 tp->bufmgr_config.mbuf_read_dma_low_water); 5953 tp->bufmgr_config.mbuf_read_dma_low_water);
5954 tw32(BUFMGR_MB_MACRX_LOW_WATER, 5954 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5955 tp->bufmgr_config.mbuf_mac_rx_low_water); 5955 tp->bufmgr_config.mbuf_mac_rx_low_water);
5956 tw32(BUFMGR_MB_HIGH_WATER, 5956 tw32(BUFMGR_MB_HIGH_WATER,
5957 tp->bufmgr_config.mbuf_high_water); 5957 tp->bufmgr_config.mbuf_high_water);
5958 } else { 5958 } else {
5959 tw32(BUFMGR_MB_RDMA_LOW_WATER, 5959 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5960 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); 5960 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5961 tw32(BUFMGR_MB_MACRX_LOW_WATER, 5961 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5962 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); 5962 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5963 tw32(BUFMGR_MB_HIGH_WATER, 5963 tw32(BUFMGR_MB_HIGH_WATER,
5964 tp->bufmgr_config.mbuf_high_water_jumbo); 5964 tp->bufmgr_config.mbuf_high_water_jumbo);
5965 } 5965 }
5966 tw32(BUFMGR_DMA_LOW_WATER, 5966 tw32(BUFMGR_DMA_LOW_WATER,
5967 tp->bufmgr_config.dma_low_water); 5967 tp->bufmgr_config.dma_low_water);
5968 tw32(BUFMGR_DMA_HIGH_WATER, 5968 tw32(BUFMGR_DMA_HIGH_WATER,
5969 tp->bufmgr_config.dma_high_water); 5969 tp->bufmgr_config.dma_high_water);
5970 5970
5971 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); 5971 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5972 for (i = 0; i < 2000; i++) { 5972 for (i = 0; i < 2000; i++) {
5973 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 5973 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5974 break; 5974 break;
5975 udelay(10); 5975 udelay(10);
5976 } 5976 }
5977 if (i >= 2000) { 5977 if (i >= 2000) {
5978 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n", 5978 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5979 tp->dev->name); 5979 tp->dev->name);
5980 return -ENODEV; 5980 return -ENODEV;
5981 } 5981 }
5982 5982
5983 /* Setup replenish threshold. */ 5983 /* Setup replenish threshold. */
5984 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8); 5984 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5985 5985
5986 /* Initialize TG3_BDINFO's at: 5986 /* Initialize TG3_BDINFO's at:
5987 * RCVDBDI_STD_BD: standard eth size rx ring 5987 * RCVDBDI_STD_BD: standard eth size rx ring
5988 * RCVDBDI_JUMBO_BD: jumbo frame rx ring 5988 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5989 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) 5989 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5990 * 5990 *
5991 * like so: 5991 * like so:
5992 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring 5992 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5993 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | 5993 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5994 * ring attribute flags 5994 * ring attribute flags
5995 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM 5995 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5996 * 5996 *
5997 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. 5997 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5998 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. 5998 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5999 * 5999 *
6000 * The size of each ring is fixed in the firmware, but the location is 6000 * The size of each ring is fixed in the firmware, but the location is
6001 * configurable. 6001 * configurable.
6002 */ 6002 */
6003 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 6003 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6004 ((u64) tp->rx_std_mapping >> 32)); 6004 ((u64) tp->rx_std_mapping >> 32));
6005 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 6005 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6006 ((u64) tp->rx_std_mapping & 0xffffffff)); 6006 ((u64) tp->rx_std_mapping & 0xffffffff));
6007 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 6007 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6008 NIC_SRAM_RX_BUFFER_DESC); 6008 NIC_SRAM_RX_BUFFER_DESC);
6009 6009
6010 /* Don't even try to program the JUMBO/MINI buffer descriptor 6010 /* Don't even try to program the JUMBO/MINI buffer descriptor
6011 * configs on 5705. 6011 * configs on 5705.
6012 */ 6012 */
6013 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 6013 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6014 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, 6014 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6015 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT); 6015 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6016 } else { 6016 } else {
6017 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, 6017 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6018 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); 6018 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6019 6019
6020 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, 6020 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6021 BDINFO_FLAGS_DISABLED); 6021 BDINFO_FLAGS_DISABLED);
6022 6022
6023 /* Setup replenish threshold. */ 6023 /* Setup replenish threshold. */
6024 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); 6024 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6025 6025
6026 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 6026 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6027 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 6027 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6028 ((u64) tp->rx_jumbo_mapping >> 32)); 6028 ((u64) tp->rx_jumbo_mapping >> 32));
6029 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 6029 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6030 ((u64) tp->rx_jumbo_mapping & 0xffffffff)); 6030 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6031 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 6031 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6032 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT); 6032 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6033 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 6033 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6034 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 6034 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6035 } else { 6035 } else {
6036 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 6036 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6037 BDINFO_FLAGS_DISABLED); 6037 BDINFO_FLAGS_DISABLED);
6038 } 6038 }
6039 6039
6040 } 6040 }
6041 6041
6042 /* There is only one send ring on 5705/5750, no need to explicitly 6042 /* There is only one send ring on 5705/5750, no need to explicitly
6043 * disable the others. 6043 * disable the others.
6044 */ 6044 */
6045 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 6045 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6046 /* Clear out send RCB ring in SRAM. */ 6046 /* Clear out send RCB ring in SRAM. */
6047 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE) 6047 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6048 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, 6048 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6049 BDINFO_FLAGS_DISABLED); 6049 BDINFO_FLAGS_DISABLED);
6050 } 6050 }
6051 6051
6052 tp->tx_prod = 0; 6052 tp->tx_prod = 0;
6053 tp->tx_cons = 0; 6053 tp->tx_cons = 0;
6054 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); 6054 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6055 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0); 6055 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6056 6056
6057 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, 6057 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6058 tp->tx_desc_mapping, 6058 tp->tx_desc_mapping,
6059 (TG3_TX_RING_SIZE << 6059 (TG3_TX_RING_SIZE <<
6060 BDINFO_FLAGS_MAXLEN_SHIFT), 6060 BDINFO_FLAGS_MAXLEN_SHIFT),
6061 NIC_SRAM_TX_BUFFER_DESC); 6061 NIC_SRAM_TX_BUFFER_DESC);
6062 6062
6063 /* There is only one receive return ring on 5705/5750, no need 6063 /* There is only one receive return ring on 5705/5750, no need
6064 * to explicitly disable the others. 6064 * to explicitly disable the others.
6065 */ 6065 */
6066 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 6066 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6067 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; 6067 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6068 i += TG3_BDINFO_SIZE) { 6068 i += TG3_BDINFO_SIZE) {
6069 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS, 6069 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6070 BDINFO_FLAGS_DISABLED); 6070 BDINFO_FLAGS_DISABLED);
6071 } 6071 }
6072 } 6072 }
6073 6073
6074 tp->rx_rcb_ptr = 0; 6074 tp->rx_rcb_ptr = 0;
6075 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0); 6075 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6076 6076
6077 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB, 6077 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6078 tp->rx_rcb_mapping, 6078 tp->rx_rcb_mapping,
6079 (TG3_RX_RCB_RING_SIZE(tp) << 6079 (TG3_RX_RCB_RING_SIZE(tp) <<
6080 BDINFO_FLAGS_MAXLEN_SHIFT), 6080 BDINFO_FLAGS_MAXLEN_SHIFT),
6081 0); 6081 0);
6082 6082
6083 tp->rx_std_ptr = tp->rx_pending; 6083 tp->rx_std_ptr = tp->rx_pending;
6084 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 6084 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6085 tp->rx_std_ptr); 6085 tp->rx_std_ptr);
6086 6086
6087 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 6087 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6088 tp->rx_jumbo_pending : 0; 6088 tp->rx_jumbo_pending : 0;
6089 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 6089 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6090 tp->rx_jumbo_ptr); 6090 tp->rx_jumbo_ptr);
6091 6091
6092 /* Initialize MAC address and backoff seed. */ 6092 /* Initialize MAC address and backoff seed. */
6093 __tg3_set_mac_addr(tp); 6093 __tg3_set_mac_addr(tp);
6094 6094
6095 /* MTU + ethernet header + FCS + optional VLAN tag */ 6095 /* MTU + ethernet header + FCS + optional VLAN tag */
6096 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8); 6096 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6097 6097
6098 /* The slot time is changed by tg3_setup_phy if we 6098 /* The slot time is changed by tg3_setup_phy if we
6099 * run at gigabit with half duplex. 6099 * run at gigabit with half duplex.
6100 */ 6100 */
6101 tw32(MAC_TX_LENGTHS, 6101 tw32(MAC_TX_LENGTHS,
6102 (2 << TX_LENGTHS_IPG_CRS_SHIFT) | 6102 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6103 (6 << TX_LENGTHS_IPG_SHIFT) | 6103 (6 << TX_LENGTHS_IPG_SHIFT) |
6104 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 6104 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6105 6105
6106 /* Receive rules. */ 6106 /* Receive rules. */
6107 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); 6107 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6108 tw32(RCVLPC_CONFIG, 0x0181); 6108 tw32(RCVLPC_CONFIG, 0x0181);
6109 6109
6110 /* Calculate RDMAC_MODE setting early, we need it to determine 6110 /* Calculate RDMAC_MODE setting early, we need it to determine
6111 * the RCVLPC_STATE_ENABLE mask. 6111 * the RCVLPC_STATE_ENABLE mask.
6112 */ 6112 */
6113 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | 6113 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6114 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | 6114 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6115 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | 6115 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6116 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 6116 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6117 RDMAC_MODE_LNGREAD_ENAB); 6117 RDMAC_MODE_LNGREAD_ENAB);
6118 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) 6118 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6119 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE; 6119 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6120 6120
6121 /* If statement applies to 5705 and 5750 PCI devices only */ 6121 /* If statement applies to 5705 and 5750 PCI devices only */
6122 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 6122 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6123 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || 6123 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6124 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { 6124 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6125 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && 6125 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6126 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || 6126 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6127 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { 6127 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6128 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; 6128 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6129 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 6129 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6130 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { 6130 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6131 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 6131 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6132 } 6132 }
6133 } 6133 }
6134 6134
6135 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6135 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6136 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 6136 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6137 6137
6138 #if TG3_TSO_SUPPORT != 0 6138 #if TG3_TSO_SUPPORT != 0
6139 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6139 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6140 rdmac_mode |= (1 << 27); 6140 rdmac_mode |= (1 << 27);
6141 #endif 6141 #endif
6142 6142
6143 /* Receive/send statistics. */ 6143 /* Receive/send statistics. */
6144 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && 6144 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6145 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 6145 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6146 val = tr32(RCVLPC_STATS_ENABLE); 6146 val = tr32(RCVLPC_STATS_ENABLE);
6147 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; 6147 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6148 tw32(RCVLPC_STATS_ENABLE, val); 6148 tw32(RCVLPC_STATS_ENABLE, val);
6149 } else { 6149 } else {
6150 tw32(RCVLPC_STATS_ENABLE, 0xffffff); 6150 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6151 } 6151 }
6152 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); 6152 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6153 tw32(SNDDATAI_STATSENAB, 0xffffff); 6153 tw32(SNDDATAI_STATSENAB, 0xffffff);
6154 tw32(SNDDATAI_STATSCTRL, 6154 tw32(SNDDATAI_STATSCTRL,
6155 (SNDDATAI_SCTRL_ENABLE | 6155 (SNDDATAI_SCTRL_ENABLE |
6156 SNDDATAI_SCTRL_FASTUPD)); 6156 SNDDATAI_SCTRL_FASTUPD));
6157 6157
6158 /* Setup host coalescing engine. */ 6158 /* Setup host coalescing engine. */
6159 tw32(HOSTCC_MODE, 0); 6159 tw32(HOSTCC_MODE, 0);
6160 for (i = 0; i < 2000; i++) { 6160 for (i = 0; i < 2000; i++) {
6161 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) 6161 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6162 break; 6162 break;
6163 udelay(10); 6163 udelay(10);
6164 } 6164 }
6165 6165
6166 __tg3_set_coalesce(tp, &tp->coal); 6166 __tg3_set_coalesce(tp, &tp->coal);
6167 6167
6168 /* set status block DMA address */ 6168 /* set status block DMA address */
6169 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 6169 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6170 ((u64) tp->status_mapping >> 32)); 6170 ((u64) tp->status_mapping >> 32));
6171 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 6171 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6172 ((u64) tp->status_mapping & 0xffffffff)); 6172 ((u64) tp->status_mapping & 0xffffffff));
6173 6173
6174 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 6174 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6175 /* Status/statistics block address. See tg3_timer, 6175 /* Status/statistics block address. See tg3_timer,
6176 * the tg3_periodic_fetch_stats call there, and 6176 * the tg3_periodic_fetch_stats call there, and
6177 * tg3_get_stats to see how this works for 5705/5750 chips. 6177 * tg3_get_stats to see how this works for 5705/5750 chips.
6178 */ 6178 */
6179 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 6179 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6180 ((u64) tp->stats_mapping >> 32)); 6180 ((u64) tp->stats_mapping >> 32));
6181 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, 6181 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6182 ((u64) tp->stats_mapping & 0xffffffff)); 6182 ((u64) tp->stats_mapping & 0xffffffff));
6183 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); 6183 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6184 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); 6184 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6185 } 6185 }
6186 6186
6187 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); 6187 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6188 6188
6189 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); 6189 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6190 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); 6190 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6191 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 6191 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6192 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); 6192 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6193 6193
6194 /* Clear statistics/status block in chip, and status block in ram. */ 6194 /* Clear statistics/status block in chip, and status block in ram. */
6195 for (i = NIC_SRAM_STATS_BLK; 6195 for (i = NIC_SRAM_STATS_BLK;
6196 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; 6196 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6197 i += sizeof(u32)) { 6197 i += sizeof(u32)) {
6198 tg3_write_mem(tp, i, 0); 6198 tg3_write_mem(tp, i, 0);
6199 udelay(40); 6199 udelay(40);
6200 } 6200 }
6201 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); 6201 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6202 6202
6203 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 6203 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6204 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; 6204 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6205 /* reset to prevent losing 1st rx packet intermittently */ 6205 /* reset to prevent losing 1st rx packet intermittently */
6206 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 6206 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6207 udelay(10); 6207 udelay(10);
6208 } 6208 }
6209 6209
6210 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 6210 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6211 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 6211 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6212 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 6212 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6213 udelay(40); 6213 udelay(40);
6214 6214
6215 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). 6215 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6216 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the 6216 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6217 * register to preserve the GPIO settings for LOMs. The GPIOs, 6217 * register to preserve the GPIO settings for LOMs. The GPIOs,
6218 * whether used as inputs or outputs, are set by boot code after 6218 * whether used as inputs or outputs, are set by boot code after
6219 * reset. 6219 * reset.
6220 */ 6220 */
6221 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { 6221 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6222 u32 gpio_mask; 6222 u32 gpio_mask;
6223 6223
6224 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 | 6224 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6225 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2; 6225 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6226 6226
6227 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 6227 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6228 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | 6228 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6229 GRC_LCLCTRL_GPIO_OUTPUT3; 6229 GRC_LCLCTRL_GPIO_OUTPUT3;
6230 6230
6231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 6231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6232 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; 6232 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6233 6233
6234 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; 6234 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6235 6235
6236 /* GPIO1 must be driven high for eeprom write protect */ 6236 /* GPIO1 must be driven high for eeprom write protect */
6237 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 6237 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6238 GRC_LCLCTRL_GPIO_OUTPUT1); 6238 GRC_LCLCTRL_GPIO_OUTPUT1);
6239 } 6239 }
6240 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 6240 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6241 udelay(100); 6241 udelay(100);
6242 6242
6243 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); 6243 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6244 tp->last_tag = 0; 6244 tp->last_tag = 0;
6245 6245
6246 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 6246 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6247 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); 6247 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6248 udelay(40); 6248 udelay(40);
6249 } 6249 }
6250 6250
6251 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | 6251 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6252 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | 6252 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6253 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | 6253 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6254 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | 6254 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6255 WDMAC_MODE_LNGREAD_ENAB); 6255 WDMAC_MODE_LNGREAD_ENAB);
6256 6256
6257 /* If statement applies to 5705 and 5750 PCI devices only */ 6257 /* If statement applies to 5705 and 5750 PCI devices only */
6258 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 6258 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6259 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || 6259 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { 6260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6261 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) && 6261 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6262 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || 6262 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6263 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { 6263 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6264 /* nothing */ 6264 /* nothing */
6265 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && 6265 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6266 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && 6266 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6267 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { 6267 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6268 val |= WDMAC_MODE_RX_ACCEL; 6268 val |= WDMAC_MODE_RX_ACCEL;
6269 } 6269 }
6270 } 6270 }
6271 6271
6272 /* Enable host coalescing bug fix */ 6272 /* Enable host coalescing bug fix */
6273 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || 6273 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6274 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) 6274 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6275 val |= (1 << 29); 6275 val |= (1 << 29);
6276 6276
6277 tw32_f(WDMAC_MODE, val); 6277 tw32_f(WDMAC_MODE, val);
6278 udelay(40); 6278 udelay(40);
6279 6279
6280 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { 6280 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6281 val = tr32(TG3PCI_X_CAPS); 6281 val = tr32(TG3PCI_X_CAPS);
6282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { 6282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6283 val &= ~PCIX_CAPS_BURST_MASK; 6283 val &= ~PCIX_CAPS_BURST_MASK;
6284 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); 6284 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6285 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 6285 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6286 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK); 6286 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6287 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); 6287 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6288 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) 6288 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6289 val |= (tp->split_mode_max_reqs << 6289 val |= (tp->split_mode_max_reqs <<
6290 PCIX_CAPS_SPLIT_SHIFT); 6290 PCIX_CAPS_SPLIT_SHIFT);
6291 } 6291 }
6292 tw32(TG3PCI_X_CAPS, val); 6292 tw32(TG3PCI_X_CAPS, val);
6293 } 6293 }
6294 6294
6295 tw32_f(RDMAC_MODE, rdmac_mode); 6295 tw32_f(RDMAC_MODE, rdmac_mode);
6296 udelay(40); 6296 udelay(40);
6297 6297
6298 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); 6298 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6299 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 6299 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6300 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); 6300 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6301 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); 6301 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6302 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 6302 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6303 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 6303 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6304 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); 6304 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6305 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 6305 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6306 #if TG3_TSO_SUPPORT != 0 6306 #if TG3_TSO_SUPPORT != 0
6307 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6307 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6308 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 6308 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6309 #endif 6309 #endif
6310 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE); 6310 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6311 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 6311 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6312 6312
6313 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { 6313 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6314 err = tg3_load_5701_a0_firmware_fix(tp); 6314 err = tg3_load_5701_a0_firmware_fix(tp);
6315 if (err) 6315 if (err)
6316 return err; 6316 return err;
6317 } 6317 }
6318 6318
6319 #if TG3_TSO_SUPPORT != 0 6319 #if TG3_TSO_SUPPORT != 0
6320 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 6320 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6321 err = tg3_load_tso_firmware(tp); 6321 err = tg3_load_tso_firmware(tp);
6322 if (err) 6322 if (err)
6323 return err; 6323 return err;
6324 } 6324 }
6325 #endif 6325 #endif
6326 6326
6327 tp->tx_mode = TX_MODE_ENABLE; 6327 tp->tx_mode = TX_MODE_ENABLE;
6328 tw32_f(MAC_TX_MODE, tp->tx_mode); 6328 tw32_f(MAC_TX_MODE, tp->tx_mode);
6329 udelay(100); 6329 udelay(100);
6330 6330
6331 tp->rx_mode = RX_MODE_ENABLE; 6331 tp->rx_mode = RX_MODE_ENABLE;
6332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 6332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6333 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 6333 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6334 6334
6335 tw32_f(MAC_RX_MODE, tp->rx_mode); 6335 tw32_f(MAC_RX_MODE, tp->rx_mode);
6336 udelay(10); 6336 udelay(10);
6337 6337
6338 if (tp->link_config.phy_is_low_power) { 6338 if (tp->link_config.phy_is_low_power) {
6339 tp->link_config.phy_is_low_power = 0; 6339 tp->link_config.phy_is_low_power = 0;
6340 tp->link_config.speed = tp->link_config.orig_speed; 6340 tp->link_config.speed = tp->link_config.orig_speed;
6341 tp->link_config.duplex = tp->link_config.orig_duplex; 6341 tp->link_config.duplex = tp->link_config.orig_duplex;
6342 tp->link_config.autoneg = tp->link_config.orig_autoneg; 6342 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6343 } 6343 }
6344 6344
6345 tp->mi_mode = MAC_MI_MODE_BASE; 6345 tp->mi_mode = MAC_MI_MODE_BASE;
6346 tw32_f(MAC_MI_MODE, tp->mi_mode); 6346 tw32_f(MAC_MI_MODE, tp->mi_mode);
6347 udelay(80); 6347 udelay(80);
6348 6348
6349 tw32(MAC_LED_CTRL, tp->led_ctrl); 6349 tw32(MAC_LED_CTRL, tp->led_ctrl);
6350 6350
6351 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 6351 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6352 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 6352 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6353 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 6353 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6354 udelay(10); 6354 udelay(10);
6355 } 6355 }
6356 tw32_f(MAC_RX_MODE, tp->rx_mode); 6356 tw32_f(MAC_RX_MODE, tp->rx_mode);
6357 udelay(10); 6357 udelay(10);
6358 6358
6359 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 6359 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6360 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && 6360 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6361 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { 6361 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6362 /* Set drive transmission level to 1.2V */ 6362 /* Set drive transmission level to 1.2V */
6363 /* only if the signal pre-emphasis bit is not set */ 6363 /* only if the signal pre-emphasis bit is not set */
6364 val = tr32(MAC_SERDES_CFG); 6364 val = tr32(MAC_SERDES_CFG);
6365 val &= 0xfffff000; 6365 val &= 0xfffff000;
6366 val |= 0x880; 6366 val |= 0x880;
6367 tw32(MAC_SERDES_CFG, val); 6367 tw32(MAC_SERDES_CFG, val);
6368 } 6368 }
6369 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) 6369 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6370 tw32(MAC_SERDES_CFG, 0x616000); 6370 tw32(MAC_SERDES_CFG, 0x616000);
6371 } 6371 }
6372 6372
6373 /* Prevent chip from dropping frames when flow control 6373 /* Prevent chip from dropping frames when flow control
6374 * is enabled. 6374 * is enabled.
6375 */ 6375 */
6376 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); 6376 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6377 6377
6378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 6378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6379 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 6379 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6380 /* Use hardware link auto-negotiation */ 6380 /* Use hardware link auto-negotiation */
6381 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; 6381 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6382 } 6382 }
6383 6383
6384 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && 6384 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6385 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { 6385 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6386 u32 tmp; 6386 u32 tmp;
6387 6387
6388 tmp = tr32(SERDES_RX_CTRL); 6388 tmp = tr32(SERDES_RX_CTRL);
6389 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); 6389 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6390 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; 6390 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6391 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; 6391 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6392 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 6392 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6393 } 6393 }
6394 6394
6395 err = tg3_setup_phy(tp, reset_phy); 6395 err = tg3_setup_phy(tp, reset_phy);
6396 if (err) 6396 if (err)
6397 return err; 6397 return err;
6398 6398
6399 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 6399 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6400 u32 tmp; 6400 u32 tmp;
6401 6401
6402 /* Clear CRC stats. */ 6402 /* Clear CRC stats. */
6403 if (!tg3_readphy(tp, 0x1e, &tmp)) { 6403 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6404 tg3_writephy(tp, 0x1e, tmp | 0x8000); 6404 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6405 tg3_readphy(tp, 0x14, &tmp); 6405 tg3_readphy(tp, 0x14, &tmp);
6406 } 6406 }
6407 } 6407 }
6408 6408
6409 __tg3_set_rx_mode(tp->dev); 6409 __tg3_set_rx_mode(tp->dev);
6410 6410
6411 /* Initialize receive rules. */ 6411 /* Initialize receive rules. */
6412 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); 6412 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6413 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); 6413 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6414 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); 6414 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6415 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 6415 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6416 6416
6417 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 6417 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6418 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 6418 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6419 limit = 8; 6419 limit = 8;
6420 else 6420 else
6421 limit = 16; 6421 limit = 16;
6422 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) 6422 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6423 limit -= 4; 6423 limit -= 4;
6424 switch (limit) { 6424 switch (limit) {
6425 case 16: 6425 case 16:
6426 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); 6426 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6427 case 15: 6427 case 15:
6428 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); 6428 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6429 case 14: 6429 case 14:
6430 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); 6430 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6431 case 13: 6431 case 13:
6432 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); 6432 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6433 case 12: 6433 case 12:
6434 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); 6434 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6435 case 11: 6435 case 11:
6436 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); 6436 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6437 case 10: 6437 case 10:
6438 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); 6438 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6439 case 9: 6439 case 9:
6440 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); 6440 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6441 case 8: 6441 case 8:
6442 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); 6442 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6443 case 7: 6443 case 7:
6444 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); 6444 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6445 case 6: 6445 case 6:
6446 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); 6446 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6447 case 5: 6447 case 5:
6448 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); 6448 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6449 case 4: 6449 case 4:
6450 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ 6450 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6451 case 3: 6451 case 3:
6452 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ 6452 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6453 case 2: 6453 case 2:
6454 case 1: 6454 case 1:
6455 6455
6456 default: 6456 default:
6457 break; 6457 break;
6458 }; 6458 };
6459 6459
6460 tg3_write_sig_post_reset(tp, RESET_KIND_INIT); 6460 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6461 6461
6462 return 0; 6462 return 0;
6463 } 6463 }
6464 6464
6465 /* Called at device open time to get the chip ready for 6465 /* Called at device open time to get the chip ready for
6466 * packet processing. Invoked with tp->lock held. 6466 * packet processing. Invoked with tp->lock held.
6467 */ 6467 */
6468 static int tg3_init_hw(struct tg3 *tp, int reset_phy) 6468 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6469 { 6469 {
6470 int err; 6470 int err;
6471 6471
6472 /* Force the chip into D0. */ 6472 /* Force the chip into D0. */
6473 err = tg3_set_power_state(tp, PCI_D0); 6473 err = tg3_set_power_state(tp, PCI_D0);
6474 if (err) 6474 if (err)
6475 goto out; 6475 goto out;
6476 6476
6477 tg3_switch_clocks(tp); 6477 tg3_switch_clocks(tp);
6478 6478
6479 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 6479 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6480 6480
6481 err = tg3_reset_hw(tp, reset_phy); 6481 err = tg3_reset_hw(tp, reset_phy);
6482 6482
6483 out: 6483 out:
6484 return err; 6484 return err;
6485 } 6485 }
6486 6486
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit software statistic PSTAT (split into ->low / ->high words).
 * If adding the new value wraps the low word (sum became smaller than
 * the addend), carry one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6493 6493
/* Fold the chip's 32-bit TX/RX MAC and receive-list-placement counters
 * into the 64-bit software copies in tp->hw_stats.  Called from the
 * driver timer (see tg3_timer) on 5705-plus chips.  Does nothing while
 * the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters (ring exhaustion / discards). */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6534 6534
/* Driver heartbeat timer, re-armed every tp->timer_offset jiffies.
 * Responsibilities visible below:
 *   - for non-TAGGED-status chips, kick the interrupt/mailbox protocol
 *     and detect a stalled write DMA engine (scheduling a reset);
 *   - once per second (tp->timer_counter ticks), fetch stats and poll
 *     link state for chips that need software link polling;
 *   - once every two seconds (tp->asf_counter ticks), send the ASF
 *     firmware heartbeat.
 * Takes tp->lock only; skips the whole tick while irq_sync is set.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An interrupt-sync is in progress; do nothing this tick but
	 * keep the timer running.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write DMA engine stopped unexpectedly: request a full
		 * chip restart from process context and bail out without
		 * re-arming (reset_task restarts the timer).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for a link or MI event and
			 * re-run PHY setup when one is seen.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Briefly clear the port mode bits before
				 * restoring them, then redo PHY setup.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is alive and
			 * raise the RX CPU event to deliver the message.
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE2);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6636 6636
6637 static int tg3_request_irq(struct tg3 *tp) 6637 static int tg3_request_irq(struct tg3 *tp)
6638 { 6638 {
6639 irqreturn_t (*fn)(int, void *, struct pt_regs *); 6639 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6640 unsigned long flags; 6640 unsigned long flags;
6641 struct net_device *dev = tp->dev; 6641 struct net_device *dev = tp->dev;
6642 6642
6643 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6643 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6644 fn = tg3_msi; 6644 fn = tg3_msi;
6645 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) 6645 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6646 fn = tg3_msi_1shot; 6646 fn = tg3_msi_1shot;
6647 flags = SA_SAMPLE_RANDOM; 6647 flags = SA_SAMPLE_RANDOM;
6648 } else { 6648 } else {
6649 fn = tg3_interrupt; 6649 fn = tg3_interrupt;
6650 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 6650 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6651 fn = tg3_interrupt_tagged; 6651 fn = tg3_interrupt_tagged;
6652 flags = SA_SHIRQ | SA_SAMPLE_RANDOM; 6652 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6653 } 6653 }
6654 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); 6654 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6655 } 6655 }
6656 6656
/* Verify that the device can actually deliver an interrupt.
 *
 * Temporarily swaps in the tg3_test_isr handler, forces an immediate
 * host-coalescing interrupt, and polls the interrupt mailbox for up to
 * ~50ms for evidence that the ISR ran.  The normal handler is restored
 * via tg3_request_irq() before returning.
 *
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV if the
 * device is not running, or the error from re-requesting the irq.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Replace the normal handler with the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to raise an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll the interrupt mailbox; a non-zero value means the test
	 * ISR observed the interrupt.
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6703 6703
6704 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is 6704 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6705 * successfully restored 6705 * successfully restored
6706 */ 6706 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR if set). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-request the irq with the INTx handler selected. */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed, release the irq we just requested. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6764 6764
6765 static int tg3_open(struct net_device *dev) 6765 static int tg3_open(struct net_device *dev)
6766 { 6766 {
6767 struct tg3 *tp = netdev_priv(dev); 6767 struct tg3 *tp = netdev_priv(dev);
6768 int err; 6768 int err;
6769 6769
6770 tg3_full_lock(tp, 0); 6770 tg3_full_lock(tp, 0);
6771 6771
6772 err = tg3_set_power_state(tp, PCI_D0); 6772 err = tg3_set_power_state(tp, PCI_D0);
6773 if (err) 6773 if (err)
6774 return err; 6774 return err;
6775 6775
6776 tg3_disable_ints(tp); 6776 tg3_disable_ints(tp);
6777 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 6777 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6778 6778
6779 tg3_full_unlock(tp); 6779 tg3_full_unlock(tp);
6780 6780
6781 /* The placement of this call is tied 6781 /* The placement of this call is tied
6782 * to the setup and use of Host TX descriptors. 6782 * to the setup and use of Host TX descriptors.
6783 */ 6783 */
6784 err = tg3_alloc_consistent(tp); 6784 err = tg3_alloc_consistent(tp);
6785 if (err) 6785 if (err)
6786 return err; 6786 return err;
6787 6787
6788 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 6788 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6789 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && 6789 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6790 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) && 6790 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6791 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) && 6791 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6792 (tp->pdev_peer == tp->pdev))) { 6792 (tp->pdev_peer == tp->pdev))) {
6793 /* All MSI supporting chips should support tagged 6793 /* All MSI supporting chips should support tagged
6794 * status. Assert that this is the case. 6794 * status. Assert that this is the case.
6795 */ 6795 */
6796 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { 6796 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6797 printk(KERN_WARNING PFX "%s: MSI without TAGGED? " 6797 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6798 "Not using MSI.\n", tp->dev->name); 6798 "Not using MSI.\n", tp->dev->name);
6799 } else if (pci_enable_msi(tp->pdev) == 0) { 6799 } else if (pci_enable_msi(tp->pdev) == 0) {
6800 u32 msi_mode; 6800 u32 msi_mode;
6801 6801
6802 msi_mode = tr32(MSGINT_MODE); 6802 msi_mode = tr32(MSGINT_MODE);
6803 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 6803 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6804 tp->tg3_flags2 |= TG3_FLG2_USING_MSI; 6804 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6805 } 6805 }
6806 } 6806 }
6807 err = tg3_request_irq(tp); 6807 err = tg3_request_irq(tp);
6808 6808
6809 if (err) { 6809 if (err) {
6810 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6810 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6811 pci_disable_msi(tp->pdev); 6811 pci_disable_msi(tp->pdev);
6812 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 6812 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6813 } 6813 }
6814 tg3_free_consistent(tp); 6814 tg3_free_consistent(tp);
6815 return err; 6815 return err;
6816 } 6816 }
6817 6817
6818 tg3_full_lock(tp, 0); 6818 tg3_full_lock(tp, 0);
6819 6819
6820 err = tg3_init_hw(tp, 1); 6820 err = tg3_init_hw(tp, 1);
6821 if (err) { 6821 if (err) {
6822 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6822 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6823 tg3_free_rings(tp); 6823 tg3_free_rings(tp);
6824 } else { 6824 } else {
6825 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 6825 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6826 tp->timer_offset = HZ; 6826 tp->timer_offset = HZ;
6827 else 6827 else
6828 tp->timer_offset = HZ / 10; 6828 tp->timer_offset = HZ / 10;
6829 6829
6830 BUG_ON(tp->timer_offset > HZ); 6830 BUG_ON(tp->timer_offset > HZ);
6831 tp->timer_counter = tp->timer_multiplier = 6831 tp->timer_counter = tp->timer_multiplier =
6832 (HZ / tp->timer_offset); 6832 (HZ / tp->timer_offset);
6833 tp->asf_counter = tp->asf_multiplier = 6833 tp->asf_counter = tp->asf_multiplier =
6834 ((HZ / tp->timer_offset) * 2); 6834 ((HZ / tp->timer_offset) * 2);
6835 6835
6836 init_timer(&tp->timer); 6836 init_timer(&tp->timer);
6837 tp->timer.expires = jiffies + tp->timer_offset; 6837 tp->timer.expires = jiffies + tp->timer_offset;
6838 tp->timer.data = (unsigned long) tp; 6838 tp->timer.data = (unsigned long) tp;
6839 tp->timer.function = tg3_timer; 6839 tp->timer.function = tg3_timer;
6840 } 6840 }
6841 6841
6842 tg3_full_unlock(tp); 6842 tg3_full_unlock(tp);
6843 6843
6844 if (err) { 6844 if (err) {
6845 free_irq(tp->pdev->irq, dev); 6845 free_irq(tp->pdev->irq, dev);
6846 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6846 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6847 pci_disable_msi(tp->pdev); 6847 pci_disable_msi(tp->pdev);
6848 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 6848 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6849 } 6849 }
6850 tg3_free_consistent(tp); 6850 tg3_free_consistent(tp);
6851 return err; 6851 return err;
6852 } 6852 }
6853 6853
6854 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6854 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6855 err = tg3_test_msi(tp); 6855 err = tg3_test_msi(tp);
6856 6856
6857 if (err) { 6857 if (err) {
6858 tg3_full_lock(tp, 0); 6858 tg3_full_lock(tp, 0);
6859 6859
6860 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6860 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6861 pci_disable_msi(tp->pdev); 6861 pci_disable_msi(tp->pdev);
6862 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 6862 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6863 } 6863 }
6864 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6864 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6865 tg3_free_rings(tp); 6865 tg3_free_rings(tp);
6866 tg3_free_consistent(tp); 6866 tg3_free_consistent(tp);
6867 6867
6868 tg3_full_unlock(tp); 6868 tg3_full_unlock(tp);
6869 6869
6870 return err; 6870 return err;
6871 } 6871 }
6872 6872
6873 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 6873 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6874 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) { 6874 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6875 u32 val = tr32(0x7c04); 6875 u32 val = tr32(0x7c04);
6876 6876
6877 tw32(0x7c04, val | (1 << 29)); 6877 tw32(0x7c04, val | (1 << 29));
6878 } 6878 }
6879 } 6879 }
6880 } 6880 }
6881 6881
6882 tg3_full_lock(tp, 0); 6882 tg3_full_lock(tp, 0);
6883 6883
6884 add_timer(&tp->timer); 6884 add_timer(&tp->timer);
6885 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 6885 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6886 tg3_enable_ints(tp); 6886 tg3_enable_ints(tp);
6887 6887
6888 tg3_full_unlock(tp); 6888 tg3_full_unlock(tp);
6889 6889
6890 netif_start_queue(dev); 6890 netif_start_queue(dev);
6891 6891
6892 return 0; 6892 return 0;
6893 } 6893 }
6894 6894
6895 #if 0 6895 #if 0
6896 /*static*/ void tg3_dump_state(struct tg3 *tp) 6896 /*static*/ void tg3_dump_state(struct tg3 *tp)
6897 { 6897 {
6898 u32 val32, val32_2, val32_3, val32_4, val32_5; 6898 u32 val32, val32_2, val32_3, val32_4, val32_5;
6899 u16 val16; 6899 u16 val16;
6900 int i; 6900 int i;
6901 6901
6902 pci_read_config_word(tp->pdev, PCI_STATUS, &val16); 6902 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6903 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); 6903 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6904 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n", 6904 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6905 val16, val32); 6905 val16, val32);
6906 6906
6907 /* MAC block */ 6907 /* MAC block */
6908 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n", 6908 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6909 tr32(MAC_MODE), tr32(MAC_STATUS)); 6909 tr32(MAC_MODE), tr32(MAC_STATUS));
6910 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n", 6910 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6911 tr32(MAC_EVENT), tr32(MAC_LED_CTRL)); 6911 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6912 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n", 6912 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6913 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS)); 6913 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6914 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n", 6914 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6915 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS)); 6915 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6916 6916
6917 /* Send data initiator control block */ 6917 /* Send data initiator control block */
6918 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n", 6918 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6919 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS)); 6919 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6920 printk(" SNDDATAI_STATSCTRL[%08x]\n", 6920 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6921 tr32(SNDDATAI_STATSCTRL)); 6921 tr32(SNDDATAI_STATSCTRL));
6922 6922
6923 /* Send data completion control block */ 6923 /* Send data completion control block */
6924 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE)); 6924 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6925 6925
6926 /* Send BD ring selector block */ 6926 /* Send BD ring selector block */
6927 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n", 6927 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6928 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS)); 6928 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6929 6929
6930 /* Send BD initiator control block */ 6930 /* Send BD initiator control block */
6931 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n", 6931 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6932 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS)); 6932 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6933 6933
6934 /* Send BD completion control block */ 6934 /* Send BD completion control block */
6935 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE)); 6935 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6936 6936
6937 /* Receive list placement control block */ 6937 /* Receive list placement control block */
6938 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n", 6938 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6939 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS)); 6939 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6940 printk(" RCVLPC_STATSCTRL[%08x]\n", 6940 printk(" RCVLPC_STATSCTRL[%08x]\n",
6941 tr32(RCVLPC_STATSCTRL)); 6941 tr32(RCVLPC_STATSCTRL));
6942 6942
6943 /* Receive data and receive BD initiator control block */ 6943 /* Receive data and receive BD initiator control block */
6944 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n", 6944 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6945 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS)); 6945 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6946 6946
6947 /* Receive data completion control block */ 6947 /* Receive data completion control block */
6948 printk("DEBUG: RCVDCC_MODE[%08x]\n", 6948 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6949 tr32(RCVDCC_MODE)); 6949 tr32(RCVDCC_MODE));
6950 6950
6951 /* Receive BD initiator control block */ 6951 /* Receive BD initiator control block */
6952 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n", 6952 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6953 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS)); 6953 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6954 6954
6955 /* Receive BD completion control block */ 6955 /* Receive BD completion control block */
6956 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n", 6956 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6957 tr32(RCVCC_MODE), tr32(RCVCC_STATUS)); 6957 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6958 6958
6959 /* Receive list selector control block */ 6959 /* Receive list selector control block */
6960 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n", 6960 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6961 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS)); 6961 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6962 6962
6963 /* Mbuf cluster free block */ 6963 /* Mbuf cluster free block */
6964 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n", 6964 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6965 tr32(MBFREE_MODE), tr32(MBFREE_STATUS)); 6965 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6966 6966
6967 /* Host coalescing control block */ 6967 /* Host coalescing control block */
6968 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n", 6968 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6969 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS)); 6969 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6970 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n", 6970 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6971 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), 6971 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6972 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); 6972 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6973 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n", 6973 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6974 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), 6974 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6975 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); 6975 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6976 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n", 6976 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6977 tr32(HOSTCC_STATS_BLK_NIC_ADDR)); 6977 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6978 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n", 6978 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6979 tr32(HOSTCC_STATUS_BLK_NIC_ADDR)); 6979 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6980 6980
6981 /* Memory arbiter control block */ 6981 /* Memory arbiter control block */
6982 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n", 6982 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6983 tr32(MEMARB_MODE), tr32(MEMARB_STATUS)); 6983 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6984 6984
6985 /* Buffer manager control block */ 6985 /* Buffer manager control block */
6986 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n", 6986 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6987 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS)); 6987 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6988 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n", 6988 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6989 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE)); 6989 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6990 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] " 6990 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6991 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n", 6991 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6992 tr32(BUFMGR_DMA_DESC_POOL_ADDR), 6992 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6993 tr32(BUFMGR_DMA_DESC_POOL_SIZE)); 6993 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6994 6994
6995 /* Read DMA control block */ 6995 /* Read DMA control block */
6996 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n", 6996 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6997 tr32(RDMAC_MODE), tr32(RDMAC_STATUS)); 6997 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6998 6998
6999 /* Write DMA control block */ 6999 /* Write DMA control block */
7000 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n", 7000 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7001 tr32(WDMAC_MODE), tr32(WDMAC_STATUS)); 7001 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7002 7002
7003 /* DMA completion block */ 7003 /* DMA completion block */
7004 printk("DEBUG: DMAC_MODE[%08x]\n", 7004 printk("DEBUG: DMAC_MODE[%08x]\n",
7005 tr32(DMAC_MODE)); 7005 tr32(DMAC_MODE));
7006 7006
7007 /* GRC block */ 7007 /* GRC block */
7008 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n", 7008 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7009 tr32(GRC_MODE), tr32(GRC_MISC_CFG)); 7009 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7010 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n", 7010 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7011 tr32(GRC_LOCAL_CTRL)); 7011 tr32(GRC_LOCAL_CTRL));
7012 7012
7013 /* TG3_BDINFOs */ 7013 /* TG3_BDINFOs */
7014 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n", 7014 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7015 tr32(RCVDBDI_JUMBO_BD + 0x0), 7015 tr32(RCVDBDI_JUMBO_BD + 0x0),
7016 tr32(RCVDBDI_JUMBO_BD + 0x4), 7016 tr32(RCVDBDI_JUMBO_BD + 0x4),
7017 tr32(RCVDBDI_JUMBO_BD + 0x8), 7017 tr32(RCVDBDI_JUMBO_BD + 0x8),
7018 tr32(RCVDBDI_JUMBO_BD + 0xc)); 7018 tr32(RCVDBDI_JUMBO_BD + 0xc));
7019 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n", 7019 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7020 tr32(RCVDBDI_STD_BD + 0x0), 7020 tr32(RCVDBDI_STD_BD + 0x0),
7021 tr32(RCVDBDI_STD_BD + 0x4), 7021 tr32(RCVDBDI_STD_BD + 0x4),
7022 tr32(RCVDBDI_STD_BD + 0x8), 7022 tr32(RCVDBDI_STD_BD + 0x8),
7023 tr32(RCVDBDI_STD_BD + 0xc)); 7023 tr32(RCVDBDI_STD_BD + 0xc));
7024 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n", 7024 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7025 tr32(RCVDBDI_MINI_BD + 0x0), 7025 tr32(RCVDBDI_MINI_BD + 0x0),
7026 tr32(RCVDBDI_MINI_BD + 0x4), 7026 tr32(RCVDBDI_MINI_BD + 0x4),
7027 tr32(RCVDBDI_MINI_BD + 0x8), 7027 tr32(RCVDBDI_MINI_BD + 0x8),
7028 tr32(RCVDBDI_MINI_BD + 0xc)); 7028 tr32(RCVDBDI_MINI_BD + 0xc));
7029 7029
7030 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32); 7030 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7031 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2); 7031 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7032 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3); 7032 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7033 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4); 7033 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7034 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n", 7034 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7035 val32, val32_2, val32_3, val32_4); 7035 val32, val32_2, val32_3, val32_4);
7036 7036
7037 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32); 7037 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7038 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2); 7038 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7039 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3); 7039 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7040 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4); 7040 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7041 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n", 7041 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7042 val32, val32_2, val32_3, val32_4); 7042 val32, val32_2, val32_3, val32_4);
7043 7043
7044 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32); 7044 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7045 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2); 7045 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7046 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3); 7046 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7047 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4); 7047 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7048 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5); 7048 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7049 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n", 7049 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7050 val32, val32_2, val32_3, val32_4, val32_5); 7050 val32, val32_2, val32_3, val32_4, val32_5);
7051 7051
7052 /* SW status block */ 7052 /* SW status block */
7053 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", 7053 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7054 tp->hw_status->status, 7054 tp->hw_status->status,
7055 tp->hw_status->status_tag, 7055 tp->hw_status->status_tag,
7056 tp->hw_status->rx_jumbo_consumer, 7056 tp->hw_status->rx_jumbo_consumer,
7057 tp->hw_status->rx_consumer, 7057 tp->hw_status->rx_consumer,
7058 tp->hw_status->rx_mini_consumer, 7058 tp->hw_status->rx_mini_consumer,
7059 tp->hw_status->idx[0].rx_producer, 7059 tp->hw_status->idx[0].rx_producer,
7060 tp->hw_status->idx[0].tx_consumer); 7060 tp->hw_status->idx[0].tx_consumer);
7061 7061
7062 /* SW statistics block */ 7062 /* SW statistics block */
7063 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n", 7063 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7064 ((u32 *)tp->hw_stats)[0], 7064 ((u32 *)tp->hw_stats)[0],
7065 ((u32 *)tp->hw_stats)[1], 7065 ((u32 *)tp->hw_stats)[1],
7066 ((u32 *)tp->hw_stats)[2], 7066 ((u32 *)tp->hw_stats)[2],
7067 ((u32 *)tp->hw_stats)[3]); 7067 ((u32 *)tp->hw_stats)[3]);
7068 7068
7069 /* Mailboxes */ 7069 /* Mailboxes */
7070 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", 7070 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7071 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), 7071 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7072 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), 7072 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7073 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), 7073 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7074 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); 7074 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7075 7075
7076 /* NIC side send descriptors. */ 7076 /* NIC side send descriptors. */
7077 for (i = 0; i < 6; i++) { 7077 for (i = 0; i < 6; i++) {
7078 unsigned long txd; 7078 unsigned long txd;
7079 7079
7080 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC 7080 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7081 + (i * sizeof(struct tg3_tx_buffer_desc)); 7081 + (i * sizeof(struct tg3_tx_buffer_desc));
7082 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n", 7082 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7083 i, 7083 i,
7084 readl(txd + 0x0), readl(txd + 0x4), 7084 readl(txd + 0x0), readl(txd + 0x4),
7085 readl(txd + 0x8), readl(txd + 0xc)); 7085 readl(txd + 0x8), readl(txd + 0xc));
7086 } 7086 }
7087 7087
7088 /* NIC side RX descriptors. */ 7088 /* NIC side RX descriptors. */
7089 for (i = 0; i < 6; i++) { 7089 for (i = 0; i < 6; i++) {
7090 unsigned long rxd; 7090 unsigned long rxd;
7091 7091
7092 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC 7092 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7093 + (i * sizeof(struct tg3_rx_buffer_desc)); 7093 + (i * sizeof(struct tg3_rx_buffer_desc));
7094 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n", 7094 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7095 i, 7095 i,
7096 readl(rxd + 0x0), readl(rxd + 0x4), 7096 readl(rxd + 0x0), readl(rxd + 0x4),
7097 readl(rxd + 0x8), readl(rxd + 0xc)); 7097 readl(rxd + 0x8), readl(rxd + 0xc));
7098 rxd += (4 * sizeof(u32)); 7098 rxd += (4 * sizeof(u32));
7099 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n", 7099 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7100 i, 7100 i,
7101 readl(rxd + 0x0), readl(rxd + 0x4), 7101 readl(rxd + 0x0), readl(rxd + 0x4),
7102 readl(rxd + 0x8), readl(rxd + 0xc)); 7102 readl(rxd + 0x8), readl(rxd + 0xc));
7103 } 7103 }
7104 7104
7105 for (i = 0; i < 6; i++) { 7105 for (i = 0; i < 6; i++) {
7106 unsigned long rxd; 7106 unsigned long rxd;
7107 7107
7108 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC 7108 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7109 + (i * sizeof(struct tg3_rx_buffer_desc)); 7109 + (i * sizeof(struct tg3_rx_buffer_desc));
7110 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n", 7110 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7111 i, 7111 i,
7112 readl(rxd + 0x0), readl(rxd + 0x4), 7112 readl(rxd + 0x0), readl(rxd + 0x4),
7113 readl(rxd + 0x8), readl(rxd + 0xc)); 7113 readl(rxd + 0x8), readl(rxd + 0xc));
7114 rxd += (4 * sizeof(u32)); 7114 rxd += (4 * sizeof(u32));
7115 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n", 7115 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7116 i, 7116 i,
7117 readl(rxd + 0x0), readl(rxd + 0x4), 7117 readl(rxd + 0x0), readl(rxd + 0x4),
7118 readl(rxd + 0x8), readl(rxd + 0xc)); 7118 readl(rxd + 0x8), readl(rxd + 0xc));
7119 } 7119 }
7120 } 7120 }
7121 #endif 7121 #endif
7122 7122
7123 static struct net_device_stats *tg3_get_stats(struct net_device *); 7123 static struct net_device_stats *tg3_get_stats(struct net_device *);
7124 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); 7124 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7125 7125
/* net_device close (ndo_stop) handler: quiesce the interface, halt the
 * chip, release IRQ and DMA resources, snapshot the statistics, and put
 * the device into low power.  Runs under the RTNL lock taken by the
 * networking core.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	/* Stop queueing new transmits before tearing anything down. */
	netif_stop_queue(dev);

	/* Kill the periodic driver timer before taking the full lock. */
	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	/* Reset the chip and free the rings while it is quiescent
	 * (full lock held, interrupts disabled just above).
	 */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the counters into *_prev before the DMA-coherent
	 * hardware stats block is freed, so tg3_get_stats() and
	 * tg3_get_estats() remain monotonic across an open/close cycle.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7175 7175
7176 static inline unsigned long get_stat64(tg3_stat64_t *val) 7176 static inline unsigned long get_stat64(tg3_stat64_t *val)
7177 { 7177 {
7178 unsigned long ret; 7178 unsigned long ret;
7179 7179
7180 #if (BITS_PER_LONG == 32) 7180 #if (BITS_PER_LONG == 32)
7181 ret = val->low; 7181 ret = val->low;
7182 #else 7182 #else
7183 ret = ((u64)val->high << 32) | ((u64)val->low); 7183 ret = ((u64)val->high << 32) | ((u64)val->low);
7184 #endif 7184 #endif
7185 return ret; 7185 return ret;
7186 } 7186 }
7187 7187
/* Return the cumulative count of received frames with bad CRC (FCS).
 * On 5700/5701 copper chips the count is read from the PHY instead of
 * the MAC statistics block; everywhere else the hardware rx_fcs_errors
 * counter is used directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* tp->lock serializes this MDIO sequence with other
		 * users of the PHY.  NOTE(review): regs 0x1e/0x14 look
		 * like a vendor shadow/test register pair exposing the
		 * PHY CRC counter -- confirm against PHY datasheet.
		 */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		/* Accumulate in software; presumably the PHY counter is
		 * clear-on-read -- TODO confirm.
		 */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7212 7212
/* Accumulate one ethtool statistic: the total saved at the last close
 * (old_estats) plus the live hardware counter.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Build the full ethtool statistics block from the DMA'd hardware stats.
 * If the hardware stats block is not allocated (interface closed), the
 * totals saved at the last close are returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side MAC counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side MAC counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive/placement engine counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing / interrupt counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7304 7304
/* ndo_get_stats handler: fold the live hardware counters into the
 * generic struct net_device_stats, on top of the totals saved at the
 * last close (net_stats_prev).  If the hardware stats block is not
 * allocated (interface closed), the saved totals are returned as-is.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* The hardware keeps separate ucast/mcast/bcast counters; the
	 * generic stats want their sum.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7364 7364
/* Bit-serial Ethernet CRC-32 (reflected form, polynomial 0xedb88320)
 * over the len bytes at buf.  Used to derive the multicast hash filter
 * bits; returns the ones-complemented remainder.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
	}

	return ~crc;
}
7389 7389
7390 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) 7390 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7391 { 7391 {
7392 /* accept or reject all multicast frames */ 7392 /* accept or reject all multicast frames */
7393 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); 7393 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7394 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); 7394 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7395 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); 7395 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7396 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); 7396 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7397 } 7397 }
7398 7398
/* Recompute and program the MAC receive filtering: promiscuous mode,
 * VLAN tag retention, and the 128-bit multicast hash filter.  Callers
 * (see tg3_set_rx_mode) hold the full driver lock.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* The hash bit for each address is the low 7 bits of the
		 * inverted CRC-32 of the MAC address: bits 6:5 select one
		 * of the four 32-bit hash registers, bits 4:0 the bit
		 * within it.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE if something actually changed; the
	 * flushed write plus udelay lets the new mode settle.
	 */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7462 7462
/* ndo_set_multicast_list entry point: reprogram RX filtering under the
 * full driver lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7474 7474
/* Size in bytes of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook: report the fixed register dump size. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7481 7481
/* ethtool get_regs: snapshot chip registers into the caller-supplied
 * TG3_REGDUMP_LEN buffer.  Each register is stored at its native
 * offset within the dump, so unread gaps remain zero-filled.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	/* Zero-fill first so ranges we skip are deterministic. */
	memset(p, 0, TG3_REGDUMP_LEN);

	/* With the PHY powered down, return the all-zero dump rather
	 * than touching registers.
	 */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one 32-bit register and advance the output cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Copy `len' bytes of registers starting at `base', placed at the
 * same byte offset inside the dump buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));	\
	for (i = 0; i < len; i += 4)	\
		__GET_REG32((base) + i);	\
} while (0)
/* Copy a single register at its native offset. */
#define GET_REG32_1(reg)	\
do {	p = (u32 *)(orig_p + (reg));	\
	__GET_REG32((reg));	\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only read when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7554 7554
/* ethtool get_eeprom_len: NVRAM size in bytes, as probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7561 7561
7562 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); 7562 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7563 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val); 7563 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7564 7564
/* ethtool get_eeprom: copy `eeprom->len' bytes of NVRAM starting at
 * `eeprom->offset' into `data'.  NVRAM is only readable in aligned
 * 32-bit words, so a misaligned head and a short tail are handled by
 * reading the containing word and copying out the wanted bytes.
 * `eeprom->len' is updated to the number of bytes actually copied.
 * Returns 0 on success or a negative errno from tg3_nvram_read().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM cannot be accessed while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* words are presented little-endian in the dump */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* report how much was copied before the failure */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7626 7626
7627 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 7627 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7628 7628
7629 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) 7629 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7630 { 7630 {
7631 struct tg3 *tp = netdev_priv(dev); 7631 struct tg3 *tp = netdev_priv(dev);
7632 int ret; 7632 int ret;
7633 u32 offset, len, b_offset, odd_len, start, end; 7633 u32 offset, len, b_offset, odd_len, start, end;
7634 u8 *buf; 7634 u8 *buf;
7635 7635
7636 if (tp->link_config.phy_is_low_power) 7636 if (tp->link_config.phy_is_low_power)
7637 return -EAGAIN; 7637 return -EAGAIN;
7638 7638
7639 if (eeprom->magic != TG3_EEPROM_MAGIC) 7639 if (eeprom->magic != TG3_EEPROM_MAGIC)
7640 return -EINVAL; 7640 return -EINVAL;
7641 7641
7642 offset = eeprom->offset; 7642 offset = eeprom->offset;
7643 len = eeprom->len; 7643 len = eeprom->len;
7644 7644
7645 if ((b_offset = (offset & 3))) { 7645 if ((b_offset = (offset & 3))) {
7646 /* adjustments to start on required 4 byte boundary */ 7646 /* adjustments to start on required 4 byte boundary */
7647 ret = tg3_nvram_read(tp, offset-b_offset, &start); 7647 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7648 if (ret) 7648 if (ret)
7649 return ret; 7649 return ret;
7650 start = cpu_to_le32(start); 7650 start = cpu_to_le32(start);
7651 len += b_offset; 7651 len += b_offset;
7652 offset &= ~3; 7652 offset &= ~3;
7653 if (len < 4) 7653 if (len < 4)
7654 len = 4; 7654 len = 4;
7655 } 7655 }
7656 7656
7657 odd_len = 0; 7657 odd_len = 0;
7658 if (len & 3) { 7658 if (len & 3) {
7659 /* adjustments to end on required 4 byte boundary */ 7659 /* adjustments to end on required 4 byte boundary */
7660 odd_len = 1; 7660 odd_len = 1;
7661 len = (len + 3) & ~3; 7661 len = (len + 3) & ~3;
7662 ret = tg3_nvram_read(tp, offset+len-4, &end); 7662 ret = tg3_nvram_read(tp, offset+len-4, &end);
7663 if (ret) 7663 if (ret)
7664 return ret; 7664 return ret;
7665 end = cpu_to_le32(end); 7665 end = cpu_to_le32(end);
7666 } 7666 }
7667 7667
7668 buf = data; 7668 buf = data;
7669 if (b_offset || odd_len) { 7669 if (b_offset || odd_len) {
7670 buf = kmalloc(len, GFP_KERNEL); 7670 buf = kmalloc(len, GFP_KERNEL);
7671 if (buf == 0) 7671 if (buf == 0)
7672 return -ENOMEM; 7672 return -ENOMEM;
7673 if (b_offset) 7673 if (b_offset)
7674 memcpy(buf, &start, 4); 7674 memcpy(buf, &start, 4);
7675 if (odd_len) 7675 if (odd_len)
7676 memcpy(buf+len-4, &end, 4); 7676 memcpy(buf+len-4, &end, 4);
7677 memcpy(buf + b_offset, data, eeprom->len); 7677 memcpy(buf + b_offset, data, eeprom->len);
7678 } 7678 }
7679 7679
7680 ret = tg3_nvram_write_block(tp, offset, len, buf); 7680 ret = tg3_nvram_write_block(tp, offset, len, buf);
7681 7681
7682 if (buf != data) 7682 if (buf != data)
7683 kfree(buf); 7683 kfree(buf);
7684 7684
7685 return ret; 7685 return ret;
7686 } 7686 }
7687 7687
7688 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 7688 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7689 { 7689 {
7690 struct tg3 *tp = netdev_priv(dev); 7690 struct tg3 *tp = netdev_priv(dev);
7691 7691
7692 cmd->supported = (SUPPORTED_Autoneg); 7692 cmd->supported = (SUPPORTED_Autoneg);
7693 7693
7694 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 7694 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7695 cmd->supported |= (SUPPORTED_1000baseT_Half | 7695 cmd->supported |= (SUPPORTED_1000baseT_Half |
7696 SUPPORTED_1000baseT_Full); 7696 SUPPORTED_1000baseT_Full);
7697 7697
7698 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { 7698 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7699 cmd->supported |= (SUPPORTED_100baseT_Half | 7699 cmd->supported |= (SUPPORTED_100baseT_Half |
7700 SUPPORTED_100baseT_Full | 7700 SUPPORTED_100baseT_Full |
7701 SUPPORTED_10baseT_Half | 7701 SUPPORTED_10baseT_Half |
7702 SUPPORTED_10baseT_Full | 7702 SUPPORTED_10baseT_Full |
7703 SUPPORTED_MII); 7703 SUPPORTED_MII);
7704 cmd->port = PORT_TP; 7704 cmd->port = PORT_TP;
7705 } else { 7705 } else {
7706 cmd->supported |= SUPPORTED_FIBRE; 7706 cmd->supported |= SUPPORTED_FIBRE;
7707 cmd->port = PORT_FIBRE; 7707 cmd->port = PORT_FIBRE;
7708 } 7708 }
7709 7709
7710 cmd->advertising = tp->link_config.advertising; 7710 cmd->advertising = tp->link_config.advertising;
7711 if (netif_running(dev)) { 7711 if (netif_running(dev)) {
7712 cmd->speed = tp->link_config.active_speed; 7712 cmd->speed = tp->link_config.active_speed;
7713 cmd->duplex = tp->link_config.active_duplex; 7713 cmd->duplex = tp->link_config.active_duplex;
7714 } 7714 }
7715 cmd->phy_address = PHY_ADDR; 7715 cmd->phy_address = PHY_ADDR;
7716 cmd->transceiver = 0; 7716 cmd->transceiver = 0;
7717 cmd->autoneg = tp->link_config.autoneg; 7717 cmd->autoneg = tp->link_config.autoneg;
7718 cmd->maxtxpkt = 0; 7718 cmd->maxtxpkt = 0;
7719 cmd->maxrxpkt = 0; 7719 cmd->maxrxpkt = 0;
7720 return 0; 7720 return 0;
7721 } 7721 }
7722 7722
7723 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 7723 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7724 { 7724 {
7725 struct tg3 *tp = netdev_priv(dev); 7725 struct tg3 *tp = netdev_priv(dev);
7726 7726
7727 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 7727 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7728 /* These are the only valid advertisement bits allowed. */ 7728 /* These are the only valid advertisement bits allowed. */
7729 if (cmd->autoneg == AUTONEG_ENABLE && 7729 if (cmd->autoneg == AUTONEG_ENABLE &&
7730 (cmd->advertising & ~(ADVERTISED_1000baseT_Half | 7730 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7731 ADVERTISED_1000baseT_Full | 7731 ADVERTISED_1000baseT_Full |
7732 ADVERTISED_Autoneg | 7732 ADVERTISED_Autoneg |
7733 ADVERTISED_FIBRE))) 7733 ADVERTISED_FIBRE)))
7734 return -EINVAL; 7734 return -EINVAL;
7735 /* Fiber can only do SPEED_1000. */ 7735 /* Fiber can only do SPEED_1000. */
7736 else if ((cmd->autoneg != AUTONEG_ENABLE) && 7736 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7737 (cmd->speed != SPEED_1000)) 7737 (cmd->speed != SPEED_1000))
7738 return -EINVAL; 7738 return -EINVAL;
7739 /* Copper cannot force SPEED_1000. */ 7739 /* Copper cannot force SPEED_1000. */
7740 } else if ((cmd->autoneg != AUTONEG_ENABLE) && 7740 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7741 (cmd->speed == SPEED_1000)) 7741 (cmd->speed == SPEED_1000))
7742 return -EINVAL; 7742 return -EINVAL;
7743 else if ((cmd->speed == SPEED_1000) && 7743 else if ((cmd->speed == SPEED_1000) &&
7744 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY)) 7744 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7745 return -EINVAL; 7745 return -EINVAL;
7746 7746
7747 tg3_full_lock(tp, 0); 7747 tg3_full_lock(tp, 0);
7748 7748
7749 tp->link_config.autoneg = cmd->autoneg; 7749 tp->link_config.autoneg = cmd->autoneg;
7750 if (cmd->autoneg == AUTONEG_ENABLE) { 7750 if (cmd->autoneg == AUTONEG_ENABLE) {
7751 tp->link_config.advertising = cmd->advertising; 7751 tp->link_config.advertising = cmd->advertising;
7752 tp->link_config.speed = SPEED_INVALID; 7752 tp->link_config.speed = SPEED_INVALID;
7753 tp->link_config.duplex = DUPLEX_INVALID; 7753 tp->link_config.duplex = DUPLEX_INVALID;
7754 } else { 7754 } else {
7755 tp->link_config.advertising = 0; 7755 tp->link_config.advertising = 0;
7756 tp->link_config.speed = cmd->speed; 7756 tp->link_config.speed = cmd->speed;
7757 tp->link_config.duplex = cmd->duplex; 7757 tp->link_config.duplex = cmd->duplex;
7758 } 7758 }
7759 7759
7760 if (netif_running(dev)) 7760 if (netif_running(dev))
7761 tg3_setup_phy(tp, 1); 7761 tg3_setup_phy(tp, 1);
7762 7762
7763 tg3_full_unlock(tp); 7763 tg3_full_unlock(tp);
7764 7764
7765 return 0; 7765 return 0;
7766 } 7766 }
7767 7767
7768 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 7768 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7769 { 7769 {
7770 struct tg3 *tp = netdev_priv(dev); 7770 struct tg3 *tp = netdev_priv(dev);
7771 7771
7772 strcpy(info->driver, DRV_MODULE_NAME); 7772 strcpy(info->driver, DRV_MODULE_NAME);
7773 strcpy(info->version, DRV_MODULE_VERSION); 7773 strcpy(info->version, DRV_MODULE_VERSION);
7774 strcpy(info->fw_version, tp->fw_ver); 7774 strcpy(info->fw_version, tp->fw_ver);
7775 strcpy(info->bus_info, pci_name(tp->pdev)); 7775 strcpy(info->bus_info, pci_name(tp->pdev));
7776 } 7776 }
7777 7777
7778 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 7778 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7779 { 7779 {
7780 struct tg3 *tp = netdev_priv(dev); 7780 struct tg3 *tp = netdev_priv(dev);
7781 7781
7782 wol->supported = WAKE_MAGIC; 7782 wol->supported = WAKE_MAGIC;
7783 wol->wolopts = 0; 7783 wol->wolopts = 0;
7784 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) 7784 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7785 wol->wolopts = WAKE_MAGIC; 7785 wol->wolopts = WAKE_MAGIC;
7786 memset(&wol->sopass, 0, sizeof(wol->sopass)); 7786 memset(&wol->sopass, 0, sizeof(wol->sopass));
7787 } 7787 }
7788 7788
7789 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 7789 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7790 { 7790 {
7791 struct tg3 *tp = netdev_priv(dev); 7791 struct tg3 *tp = netdev_priv(dev);
7792 7792
7793 if (wol->wolopts & ~WAKE_MAGIC) 7793 if (wol->wolopts & ~WAKE_MAGIC)
7794 return -EINVAL; 7794 return -EINVAL;
7795 if ((wol->wolopts & WAKE_MAGIC) && 7795 if ((wol->wolopts & WAKE_MAGIC) &&
7796 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES && 7796 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7797 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP)) 7797 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7798 return -EINVAL; 7798 return -EINVAL;
7799 7799
7800 spin_lock_bh(&tp->lock); 7800 spin_lock_bh(&tp->lock);
7801 if (wol->wolopts & WAKE_MAGIC) 7801 if (wol->wolopts & WAKE_MAGIC)
7802 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 7802 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7803 else 7803 else
7804 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; 7804 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7805 spin_unlock_bh(&tp->lock); 7805 spin_unlock_bh(&tp->lock);
7806 7806
7807 return 0; 7807 return 0;
7808 } 7808 }
7809 7809
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
7815 7815
/* ethtool set_msglevel: store a new message-enable bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
7821 7821
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso: delegate to the generic helper on TSO-capable
 * chips; otherwise refuse to enable TSO (disabling is a no-op).
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		return ethtool_op_set_tso(dev, value);

	return value ? -EINVAL : 0;
}
#endif
7835 7835
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 * Returns -EAGAIN if the interface is down, -EINVAL on SERDES links
 * (no MII autoneg) or when autoneg is not currently enabled and
 * parallel detect is not active.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice; the first read looks like a
	 * deliberate dummy read (e.g. to flush latched PHY state) --
	 * confirm against the PHY errata before removing it.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* kick off a fresh autonegotiation cycle */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7862 7862
7863 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 7863 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7864 { 7864 {
7865 struct tg3 *tp = netdev_priv(dev); 7865 struct tg3 *tp = netdev_priv(dev);
7866 7866
7867 ering->rx_max_pending = TG3_RX_RING_SIZE - 1; 7867 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7868 ering->rx_mini_max_pending = 0; 7868 ering->rx_mini_max_pending = 0;
7869 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) 7869 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7870 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; 7870 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7871 else 7871 else
7872 ering->rx_jumbo_max_pending = 0; 7872 ering->rx_jumbo_max_pending = 0;
7873 7873
7874 ering->tx_max_pending = TG3_TX_RING_SIZE - 1; 7874 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7875 7875
7876 ering->rx_pending = tp->rx_pending; 7876 ering->rx_pending = tp->rx_pending;
7877 ering->rx_mini_pending = 0; 7877 ering->rx_mini_pending = 0;
7878 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) 7878 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7879 ering->rx_jumbo_pending = tp->rx_jumbo_pending; 7879 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7880 else 7880 else
7881 ering->rx_jumbo_pending = 0; 7881 ering->rx_jumbo_pending = 0;
7882 7882
7883 ering->tx_pending = tp->tx_pending; 7883 ering->tx_pending = tp->tx_pending;
7884 } 7884 }
7885 7885
7886 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 7886 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7887 { 7887 {
7888 struct tg3 *tp = netdev_priv(dev); 7888 struct tg3 *tp = netdev_priv(dev);
7889 int irq_sync = 0; 7889 int irq_sync = 0;
7890 7890
7891 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 7891 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7892 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || 7892 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7893 (ering->tx_pending > TG3_TX_RING_SIZE - 1)) 7893 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7894 return -EINVAL; 7894 return -EINVAL;
7895 7895
7896 if (netif_running(dev)) { 7896 if (netif_running(dev)) {
7897 tg3_netif_stop(tp); 7897 tg3_netif_stop(tp);
7898 irq_sync = 1; 7898 irq_sync = 1;
7899 } 7899 }
7900 7900
7901 tg3_full_lock(tp, irq_sync); 7901 tg3_full_lock(tp, irq_sync);
7902 7902
7903 tp->rx_pending = ering->rx_pending; 7903 tp->rx_pending = ering->rx_pending;
7904 7904
7905 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && 7905 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7906 tp->rx_pending > 63) 7906 tp->rx_pending > 63)
7907 tp->rx_pending = 63; 7907 tp->rx_pending = 63;
7908 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 7908 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7909 tp->tx_pending = ering->tx_pending; 7909 tp->tx_pending = ering->tx_pending;
7910 7910
7911 if (netif_running(dev)) { 7911 if (netif_running(dev)) {
7912 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7912 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7913 tg3_init_hw(tp, 1); 7913 tg3_init_hw(tp, 1);
7914 tg3_netif_start(tp); 7914 tg3_netif_start(tp);
7915 } 7915 }
7916 7916
7917 tg3_full_unlock(tp); 7917 tg3_full_unlock(tp);
7918 7918
7919 return 0; 7919 return 0;
7920 } 7920 }
7921 7921
7922 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 7922 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7923 { 7923 {
7924 struct tg3 *tp = netdev_priv(dev); 7924 struct tg3 *tp = netdev_priv(dev);
7925 7925
7926 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; 7926 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7927 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0; 7927 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7928 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0; 7928 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7929 } 7929 }
7930 7930
7931 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 7931 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7932 { 7932 {
7933 struct tg3 *tp = netdev_priv(dev); 7933 struct tg3 *tp = netdev_priv(dev);
7934 int irq_sync = 0; 7934 int irq_sync = 0;
7935 7935
7936 if (netif_running(dev)) { 7936 if (netif_running(dev)) {
7937 tg3_netif_stop(tp); 7937 tg3_netif_stop(tp);
7938 irq_sync = 1; 7938 irq_sync = 1;
7939 } 7939 }
7940 7940
7941 tg3_full_lock(tp, irq_sync); 7941 tg3_full_lock(tp, irq_sync);
7942 7942
7943 if (epause->autoneg) 7943 if (epause->autoneg)
7944 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 7944 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7945 else 7945 else
7946 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; 7946 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7947 if (epause->rx_pause) 7947 if (epause->rx_pause)
7948 tp->tg3_flags |= TG3_FLAG_RX_PAUSE; 7948 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7949 else 7949 else
7950 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE; 7950 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7951 if (epause->tx_pause) 7951 if (epause->tx_pause)
7952 tp->tg3_flags |= TG3_FLAG_TX_PAUSE; 7952 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7953 else 7953 else
7954 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE; 7954 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7955 7955
7956 if (netif_running(dev)) { 7956 if (netif_running(dev)) {
7957 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7957 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7958 tg3_init_hw(tp, 1); 7958 tg3_init_hw(tp, 1);
7959 tg3_netif_start(tp); 7959 tg3_netif_start(tp);
7960 } 7960 }
7961 7961
7962 tg3_full_unlock(tp); 7962 tg3_full_unlock(tp);
7963 7963
7964 return 0; 7964 return 0;
7965 } 7965 }
7966 7966
7967 static u32 tg3_get_rx_csum(struct net_device *dev) 7967 static u32 tg3_get_rx_csum(struct net_device *dev)
7968 { 7968 {
7969 struct tg3 *tp = netdev_priv(dev); 7969 struct tg3 *tp = netdev_priv(dev);
7970 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; 7970 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7971 } 7971 }
7972 7972
7973 static int tg3_set_rx_csum(struct net_device *dev, u32 data) 7973 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7974 { 7974 {
7975 struct tg3 *tp = netdev_priv(dev); 7975 struct tg3 *tp = netdev_priv(dev);
7976 7976
7977 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 7977 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7978 if (data != 0) 7978 if (data != 0)
7979 return -EINVAL; 7979 return -EINVAL;
7980 return 0; 7980 return 0;
7981 } 7981 }
7982 7982
7983 spin_lock_bh(&tp->lock); 7983 spin_lock_bh(&tp->lock);
7984 if (data) 7984 if (data)
7985 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; 7985 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7986 else 7986 else
7987 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; 7987 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7988 spin_unlock_bh(&tp->lock); 7988 spin_unlock_bh(&tp->lock);
7989 7989
7990 return 0; 7990 return 0;
7991 } 7991 }
7992 7992
7993 static int tg3_set_tx_csum(struct net_device *dev, u32 data) 7993 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7994 { 7994 {
7995 struct tg3 *tp = netdev_priv(dev); 7995 struct tg3 *tp = netdev_priv(dev);
7996 7996
7997 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 7997 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7998 if (data != 0) 7998 if (data != 0)
7999 return -EINVAL; 7999 return -EINVAL;
8000 return 0; 8000 return 0;
8001 } 8001 }
8002 8002
8003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 8003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 8004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8005 ethtool_op_set_tx_hw_csum(dev, data); 8005 ethtool_op_set_tx_hw_csum(dev, data);
8006 else 8006 else
8007 ethtool_op_set_tx_csum(dev, data); 8007 ethtool_op_set_tx_csum(dev, data);
8008 8008
8009 return 0; 8009 return 0;
8010 } 8010 }
8011 8011
/* ethtool get_stats_count: number of entries tg3_get_ethtool_stats fills. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8016 8016
/* ethtool get_test_count: number of self-test result slots. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8021 8021
8022 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) 8022 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8023 { 8023 {
8024 switch (stringset) { 8024 switch (stringset) {
8025 case ETH_SS_STATS: 8025 case ETH_SS_STATS:
8026 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 8026 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8027 break; 8027 break;
8028 case ETH_SS_TEST: 8028 case ETH_SS_TEST:
8029 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); 8029 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8030 break; 8030 break;
8031 default: 8031 default:
8032 WARN_ON(1); /* we need a WARN() */ 8032 WARN_ON(1); /* we need a WARN() */
8033 break; 8033 break;
8034 } 8034 }
8035 } 8035 }
8036 8036
8037 static int tg3_phys_id(struct net_device *dev, u32 data) 8037 static int tg3_phys_id(struct net_device *dev, u32 data)
8038 { 8038 {
8039 struct tg3 *tp = netdev_priv(dev); 8039 struct tg3 *tp = netdev_priv(dev);
8040 int i; 8040 int i;
8041 8041
8042 if (!netif_running(tp->dev)) 8042 if (!netif_running(tp->dev))
8043 return -EAGAIN; 8043 return -EAGAIN;
8044 8044
8045 if (data == 0) 8045 if (data == 0)
8046 data = 2; 8046 data = 2;
8047 8047
8048 for (i = 0; i < (data * 2); i++) { 8048 for (i = 0; i < (data * 2); i++) {
8049 if ((i % 2) == 0) 8049 if ((i % 2) == 0)
8050 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 8050 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8051 LED_CTRL_1000MBPS_ON | 8051 LED_CTRL_1000MBPS_ON |
8052 LED_CTRL_100MBPS_ON | 8052 LED_CTRL_100MBPS_ON |
8053 LED_CTRL_10MBPS_ON | 8053 LED_CTRL_10MBPS_ON |
8054 LED_CTRL_TRAFFIC_OVERRIDE | 8054 LED_CTRL_TRAFFIC_OVERRIDE |
8055 LED_CTRL_TRAFFIC_BLINK | 8055 LED_CTRL_TRAFFIC_BLINK |
8056 LED_CTRL_TRAFFIC_LED); 8056 LED_CTRL_TRAFFIC_LED);
8057 8057
8058 else 8058 else
8059 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | 8059 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8060 LED_CTRL_TRAFFIC_OVERRIDE); 8060 LED_CTRL_TRAFFIC_OVERRIDE);
8061 8061
8062 if (msleep_interruptible(500)) 8062 if (msleep_interruptible(500))
8063 break; 8063 break;
8064 } 8064 }
8065 tw32(MAC_LED_CTRL, tp->led_ctrl); 8065 tw32(MAC_LED_CTRL, tp->led_ctrl);
8066 return 0; 8066 return 0;
8067 } 8067 }
8068 8068
/* ethtool get_ethtool_stats: copy the driver's accumulated statistics
 * block into the caller's u64 array (TG3_NUM_STATS entries).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8075 8075
8076 #define NVRAM_TEST_SIZE 0x100 8076 #define NVRAM_TEST_SIZE 0x100
8077 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14 8077 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8078 8078
/* Offline self-test: validate the NVRAM contents against the checksums
 * stored in it.
 *
 * Returns 0 on success, -EIO on a read failure or checksum mismatch,
 * -ENOMEM if the scratch buffer cannot be allocated.  Unrecognized
 * selfboot formats are skipped (return 0) rather than failed.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	/* The magic word at offset 0 identifies the NVRAM layout. */
	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		/* Selfboot image; only format 1 (0x2xxxxx) is testable here. */
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the region to be checked into buf[], word by word. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	/* Loop broke early: a read failed, err already holds the code. */
	if (i < size)
		goto out;

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		/* Selfboot images use a simple byte-sum that must be zero. */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8144 8144
8145 #define TG3_SERDES_TIMEOUT_SEC 2 8145 #define TG3_SERDES_TIMEOUT_SEC 2
8146 #define TG3_COPPER_TIMEOUT_SEC 6 8146 #define TG3_COPPER_TIMEOUT_SEC 6
8147 8147
8148 static int tg3_test_link(struct tg3 *tp) 8148 static int tg3_test_link(struct tg3 *tp)
8149 { 8149 {
8150 int i, max; 8150 int i, max;
8151 8151
8152 if (!netif_running(tp->dev)) 8152 if (!netif_running(tp->dev))
8153 return -ENODEV; 8153 return -ENODEV;
8154 8154
8155 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 8155 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8156 max = TG3_SERDES_TIMEOUT_SEC; 8156 max = TG3_SERDES_TIMEOUT_SEC;
8157 else 8157 else
8158 max = TG3_COPPER_TIMEOUT_SEC; 8158 max = TG3_COPPER_TIMEOUT_SEC;
8159 8159
8160 for (i = 0; i < max; i++) { 8160 for (i = 0; i < max; i++) {
8161 if (netif_carrier_ok(tp->dev)) 8161 if (netif_carrier_ok(tp->dev))
8162 return 0; 8162 return 0;
8163 8163
8164 if (msleep_interruptible(1000)) 8164 if (msleep_interruptible(1000))
8165 break; 8165 break;
8166 } 8166 }
8167 8167
8168 return -EIO; 8168 return -EIO;
8169 } 8169 }
8170 8170
8171 /* Only test the commonly used registers */ 8171 /* Only test the commonly used registers */
8172 static int tg3_test_registers(struct tg3 *tp) 8172 static int tg3_test_registers(struct tg3 *tp)
8173 { 8173 {
8174 int i, is_5705; 8174 int i, is_5705;
8175 u32 offset, read_mask, write_mask, val, save_val, read_val; 8175 u32 offset, read_mask, write_mask, val, save_val, read_val;
8176 static struct { 8176 static struct {
8177 u16 offset; 8177 u16 offset;
8178 u16 flags; 8178 u16 flags;
8179 #define TG3_FL_5705 0x1 8179 #define TG3_FL_5705 0x1
8180 #define TG3_FL_NOT_5705 0x2 8180 #define TG3_FL_NOT_5705 0x2
8181 #define TG3_FL_NOT_5788 0x4 8181 #define TG3_FL_NOT_5788 0x4
8182 u32 read_mask; 8182 u32 read_mask;
8183 u32 write_mask; 8183 u32 write_mask;
8184 } reg_tbl[] = { 8184 } reg_tbl[] = {
8185 /* MAC Control Registers */ 8185 /* MAC Control Registers */
8186 { MAC_MODE, TG3_FL_NOT_5705, 8186 { MAC_MODE, TG3_FL_NOT_5705,
8187 0x00000000, 0x00ef6f8c }, 8187 0x00000000, 0x00ef6f8c },
8188 { MAC_MODE, TG3_FL_5705, 8188 { MAC_MODE, TG3_FL_5705,
8189 0x00000000, 0x01ef6b8c }, 8189 0x00000000, 0x01ef6b8c },
8190 { MAC_STATUS, TG3_FL_NOT_5705, 8190 { MAC_STATUS, TG3_FL_NOT_5705,
8191 0x03800107, 0x00000000 }, 8191 0x03800107, 0x00000000 },
8192 { MAC_STATUS, TG3_FL_5705, 8192 { MAC_STATUS, TG3_FL_5705,
8193 0x03800100, 0x00000000 }, 8193 0x03800100, 0x00000000 },
8194 { MAC_ADDR_0_HIGH, 0x0000, 8194 { MAC_ADDR_0_HIGH, 0x0000,
8195 0x00000000, 0x0000ffff }, 8195 0x00000000, 0x0000ffff },
8196 { MAC_ADDR_0_LOW, 0x0000, 8196 { MAC_ADDR_0_LOW, 0x0000,
8197 0x00000000, 0xffffffff }, 8197 0x00000000, 0xffffffff },
8198 { MAC_RX_MTU_SIZE, 0x0000, 8198 { MAC_RX_MTU_SIZE, 0x0000,
8199 0x00000000, 0x0000ffff }, 8199 0x00000000, 0x0000ffff },
8200 { MAC_TX_MODE, 0x0000, 8200 { MAC_TX_MODE, 0x0000,
8201 0x00000000, 0x00000070 }, 8201 0x00000000, 0x00000070 },
8202 { MAC_TX_LENGTHS, 0x0000, 8202 { MAC_TX_LENGTHS, 0x0000,
8203 0x00000000, 0x00003fff }, 8203 0x00000000, 0x00003fff },
8204 { MAC_RX_MODE, TG3_FL_NOT_5705, 8204 { MAC_RX_MODE, TG3_FL_NOT_5705,
8205 0x00000000, 0x000007fc }, 8205 0x00000000, 0x000007fc },
8206 { MAC_RX_MODE, TG3_FL_5705, 8206 { MAC_RX_MODE, TG3_FL_5705,
8207 0x00000000, 0x000007dc }, 8207 0x00000000, 0x000007dc },
8208 { MAC_HASH_REG_0, 0x0000, 8208 { MAC_HASH_REG_0, 0x0000,
8209 0x00000000, 0xffffffff }, 8209 0x00000000, 0xffffffff },
8210 { MAC_HASH_REG_1, 0x0000, 8210 { MAC_HASH_REG_1, 0x0000,
8211 0x00000000, 0xffffffff }, 8211 0x00000000, 0xffffffff },
8212 { MAC_HASH_REG_2, 0x0000, 8212 { MAC_HASH_REG_2, 0x0000,
8213 0x00000000, 0xffffffff }, 8213 0x00000000, 0xffffffff },
8214 { MAC_HASH_REG_3, 0x0000, 8214 { MAC_HASH_REG_3, 0x0000,
8215 0x00000000, 0xffffffff }, 8215 0x00000000, 0xffffffff },
8216 8216
8217 /* Receive Data and Receive BD Initiator Control Registers. */ 8217 /* Receive Data and Receive BD Initiator Control Registers. */
8218 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, 8218 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8219 0x00000000, 0xffffffff }, 8219 0x00000000, 0xffffffff },
8220 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, 8220 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8221 0x00000000, 0xffffffff }, 8221 0x00000000, 0xffffffff },
8222 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, 8222 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8223 0x00000000, 0x00000003 }, 8223 0x00000000, 0x00000003 },
8224 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, 8224 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8225 0x00000000, 0xffffffff }, 8225 0x00000000, 0xffffffff },
8226 { RCVDBDI_STD_BD+0, 0x0000, 8226 { RCVDBDI_STD_BD+0, 0x0000,
8227 0x00000000, 0xffffffff }, 8227 0x00000000, 0xffffffff },
8228 { RCVDBDI_STD_BD+4, 0x0000, 8228 { RCVDBDI_STD_BD+4, 0x0000,
8229 0x00000000, 0xffffffff }, 8229 0x00000000, 0xffffffff },
8230 { RCVDBDI_STD_BD+8, 0x0000, 8230 { RCVDBDI_STD_BD+8, 0x0000,
8231 0x00000000, 0xffff0002 }, 8231 0x00000000, 0xffff0002 },
8232 { RCVDBDI_STD_BD+0xc, 0x0000, 8232 { RCVDBDI_STD_BD+0xc, 0x0000,
8233 0x00000000, 0xffffffff }, 8233 0x00000000, 0xffffffff },
8234 8234
8235 /* Receive BD Initiator Control Registers. */ 8235 /* Receive BD Initiator Control Registers. */
8236 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, 8236 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8237 0x00000000, 0xffffffff }, 8237 0x00000000, 0xffffffff },
8238 { RCVBDI_STD_THRESH, TG3_FL_5705, 8238 { RCVBDI_STD_THRESH, TG3_FL_5705,
8239 0x00000000, 0x000003ff }, 8239 0x00000000, 0x000003ff },
8240 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, 8240 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8241 0x00000000, 0xffffffff }, 8241 0x00000000, 0xffffffff },
8242 8242
8243 /* Host Coalescing Control Registers. */ 8243 /* Host Coalescing Control Registers. */
8244 { HOSTCC_MODE, TG3_FL_NOT_5705, 8244 { HOSTCC_MODE, TG3_FL_NOT_5705,
8245 0x00000000, 0x00000004 }, 8245 0x00000000, 0x00000004 },
8246 { HOSTCC_MODE, TG3_FL_5705, 8246 { HOSTCC_MODE, TG3_FL_5705,
8247 0x00000000, 0x000000f6 }, 8247 0x00000000, 0x000000f6 },
8248 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, 8248 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8249 0x00000000, 0xffffffff }, 8249 0x00000000, 0xffffffff },
8250 { HOSTCC_RXCOL_TICKS, TG3_FL_5705, 8250 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8251 0x00000000, 0x000003ff }, 8251 0x00000000, 0x000003ff },
8252 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, 8252 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8253 0x00000000, 0xffffffff }, 8253 0x00000000, 0xffffffff },
8254 { HOSTCC_TXCOL_TICKS, TG3_FL_5705, 8254 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8255 0x00000000, 0x000003ff }, 8255 0x00000000, 0x000003ff },
8256 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, 8256 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8257 0x00000000, 0xffffffff }, 8257 0x00000000, 0xffffffff },
8258 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 8258 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8259 0x00000000, 0x000000ff }, 8259 0x00000000, 0x000000ff },
8260 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, 8260 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8261 0x00000000, 0xffffffff }, 8261 0x00000000, 0xffffffff },
8262 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, 8262 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8263 0x00000000, 0x000000ff }, 8263 0x00000000, 0x000000ff },
8264 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, 8264 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8265 0x00000000, 0xffffffff }, 8265 0x00000000, 0xffffffff },
8266 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, 8266 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8267 0x00000000, 0xffffffff }, 8267 0x00000000, 0xffffffff },
8268 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, 8268 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8269 0x00000000, 0xffffffff }, 8269 0x00000000, 0xffffffff },
8270 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 8270 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8271 0x00000000, 0x000000ff }, 8271 0x00000000, 0x000000ff },
8272 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, 8272 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8273 0x00000000, 0xffffffff }, 8273 0x00000000, 0xffffffff },
8274 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, 8274 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8275 0x00000000, 0x000000ff }, 8275 0x00000000, 0x000000ff },
8276 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, 8276 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8277 0x00000000, 0xffffffff }, 8277 0x00000000, 0xffffffff },
8278 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, 8278 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8279 0x00000000, 0xffffffff }, 8279 0x00000000, 0xffffffff },
8280 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, 8280 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8281 0x00000000, 0xffffffff }, 8281 0x00000000, 0xffffffff },
8282 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, 8282 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8283 0x00000000, 0xffffffff }, 8283 0x00000000, 0xffffffff },
8284 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, 8284 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8285 0x00000000, 0xffffffff }, 8285 0x00000000, 0xffffffff },
8286 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, 8286 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8287 0xffffffff, 0x00000000 }, 8287 0xffffffff, 0x00000000 },
8288 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, 8288 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8289 0xffffffff, 0x00000000 }, 8289 0xffffffff, 0x00000000 },
8290 8290
8291 /* Buffer Manager Control Registers. */ 8291 /* Buffer Manager Control Registers. */
8292 { BUFMGR_MB_POOL_ADDR, 0x0000, 8292 { BUFMGR_MB_POOL_ADDR, 0x0000,
8293 0x00000000, 0x007fff80 }, 8293 0x00000000, 0x007fff80 },
8294 { BUFMGR_MB_POOL_SIZE, 0x0000, 8294 { BUFMGR_MB_POOL_SIZE, 0x0000,
8295 0x00000000, 0x007fffff }, 8295 0x00000000, 0x007fffff },
8296 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, 8296 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8297 0x00000000, 0x0000003f }, 8297 0x00000000, 0x0000003f },
8298 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, 8298 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8299 0x00000000, 0x000001ff }, 8299 0x00000000, 0x000001ff },
8300 { BUFMGR_MB_HIGH_WATER, 0x0000, 8300 { BUFMGR_MB_HIGH_WATER, 0x0000,
8301 0x00000000, 0x000001ff }, 8301 0x00000000, 0x000001ff },
8302 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, 8302 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8303 0xffffffff, 0x00000000 }, 8303 0xffffffff, 0x00000000 },
8304 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, 8304 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8305 0xffffffff, 0x00000000 }, 8305 0xffffffff, 0x00000000 },
8306 8306
8307 /* Mailbox Registers */ 8307 /* Mailbox Registers */
8308 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, 8308 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8309 0x00000000, 0x000001ff }, 8309 0x00000000, 0x000001ff },
8310 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, 8310 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8311 0x00000000, 0x000001ff }, 8311 0x00000000, 0x000001ff },
8312 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, 8312 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8313 0x00000000, 0x000007ff }, 8313 0x00000000, 0x000007ff },
8314 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, 8314 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8315 0x00000000, 0x000001ff }, 8315 0x00000000, 0x000001ff },
8316 8316
8317 { 0xffff, 0x0000, 0x00000000, 0x00000000 }, 8317 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8318 }; 8318 };
8319 8319
8320 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 8320 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8321 is_5705 = 1; 8321 is_5705 = 1;
8322 else 8322 else
8323 is_5705 = 0; 8323 is_5705 = 0;
8324 8324
8325 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 8325 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8326 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) 8326 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8327 continue; 8327 continue;
8328 8328
8329 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) 8329 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8330 continue; 8330 continue;
8331 8331
8332 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && 8332 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8333 (reg_tbl[i].flags & TG3_FL_NOT_5788)) 8333 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8334 continue; 8334 continue;
8335 8335
8336 offset = (u32) reg_tbl[i].offset; 8336 offset = (u32) reg_tbl[i].offset;
8337 read_mask = reg_tbl[i].read_mask; 8337 read_mask = reg_tbl[i].read_mask;
8338 write_mask = reg_tbl[i].write_mask; 8338 write_mask = reg_tbl[i].write_mask;
8339 8339
8340 /* Save the original register content */ 8340 /* Save the original register content */
8341 save_val = tr32(offset); 8341 save_val = tr32(offset);
8342 8342
8343 /* Determine the read-only value. */ 8343 /* Determine the read-only value. */
8344 read_val = save_val & read_mask; 8344 read_val = save_val & read_mask;
8345 8345
8346 /* Write zero to the register, then make sure the read-only bits 8346 /* Write zero to the register, then make sure the read-only bits
8347 * are not changed and the read/write bits are all zeros. 8347 * are not changed and the read/write bits are all zeros.
8348 */ 8348 */
8349 tw32(offset, 0); 8349 tw32(offset, 0);
8350 8350
8351 val = tr32(offset); 8351 val = tr32(offset);
8352 8352
8353 /* Test the read-only and read/write bits. */ 8353 /* Test the read-only and read/write bits. */
8354 if (((val & read_mask) != read_val) || (val & write_mask)) 8354 if (((val & read_mask) != read_val) || (val & write_mask))
8355 goto out; 8355 goto out;
8356 8356
8357 /* Write ones to all the bits defined by RdMask and WrMask, then 8357 /* Write ones to all the bits defined by RdMask and WrMask, then
8358 * make sure the read-only bits are not changed and the 8358 * make sure the read-only bits are not changed and the
8359 * read/write bits are all ones. 8359 * read/write bits are all ones.
8360 */ 8360 */
8361 tw32(offset, read_mask | write_mask); 8361 tw32(offset, read_mask | write_mask);
8362 8362
8363 val = tr32(offset); 8363 val = tr32(offset);
8364 8364
8365 /* Test the read-only bits. */ 8365 /* Test the read-only bits. */
8366 if ((val & read_mask) != read_val) 8366 if ((val & read_mask) != read_val)
8367 goto out; 8367 goto out;
8368 8368
8369 /* Test the read/write bits. */ 8369 /* Test the read/write bits. */
8370 if ((val & write_mask) != write_mask) 8370 if ((val & write_mask) != write_mask)
8371 goto out; 8371 goto out;
8372 8372
8373 tw32(offset, save_val); 8373 tw32(offset, save_val);
8374 } 8374 }
8375 8375
8376 return 0; 8376 return 0;
8377 8377
8378 out: 8378 out:
8379 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset); 8379 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8380 tw32(offset, save_val); 8380 tw32(offset, save_val);
8381 return -EIO; 8381 return -EIO;
8382 } 8382 }
8383 8383
8384 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) 8384 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8385 { 8385 {
8386 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; 8386 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8387 int i; 8387 int i;
8388 u32 j; 8388 u32 j;
8389 8389
8390 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) { 8390 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8391 for (j = 0; j < len; j += 4) { 8391 for (j = 0; j < len; j += 4) {
8392 u32 val; 8392 u32 val;
8393 8393
8394 tg3_write_mem(tp, offset + j, test_pattern[i]); 8394 tg3_write_mem(tp, offset + j, test_pattern[i]);
8395 tg3_read_mem(tp, offset + j, &val); 8395 tg3_read_mem(tp, offset + j, &val);
8396 if (val != test_pattern[i]) 8396 if (val != test_pattern[i])
8397 return -EIO; 8397 return -EIO;
8398 } 8398 }
8399 } 8399 }
8400 return 0; 8400 return 0;
8401 } 8401 }
8402 8402
8403 static int tg3_test_memory(struct tg3 *tp) 8403 static int tg3_test_memory(struct tg3 *tp)
8404 { 8404 {
8405 static struct mem_entry { 8405 static struct mem_entry {
8406 u32 offset; 8406 u32 offset;
8407 u32 len; 8407 u32 len;
8408 } mem_tbl_570x[] = { 8408 } mem_tbl_570x[] = {
8409 { 0x00000000, 0x00b50}, 8409 { 0x00000000, 0x00b50},
8410 { 0x00002000, 0x1c000}, 8410 { 0x00002000, 0x1c000},
8411 { 0xffffffff, 0x00000} 8411 { 0xffffffff, 0x00000}
8412 }, mem_tbl_5705[] = { 8412 }, mem_tbl_5705[] = {
8413 { 0x00000100, 0x0000c}, 8413 { 0x00000100, 0x0000c},
8414 { 0x00000200, 0x00008}, 8414 { 0x00000200, 0x00008},
8415 { 0x00004000, 0x00800}, 8415 { 0x00004000, 0x00800},
8416 { 0x00006000, 0x01000}, 8416 { 0x00006000, 0x01000},
8417 { 0x00008000, 0x02000}, 8417 { 0x00008000, 0x02000},
8418 { 0x00010000, 0x0e000}, 8418 { 0x00010000, 0x0e000},
8419 { 0xffffffff, 0x00000} 8419 { 0xffffffff, 0x00000}
8420 }, mem_tbl_5755[] = { 8420 }, mem_tbl_5755[] = {
8421 { 0x00000200, 0x00008}, 8421 { 0x00000200, 0x00008},
8422 { 0x00004000, 0x00800}, 8422 { 0x00004000, 0x00800},
8423 { 0x00006000, 0x00800}, 8423 { 0x00006000, 0x00800},
8424 { 0x00008000, 0x02000}, 8424 { 0x00008000, 0x02000},
8425 { 0x00010000, 0x0c000}, 8425 { 0x00010000, 0x0c000},
8426 { 0xffffffff, 0x00000} 8426 { 0xffffffff, 0x00000}
8427 }; 8427 };
8428 struct mem_entry *mem_tbl; 8428 struct mem_entry *mem_tbl;
8429 int err = 0; 8429 int err = 0;
8430 int i; 8430 int i;
8431 8431
8432 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 8432 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 8433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 8434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8435 mem_tbl = mem_tbl_5755; 8435 mem_tbl = mem_tbl_5755;
8436 else 8436 else
8437 mem_tbl = mem_tbl_5705; 8437 mem_tbl = mem_tbl_5705;
8438 } else 8438 } else
8439 mem_tbl = mem_tbl_570x; 8439 mem_tbl = mem_tbl_570x;
8440 8440
8441 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 8441 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8442 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, 8442 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8443 mem_tbl[i].len)) != 0) 8443 mem_tbl[i].len)) != 0)
8444 break; 8444 break;
8445 } 8445 }
8446 8446
8447 return err; 8447 return err;
8448 } 8448 }
8449 8449
8450 #define TG3_MAC_LOOPBACK 0 8450 #define TG3_MAC_LOOPBACK 0
8451 #define TG3_PHY_LOOPBACK 1 8451 #define TG3_PHY_LOOPBACK 1
8452 8452
/* Offline self-test: send one frame through an internal loopback path
 * and verify it comes back intact.
 *
 * @loopback_mode: TG3_MAC_LOOPBACK loops the frame inside the MAC;
 * TG3_PHY_LOOPBACK loops it inside the PHY.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, -EIO if the frame is not received or
 * its contents do not match.  Caller holds the full lock; the rings
 * are torn down by tg3_free_rings() afterwards.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		/* Force the PHY into 1000/full loopback. */
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a full-MTU test frame: our MAC as destination, zeroed
	 * source/type, then an incrementing byte pattern as payload.
	 */
	tx_len = 1514;
	skb = dev_alloc_skb(tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so hw_status reflects current indices. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	/* Queue the frame on the send ring and kick the mailbox. */
	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll up to 10 times for the frame to be sent and looped back. */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Locate the receive descriptor and sanity-check it. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length includes the 4-byte FCS; strip it. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8586 8586
8587 #define TG3_MAC_LOOPBACK_FAILED 1 8587 #define TG3_MAC_LOOPBACK_FAILED 1
8588 #define TG3_PHY_LOOPBACK_FAILED 2 8588 #define TG3_PHY_LOOPBACK_FAILED 2
8589 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \ 8589 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8590 TG3_PHY_LOOPBACK_FAILED) 8590 TG3_PHY_LOOPBACK_FAILED)
8591 8591
8592 static int tg3_test_loopback(struct tg3 *tp) 8592 static int tg3_test_loopback(struct tg3 *tp)
8593 { 8593 {
8594 int err = 0; 8594 int err = 0;
8595 8595
8596 if (!netif_running(tp->dev)) 8596 if (!netif_running(tp->dev))
8597 return TG3_LOOPBACK_FAILED; 8597 return TG3_LOOPBACK_FAILED;
8598 8598
8599 tg3_reset_hw(tp, 1); 8599 tg3_reset_hw(tp, 1);
8600 8600
8601 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8601 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8602 err |= TG3_MAC_LOOPBACK_FAILED; 8602 err |= TG3_MAC_LOOPBACK_FAILED;
8603 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 8603 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8604 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) 8604 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8605 err |= TG3_PHY_LOOPBACK_FAILED; 8605 err |= TG3_PHY_LOOPBACK_FAILED;
8606 } 8606 }
8607 8607
8608 return err; 8608 return err;
8609 } 8609 }
8610 8610
/* ethtool ->self_test handler.
 *
 * Always runs the NVRAM and link checks; when ETH_TEST_FL_OFFLINE is
 * requested, additionally halts the chip and runs the register, memory,
 * loopback and interrupt tests, then re-initializes the hardware.
 *
 * data[] result slots (non-zero = failed):
 *   0 = nvram, 1 = link, 2 = registers, 3 = memory,
 *   4 = loopback (bitmask from tg3_test_loopback), 5 = interrupt.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* The tests need the chip fully powered up. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip before poking at it directly. */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		/* Only drop the NVRAM lock if we actually acquired it. */
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* NOTE(review): run with the full lock released —
		 * presumably tg3_test_interrupt needs live interrupt
		 * delivery; confirm against its implementation.
		 */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the chip back up in its normal configuration. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp, 1);
			tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Return to low power if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8683 8683
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 *
 * PHY register access is refused with -EAGAIN while the PHY is in low
 * power, falls through to -EOPNOTSUPP in SERDES mode (no MII PHY), and
 * writes additionally require CAP_NET_ADMIN.  tp->lock serializes the
 * actual tg3_readphy/tg3_writephy register access.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8735 8735
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the
 * RX mode so the chip's VLAN-tag stripping setting matches.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);
}
8750 8750
/* VLAN acceleration hook: forget a deleted VLAN id so RX no longer
 * delivers to its (now stale) vlan device.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);
}
#endif
8761 8761
8762 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 8762 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8763 { 8763 {
8764 struct tg3 *tp = netdev_priv(dev); 8764 struct tg3 *tp = netdev_priv(dev);
8765 8765
8766 memcpy(ec, &tp->coal, sizeof(*ec)); 8766 memcpy(ec, &tp->coal, sizeof(*ec));
8767 return 0; 8767 return 0;
8768 } 8768 }
8769 8769
/* ethtool ->set_coalesce: validate and apply interrupt coalescing
 * parameters.  Returns -EINVAL for out-of-range values or for rx/tx
 * combinations that would generate no interrupts at all; otherwise
 * caches the values and pushes them to the chip if it is running.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	/* The during-irq tick and stats-block parameters are only
	 * supported on pre-5705 chips; elsewhere the limits stay 0 so
	 * any non-zero request is rejected by the range check below.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	/* Program the new values into the chip if it is up. */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
8823 8823
/* ethtool operations table for the tg3 driver; hooked up to the
 * net_device at probe time.
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings = tg3_get_settings,
	.set_settings = tg3_set_settings,
	.get_drvinfo = tg3_get_drvinfo,
	.get_regs_len = tg3_get_regs_len,
	.get_regs = tg3_get_regs,
	.get_wol = tg3_get_wol,
	.set_wol = tg3_set_wol,
	.get_msglevel = tg3_get_msglevel,
	.set_msglevel = tg3_set_msglevel,
	.nway_reset = tg3_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = tg3_get_eeprom_len,
	.get_eeprom = tg3_get_eeprom,
	.set_eeprom = tg3_set_eeprom,
	.get_ringparam = tg3_get_ringparam,
	.set_ringparam = tg3_set_ringparam,
	.get_pauseparam = tg3_get_pauseparam,
	.set_pauseparam = tg3_set_pauseparam,
	.get_rx_csum = tg3_get_rx_csum,
	.set_rx_csum = tg3_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = tg3_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso = ethtool_op_get_tso,
	.set_tso = tg3_set_tso,
#endif
	.self_test_count = tg3_get_test_count,
	.self_test = tg3_self_test,
	.get_strings = tg3_get_strings,
	.phys_id = tg3_phys_id,
	.get_stats_count = tg3_get_stats_count,
	.get_ethtool_stats = tg3_get_ethtool_stats,
	.get_coalesce = tg3_get_coalesce,
	.set_coalesce = tg3_set_coalesce,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
8863 8863
/* Determine the size of a serial-EEPROM style part by probing for the
 * point where addressing wraps back to the signature word at offset 0.
 * Leaves nvram_size at the default EEPROM_CHIP_SIZE on read errors or
 * an unrecognized signature.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Only probe parts carrying the standard magic or the
	 * 0xa5xxxxxx selfboot-style signature.
	 */
	if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
8895 8895
/* Determine the total NVRAM size.  Selfboot images (no standard magic
 * at offset 0) are sized by probing; otherwise the size is taken from
 * the directory word at offset 0xf0 (upper 16 bits, in KB units), with
 * 128KB (0x20000) as the fallback.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* Upper 16 bits hold the size in KB. */
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = 0x20000;
}
8917 8917
/* Decode NVRAM_CFG1 into flash vendor, page size and buffered/flash
 * flags for chips that predate the 5752-style vendor encoding.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear compat bypass so EEPROM
		 * accesses go through the normal path.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	/* Only 5750 and 5780-class chips encode the vendor in
	 * NVRAM_CFG1; everything else defaults to buffered Atmel.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
8970 8970
/* Decode NVRAM_CFG1 for 5752-family chips: vendor, buffered/flash
 * flags, TPM write protection, and the flash page size encoded in
 * NVRAM_CFG1_5752PAGE_SIZE_MASK.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Page size is only meaningful for flash parts. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9031 9031
/* Decode NVRAM_CFG1 for 5755-family chips.  Same scheme as 5752 but
 * with additional Atmel flash vendor codes and fixed page sizes per
 * vendor rather than a page-size field.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_4:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9072 9072
/* Decode NVRAM_CFG1 for 5787-family chips.  Same scheme as 5755 but
 * without the TPM-protection bit and with extra EEPROM vendor codes.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
9110 9110
9111 /* Chips other than 5700/5701 use the NVRAM for fetching info. */ 9111 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9112 static void __devinit tg3_nvram_init(struct tg3 *tp) 9112 static void __devinit tg3_nvram_init(struct tg3 *tp)
9113 { 9113 {
9114 int j; 9114 int j;
9115 9115
9116 tw32_f(GRC_EEPROM_ADDR, 9116 tw32_f(GRC_EEPROM_ADDR,
9117 (EEPROM_ADDR_FSM_RESET | 9117 (EEPROM_ADDR_FSM_RESET |
9118 (EEPROM_DEFAULT_CLOCK_PERIOD << 9118 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9119 EEPROM_ADDR_CLKPERD_SHIFT))); 9119 EEPROM_ADDR_CLKPERD_SHIFT)));
9120 9120
9121 /* XXX schedule_timeout() ... */ 9121 /* XXX schedule_timeout() ... */
9122 for (j = 0; j < 100; j++) 9122 for (j = 0; j < 100; j++)
9123 udelay(10); 9123 udelay(10);
9124 9124
9125 /* Enable seeprom accesses. */ 9125 /* Enable seeprom accesses. */
9126 tw32_f(GRC_LOCAL_CTRL, 9126 tw32_f(GRC_LOCAL_CTRL,
9127 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); 9127 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9128 udelay(100); 9128 udelay(100);
9129 9129
9130 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 9130 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9131 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { 9131 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9132 tp->tg3_flags |= TG3_FLAG_NVRAM; 9132 tp->tg3_flags |= TG3_FLAG_NVRAM;
9133 9133
9134 if (tg3_nvram_lock(tp)) { 9134 if (tg3_nvram_lock(tp)) {
9135 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, " 9135 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9136 "tg3_nvram_init failed.\n", tp->dev->name); 9136 "tg3_nvram_init failed.\n", tp->dev->name);
9137 return; 9137 return;
9138 } 9138 }
9139 tg3_enable_nvram_access(tp); 9139 tg3_enable_nvram_access(tp);
9140 9140
9141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 9141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9142 tg3_get_5752_nvram_info(tp); 9142 tg3_get_5752_nvram_info(tp);
9143 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 9143 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9144 tg3_get_5755_nvram_info(tp); 9144 tg3_get_5755_nvram_info(tp);
9145 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 9145 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9146 tg3_get_5787_nvram_info(tp); 9146 tg3_get_5787_nvram_info(tp);
9147 else 9147 else
9148 tg3_get_nvram_info(tp); 9148 tg3_get_nvram_info(tp);
9149 9149
9150 tg3_get_nvram_size(tp); 9150 tg3_get_nvram_size(tp);
9151 9151
9152 tg3_disable_nvram_access(tp); 9152 tg3_disable_nvram_access(tp);
9153 tg3_nvram_unlock(tp); 9153 tg3_nvram_unlock(tp);
9154 9154
9155 } else { 9155 } else {
9156 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); 9156 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9157 9157
9158 tg3_get_eeprom_size(tp); 9158 tg3_get_eeprom_size(tp);
9159 } 9159 }
9160 } 9160 }
9161 9161
/* Read one 32-bit word from the serial EEPROM through the
 * GRC_EEPROM_ADDR state machine (used when no NVRAM interface is
 * present).  @offset must be word-aligned and within
 * EEPROM_ADDR_ADDR_MASK.
 *
 * Returns 0 with the word stored in *val, -EINVAL on a bad offset,
 * or -EBUSY if the state machine never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address, device id and the
	 * read flag before programming the new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion: up to 10000 * 100us. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9195 9195
9196 #define NVRAM_CMD_TIMEOUT 10000 9196 #define NVRAM_CMD_TIMEOUT 10000
9197 9197
9198 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 9198 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9199 { 9199 {
9200 int i; 9200 int i;
9201 9201
9202 tw32(NVRAM_CMD, nvram_cmd); 9202 tw32(NVRAM_CMD, nvram_cmd);
9203 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 9203 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9204 udelay(10); 9204 udelay(10);
9205 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 9205 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9206 udelay(10); 9206 udelay(10);
9207 break; 9207 break;
9208 } 9208 }
9209 } 9209 }
9210 if (i == NVRAM_CMD_TIMEOUT) { 9210 if (i == NVRAM_CMD_TIMEOUT) {
9211 return -EBUSY; 9211 return -EBUSY;
9212 } 9212 }
9213 return 0; 9213 return 0;
9214 } 9214 }
9215 9215
9216 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) 9216 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9217 { 9217 {
9218 if ((tp->tg3_flags & TG3_FLAG_NVRAM) && 9218 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9219 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && 9219 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9220 (tp->tg3_flags2 & TG3_FLG2_FLASH) && 9220 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9221 (tp->nvram_jedecnum == JEDEC_ATMEL)) 9221 (tp->nvram_jedecnum == JEDEC_ATMEL))
9222 9222
9223 addr = ((addr / tp->nvram_pagesize) << 9223 addr = ((addr / tp->nvram_pagesize) <<
9224 ATMEL_AT45DB0X1B_PAGE_POS) + 9224 ATMEL_AT45DB0X1B_PAGE_POS) +
9225 (addr % tp->nvram_pagesize); 9225 (addr % tp->nvram_pagesize);
9226 9226
9227 return addr; 9227 return addr;
9228 } 9228 }
9229 9229
9230 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) 9230 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9231 { 9231 {
9232 if ((tp->tg3_flags & TG3_FLAG_NVRAM) && 9232 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9233 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && 9233 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9234 (tp->tg3_flags2 & TG3_FLG2_FLASH) && 9234 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9235 (tp->nvram_jedecnum == JEDEC_ATMEL)) 9235 (tp->nvram_jedecnum == JEDEC_ATMEL))
9236 9236
9237 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * 9237 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9238 tp->nvram_pagesize) + 9238 tp->nvram_pagesize) +
9239 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); 9239 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9240 9240
9241 return addr; 9241 return addr;
9242 } 9242 }
9243 9243
/* Read one 32-bit word from NVRAM at byte offset @offset into @val.
 *
 * Falls back to the legacy serial EEPROM path when the chip has no
 * NVRAM interface.  Otherwise the offset is translated to the device's
 * physical address scheme, the NVRAM hardware lock and access enable
 * are taken for the duration of the command, and a single-word READ
 * command is executed.
 *
 * Returns 0 on success; -EINVAL if the physical address exceeds
 * NVRAM_ADDR_MSK; otherwise the error from tg3_nvram_lock() or
 * tg3_nvram_exec_cmd().  @val is only written on success.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Data comes back byte-swapped from the NVRAM data register. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9275 9275
9276 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val) 9276 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9277 { 9277 {
9278 int err; 9278 int err;
9279 u32 tmp; 9279 u32 tmp;
9280 9280
9281 err = tg3_nvram_read(tp, offset, &tmp); 9281 err = tg3_nvram_read(tp, offset, &tmp);
9282 *val = swab32(tmp); 9282 *val = swab32(tmp);
9283 return err; 9283 return err;
9284 } 9284 }
9285 9285
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * byte offset @offset, one 32-bit word at a time, via the
 * GRC_EEPROM_ADDR / GRC_EEPROM_DATA register pair.
 *
 * Caller guarantees @offset and @len are dword aligned (see
 * tg3_nvram_write_block()).
 *
 * Returns 0 on success, or -EBUSY if any word's write transaction
 * fails to assert EEPROM_ADDR_COMPLETE within 10000 x 100us; words
 * written before the failure remain written.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Ack any stale completion status before starting a
		 * new transaction (COMPLETE is write-one-to-clear).
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion of this word's write. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9327 9327
9328 /* offset and length are dword aligned */ 9328 /* offset and length are dword aligned */
9329 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, 9329 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9330 u8 *buf) 9330 u8 *buf)
9331 { 9331 {
9332 int ret = 0; 9332 int ret = 0;
9333 u32 pagesize = tp->nvram_pagesize; 9333 u32 pagesize = tp->nvram_pagesize;
9334 u32 pagemask = pagesize - 1; 9334 u32 pagemask = pagesize - 1;
9335 u32 nvram_cmd; 9335 u32 nvram_cmd;
9336 u8 *tmp; 9336 u8 *tmp;
9337 9337
9338 tmp = kmalloc(pagesize, GFP_KERNEL); 9338 tmp = kmalloc(pagesize, GFP_KERNEL);
9339 if (tmp == NULL) 9339 if (tmp == NULL)
9340 return -ENOMEM; 9340 return -ENOMEM;
9341 9341
9342 while (len) { 9342 while (len) {
9343 int j; 9343 int j;
9344 u32 phy_addr, page_off, size; 9344 u32 phy_addr, page_off, size;
9345 9345
9346 phy_addr = offset & ~pagemask; 9346 phy_addr = offset & ~pagemask;
9347 9347
9348 for (j = 0; j < pagesize; j += 4) { 9348 for (j = 0; j < pagesize; j += 4) {
9349 if ((ret = tg3_nvram_read(tp, phy_addr + j, 9349 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9350 (u32 *) (tmp + j)))) 9350 (u32 *) (tmp + j))))
9351 break; 9351 break;
9352 } 9352 }
9353 if (ret) 9353 if (ret)
9354 break; 9354 break;
9355 9355
9356 page_off = offset & pagemask; 9356 page_off = offset & pagemask;
9357 size = pagesize; 9357 size = pagesize;
9358 if (len < size) 9358 if (len < size)
9359 size = len; 9359 size = len;
9360 9360
9361 len -= size; 9361 len -= size;
9362 9362
9363 memcpy(tmp + page_off, buf, size); 9363 memcpy(tmp + page_off, buf, size);
9364 9364
9365 offset = offset + (pagesize - page_off); 9365 offset = offset + (pagesize - page_off);
9366 9366
9367 tg3_enable_nvram_access(tp); 9367 tg3_enable_nvram_access(tp);
9368 9368
9369 /* 9369 /*
9370 * Before we can erase the flash page, we need 9370 * Before we can erase the flash page, we need
9371 * to issue a special "write enable" command. 9371 * to issue a special "write enable" command.
9372 */ 9372 */
9373 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 9373 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9374 9374
9375 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 9375 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9376 break; 9376 break;
9377 9377
9378 /* Erase the target page */ 9378 /* Erase the target page */
9379 tw32(NVRAM_ADDR, phy_addr); 9379 tw32(NVRAM_ADDR, phy_addr);
9380 9380
9381 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 9381 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9382 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 9382 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9383 9383
9384 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 9384 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9385 break; 9385 break;
9386 9386
9387 /* Issue another write enable to start the write. */ 9387 /* Issue another write enable to start the write. */
9388 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; 9388 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9389 9389
9390 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 9390 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9391 break; 9391 break;
9392 9392
9393 for (j = 0; j < pagesize; j += 4) { 9393 for (j = 0; j < pagesize; j += 4) {
9394 u32 data; 9394 u32 data;
9395 9395
9396 data = *((u32 *) (tmp + j)); 9396 data = *((u32 *) (tmp + j));
9397 tw32(NVRAM_WRDATA, cpu_to_be32(data)); 9397 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9398 9398
9399 tw32(NVRAM_ADDR, phy_addr + j); 9399 tw32(NVRAM_ADDR, phy_addr + j);
9400 9400
9401 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | 9401 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9402 NVRAM_CMD_WR; 9402 NVRAM_CMD_WR;
9403 9403
9404 if (j == 0) 9404 if (j == 0)
9405 nvram_cmd |= NVRAM_CMD_FIRST; 9405 nvram_cmd |= NVRAM_CMD_FIRST;
9406 else if (j == (pagesize - 4)) 9406 else if (j == (pagesize - 4))
9407 nvram_cmd |= NVRAM_CMD_LAST; 9407 nvram_cmd |= NVRAM_CMD_LAST;
9408 9408
9409 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) 9409 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9410 break; 9410 break;
9411 } 9411 }
9412 if (ret) 9412 if (ret)
9413 break; 9413 break;
9414 } 9414 }
9415 9415
9416 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; 9416 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9417 tg3_nvram_exec_cmd(tp, nvram_cmd); 9417 tg3_nvram_exec_cmd(tp, nvram_cmd);
9418 9418
9419 kfree(tmp); 9419 kfree(tmp);
9420 9420
9421 return ret; 9421 return ret;
9422 } 9422 }
9423 9423
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered flash / eeprom starting at
 * @offset, one 32-bit word per NVRAM command.  FIRST/LAST command
 * flags mark page boundaries for the flash controller; ST-brand parts
 * on pre-5752/5755/5787 chips additionally need an explicit WREN
 * command before the first word of each page.
 *
 * Returns 0 on success, or the first error from tg3_nvram_exec_cmd();
 * words written before the failure remain written.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST on a page boundary or the very first word;
		 * LAST on a page's final word or the block's final word.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts on older chips need a write-enable before
		 * each page's first word.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
			(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
			(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
			(tp->nvram_jedecnum == JEDEC_ST) &&
			(nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9474 9474
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: dispatch @len bytes from @buf at
 * @offset to the legacy EEPROM path, the buffered path, or the
 * unbuffered (page read-modify-write) path, depending on the device.
 *
 * For write-protected boards, the protect GPIO (GRC_LCLCTRL_GPIO_OUTPUT1)
 * is dropped around the write and restored afterwards.  For the NVRAM
 * paths the hardware lock, access enable, and GRC write-enable mode
 * are held for the duration.
 *
 * Returns 0 on success or an error code from the selected write path
 * (or from tg3_nvram_lock()).
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Temporarily de-assert the EEPROM write-protect GPIO. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert write protection. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
9529 9529
/* Maps a board's PCI subsystem (vendor, device) ID to the PHY chip it
 * is known to carry.  A phy_id of 0 marks boards with no copper PHY
 * (serdes/fiber variants).  Consumed by lookup_by_subsys().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9572 9572
9573 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp) 9573 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9574 { 9574 {
9575 int i; 9575 int i;
9576 9576
9577 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { 9577 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9578 if ((subsys_id_to_phy_id[i].subsys_vendor == 9578 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9579 tp->pdev->subsystem_vendor) && 9579 tp->pdev->subsystem_vendor) &&
9580 (subsys_id_to_phy_id[i].subsys_devid == 9580 (subsys_id_to_phy_id[i].subsys_devid ==
9581 tp->pdev->subsystem_device)) 9581 tp->pdev->subsystem_device))
9582 return &subsys_id_to_phy_id[i]; 9582 return &subsys_id_to_phy_id[i];
9583 } 9583 }
9584 return NULL; 9584 return NULL;
9585 } 9585 }
9586 9586
/* Parse the board configuration that bootcode leaves in NIC SRAM and
 * initialize tp->phy_id, tp->led_ctrl and the related tg3_flags /
 * tg3_flags2 bits from it.  If the SRAM signature is absent, the
 * defaults set before the signature check remain in effect
 * (PHY_ID_INVALID, LED_CTRL_MODE_PHY_1, write-protected onboard
 * device).
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* The secondary config word only exists with sane
		 * bootcode versions on chips newer than 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID that bootcode stored in SRAM. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards override the LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9738 9738
9739 static int __devinit tg3_phy_probe(struct tg3 *tp) 9739 static int __devinit tg3_phy_probe(struct tg3 *tp)
9740 { 9740 {
9741 u32 hw_phy_id_1, hw_phy_id_2; 9741 u32 hw_phy_id_1, hw_phy_id_2;
9742 u32 hw_phy_id, hw_phy_id_masked; 9742 u32 hw_phy_id, hw_phy_id_masked;
9743 int err; 9743 int err;
9744 9744
9745 /* Reading the PHY ID register can conflict with ASF 9745 /* Reading the PHY ID register can conflict with ASF
9746 * firwmare access to the PHY hardware. 9746 * firwmare access to the PHY hardware.
9747 */ 9747 */
9748 err = 0; 9748 err = 0;
9749 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 9749 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9750 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; 9750 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9751 } else { 9751 } else {
9752 /* Now read the physical PHY_ID from the chip and verify 9752 /* Now read the physical PHY_ID from the chip and verify
9753 * that it is sane. If it doesn't look good, we fall back 9753 * that it is sane. If it doesn't look good, we fall back
9754 * to either the hard-coded table based PHY_ID and failing 9754 * to either the hard-coded table based PHY_ID and failing
9755 * that the value found in the eeprom area. 9755 * that the value found in the eeprom area.
9756 */ 9756 */
9757 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); 9757 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9758 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); 9758 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9759 9759
9760 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; 9760 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9761 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; 9761 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9762 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; 9762 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9763 9763
9764 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK; 9764 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9765 } 9765 }
9766 9766
9767 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) { 9767 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9768 tp->phy_id = hw_phy_id; 9768 tp->phy_id = hw_phy_id;
9769 if (hw_phy_id_masked == PHY_ID_BCM8002) 9769 if (hw_phy_id_masked == PHY_ID_BCM8002)
9770 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 9770 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9771 else 9771 else
9772 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; 9772 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9773 } else { 9773 } else {
9774 if (tp->phy_id != PHY_ID_INVALID) { 9774 if (tp->phy_id != PHY_ID_INVALID) {
9775 /* Do nothing, phy ID already set up in 9775 /* Do nothing, phy ID already set up in
9776 * tg3_get_eeprom_hw_cfg(). 9776 * tg3_get_eeprom_hw_cfg().
9777 */ 9777 */
9778 } else { 9778 } else {
9779 struct subsys_tbl_ent *p; 9779 struct subsys_tbl_ent *p;
9780 9780
9781 /* No eeprom signature? Try the hardcoded 9781 /* No eeprom signature? Try the hardcoded
9782 * subsys device table. 9782 * subsys device table.
9783 */ 9783 */
9784 p = lookup_by_subsys(tp); 9784 p = lookup_by_subsys(tp);
9785 if (!p) 9785 if (!p)
9786 return -ENODEV; 9786 return -ENODEV;
9787 9787
9788 tp->phy_id = p->phy_id; 9788 tp->phy_id = p->phy_id;
9789 if (!tp->phy_id || 9789 if (!tp->phy_id ||
9790 tp->phy_id == PHY_ID_BCM8002) 9790 tp->phy_id == PHY_ID_BCM8002)
9791 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 9791 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9792 } 9792 }
9793 } 9793 }
9794 9794
9795 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && 9795 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9796 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 9796 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9797 u32 bmsr, adv_reg, tg3_ctrl; 9797 u32 bmsr, adv_reg, tg3_ctrl;
9798 9798
9799 tg3_readphy(tp, MII_BMSR, &bmsr); 9799 tg3_readphy(tp, MII_BMSR, &bmsr);
9800 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 9800 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9801 (bmsr & BMSR_LSTATUS)) 9801 (bmsr & BMSR_LSTATUS))
9802 goto skip_phy_reset; 9802 goto skip_phy_reset;
9803 9803
9804 err = tg3_phy_reset(tp); 9804 err = tg3_phy_reset(tp);
9805 if (err) 9805 if (err)
9806 return err; 9806 return err;
9807 9807
9808 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | 9808 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9809 ADVERTISE_100HALF | ADVERTISE_100FULL | 9809 ADVERTISE_100HALF | ADVERTISE_100FULL |
9810 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); 9810 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9811 tg3_ctrl = 0; 9811 tg3_ctrl = 0;
9812 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { 9812 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9813 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | 9813 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9814 MII_TG3_CTRL_ADV_1000_FULL); 9814 MII_TG3_CTRL_ADV_1000_FULL);
9815 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 9815 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9816 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) 9816 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9817 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | 9817 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9818 MII_TG3_CTRL_ENABLE_AS_MASTER); 9818 MII_TG3_CTRL_ENABLE_AS_MASTER);
9819 } 9819 }
9820 9820
9821 if (!tg3_copper_is_advertising_all(tp)) { 9821 if (!tg3_copper_is_advertising_all(tp)) {
9822 tg3_writephy(tp, MII_ADVERTISE, adv_reg); 9822 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9823 9823
9824 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 9824 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9825 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); 9825 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9826 9826
9827 tg3_writephy(tp, MII_BMCR, 9827 tg3_writephy(tp, MII_BMCR,
9828 BMCR_ANENABLE | BMCR_ANRESTART); 9828 BMCR_ANENABLE | BMCR_ANRESTART);
9829 } 9829 }
9830 tg3_phy_set_wirespeed(tp); 9830 tg3_phy_set_wirespeed(tp);
9831 9831
9832 tg3_writephy(tp, MII_ADVERTISE, adv_reg); 9832 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9833 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) 9833 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9834 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); 9834 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9835 } 9835 }
9836 9836
9837 skip_phy_reset: 9837 skip_phy_reset:
9838 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 9838 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9839 err = tg3_init_5401phy_dsp(tp); 9839 err = tg3_init_5401phy_dsp(tp);
9840 if (err) 9840 if (err)
9841 return err; 9841 return err;
9842 } 9842 }
9843 9843
9844 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) { 9844 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9845 err = tg3_init_5401phy_dsp(tp); 9845 err = tg3_init_5401phy_dsp(tp);
9846 } 9846 }
9847 9847
9848 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 9848 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9849 tp->link_config.advertising = 9849 tp->link_config.advertising =
9850 (ADVERTISED_1000baseT_Half | 9850 (ADVERTISED_1000baseT_Half |
9851 ADVERTISED_1000baseT_Full | 9851 ADVERTISED_1000baseT_Full |
9852 ADVERTISED_Autoneg | 9852 ADVERTISED_Autoneg |
9853 ADVERTISED_FIBRE); 9853 ADVERTISED_FIBRE);
9854 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) 9854 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9855 tp->link_config.advertising &= 9855 tp->link_config.advertising &=
9856 ~(ADVERTISED_1000baseT_Half | 9856 ~(ADVERTISED_1000baseT_Half |
9857 ADVERTISED_1000baseT_Full); 9857 ADVERTISED_1000baseT_Full);
9858 9858
9859 return err; 9859 return err;
9860 } 9860 }
9861 9861
9862 static void __devinit tg3_read_partno(struct tg3 *tp) 9862 static void __devinit tg3_read_partno(struct tg3 *tp)
9863 { 9863 {
9864 unsigned char vpd_data[256]; 9864 unsigned char vpd_data[256];
9865 int i; 9865 int i;
9866 u32 magic; 9866 u32 magic;
9867 9867
9868 if (tg3_nvram_read_swab(tp, 0x0, &magic)) 9868 if (tg3_nvram_read_swab(tp, 0x0, &magic))
9869 goto out_not_found; 9869 goto out_not_found;
9870 9870
9871 if (magic == TG3_EEPROM_MAGIC) { 9871 if (magic == TG3_EEPROM_MAGIC) {
9872 for (i = 0; i < 256; i += 4) { 9872 for (i = 0; i < 256; i += 4) {
9873 u32 tmp; 9873 u32 tmp;
9874 9874
9875 if (tg3_nvram_read(tp, 0x100 + i, &tmp)) 9875 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9876 goto out_not_found; 9876 goto out_not_found;
9877 9877
9878 vpd_data[i + 0] = ((tmp >> 0) & 0xff); 9878 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9879 vpd_data[i + 1] = ((tmp >> 8) & 0xff); 9879 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9880 vpd_data[i + 2] = ((tmp >> 16) & 0xff); 9880 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9881 vpd_data[i + 3] = ((tmp >> 24) & 0xff); 9881 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9882 } 9882 }
9883 } else { 9883 } else {
9884 int vpd_cap; 9884 int vpd_cap;
9885 9885
9886 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); 9886 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9887 for (i = 0; i < 256; i += 4) { 9887 for (i = 0; i < 256; i += 4) {
9888 u32 tmp, j = 0; 9888 u32 tmp, j = 0;
9889 u16 tmp16; 9889 u16 tmp16;
9890 9890
9891 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, 9891 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9892 i); 9892 i);
9893 while (j++ < 100) { 9893 while (j++ < 100) {
9894 pci_read_config_word(tp->pdev, vpd_cap + 9894 pci_read_config_word(tp->pdev, vpd_cap +
9895 PCI_VPD_ADDR, &tmp16); 9895 PCI_VPD_ADDR, &tmp16);
9896 if (tmp16 & 0x8000) 9896 if (tmp16 & 0x8000)
9897 break; 9897 break;
9898 msleep(1); 9898 msleep(1);
9899 } 9899 }
9900 if (!(tmp16 & 0x8000)) 9900 if (!(tmp16 & 0x8000))
9901 goto out_not_found; 9901 goto out_not_found;
9902 9902
9903 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, 9903 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9904 &tmp); 9904 &tmp);
9905 tmp = cpu_to_le32(tmp); 9905 tmp = cpu_to_le32(tmp);
9906 memcpy(&vpd_data[i], &tmp, 4); 9906 memcpy(&vpd_data[i], &tmp, 4);
9907 } 9907 }
9908 } 9908 }
9909 9909
9910 /* Now parse and find the part number. */ 9910 /* Now parse and find the part number. */
9911 for (i = 0; i < 256; ) { 9911 for (i = 0; i < 256; ) {
9912 unsigned char val = vpd_data[i]; 9912 unsigned char val = vpd_data[i];
9913 int block_end; 9913 int block_end;
9914 9914
9915 if (val == 0x82 || val == 0x91) { 9915 if (val == 0x82 || val == 0x91) {
9916 i = (i + 3 + 9916 i = (i + 3 +
9917 (vpd_data[i + 1] + 9917 (vpd_data[i + 1] +
9918 (vpd_data[i + 2] << 8))); 9918 (vpd_data[i + 2] << 8)));
9919 continue; 9919 continue;
9920 } 9920 }
9921 9921
9922 if (val != 0x90) 9922 if (val != 0x90)
9923 goto out_not_found; 9923 goto out_not_found;
9924 9924
9925 block_end = (i + 3 + 9925 block_end = (i + 3 +
9926 (vpd_data[i + 1] + 9926 (vpd_data[i + 1] +
9927 (vpd_data[i + 2] << 8))); 9927 (vpd_data[i + 2] << 8)));
9928 i += 3; 9928 i += 3;
9929 while (i < block_end) { 9929 while (i < block_end) {
9930 if (vpd_data[i + 0] == 'P' && 9930 if (vpd_data[i + 0] == 'P' &&
9931 vpd_data[i + 1] == 'N') { 9931 vpd_data[i + 1] == 'N') {
9932 int partno_len = vpd_data[i + 2]; 9932 int partno_len = vpd_data[i + 2];
9933 9933
9934 if (partno_len > 24) 9934 if (partno_len > 24)
9935 goto out_not_found; 9935 goto out_not_found;
9936 9936
9937 memcpy(tp->board_part_number, 9937 memcpy(tp->board_part_number,
9938 &vpd_data[i + 3], 9938 &vpd_data[i + 3],
9939 partno_len); 9939 partno_len);
9940 9940
9941 /* Success. */ 9941 /* Success. */
9942 return; 9942 return;
9943 } 9943 }
9944 } 9944 }
9945 9945
9946 /* Part number not found. */ 9946 /* Part number not found. */
9947 goto out_not_found; 9947 goto out_not_found;
9948 } 9948 }
9949 9949
9950 out_not_found: 9950 out_not_found:
9951 strcpy(tp->board_part_number, "none"); 9951 strcpy(tp->board_part_number, "none");
9952 } 9952 }
9953 9953
9954 static void __devinit tg3_read_fw_ver(struct tg3 *tp) 9954 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9955 { 9955 {
9956 u32 val, offset, start; 9956 u32 val, offset, start;
9957 9957
9958 if (tg3_nvram_read_swab(tp, 0, &val)) 9958 if (tg3_nvram_read_swab(tp, 0, &val))
9959 return; 9959 return;
9960 9960
9961 if (val != TG3_EEPROM_MAGIC) 9961 if (val != TG3_EEPROM_MAGIC)
9962 return; 9962 return;
9963 9963
9964 if (tg3_nvram_read_swab(tp, 0xc, &offset) || 9964 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9965 tg3_nvram_read_swab(tp, 0x4, &start)) 9965 tg3_nvram_read_swab(tp, 0x4, &start))
9966 return; 9966 return;
9967 9967
9968 offset = tg3_nvram_logical_addr(tp, offset); 9968 offset = tg3_nvram_logical_addr(tp, offset);
9969 if (tg3_nvram_read_swab(tp, offset, &val)) 9969 if (tg3_nvram_read_swab(tp, offset, &val))
9970 return; 9970 return;
9971 9971
9972 if ((val & 0xfc000000) == 0x0c000000) { 9972 if ((val & 0xfc000000) == 0x0c000000) {
9973 u32 ver_offset, addr; 9973 u32 ver_offset, addr;
9974 int i; 9974 int i;
9975 9975
9976 if (tg3_nvram_read_swab(tp, offset + 4, &val) || 9976 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9977 tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) 9977 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9978 return; 9978 return;
9979 9979
9980 if (val != 0) 9980 if (val != 0)
9981 return; 9981 return;
9982 9982
9983 addr = offset + ver_offset - start; 9983 addr = offset + ver_offset - start;
9984 for (i = 0; i < 16; i += 4) { 9984 for (i = 0; i < 16; i += 4) {
9985 if (tg3_nvram_read(tp, addr + i, &val)) 9985 if (tg3_nvram_read(tp, addr + i, &val))
9986 return; 9986 return;
9987 9987
9988 val = cpu_to_le32(val); 9988 val = cpu_to_le32(val);
9989 memcpy(tp->fw_ver + i, &val, 4); 9989 memcpy(tp->fw_ver + i, &val, 4);
9990 } 9990 }
9991 } 9991 }
9992 } 9992 }
9993 9993
9994 static int __devinit tg3_get_invariants(struct tg3 *tp) 9994 static int __devinit tg3_get_invariants(struct tg3 *tp)
9995 { 9995 {
9996 static struct pci_device_id write_reorder_chipsets[] = { 9996 static struct pci_device_id write_reorder_chipsets[] = {
9997 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9997 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9998 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9998 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9999 { PCI_DEVICE(PCI_VENDOR_ID_VIA, 9999 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10000 PCI_DEVICE_ID_VIA_8385_0) }, 10000 PCI_DEVICE_ID_VIA_8385_0) },
10001 { }, 10001 { },
10002 }; 10002 };
10003 u32 misc_ctrl_reg; 10003 u32 misc_ctrl_reg;
10004 u32 cacheline_sz_reg; 10004 u32 cacheline_sz_reg;
10005 u32 pci_state_reg, grc_misc_cfg; 10005 u32 pci_state_reg, grc_misc_cfg;
10006 u32 val; 10006 u32 val;
10007 u16 pci_cmd; 10007 u16 pci_cmd;
10008 int err; 10008 int err;
10009 10009
10010 /* Force memory write invalidate off. If we leave it on, 10010 /* Force memory write invalidate off. If we leave it on,
10011 * then on 5700_BX chips we have to enable a workaround. 10011 * then on 5700_BX chips we have to enable a workaround.
10012 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 10012 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10013 * to match the cacheline size. The Broadcom driver have this 10013 * to match the cacheline size. The Broadcom driver have this
10014 * workaround but turns MWI off all the times so never uses 10014 * workaround but turns MWI off all the times so never uses
10015 * it. This seems to suggest that the workaround is insufficient. 10015 * it. This seems to suggest that the workaround is insufficient.
10016 */ 10016 */
10017 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 10017 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10018 pci_cmd &= ~PCI_COMMAND_INVALIDATE; 10018 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10019 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 10019 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10020 10020
10021 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL 10021 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10022 * has the register indirect write enable bit set before 10022 * has the register indirect write enable bit set before
10023 * we try to access any of the MMIO registers. It is also 10023 * we try to access any of the MMIO registers. It is also
10024 * critical that the PCI-X hw workaround situation is decided 10024 * critical that the PCI-X hw workaround situation is decided
10025 * before that as well. 10025 * before that as well.
10026 */ 10026 */
10027 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 10027 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10028 &misc_ctrl_reg); 10028 &misc_ctrl_reg);
10029 10029
10030 tp->pci_chip_rev_id = (misc_ctrl_reg >> 10030 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10031 MISC_HOST_CTRL_CHIPREV_SHIFT); 10031 MISC_HOST_CTRL_CHIPREV_SHIFT);
10032 10032
10033 /* Wrong chip ID in 5752 A0. This code can be removed later 10033 /* Wrong chip ID in 5752 A0. This code can be removed later
10034 * as A0 is not in production. 10034 * as A0 is not in production.
10035 */ 10035 */
10036 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) 10036 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10037 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 10037 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10038 10038
10039 /* If we have 5702/03 A1 or A2 on certain ICH chipsets, 10039 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10040 * we need to disable memory and use config. cycles 10040 * we need to disable memory and use config. cycles
10041 * only to access all registers. The 5702/03 chips 10041 * only to access all registers. The 5702/03 chips
10042 * can mistakenly decode the special cycles from the 10042 * can mistakenly decode the special cycles from the
10043 * ICH chipsets as memory write cycles, causing corruption 10043 * ICH chipsets as memory write cycles, causing corruption
10044 * of register and memory space. Only certain ICH bridges 10044 * of register and memory space. Only certain ICH bridges
10045 * will drive special cycles with non-zero data during the 10045 * will drive special cycles with non-zero data during the
10046 * address phase which can fall within the 5703's address 10046 * address phase which can fall within the 5703's address
10047 * range. This is not an ICH bug as the PCI spec allows 10047 * range. This is not an ICH bug as the PCI spec allows
10048 * non-zero address during special cycles. However, only 10048 * non-zero address during special cycles. However, only
10049 * these ICH bridges are known to drive non-zero addresses 10049 * these ICH bridges are known to drive non-zero addresses
10050 * during special cycles. 10050 * during special cycles.
10051 * 10051 *
10052 * Since special cycles do not cross PCI bridges, we only 10052 * Since special cycles do not cross PCI bridges, we only
10053 * enable this workaround if the 5703 is on the secondary 10053 * enable this workaround if the 5703 is on the secondary
10054 * bus of these ICH bridges. 10054 * bus of these ICH bridges.
10055 */ 10055 */
10056 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || 10056 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10057 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { 10057 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10058 static struct tg3_dev_id { 10058 static struct tg3_dev_id {
10059 u32 vendor; 10059 u32 vendor;
10060 u32 device; 10060 u32 device;
10061 u32 rev; 10061 u32 rev;
10062 } ich_chipsets[] = { 10062 } ich_chipsets[] = {
10063 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, 10063 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10064 PCI_ANY_ID }, 10064 PCI_ANY_ID },
10065 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, 10065 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10066 PCI_ANY_ID }, 10066 PCI_ANY_ID },
10067 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, 10067 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10068 0xa }, 10068 0xa },
10069 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, 10069 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10070 PCI_ANY_ID }, 10070 PCI_ANY_ID },
10071 { }, 10071 { },
10072 }; 10072 };
10073 struct tg3_dev_id *pci_id = &ich_chipsets[0]; 10073 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10074 struct pci_dev *bridge = NULL; 10074 struct pci_dev *bridge = NULL;
10075 10075
10076 while (pci_id->vendor != 0) { 10076 while (pci_id->vendor != 0) {
10077 bridge = pci_get_device(pci_id->vendor, pci_id->device, 10077 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10078 bridge); 10078 bridge);
10079 if (!bridge) { 10079 if (!bridge) {
10080 pci_id++; 10080 pci_id++;
10081 continue; 10081 continue;
10082 } 10082 }
10083 if (pci_id->rev != PCI_ANY_ID) { 10083 if (pci_id->rev != PCI_ANY_ID) {
10084 u8 rev; 10084 u8 rev;
10085 10085
10086 pci_read_config_byte(bridge, PCI_REVISION_ID, 10086 pci_read_config_byte(bridge, PCI_REVISION_ID,
10087 &rev); 10087 &rev);
10088 if (rev > pci_id->rev) 10088 if (rev > pci_id->rev)
10089 continue; 10089 continue;
10090 } 10090 }
10091 if (bridge->subordinate && 10091 if (bridge->subordinate &&
10092 (bridge->subordinate->number == 10092 (bridge->subordinate->number ==
10093 tp->pdev->bus->number)) { 10093 tp->pdev->bus->number)) {
10094 10094
10095 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; 10095 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10096 pci_dev_put(bridge); 10096 pci_dev_put(bridge);
10097 break; 10097 break;
10098 } 10098 }
10099 } 10099 }
10100 } 10100 }
10101 10101
10102 /* The EPB bridge inside 5714, 5715, and 5780 cannot support 10102 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10103 * DMA addresses > 40-bit. This bridge may have other additional 10103 * DMA addresses > 40-bit. This bridge may have other additional
10104 * 57xx devices behind it in some 4-port NIC designs for example. 10104 * 57xx devices behind it in some 4-port NIC designs for example.
10105 * Any tg3 device found behind the bridge will also need the 40-bit 10105 * Any tg3 device found behind the bridge will also need the 40-bit
10106 * DMA workaround. 10106 * DMA workaround.
10107 */ 10107 */
10108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || 10108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { 10109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10110 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; 10110 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10111 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 10111 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10112 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 10112 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10113 } 10113 }
10114 else { 10114 else {
10115 struct pci_dev *bridge = NULL; 10115 struct pci_dev *bridge = NULL;
10116 10116
10117 do { 10117 do {
10118 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, 10118 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10119 PCI_DEVICE_ID_SERVERWORKS_EPB, 10119 PCI_DEVICE_ID_SERVERWORKS_EPB,
10120 bridge); 10120 bridge);
10121 if (bridge && bridge->subordinate && 10121 if (bridge && bridge->subordinate &&
10122 (bridge->subordinate->number <= 10122 (bridge->subordinate->number <=
10123 tp->pdev->bus->number) && 10123 tp->pdev->bus->number) &&
10124 (bridge->subordinate->subordinate >= 10124 (bridge->subordinate->subordinate >=
10125 tp->pdev->bus->number)) { 10125 tp->pdev->bus->number)) {
10126 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 10126 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10127 pci_dev_put(bridge); 10127 pci_dev_put(bridge);
10128 break; 10128 break;
10129 } 10129 }
10130 } while (bridge); 10130 } while (bridge);
10131 } 10131 }
10132 10132
10133 /* Initialize misc host control in PCI block. */ 10133 /* Initialize misc host control in PCI block. */
10134 tp->misc_host_ctrl |= (misc_ctrl_reg & 10134 tp->misc_host_ctrl |= (misc_ctrl_reg &
10135 MISC_HOST_CTRL_CHIPREV); 10135 MISC_HOST_CTRL_CHIPREV);
10136 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 10136 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10137 tp->misc_host_ctrl); 10137 tp->misc_host_ctrl);
10138 10138
10139 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ, 10139 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10140 &cacheline_sz_reg); 10140 &cacheline_sz_reg);
10141 10141
10142 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff; 10142 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10143 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff; 10143 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10144 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff; 10144 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10145 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff; 10145 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10146 10146
10147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 10147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 10148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 10149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 10150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10151 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 10151 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10152 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 10152 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10153 10153
10154 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || 10154 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10155 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 10155 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10156 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; 10156 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10157 10157
10158 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 10158 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 10159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { 10160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10161 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; 10161 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10162 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 10162 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10163 } else 10163 } else
10164 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1; 10164 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10165 } 10165 }
10166 10166
10167 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && 10167 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10168 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && 10168 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10169 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && 10169 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10170 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 && 10170 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10171 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) 10171 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10172 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; 10172 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10173 10173
10174 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) 10174 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10175 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 10175 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10176 10176
10177 /* If we have an AMD 762 or VIA K8T800 chipset, write 10177 /* If we have an AMD 762 or VIA K8T800 chipset, write
10178 * reordering to the mailbox registers done by the host 10178 * reordering to the mailbox registers done by the host
10179 * controller can cause major troubles. We read back from 10179 * controller can cause major troubles. We read back from
10180 * every mailbox register write to force the writes to be 10180 * every mailbox register write to force the writes to be
10181 * posted to the chip in order. 10181 * posted to the chip in order.
10182 */ 10182 */
10183 if (pci_dev_present(write_reorder_chipsets) && 10183 if (pci_dev_present(write_reorder_chipsets) &&
10184 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 10184 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10185 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 10185 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10186 10186
10187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 10187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10188 tp->pci_lat_timer < 64) { 10188 tp->pci_lat_timer < 64) {
10189 tp->pci_lat_timer = 64; 10189 tp->pci_lat_timer = 64;
10190 10190
10191 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0); 10191 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10192 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8); 10192 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10193 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16); 10193 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10194 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24); 10194 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10195 10195
10196 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ, 10196 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10197 cacheline_sz_reg); 10197 cacheline_sz_reg);
10198 } 10198 }
10199 10199
10200 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 10200 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10201 &pci_state_reg); 10201 &pci_state_reg);
10202 10202
10203 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { 10203 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10204 tp->tg3_flags |= TG3_FLAG_PCIX_MODE; 10204 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10205 10205
10206 /* If this is a 5700 BX chipset, and we are in PCI-X 10206 /* If this is a 5700 BX chipset, and we are in PCI-X
10207 * mode, enable register write workaround. 10207 * mode, enable register write workaround.
10208 * 10208 *
10209 * The workaround is to use indirect register accesses 10209 * The workaround is to use indirect register accesses
10210 * for all chip writes not to mailbox registers. 10210 * for all chip writes not to mailbox registers.
10211 */ 10211 */
10212 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { 10212 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10213 u32 pm_reg; 10213 u32 pm_reg;
10214 u16 pci_cmd; 10214 u16 pci_cmd;
10215 10215
10216 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 10216 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10217 10217
10218 /* The chip can have it's power management PCI config 10218 /* The chip can have it's power management PCI config
10219 * space registers clobbered due to this bug. 10219 * space registers clobbered due to this bug.
10220 * So explicitly force the chip into D0 here. 10220 * So explicitly force the chip into D0 here.
10221 */ 10221 */
10222 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, 10222 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10223 &pm_reg); 10223 &pm_reg);
10224 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 10224 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10225 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 10225 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10226 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, 10226 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10227 pm_reg); 10227 pm_reg);
10228 10228
10229 /* Also, force SERR#/PERR# in PCI command. */ 10229 /* Also, force SERR#/PERR# in PCI command. */
10230 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 10230 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10231 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; 10231 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10232 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 10232 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10233 } 10233 }
10234 } 10234 }
10235 10235
10236 /* 5700 BX chips need to have their TX producer index mailboxes 10236 /* 5700 BX chips need to have their TX producer index mailboxes
10237 * written twice to workaround a bug. 10237 * written twice to workaround a bug.
10238 */ 10238 */
10239 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) 10239 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10240 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; 10240 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10241 10241
10242 /* Back to back register writes can cause problems on this chip, 10242 /* Back to back register writes can cause problems on this chip,
10243 * the workaround is to read back all reg writes except those to 10243 * the workaround is to read back all reg writes except those to
10244 * mailbox regs. See tg3_write_indirect_reg32(). 10244 * mailbox regs. See tg3_write_indirect_reg32().
10245 * 10245 *
10246 * PCI Express 5750_A0 rev chips need this workaround too. 10246 * PCI Express 5750_A0 rev chips need this workaround too.
10247 */ 10247 */
10248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 10248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10249 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 10249 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10250 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) 10250 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10251 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG; 10251 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10252 10252
10253 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 10253 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10254 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; 10254 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10255 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 10255 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10256 tp->tg3_flags |= TG3_FLAG_PCI_32BIT; 10256 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10257 10257
10258 /* Chip-specific fixup from Broadcom driver */ 10258 /* Chip-specific fixup from Broadcom driver */
10259 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && 10259 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10260 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { 10260 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10261 pci_state_reg |= PCISTATE_RETRY_SAME_DMA; 10261 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10262 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); 10262 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10263 } 10263 }
10264 10264
10265 /* Default fast path register access methods */ 10265 /* Default fast path register access methods */
10266 tp->read32 = tg3_read32; 10266 tp->read32 = tg3_read32;
10267 tp->write32 = tg3_write32; 10267 tp->write32 = tg3_write32;
10268 tp->read32_mbox = tg3_read32; 10268 tp->read32_mbox = tg3_read32;
10269 tp->write32_mbox = tg3_write32; 10269 tp->write32_mbox = tg3_write32;
10270 tp->write32_tx_mbox = tg3_write32; 10270 tp->write32_tx_mbox = tg3_write32;
10271 tp->write32_rx_mbox = tg3_write32; 10271 tp->write32_rx_mbox = tg3_write32;
10272 10272
10273 /* Various workaround register access methods */ 10273 /* Various workaround register access methods */
10274 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) 10274 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10275 tp->write32 = tg3_write_indirect_reg32; 10275 tp->write32 = tg3_write_indirect_reg32;
10276 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) 10276 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10277 tp->write32 = tg3_write_flush_reg32; 10277 tp->write32 = tg3_write_flush_reg32;
10278 10278
10279 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || 10279 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10280 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { 10280 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10281 tp->write32_tx_mbox = tg3_write32_tx_mbox; 10281 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10282 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) 10282 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10283 tp->write32_rx_mbox = tg3_write_flush_reg32; 10283 tp->write32_rx_mbox = tg3_write_flush_reg32;
10284 } 10284 }
10285 10285
10286 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { 10286 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10287 tp->read32 = tg3_read_indirect_reg32; 10287 tp->read32 = tg3_read_indirect_reg32;
10288 tp->write32 = tg3_write_indirect_reg32; 10288 tp->write32 = tg3_write_indirect_reg32;
10289 tp->read32_mbox = tg3_read_indirect_mbox; 10289 tp->read32_mbox = tg3_read_indirect_mbox;
10290 tp->write32_mbox = tg3_write_indirect_mbox; 10290 tp->write32_mbox = tg3_write_indirect_mbox;
10291 tp->write32_tx_mbox = tg3_write_indirect_mbox; 10291 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10292 tp->write32_rx_mbox = tg3_write_indirect_mbox; 10292 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10293 10293
10294 iounmap(tp->regs); 10294 iounmap(tp->regs);
10295 tp->regs = NULL; 10295 tp->regs = NULL;
10296 10296
10297 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 10297 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10298 pci_cmd &= ~PCI_COMMAND_MEMORY; 10298 pci_cmd &= ~PCI_COMMAND_MEMORY;
10299 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); 10299 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10300 } 10300 }
10301 10301
10302 if (tp->write32 == tg3_write_indirect_reg32 || 10302 if (tp->write32 == tg3_write_indirect_reg32 ||
10303 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 10303 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 10304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) 10305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10306 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; 10306 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10307 10307
10308 /* Get eeprom hw config before calling tg3_set_power_state(). 10308 /* Get eeprom hw config before calling tg3_set_power_state().
10309 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be 10309 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10310 * determined before calling tg3_set_power_state() so that 10310 * determined before calling tg3_set_power_state() so that
10311 * we know whether or not to switch out of Vaux power. 10311 * we know whether or not to switch out of Vaux power.
10312 * When the flag is set, it means that GPIO1 is used for eeprom 10312 * When the flag is set, it means that GPIO1 is used for eeprom
10313 * write protect and also implies that it is a LOM where GPIOs 10313 * write protect and also implies that it is a LOM where GPIOs
10314 * are not used to switch power. 10314 * are not used to switch power.
10315 */ 10315 */
10316 tg3_get_eeprom_hw_cfg(tp); 10316 tg3_get_eeprom_hw_cfg(tp);
10317 10317
10318 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). 10318 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10319 * GPIO1 driven high will bring 5700's external PHY out of reset. 10319 * GPIO1 driven high will bring 5700's external PHY out of reset.
10320 * It is also used as eeprom write protect on LOMs. 10320 * It is also used as eeprom write protect on LOMs.
10321 */ 10321 */
10322 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; 10322 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10323 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || 10323 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10324 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) 10324 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10325 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | 10325 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10326 GRC_LCLCTRL_GPIO_OUTPUT1); 10326 GRC_LCLCTRL_GPIO_OUTPUT1);
10327 /* Unused GPIO3 must be driven as output on 5752 because there 10327 /* Unused GPIO3 must be driven as output on 5752 because there
10328 * are no pull-up resistors on unused GPIO pins. 10328 * are no pull-up resistors on unused GPIO pins.
10329 */ 10329 */
10330 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 10330 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10331 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 10331 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10332 10332
10333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 10333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10334 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 10334 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10335 10335
10336 /* Force the chip into D0. */ 10336 /* Force the chip into D0. */
10337 err = tg3_set_power_state(tp, PCI_D0); 10337 err = tg3_set_power_state(tp, PCI_D0);
10338 if (err) { 10338 if (err) {
10339 printk(KERN_ERR PFX "(%s) transition to D0 failed\n", 10339 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10340 pci_name(tp->pdev)); 10340 pci_name(tp->pdev));
10341 return err; 10341 return err;
10342 } 10342 }
10343 10343
10344 /* 5700 B0 chips do not support checksumming correctly due 10344 /* 5700 B0 chips do not support checksumming correctly due
10345 * to hardware bugs. 10345 * to hardware bugs.
10346 */ 10346 */
10347 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) 10347 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10348 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; 10348 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10349 10349
10350 /* Derive initial jumbo mode from MTU assigned in 10350 /* Derive initial jumbo mode from MTU assigned in
10351 * ether_setup() via the alloc_etherdev() call 10351 * ether_setup() via the alloc_etherdev() call
10352 */ 10352 */
10353 if (tp->dev->mtu > ETH_DATA_LEN && 10353 if (tp->dev->mtu > ETH_DATA_LEN &&
10354 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 10354 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10355 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 10355 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10356 10356
10357 /* Determine WakeOnLan speed to use. */ 10357 /* Determine WakeOnLan speed to use. */
10358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 10358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10359 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || 10359 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10360 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || 10360 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10361 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { 10361 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10362 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); 10362 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10363 } else { 10363 } else {
10364 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; 10364 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10365 } 10365 }
10366 10366
10367 /* A few boards don't want Ethernet@WireSpeed phy feature */ 10367 /* A few boards don't want Ethernet@WireSpeed phy feature */
10368 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || 10368 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10369 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && 10369 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10370 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && 10370 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10371 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || 10371 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10372 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 10372 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10373 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; 10373 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10374 10374
10375 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || 10375 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10376 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) 10376 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10377 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; 10377 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10378 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 10378 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10379 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 10379 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10380 10380
10381 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 10381 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 10382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 10383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10384 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 10384 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10385 else 10385 else
10386 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 10386 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10387 } 10387 }
10388 10388
10389 tp->coalesce_mode = 0; 10389 tp->coalesce_mode = 0;
10390 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 10390 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10391 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 10391 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10392 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 10392 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10393 10393
10394 /* Initialize MAC MI mode, polling disabled. */ 10394 /* Initialize MAC MI mode, polling disabled. */
10395 tw32_f(MAC_MI_MODE, tp->mi_mode); 10395 tw32_f(MAC_MI_MODE, tp->mi_mode);
10396 udelay(80); 10396 udelay(80);
10397 10397
10398 /* Initialize data/descriptor byte/word swapping. */ 10398 /* Initialize data/descriptor byte/word swapping. */
10399 val = tr32(GRC_MODE); 10399 val = tr32(GRC_MODE);
10400 val &= GRC_MODE_HOST_STACKUP; 10400 val &= GRC_MODE_HOST_STACKUP;
10401 tw32(GRC_MODE, val | tp->grc_mode); 10401 tw32(GRC_MODE, val | tp->grc_mode);
10402 10402
10403 tg3_switch_clocks(tp); 10403 tg3_switch_clocks(tp);
10404 10404
10405 /* Clear this out for sanity. */ 10405 /* Clear this out for sanity. */
10406 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 10406 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10407 10407
10408 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 10408 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10409 &pci_state_reg); 10409 &pci_state_reg);
10410 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && 10410 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10411 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { 10411 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10412 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); 10412 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10413 10413
10414 if (chiprevid == CHIPREV_ID_5701_A0 || 10414 if (chiprevid == CHIPREV_ID_5701_A0 ||
10415 chiprevid == CHIPREV_ID_5701_B0 || 10415 chiprevid == CHIPREV_ID_5701_B0 ||
10416 chiprevid == CHIPREV_ID_5701_B2 || 10416 chiprevid == CHIPREV_ID_5701_B2 ||
10417 chiprevid == CHIPREV_ID_5701_B5) { 10417 chiprevid == CHIPREV_ID_5701_B5) {
10418 void __iomem *sram_base; 10418 void __iomem *sram_base;
10419 10419
10420 /* Write some dummy words into the SRAM status block 10420 /* Write some dummy words into the SRAM status block
10421 * area, see if it reads back correctly. If the return 10421 * area, see if it reads back correctly. If the return
10422 * value is bad, force enable the PCIX workaround. 10422 * value is bad, force enable the PCIX workaround.
10423 */ 10423 */
10424 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; 10424 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10425 10425
10426 writel(0x00000000, sram_base); 10426 writel(0x00000000, sram_base);
10427 writel(0x00000000, sram_base + 4); 10427 writel(0x00000000, sram_base + 4);
10428 writel(0xffffffff, sram_base + 4); 10428 writel(0xffffffff, sram_base + 4);
10429 if (readl(sram_base) != 0x00000000) 10429 if (readl(sram_base) != 0x00000000)
10430 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 10430 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10431 } 10431 }
10432 } 10432 }
10433 10433
10434 udelay(50); 10434 udelay(50);
10435 tg3_nvram_init(tp); 10435 tg3_nvram_init(tp);
10436 10436
10437 grc_misc_cfg = tr32(GRC_MISC_CFG); 10437 grc_misc_cfg = tr32(GRC_MISC_CFG);
10438 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 10438 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10439 10439
10440 /* Broadcom's driver says that CIOBE multisplit has a bug */ 10440 /* Broadcom's driver says that CIOBE multisplit has a bug */
10441 #if 0 10441 #if 0
10442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 10442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10443 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) { 10443 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10444 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE; 10444 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10445 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ; 10445 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10446 } 10446 }
10447 #endif 10447 #endif
10448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 10448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10449 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || 10449 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10450 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) 10450 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10451 tp->tg3_flags2 |= TG3_FLG2_IS_5788; 10451 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10452 10452
10453 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && 10453 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10454 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) 10454 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10455 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; 10455 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10456 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { 10456 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10457 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | 10457 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10458 HOSTCC_MODE_CLRTICK_TXBD); 10458 HOSTCC_MODE_CLRTICK_TXBD);
10459 10459
10460 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; 10460 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10461 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 10461 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10462 tp->misc_host_ctrl); 10462 tp->misc_host_ctrl);
10463 } 10463 }
10464 10464
10465 /* these are limited to 10/100 only */ 10465 /* these are limited to 10/100 only */
10466 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 10466 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10467 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || 10467 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10468 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && 10468 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10469 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && 10469 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10470 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || 10470 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10471 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || 10471 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10472 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || 10472 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10473 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && 10473 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10474 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || 10474 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10475 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F))) 10475 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10476 tp->tg3_flags |= TG3_FLAG_10_100_ONLY; 10476 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10477 10477
10478 err = tg3_phy_probe(tp); 10478 err = tg3_phy_probe(tp);
10479 if (err) { 10479 if (err) {
10480 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", 10480 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10481 pci_name(tp->pdev), err); 10481 pci_name(tp->pdev), err);
10482 /* ... but do not return immediately ... */ 10482 /* ... but do not return immediately ... */
10483 } 10483 }
10484 10484
10485 tg3_read_partno(tp); 10485 tg3_read_partno(tp);
10486 tg3_read_fw_ver(tp); 10486 tg3_read_fw_ver(tp);
10487 10487
10488 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 10488 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10489 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; 10489 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10490 } else { 10490 } else {
10491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) 10491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10492 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; 10492 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10493 else 10493 else
10494 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; 10494 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10495 } 10495 }
10496 10496
10497 /* 5700 {AX,BX} chips have a broken status block link 10497 /* 5700 {AX,BX} chips have a broken status block link
10498 * change bit implementation, so we must use the 10498 * change bit implementation, so we must use the
10499 * status register in those cases. 10499 * status register in those cases.
10500 */ 10500 */
10501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) 10501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10502 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; 10502 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10503 else 10503 else
10504 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; 10504 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10505 10505
10506 /* The led_ctrl is set during tg3_phy_probe, here we might 10506 /* The led_ctrl is set during tg3_phy_probe, here we might
10507 * have to force the link status polling mechanism based 10507 * have to force the link status polling mechanism based
10508 * upon subsystem IDs. 10508 * upon subsystem IDs.
10509 */ 10509 */
10510 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && 10510 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10511 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 10511 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10512 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | 10512 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10513 TG3_FLAG_USE_LINKCHG_REG); 10513 TG3_FLAG_USE_LINKCHG_REG);
10514 } 10514 }
10515 10515
10516 /* For all SERDES we poll the MAC status register. */ 10516 /* For all SERDES we poll the MAC status register. */
10517 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) 10517 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10518 tp->tg3_flags |= TG3_FLAG_POLL_SERDES; 10518 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10519 else 10519 else
10520 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 10520 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10521 10521
10522 /* All chips before 5787 can get confused if TX buffers 10522 /* All chips before 5787 can get confused if TX buffers
10523 * straddle the 4GB address boundary in some cases. 10523 * straddle the 4GB address boundary in some cases.
10524 */ 10524 */
10525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 10525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 10526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10527 tp->dev->hard_start_xmit = tg3_start_xmit; 10527 tp->dev->hard_start_xmit = tg3_start_xmit;
10528 else 10528 else
10529 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; 10529 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10530 10530
10531 tp->rx_offset = 2; 10531 tp->rx_offset = 2;
10532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 10532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10533 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) 10533 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10534 tp->rx_offset = 0; 10534 tp->rx_offset = 0;
10535 10535
10536 /* By default, disable wake-on-lan. User can change this 10536 /* By default, disable wake-on-lan. User can change this
10537 * using ETHTOOL_SWOL. 10537 * using ETHTOOL_SWOL.
10538 */ 10538 */
10539 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; 10539 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10540 10540
10541 return err; 10541 return err;
10542 } 10542 }
10543 10543
10544 #ifdef CONFIG_SPARC64 10544 #ifdef CONFIG_SPARC64
10545 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) 10545 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10546 { 10546 {
10547 struct net_device *dev = tp->dev; 10547 struct net_device *dev = tp->dev;
10548 struct pci_dev *pdev = tp->pdev; 10548 struct pci_dev *pdev = tp->pdev;
10549 struct pcidev_cookie *pcp = pdev->sysdata; 10549 struct pcidev_cookie *pcp = pdev->sysdata;
10550 10550
10551 if (pcp != NULL) { 10551 if (pcp != NULL) {
10552 int node = pcp->prom_node; 10552 unsigned char *addr;
10553 int len;
10553 10554
10554 if (prom_getproplen(node, "local-mac-address") == 6) { 10555 addr = of_get_property(pcp->prom_node, "local-mac-address",
10555 prom_getproperty(node, "local-mac-address", 10556 &len);
10556 dev->dev_addr, 6); 10557 if (addr && len == 6) {
10558 memcpy(dev->dev_addr, addr, 6);
10557 memcpy(dev->perm_addr, dev->dev_addr, 6); 10559 memcpy(dev->perm_addr, dev->dev_addr, 6);
10558 return 0; 10560 return 0;
10559 } 10561 }
10560 } 10562 }
10561 return -ENODEV; 10563 return -ENODEV;
10562 } 10564 }
10563 10565
10564 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) 10566 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10565 { 10567 {
10566 struct net_device *dev = tp->dev; 10568 struct net_device *dev = tp->dev;
10567 10569
10568 memcpy(dev->dev_addr, idprom->id_ethaddr, 6); 10570 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10569 memcpy(dev->perm_addr, idprom->id_ethaddr, 6); 10571 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10570 return 0; 10572 return 0;
10571 } 10573 }
10572 #endif 10574 #endif
10573 10575
10574 static int __devinit tg3_get_device_address(struct tg3 *tp) 10576 static int __devinit tg3_get_device_address(struct tg3 *tp)
10575 { 10577 {
10576 struct net_device *dev = tp->dev; 10578 struct net_device *dev = tp->dev;
10577 u32 hi, lo, mac_offset; 10579 u32 hi, lo, mac_offset;
10578 int addr_ok = 0; 10580 int addr_ok = 0;
10579 10581
10580 #ifdef CONFIG_SPARC64 10582 #ifdef CONFIG_SPARC64
10581 if (!tg3_get_macaddr_sparc(tp)) 10583 if (!tg3_get_macaddr_sparc(tp))
10582 return 0; 10584 return 0;
10583 #endif 10585 #endif
10584 10586
10585 mac_offset = 0x7c; 10587 mac_offset = 0x7c;
10586 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || 10588 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10587 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 10589 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10588 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 10590 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10589 mac_offset = 0xcc; 10591 mac_offset = 0xcc;
10590 if (tg3_nvram_lock(tp)) 10592 if (tg3_nvram_lock(tp))
10591 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); 10593 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10592 else 10594 else
10593 tg3_nvram_unlock(tp); 10595 tg3_nvram_unlock(tp);
10594 } 10596 }
10595 10597
10596 /* First try to get it from MAC address mailbox. */ 10598 /* First try to get it from MAC address mailbox. */
10597 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); 10599 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10598 if ((hi >> 16) == 0x484b) { 10600 if ((hi >> 16) == 0x484b) {
10599 dev->dev_addr[0] = (hi >> 8) & 0xff; 10601 dev->dev_addr[0] = (hi >> 8) & 0xff;
10600 dev->dev_addr[1] = (hi >> 0) & 0xff; 10602 dev->dev_addr[1] = (hi >> 0) & 0xff;
10601 10603
10602 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); 10604 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10603 dev->dev_addr[2] = (lo >> 24) & 0xff; 10605 dev->dev_addr[2] = (lo >> 24) & 0xff;
10604 dev->dev_addr[3] = (lo >> 16) & 0xff; 10606 dev->dev_addr[3] = (lo >> 16) & 0xff;
10605 dev->dev_addr[4] = (lo >> 8) & 0xff; 10607 dev->dev_addr[4] = (lo >> 8) & 0xff;
10606 dev->dev_addr[5] = (lo >> 0) & 0xff; 10608 dev->dev_addr[5] = (lo >> 0) & 0xff;
10607 10609
10608 /* Some old bootcode may report a 0 MAC address in SRAM */ 10610 /* Some old bootcode may report a 0 MAC address in SRAM */
10609 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); 10611 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10610 } 10612 }
10611 if (!addr_ok) { 10613 if (!addr_ok) {
10612 /* Next, try NVRAM. */ 10614 /* Next, try NVRAM. */
10613 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) && 10615 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10614 !tg3_nvram_read(tp, mac_offset + 4, &lo)) { 10616 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10615 dev->dev_addr[0] = ((hi >> 16) & 0xff); 10617 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10616 dev->dev_addr[1] = ((hi >> 24) & 0xff); 10618 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10617 dev->dev_addr[2] = ((lo >> 0) & 0xff); 10619 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10618 dev->dev_addr[3] = ((lo >> 8) & 0xff); 10620 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10619 dev->dev_addr[4] = ((lo >> 16) & 0xff); 10621 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10620 dev->dev_addr[5] = ((lo >> 24) & 0xff); 10622 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10621 } 10623 }
10622 /* Finally just fetch it out of the MAC control regs. */ 10624 /* Finally just fetch it out of the MAC control regs. */
10623 else { 10625 else {
10624 hi = tr32(MAC_ADDR_0_HIGH); 10626 hi = tr32(MAC_ADDR_0_HIGH);
10625 lo = tr32(MAC_ADDR_0_LOW); 10627 lo = tr32(MAC_ADDR_0_LOW);
10626 10628
10627 dev->dev_addr[5] = lo & 0xff; 10629 dev->dev_addr[5] = lo & 0xff;
10628 dev->dev_addr[4] = (lo >> 8) & 0xff; 10630 dev->dev_addr[4] = (lo >> 8) & 0xff;
10629 dev->dev_addr[3] = (lo >> 16) & 0xff; 10631 dev->dev_addr[3] = (lo >> 16) & 0xff;
10630 dev->dev_addr[2] = (lo >> 24) & 0xff; 10632 dev->dev_addr[2] = (lo >> 24) & 0xff;
10631 dev->dev_addr[1] = hi & 0xff; 10633 dev->dev_addr[1] = hi & 0xff;
10632 dev->dev_addr[0] = (hi >> 8) & 0xff; 10634 dev->dev_addr[0] = (hi >> 8) & 0xff;
10633 } 10635 }
10634 } 10636 }
10635 10637
10636 if (!is_valid_ether_addr(&dev->dev_addr[0])) { 10638 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10637 #ifdef CONFIG_SPARC64 10639 #ifdef CONFIG_SPARC64
10638 if (!tg3_get_default_macaddr_sparc(tp)) 10640 if (!tg3_get_default_macaddr_sparc(tp))
10639 return 0; 10641 return 0;
10640 #endif 10642 #endif
10641 return -EINVAL; 10643 return -EINVAL;
10642 } 10644 }
10643 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 10645 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10644 return 0; 10646 return 0;
10645 } 10647 }
10646 10648
10647 #define BOUNDARY_SINGLE_CACHELINE 1 10649 #define BOUNDARY_SINGLE_CACHELINE 1
10648 #define BOUNDARY_MULTI_CACHELINE 2 10650 #define BOUNDARY_MULTI_CACHELINE 2
10649 10651
10650 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 10652 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10651 { 10653 {
10652 int cacheline_size; 10654 int cacheline_size;
10653 u8 byte; 10655 u8 byte;
10654 int goal; 10656 int goal;
10655 10657
10656 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); 10658 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10657 if (byte == 0) 10659 if (byte == 0)
10658 cacheline_size = 1024; 10660 cacheline_size = 1024;
10659 else 10661 else
10660 cacheline_size = (int) byte * 4; 10662 cacheline_size = (int) byte * 4;
10661 10663
10662 /* On 5703 and later chips, the boundary bits have no 10664 /* On 5703 and later chips, the boundary bits have no
10663 * effect. 10665 * effect.
10664 */ 10666 */
10665 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 10667 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10666 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && 10668 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10667 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 10669 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10668 goto out; 10670 goto out;
10669 10671
10670 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 10672 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10671 goal = BOUNDARY_MULTI_CACHELINE; 10673 goal = BOUNDARY_MULTI_CACHELINE;
10672 #else 10674 #else
10673 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) 10675 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10674 goal = BOUNDARY_SINGLE_CACHELINE; 10676 goal = BOUNDARY_SINGLE_CACHELINE;
10675 #else 10677 #else
10676 goal = 0; 10678 goal = 0;
10677 #endif 10679 #endif
10678 #endif 10680 #endif
10679 10681
10680 if (!goal) 10682 if (!goal)
10681 goto out; 10683 goto out;
10682 10684
10683 /* PCI controllers on most RISC systems tend to disconnect 10685 /* PCI controllers on most RISC systems tend to disconnect
10684 * when a device tries to burst across a cache-line boundary. 10686 * when a device tries to burst across a cache-line boundary.
10685 * Therefore, letting tg3 do so just wastes PCI bandwidth. 10687 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10686 * 10688 *
10687 * Unfortunately, for PCI-E there are only limited 10689 * Unfortunately, for PCI-E there are only limited
10688 * write-side controls for this, and thus for reads 10690 * write-side controls for this, and thus for reads
10689 * we will still get the disconnects. We'll also waste 10691 * we will still get the disconnects. We'll also waste
10690 * these PCI cycles for both read and write for chips 10692 * these PCI cycles for both read and write for chips
10691 * other than 5700 and 5701 which do not implement the 10693 * other than 5700 and 5701 which do not implement the
10692 * boundary bits. 10694 * boundary bits.
10693 */ 10695 */
10694 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && 10696 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10695 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { 10697 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10696 switch (cacheline_size) { 10698 switch (cacheline_size) {
10697 case 16: 10699 case 16:
10698 case 32: 10700 case 32:
10699 case 64: 10701 case 64:
10700 case 128: 10702 case 128:
10701 if (goal == BOUNDARY_SINGLE_CACHELINE) { 10703 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10702 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | 10704 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10703 DMA_RWCTRL_WRITE_BNDRY_128_PCIX); 10705 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10704 } else { 10706 } else {
10705 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 10707 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10706 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 10708 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10707 } 10709 }
10708 break; 10710 break;
10709 10711
10710 case 256: 10712 case 256:
10711 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | 10713 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10712 DMA_RWCTRL_WRITE_BNDRY_256_PCIX); 10714 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10713 break; 10715 break;
10714 10716
10715 default: 10717 default:
10716 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | 10718 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10717 DMA_RWCTRL_WRITE_BNDRY_384_PCIX); 10719 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10718 break; 10720 break;
10719 }; 10721 };
10720 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 10722 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10721 switch (cacheline_size) { 10723 switch (cacheline_size) {
10722 case 16: 10724 case 16:
10723 case 32: 10725 case 32:
10724 case 64: 10726 case 64:
10725 if (goal == BOUNDARY_SINGLE_CACHELINE) { 10727 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10726 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 10728 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10727 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; 10729 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10728 break; 10730 break;
10729 } 10731 }
10730 /* fallthrough */ 10732 /* fallthrough */
10731 case 128: 10733 case 128:
10732 default: 10734 default:
10733 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; 10735 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10734 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; 10736 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10735 break; 10737 break;
10736 }; 10738 };
10737 } else { 10739 } else {
10738 switch (cacheline_size) { 10740 switch (cacheline_size) {
10739 case 16: 10741 case 16:
10740 if (goal == BOUNDARY_SINGLE_CACHELINE) { 10742 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10741 val |= (DMA_RWCTRL_READ_BNDRY_16 | 10743 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10742 DMA_RWCTRL_WRITE_BNDRY_16); 10744 DMA_RWCTRL_WRITE_BNDRY_16);
10743 break; 10745 break;
10744 } 10746 }
10745 /* fallthrough */ 10747 /* fallthrough */
10746 case 32: 10748 case 32:
10747 if (goal == BOUNDARY_SINGLE_CACHELINE) { 10749 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10748 val |= (DMA_RWCTRL_READ_BNDRY_32 | 10750 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10749 DMA_RWCTRL_WRITE_BNDRY_32); 10751 DMA_RWCTRL_WRITE_BNDRY_32);
10750 break; 10752 break;
10751 } 10753 }
10752 /* fallthrough */ 10754 /* fallthrough */
10753 case 64: 10755 case 64:
10754 if (goal == BOUNDARY_SINGLE_CACHELINE) { 10756 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10755 val |= (DMA_RWCTRL_READ_BNDRY_64 | 10757 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10756 DMA_RWCTRL_WRITE_BNDRY_64); 10758 DMA_RWCTRL_WRITE_BNDRY_64);
10757 break; 10759 break;
10758 } 10760 }
10759 /* fallthrough */ 10761 /* fallthrough */
10760 case 128: 10762 case 128:
10761 if (goal == BOUNDARY_SINGLE_CACHELINE) { 10763 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10762 val |= (DMA_RWCTRL_READ_BNDRY_128 | 10764 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10763 DMA_RWCTRL_WRITE_BNDRY_128); 10765 DMA_RWCTRL_WRITE_BNDRY_128);
10764 break; 10766 break;
10765 } 10767 }
10766 /* fallthrough */ 10768 /* fallthrough */
10767 case 256: 10769 case 256:
10768 val |= (DMA_RWCTRL_READ_BNDRY_256 | 10770 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10769 DMA_RWCTRL_WRITE_BNDRY_256); 10771 DMA_RWCTRL_WRITE_BNDRY_256);
10770 break; 10772 break;
10771 case 512: 10773 case 512:
10772 val |= (DMA_RWCTRL_READ_BNDRY_512 | 10774 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10773 DMA_RWCTRL_WRITE_BNDRY_512); 10775 DMA_RWCTRL_WRITE_BNDRY_512);
10774 break; 10776 break;
10775 case 1024: 10777 case 1024:
10776 default: 10778 default:
10777 val |= (DMA_RWCTRL_READ_BNDRY_1024 | 10779 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10778 DMA_RWCTRL_WRITE_BNDRY_1024); 10780 DMA_RWCTRL_WRITE_BNDRY_1024);
10779 break; 10781 break;
10780 }; 10782 };
10781 } 10783 }
10782 10784
10783 out: 10785 out:
10784 return val; 10786 return val;
10785 } 10787 }
10786 10788
/* Run one transfer through the chip's internal DMA engine to test it.
 *
 * A single internal buffer descriptor is built for the host buffer at
 * @buf_dma (@size bytes) and handed to the read-DMA engine when
 * @to_device is non-zero (host -> NIC) or to the write-DMA engine when
 * it is zero (NIC -> host).  Completion is polled for up to 40 * 100us.
 *
 * Returns 0 when the matching completion FIFO reports the descriptor,
 * -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and both DMA engines before the
	 * test so stale state cannot satisfy the completion poll below.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host side of the transfer; 0x00002100 is the
	 * NIC-local mbuf address used as the SRAM side of the copy
	 * (the disabled readback in tg3_test_dma peeks at 0x2100).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	/* NOTE(review): descriptor flag bits taken from Broadcom
	 * reference code; exact bit semantics are not documented here.
	 */
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Queue the descriptor to the engine selected above. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO until our descriptor address shows
	 * up in the low 16 bits, or give up after 40 * 100us.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
10867 10869
10868 #define TEST_BUFFER_SIZE 0x2000 10870 #define TEST_BUFFER_SIZE 0x2000
10869 10871
/* Select and program the DMA read/write control register (dma_rwctrl)
 * for this chip/bus combination, then - on 5700/5701 only - run a
 * host<->NIC DMA loopback of TEST_BUFFER_SIZE bytes to detect the
 * write-DMA boundary bug, tightening the write boundary to 16 bytes
 * if corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the DMA test buffer cannot be
 * allocated, or -ENODEV if the DMA engine times out or corrupts data
 * even at the 16-byte boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base value: PCI write/read command codes, then let
	 * tg3_calc_dma_bndry() fold in the boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-type specific watermark / workaround bits.  The magic
	 * constants below come from Broadcom reference settings.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble of the value computed above. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback test; all other
	 * chips are done once the register is programmed.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern out, read it back, verify.  On the first
	 * corruption the write boundary is tightened to 16 bytes and
	 * the test is retried; a corruption at 16 bytes is fatal.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: retry with the 16-byte
				 * write boundary workaround.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11050 11052
11051 static void __devinit tg3_init_link_config(struct tg3 *tp) 11053 static void __devinit tg3_init_link_config(struct tg3 *tp)
11052 { 11054 {
11053 tp->link_config.advertising = 11055 tp->link_config.advertising =
11054 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 11056 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11055 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 11057 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11056 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | 11058 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11057 ADVERTISED_Autoneg | ADVERTISED_MII); 11059 ADVERTISED_Autoneg | ADVERTISED_MII);
11058 tp->link_config.speed = SPEED_INVALID; 11060 tp->link_config.speed = SPEED_INVALID;
11059 tp->link_config.duplex = DUPLEX_INVALID; 11061 tp->link_config.duplex = DUPLEX_INVALID;
11060 tp->link_config.autoneg = AUTONEG_ENABLE; 11062 tp->link_config.autoneg = AUTONEG_ENABLE;
11061 tp->link_config.active_speed = SPEED_INVALID; 11063 tp->link_config.active_speed = SPEED_INVALID;
11062 tp->link_config.active_duplex = DUPLEX_INVALID; 11064 tp->link_config.active_duplex = DUPLEX_INVALID;
11063 tp->link_config.phy_is_low_power = 0; 11065 tp->link_config.phy_is_low_power = 0;
11064 tp->link_config.orig_speed = SPEED_INVALID; 11066 tp->link_config.orig_speed = SPEED_INVALID;
11065 tp->link_config.orig_duplex = DUPLEX_INVALID; 11067 tp->link_config.orig_duplex = DUPLEX_INVALID;
11066 tp->link_config.orig_autoneg = AUTONEG_INVALID; 11068 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11067 } 11069 }
11068 11070
11069 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 11071 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11070 { 11072 {
11071 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 11073 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11072 tp->bufmgr_config.mbuf_read_dma_low_water = 11074 tp->bufmgr_config.mbuf_read_dma_low_water =
11073 DEFAULT_MB_RDMA_LOW_WATER_5705; 11075 DEFAULT_MB_RDMA_LOW_WATER_5705;
11074 tp->bufmgr_config.mbuf_mac_rx_low_water = 11076 tp->bufmgr_config.mbuf_mac_rx_low_water =
11075 DEFAULT_MB_MACRX_LOW_WATER_5705; 11077 DEFAULT_MB_MACRX_LOW_WATER_5705;
11076 tp->bufmgr_config.mbuf_high_water = 11078 tp->bufmgr_config.mbuf_high_water =
11077 DEFAULT_MB_HIGH_WATER_5705; 11079 DEFAULT_MB_HIGH_WATER_5705;
11078 11080
11079 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 11081 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11080 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; 11082 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11081 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 11083 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11082 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; 11084 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11083 tp->bufmgr_config.mbuf_high_water_jumbo = 11085 tp->bufmgr_config.mbuf_high_water_jumbo =
11084 DEFAULT_MB_HIGH_WATER_JUMBO_5780; 11086 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11085 } else { 11087 } else {
11086 tp->bufmgr_config.mbuf_read_dma_low_water = 11088 tp->bufmgr_config.mbuf_read_dma_low_water =
11087 DEFAULT_MB_RDMA_LOW_WATER; 11089 DEFAULT_MB_RDMA_LOW_WATER;
11088 tp->bufmgr_config.mbuf_mac_rx_low_water = 11090 tp->bufmgr_config.mbuf_mac_rx_low_water =
11089 DEFAULT_MB_MACRX_LOW_WATER; 11091 DEFAULT_MB_MACRX_LOW_WATER;
11090 tp->bufmgr_config.mbuf_high_water = 11092 tp->bufmgr_config.mbuf_high_water =
11091 DEFAULT_MB_HIGH_WATER; 11093 DEFAULT_MB_HIGH_WATER;
11092 11094
11093 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = 11095 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11094 DEFAULT_MB_RDMA_LOW_WATER_JUMBO; 11096 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11095 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = 11097 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11096 DEFAULT_MB_MACRX_LOW_WATER_JUMBO; 11098 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11097 tp->bufmgr_config.mbuf_high_water_jumbo = 11099 tp->bufmgr_config.mbuf_high_water_jumbo =
11098 DEFAULT_MB_HIGH_WATER_JUMBO; 11100 DEFAULT_MB_HIGH_WATER_JUMBO;
11099 } 11101 }
11100 11102
11101 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; 11103 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11102 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 11104 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11103 } 11105 }
11104 11106
11105 static char * __devinit tg3_phy_string(struct tg3 *tp) 11107 static char * __devinit tg3_phy_string(struct tg3 *tp)
11106 { 11108 {
11107 switch (tp->phy_id & PHY_ID_MASK) { 11109 switch (tp->phy_id & PHY_ID_MASK) {
11108 case PHY_ID_BCM5400: return "5400"; 11110 case PHY_ID_BCM5400: return "5400";
11109 case PHY_ID_BCM5401: return "5401"; 11111 case PHY_ID_BCM5401: return "5401";
11110 case PHY_ID_BCM5411: return "5411"; 11112 case PHY_ID_BCM5411: return "5411";
11111 case PHY_ID_BCM5701: return "5701"; 11113 case PHY_ID_BCM5701: return "5701";
11112 case PHY_ID_BCM5703: return "5703"; 11114 case PHY_ID_BCM5703: return "5703";
11113 case PHY_ID_BCM5704: return "5704"; 11115 case PHY_ID_BCM5704: return "5704";
11114 case PHY_ID_BCM5705: return "5705"; 11116 case PHY_ID_BCM5705: return "5705";
11115 case PHY_ID_BCM5750: return "5750"; 11117 case PHY_ID_BCM5750: return "5750";
11116 case PHY_ID_BCM5752: return "5752"; 11118 case PHY_ID_BCM5752: return "5752";
11117 case PHY_ID_BCM5714: return "5714"; 11119 case PHY_ID_BCM5714: return "5714";
11118 case PHY_ID_BCM5780: return "5780"; 11120 case PHY_ID_BCM5780: return "5780";
11119 case PHY_ID_BCM5755: return "5755"; 11121 case PHY_ID_BCM5755: return "5755";
11120 case PHY_ID_BCM5787: return "5787"; 11122 case PHY_ID_BCM5787: return "5787";
11121 case PHY_ID_BCM8002: return "8002/serdes"; 11123 case PHY_ID_BCM8002: return "8002/serdes";
11122 case 0: return "serdes"; 11124 case 0: return "serdes";
11123 default: return "unknown"; 11125 default: return "unknown";
11124 }; 11126 };
11125 } 11127 }
11126 11128
11127 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) 11129 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11128 { 11130 {
11129 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 11131 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11130 strcpy(str, "PCI Express"); 11132 strcpy(str, "PCI Express");
11131 return str; 11133 return str;
11132 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { 11134 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11133 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; 11135 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11134 11136
11135 strcpy(str, "PCIX:"); 11137 strcpy(str, "PCIX:");
11136 11138
11137 if ((clock_ctrl == 7) || 11139 if ((clock_ctrl == 7) ||
11138 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == 11140 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11139 GRC_MISC_CFG_BOARD_ID_5704CIOBE)) 11141 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11140 strcat(str, "133MHz"); 11142 strcat(str, "133MHz");
11141 else if (clock_ctrl == 0) 11143 else if (clock_ctrl == 0)
11142 strcat(str, "33MHz"); 11144 strcat(str, "33MHz");
11143 else if (clock_ctrl == 2) 11145 else if (clock_ctrl == 2)
11144 strcat(str, "50MHz"); 11146 strcat(str, "50MHz");
11145 else if (clock_ctrl == 4) 11147 else if (clock_ctrl == 4)
11146 strcat(str, "66MHz"); 11148 strcat(str, "66MHz");
11147 else if (clock_ctrl == 6) 11149 else if (clock_ctrl == 6)
11148 strcat(str, "100MHz"); 11150 strcat(str, "100MHz");
11149 } else { 11151 } else {
11150 strcpy(str, "PCI:"); 11152 strcpy(str, "PCI:");
11151 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) 11153 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11152 strcat(str, "66MHz"); 11154 strcat(str, "66MHz");
11153 else 11155 else
11154 strcat(str, "33MHz"); 11156 strcat(str, "33MHz");
11155 } 11157 }
11156 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) 11158 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11157 strcat(str, ":32-bit"); 11159 strcat(str, ":32-bit");
11158 else 11160 else
11159 strcat(str, ":64-bit"); 11161 strcat(str, ":64-bit");
11160 return str; 11162 return str;
11161 } 11163 }
11162 11164
11163 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) 11165 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11164 { 11166 {
11165 struct pci_dev *peer; 11167 struct pci_dev *peer;
11166 unsigned int func, devnr = tp->pdev->devfn & ~7; 11168 unsigned int func, devnr = tp->pdev->devfn & ~7;
11167 11169
11168 for (func = 0; func < 8; func++) { 11170 for (func = 0; func < 8; func++) {
11169 peer = pci_get_slot(tp->pdev->bus, devnr | func); 11171 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11170 if (peer && peer != tp->pdev) 11172 if (peer && peer != tp->pdev)
11171 break; 11173 break;
11172 pci_dev_put(peer); 11174 pci_dev_put(peer);
11173 } 11175 }
11174 /* 5704 can be configured in single-port mode, set peer to 11176 /* 5704 can be configured in single-port mode, set peer to
11175 * tp->pdev in that case. 11177 * tp->pdev in that case.
11176 */ 11178 */
11177 if (!peer) { 11179 if (!peer) {
11178 peer = tp->pdev; 11180 peer = tp->pdev;
11179 return peer; 11181 return peer;
11180 } 11182 }
11181 11183
11182 /* 11184 /*
11183 * We don't need to keep the refcount elevated; there's no way 11185 * We don't need to keep the refcount elevated; there's no way
11184 * to remove one half of this device without removing the other 11186 * to remove one half of this device without removing the other
11185 */ 11187 */
11186 pci_dev_put(peer); 11188 pci_dev_put(peer);
11187 11189
11188 return peer; 11190 return peer;
11189 } 11191 }
11190 11192
11191 static void __devinit tg3_init_coal(struct tg3 *tp) 11193 static void __devinit tg3_init_coal(struct tg3 *tp)
11192 { 11194 {
11193 struct ethtool_coalesce *ec = &tp->coal; 11195 struct ethtool_coalesce *ec = &tp->coal;
11194 11196
11195 memset(ec, 0, sizeof(*ec)); 11197 memset(ec, 0, sizeof(*ec));
11196 ec->cmd = ETHTOOL_GCOALESCE; 11198 ec->cmd = ETHTOOL_GCOALESCE;
11197 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; 11199 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11198 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; 11200 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11199 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; 11201 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11200 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; 11202 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11201 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; 11203 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11202 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; 11204 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11203 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; 11205 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11204 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; 11206 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11205 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; 11207 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11206 11208
11207 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | 11209 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11208 HOSTCC_MODE_CLRTICK_TXBD)) { 11210 HOSTCC_MODE_CLRTICK_TXBD)) {
11209 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; 11211 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11210 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; 11212 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11211 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; 11213 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11212 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; 11214 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11213 } 11215 }
11214 11216
11215 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 11217 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11216 ec->rx_coalesce_usecs_irq = 0; 11218 ec->rx_coalesce_usecs_irq = 0;
11217 ec->tx_coalesce_usecs_irq = 0; 11219 ec->tx_coalesce_usecs_irq = 0;
11218 ec->stats_block_coalesce_usecs = 0; 11220 ec->stats_block_coalesce_usecs = 0;
11219 } 11221 }
11220 } 11222 }
11221 11223
/* tg3_init_one() - PCI probe entry point for a Tigon3 NIC.
 *
 * Enables and maps the device, allocates the net_device plus tg3
 * private state, configures DMA masks, fetches the MAC address, runs
 * the DMA engine self-test and finally registers the net device.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the goto cleanup chain at the end.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;	/* print driver banner only once */
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the private state to driver defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->tx_lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	/* Map the register window before touching the chip. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Read chip revision, flags and quirks from the hardware. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was rejected. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		dev->features |= NETIF_F_TSO;

#endif

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port 5704/5714 devices need their sibling function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Announce the device and its configuration. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	netif_carrier_off(tp->dev);

	return 0;

	/* Error unwinding: release resources in reverse acquisition order. */
err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11546 11548
11547 static void __devexit tg3_remove_one(struct pci_dev *pdev) 11549 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11548 { 11550 {
11549 struct net_device *dev = pci_get_drvdata(pdev); 11551 struct net_device *dev = pci_get_drvdata(pdev);
11550 11552
11551 if (dev) { 11553 if (dev) {
11552 struct tg3 *tp = netdev_priv(dev); 11554 struct tg3 *tp = netdev_priv(dev);
11553 11555
11554 flush_scheduled_work(); 11556 flush_scheduled_work();
11555 unregister_netdev(dev); 11557 unregister_netdev(dev);
11556 if (tp->regs) { 11558 if (tp->regs) {
11557 iounmap(tp->regs); 11559 iounmap(tp->regs);
11558 tp->regs = NULL; 11560 tp->regs = NULL;
11559 } 11561 }
11560 free_netdev(dev); 11562 free_netdev(dev);
11561 pci_release_regions(pdev); 11563 pci_release_regions(pdev);
11562 pci_disable_device(pdev); 11564 pci_disable_device(pdev);
11563 pci_set_drvdata(pdev, NULL); 11565 pci_set_drvdata(pdev, NULL);
11564 } 11566 }
11565 } 11567 }
11566 11568
/* PCI suspend callback: quiesce the NIC and enter the requested
 * low-power state.  If the power transition fails, the device is
 * re-initialized and brought back online before the error is
 * returned, so the interface remains usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Finish any pending reset_task before stopping the NIC. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark initialization as undone. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: undo the shutdown and
		 * resume normal operation.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp, 1);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		tg3_full_unlock(tp);
	}

	return err;
}
11610 11612
/* PCI resume callback: restore PCI config space, return the chip to
 * full power, re-initialize the hardware and restart the interface.
 * Returns 0 on success or the error from the power-state change.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* If the interface was down at suspend, there is nothing to do. */
	if (!netif_running(dev))
		return 0;

	/* Restore the config-space snapshot taken at probe/suspend time. */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp, 1);

	/* Re-arm the periodic driver timer. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
}
11642 11644
/* PCI driver descriptor: binds the probe/remove and power-management
 * callbacks to the device IDs listed in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11651 11653
/* Module init: register the PCI driver; device probing then happens
 * through tg3_init_one() for each matching device.
 */
static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}
11656 11658
/* Module exit: unregister the PCI driver, which triggers
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11661 11663
/* Wire the init/exit routines into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);
11664 11666
drivers/net/tulip/tulip_core.c
1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */ 1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
2 2
3 /* 3 /*
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
8 This software may be used and distributed according to the terms 8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference. 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} 11 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
12 for more information on this driver, or visit the project 12 for more information on this driver, or visit the project
13 Web page at http://sourceforge.net/projects/tulip/ 13 Web page at http://sourceforge.net/projects/tulip/
14 14
15 */ 15 */
16 16
17 #include <linux/config.h> 17 #include <linux/config.h>
18 18
19 #define DRV_NAME "tulip" 19 #define DRV_NAME "tulip"
20 #ifdef CONFIG_TULIP_NAPI 20 #ifdef CONFIG_TULIP_NAPI
21 #define DRV_VERSION "1.1.13-NAPI" /* Keep at least for test */ 21 #define DRV_VERSION "1.1.13-NAPI" /* Keep at least for test */
22 #else 22 #else
23 #define DRV_VERSION "1.1.13" 23 #define DRV_VERSION "1.1.13"
24 #endif 24 #endif
25 #define DRV_RELDATE "May 11, 2002" 25 #define DRV_RELDATE "May 11, 2002"
26 26
27 27
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/pci.h> 29 #include <linux/pci.h>
30 #include "tulip.h" 30 #include "tulip.h"
31 #include <linux/init.h> 31 #include <linux/init.h>
32 #include <linux/etherdevice.h> 32 #include <linux/etherdevice.h>
33 #include <linux/delay.h> 33 #include <linux/delay.h>
34 #include <linux/mii.h> 34 #include <linux/mii.h>
35 #include <linux/ethtool.h> 35 #include <linux/ethtool.h>
36 #include <linux/crc32.h> 36 #include <linux/crc32.h>
37 #include <asm/unaligned.h> 37 #include <asm/unaligned.h>
38 #include <asm/uaccess.h> 38 #include <asm/uaccess.h>
39 39
40 #ifdef __sparc__ 40 #ifdef __sparc__
41 #include <asm/pbm.h> 41 #include <asm/pbm.h>
42 #endif 42 #endif
43 43
/* One-line driver banner, built from the version/date macros above. */
static char version[] __devinitdata =
	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
46 46
47 47
/* A few user-configurable values. */

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8
/* Per-board settings, one slot per probed interface (up to MAX_UNITS).
 * NOTE(review): presumably exposed as module parameters elsewhere in
 * the file -- confirm against the module_param declarations.
 */
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
/* The possible media types that can be set in options[] are: */
/* Index corresponds to the media code; entries 20-30 are unused
 * placeholders (empty strings), with "Transceiver reset" at index 31.
 */
const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
	"","","","", "","","","", "","","","Transceiver reset",
};
68 68
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure.
 * On architectures where unaligned DMA buffers are costly (or where the
 * whole frame must be copied anyway), copy every frame (1518 = max
 * Ethernet frame); elsewhere copy only small frames (<= 100 bytes).
 * Fix: the SPARC test used "__sparc_" (single trailing underscore), a
 * typo for the predefined macro __sparc__ — see the correct spelling at
 * the #ifdef __sparc__ include guard and the csr0 selection below — so
 * SPARC silently fell into the 100-byte default. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
	|| defined(__sparc__) || defined(__ia64__) \
	|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
77 77
/*
  Set the bus performance register.
	Typical: Set 16 longword cache alignment, no burst limit.
	Cache alignment bits 15:14	     Burst length 13:8
		0000	No alignment  0x00000000 unlimited		0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	Warning: many older 486 systems are broken and require setting 0x00A04800
	   8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/

/* Default CSR0 (bus mode) value, chosen per architecture per the table
 * above; also overridable via the csr0 module parameter below. */
#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(__sparc__) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
 * crossing anyways so it makes no sense to tell Tulip to burst
 * any more than that.
 */
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif
109 109
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)


MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* NOTE(review): module_param(tulip_debug) precedes its definition below;
 * presumably tulip_debug is declared extern in tulip.h — confirm. */
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

/* Prefix for printk messages. */
#define PFX DRV_NAME ": "

/* Debug verbosity: compile-time TULIP_DEBUG wins, else default to 1. */
#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif
133 133
134 134
135 135
/*
 * This table use during operation for capabilities and media timer.
 *
 * It is indexed via the values in 'enum chips'
 *
 * Per-entry fields: chip name, I/O size, valid interrupt bits (written to
 * CSR5/CSR7 in tulip_up()), feature flag mask, and media-poll timer
 * callback — field names per struct tulip_chip_table; confirm in tulip.h.
 */

struct tulip_chip_table tulip_tbl[] = {
  { }, /* placeholder for array, slot unused currently */
  { }, /* placeholder for array, slot unused currently */

  /* DC21140 */
  { "Digital DS21140 Tulip", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer },

  /* DC21142, DC21143 */
  { "Digital DS21143 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
	| HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer },

  /* LC82C168 */
  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
	HAS_MII | HAS_PNICNWAY, pnic_timer },

  /* MX98713 */
  { "Macronix 98713 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },

  /* MX98715 */
  { "Macronix 98715 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer },

  /* MX98725 */
  { "Macronix 98725 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer },

  /* AX88140 */
  { "ASIX AX88140", 128, 0x0001fbff,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
	| IS_ASIX, tulip_timer },

  /* PNIC2 */
  { "Lite-On PNIC-II", 256, 0x0801fbff,
	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer },

  /* COMET */
  { "ADMtek Comet", 256, 0x0001abef,
	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },

  /* COMPEX9881 */
  { "Compex 9881 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },

  /* I21145 */
  { "Intel DS21145 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
	| HAS_NWAY | HAS_PCI_MWI, t21142_timer },

  /* DM910X */
  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
	tulip_timer },

  /* RS7112 */
  { "Conexant LANfinity", 256, 0x0001ebef,
	HAS_MII | HAS_ACPI, tulip_timer },

};
203 203
204 204
/* PCI IDs this driver binds to; the driver_data field (last column) is the
 * 'enum chips' index used to look up capabilities in tulip_tbl[]. */
static struct pci_device_id tulip_pci_tbl[] = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
245 245
246 246
/* A full-duplex map for media types. */
/* Indexed by media code; entries are flag masks tested elsewhere with
 * MediaIsMII / MediaAlwaysFD (see tulip_up's media selection). */
const char tulip_media_cap[32] =
{0,0,0,16, 3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
250 250
/* Forward declarations for the driver's net_device operations. */
static void tulip_tx_timeout(struct net_device *dev);
static void tulip_init_ring(struct net_device *dev);
static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif
264 264
265 static void tulip_set_power_state (struct tulip_private *tp, 265 static void tulip_set_power_state (struct tulip_private *tp,
266 int sleep, int snooze) 266 int sleep, int snooze)
267 { 267 {
268 if (tp->flags & HAS_ACPI) { 268 if (tp->flags & HAS_ACPI) {
269 u32 tmp, newtmp; 269 u32 tmp, newtmp;
270 pci_read_config_dword (tp->pdev, CFDD, &tmp); 270 pci_read_config_dword (tp->pdev, CFDD, &tmp);
271 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze); 271 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
272 if (sleep) 272 if (sleep)
273 newtmp |= CFDD_Sleep; 273 newtmp |= CFDD_Sleep;
274 else if (snooze) 274 else if (snooze)
275 newtmp |= CFDD_Snooze; 275 newtmp |= CFDD_Snooze;
276 if (tmp != newtmp) 276 if (tmp != newtmp)
277 pci_write_config_dword (tp->pdev, CFDD, newtmp); 277 pci_write_config_dword (tp->pdev, CFDD, newtmp);
278 } 278 }
279 279
280 } 280 }
281 281
282 282
/* Bring the interface up: wake the chip, reset it, program the Rx/Tx
 * descriptor rings and station address, pick an initial media type
 * (user-forced, EEPROM default, or chip-specific autonegotiation),
 * start the transmitter/receiver, unmask interrupts, and arm the media
 * monitor timer.  Register write ordering here follows the chip errata
 * comments inline — do not reorder. */
static void tulip_up(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int next_tick = 3*HZ;	/* media-monitor timer interval */
	int i;

	/* Wake the chip from sleep/snooze mode. */
	tulip_set_power_state (tp, 0, 0);

	/* On some chip revs we must set the MII/SYM port before the reset!? */
	if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
		iowrite32(0x00040000, ioaddr + CSR6);

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	iowrite32(0x00000001, ioaddr + CSR0);
	udelay(100);

	/* Deassert reset.
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list. */
	iowrite32(tp->csr0, ioaddr + CSR0);
	udelay(100);

	if (tulip_debug > 1)
		printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);

	/* Point the chip at the descriptor rings and reset ring indices. */
	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	if (tp->flags & MC_HASH_ONLY) {
		/* Chips with only a hash filter take the station address via
		 * registers instead of a setup frame.  NOTE(review): addr_high
		 * holds only 16 bits read via le16_to_cpu — upper bytes zero. */
		u32 addr_low = le32_to_cpu(get_unaligned((u32 *)dev->dev_addr));
		u32 addr_high = le16_to_cpu(get_unaligned((u16 *)(dev->dev_addr+4)));
		if (tp->chip_id == AX88140) {
			iowrite32(0, ioaddr + CSR13);
			iowrite32(addr_low, ioaddr + CSR14);
			iowrite32(1, ioaddr + CSR13);
			iowrite32(addr_high, ioaddr + CSR14);
		} else if (tp->flags & COMET_MAC_ADDR) {
			iowrite32(addr_low,  ioaddr + 0xA4);
			iowrite32(addr_high, ioaddr + 0xA8);
			iowrite32(0, ioaddr + 0xAC);
			iowrite32(0, ioaddr + 0xB0);
		}
	} else {
		/* This is set_rx_mode(), but without starting the transmitter. */
		u16 *eaddrs = (u16 *)dev->dev_addr;
		u16 *setup_frm = &tp->setup_frame[15*6];
		dma_addr_t mapping;

		/* 21140 bug: you must add the broadcast address. */
		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
		/* Fill the final entry of the table with our physical address. */
		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];

		mapping = pci_map_single(tp->pdev, tp->setup_frame,
					 sizeof(tp->setup_frame),
					 PCI_DMA_TODEVICE);
		tp->tx_buffers[tp->cur_tx].skb = NULL;
		tp->tx_buffers[tp->cur_tx].mapping = mapping;

		/* Put the setup frame on the Tx list. */
		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);

		tp->cur_tx++;
	}

	/* Remember the caller-visible port so tulip_down can restore it. */
	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	/* Allow selecting a default media. */
	i = 0;
	if (tp->mtable == NULL)
		goto media_picked;
	if (dev->if_port) {
		/* User forced a media type: find its leaf in the media table
		 * (MII media all map to leaf type 11). */
		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
			(dev->if_port == 12 ? 0 : dev->if_port);
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				printk(KERN_INFO "%s: Using user-specified media %s.\n",
				       dev->name, medianame[dev->if_port]);
				goto media_picked;
			}
	}
	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
		/* EEPROM specifies a default media (bit 11 clear). */
		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
				       dev->name, medianame[looking_for]);
				goto media_picked;
			}
	}
	/* Start sensing first non-full-duplex media. */
	for (i = tp->mtable->leafcount - 1;
	     (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
		;
media_picked:

	tp->csr6 = 0;
	tp->cur_index = i;
	tp->nwayset = 0;

	/* Chip-specific media programming / autonegotiation start. */
	if (dev->if_port) {
		if (tp->chip_id == DC21143 &&
		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
			/* We must reset the media CSRs when we force-select MII mode. */
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(0x0008, ioaddr + CSR15);
		}
		tulip_select_media(dev, 1);
	} else if (tp->chip_id == DC21142) {
		if (tp->mii_cnt) {
			tulip_select_media(dev, 1);
			if (tulip_debug > 1)
				printk(KERN_INFO "%s: Using MII transceiver %d, status "
				       "%4.4x.\n",
				       dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			tp->csr6 = csr6_mask_hdcap;
			dev->if_port = 11;	/* MII */
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
		} else
			t21142_start_nway(dev);
	} else if (tp->chip_id == PNIC2) {
		/* for initial startup advertise 10/100 Full and Half */
		tp->sym_advertise = 0x01E0;
		/* enable autonegotiate end interrupt */
		iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
		iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
		pnic2_start_nway(dev);
	} else if (tp->chip_id == LC82C168 && ! tp->medialock) {
		if (tp->mii_cnt) {
			dev->if_port = 11;
			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
			iowrite32(0x0001, ioaddr + CSR15);
		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
			pnic_do_nway(dev);
		else {
			/* Start with 10mbps to do autonegotiation. */
			iowrite32(0x32, ioaddr + CSR12);
			tp->csr6 = 0x00420000;
			iowrite32(0x0001B078, ioaddr + 0xB8);
			iowrite32(0x0201B078, ioaddr + 0xB8);
			next_tick = 1*HZ;	/* poll the nway state sooner */
		}
	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
		   && ! tp->medialock) {
		dev->if_port = 0;
		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
		/* Provided by BOLO, Macronix - 12/10/1998. */
		dev->if_port = 0;
		tp->csr6 = 0x01a80200;
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
		/* Enable automatic Tx underrun recovery. */
		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
		dev->if_port = tp->mii_cnt ? 11 : 0;
		tp->csr6 = 0x00040000;
	} else if (tp->chip_id == AX88140) {
		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
	} else
		tulip_select_media(dev, 1);

	/* Start the chip's Tx to process setup frame. */
	tulip_stop_rxtx(tp);
	barrier();
	udelay(5);
	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	tulip_start_rxtx(tp);
	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */

	if (tulip_debug > 2) {
		printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
		       dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
		       ioread32(ioaddr + CSR6));
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	tp->timer.expires = RUN_AT(next_tick);
	add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	init_timer(&tp->oom_timer);
	tp->oom_timer.data = (unsigned long)dev;
	tp->oom_timer.function = oom_timer;
#endif
}
487 487
488 static int 488 static int
489 tulip_open(struct net_device *dev) 489 tulip_open(struct net_device *dev)
490 { 490 {
491 int retval; 491 int retval;
492 492
493 if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev))) 493 if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)))
494 return retval; 494 return retval;
495 495
496 tulip_init_ring (dev); 496 tulip_init_ring (dev);
497 497
498 tulip_up (dev); 498 tulip_up (dev);
499 499
500 netif_start_queue (dev); 500 netif_start_queue (dev);
501 501
502 return 0; 502 return 0;
503 } 503 }
504 504
505 505
/*
 * Transmit watchdog: the stack calls this when a queued packet has not
 * completed in time.  Depending on the chip family we either leave the
 * problem to the media monitor (MII), step to a different media leaf, or
 * simply log state; in every case we restart the Tx/Rx engines, kick a
 * transmit poll demand, count a tx_error and re-wake the queue.
 */
static void tulip_tx_timeout(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave (&tp->lock, flags);

	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
			       dev->name);
	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
		   || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
		   || tp->chip_id == DM910X) {
		/* 21140-class chips: dump SIA state and, unless the user
		 * locked the media, step backwards through the media table
		 * looking for a non-full-duplex leaf to retry with. */
		printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
		       "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
		       dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
		       ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
		if ( ! tp->medialock  &&  tp->mtable) {
			do
				--tp->cur_index;
			while (tp->cur_index >= 0
			       && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
				   & MediaIsFD));
			/* NOTE(review): the do/while above has already moved
			 * cur_index to a candidate leaf; the extra pre-decrement
			 * here skips one more entry.  Looks like it should be
			 * `if (tp->cur_index < 0)` -- confirm against the media
			 * table semantics before changing. */
			if (--tp->cur_index < 0) {
				/* We start again, but should instead look for default.  */
				tp->cur_index = tp->mtable->leafcount - 1;
			}
			tulip_select_media(dev, 0);
			printk(KERN_WARNING "%s: transmit timed out, switching to %s "
			       "media.\n", dev->name, medianame[dev->if_port]);
		}
	} else if (tp->chip_id == PNIC2) {
		printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
		       "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
		       dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
		       (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
	} else {
		printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
		       "%8.8x, resetting...\n",
		       dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
		dev->if_port = 0;
	}

#if defined(way_too_many_messages)
	/* Verbose ring-dump debug aid; normally compiled out. */
	if (tulip_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x  "
			       "%2.2x %2.2x %2.2x.\n",
			       i, (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			/* NOTE(review): buf[j] is read before the j < 1600 bound
			 * is checked; harmless only because this block is
			 * normally not compiled. */
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100) printk(" %2.2x", buf[j]);
			printk(" j=%d.\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
		printk("\n" KERN_DEBUG "  Tx ring %8.8x: ", (int)tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
		printk("\n");
	}
#endif

	/* Stop and restart the chip's Tx processes . */

	tulip_restart_rxtx(tp);
	/* Trigger an immediate transmit demand. */
	iowrite32(0, ioaddr + CSR1);

	tp->stats.tx_errors++;

	spin_unlock_irqrestore (&tp->lock, flags);
	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}
591 591
592 592
593 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 593 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
594 static void tulip_init_ring(struct net_device *dev) 594 static void tulip_init_ring(struct net_device *dev)
595 { 595 {
596 struct tulip_private *tp = netdev_priv(dev); 596 struct tulip_private *tp = netdev_priv(dev);
597 int i; 597 int i;
598 598
599 tp->susp_rx = 0; 599 tp->susp_rx = 0;
600 tp->ttimer = 0; 600 tp->ttimer = 0;
601 tp->nir = 0; 601 tp->nir = 0;
602 602
603 for (i = 0; i < RX_RING_SIZE; i++) { 603 for (i = 0; i < RX_RING_SIZE; i++) {
604 tp->rx_ring[i].status = 0x00000000; 604 tp->rx_ring[i].status = 0x00000000;
605 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ); 605 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
606 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1)); 606 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
607 tp->rx_buffers[i].skb = NULL; 607 tp->rx_buffers[i].skb = NULL;
608 tp->rx_buffers[i].mapping = 0; 608 tp->rx_buffers[i].mapping = 0;
609 } 609 }
610 /* Mark the last entry as wrapping the ring. */ 610 /* Mark the last entry as wrapping the ring. */
611 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP); 611 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
612 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma); 612 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
613 613
614 for (i = 0; i < RX_RING_SIZE; i++) { 614 for (i = 0; i < RX_RING_SIZE; i++) {
615 dma_addr_t mapping; 615 dma_addr_t mapping;
616 616
617 /* Note the receive buffer must be longword aligned. 617 /* Note the receive buffer must be longword aligned.
618 dev_alloc_skb() provides 16 byte alignment. But do *not* 618 dev_alloc_skb() provides 16 byte alignment. But do *not*
619 use skb_reserve() to align the IP header! */ 619 use skb_reserve() to align the IP header! */
620 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ); 620 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
621 tp->rx_buffers[i].skb = skb; 621 tp->rx_buffers[i].skb = skb;
622 if (skb == NULL) 622 if (skb == NULL)
623 break; 623 break;
624 mapping = pci_map_single(tp->pdev, skb->data, 624 mapping = pci_map_single(tp->pdev, skb->data,
625 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 625 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
626 tp->rx_buffers[i].mapping = mapping; 626 tp->rx_buffers[i].mapping = mapping;
627 skb->dev = dev; /* Mark as being used by this device. */ 627 skb->dev = dev; /* Mark as being used by this device. */
628 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */ 628 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
629 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping); 629 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
630 } 630 }
631 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 631 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
632 632
633 /* The Tx buffer descriptor is filled in as needed, but we 633 /* The Tx buffer descriptor is filled in as needed, but we
634 do need to clear the ownership bit. */ 634 do need to clear the ownership bit. */
635 for (i = 0; i < TX_RING_SIZE; i++) { 635 for (i = 0; i < TX_RING_SIZE; i++) {
636 tp->tx_buffers[i].skb = NULL; 636 tp->tx_buffers[i].skb = NULL;
637 tp->tx_buffers[i].mapping = 0; 637 tp->tx_buffers[i].mapping = 0;
638 tp->tx_ring[i].status = 0x00000000; 638 tp->tx_ring[i].status = 0x00000000;
639 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1)); 639 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
640 } 640 }
641 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma); 641 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
642 } 642 }
643 643
/*
 * Queue one skb for transmission.  Maps the packet for DMA, fills the
 * next Tx descriptor, hands ownership to the chip and issues a transmit
 * poll demand.  tp->lock serializes against the interrupt handler.
 * Always accepts the packet (returns 0); the queue is stopped first when
 * the ring is nearly full.
 */
static int
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;
	dma_addr_t mapping;

	spin_lock_irq(&tp->lock);

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	/* Descriptor control bits: request a Tx-done interrupt only at the
	 * half-full point and when the ring is nearly exhausted, to keep
	 * interrupt load down on the typical path. */
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = 0x60000000; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000; /* No Tx-done intr. */
	} else {		/* Leave room for set_rx_mode() to fill entries. */
		flag = 0xe0000000; /* Tx-done intr. */
		netif_stop_queue(dev);
	}
	/* Last ring slot must always carry the wrap bit. */
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
	/* if we were using Transmit Automatic Polling, we would need a
	 * wmb() here. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
	wmb();	/* descriptor writes must be visible before the poll demand */

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	iowrite32(0, tp->base_addr + CSR1);

	spin_unlock_irq(&tp->lock);

	dev->trans_start = jiffies;

	return 0;
}
693 693
694 static void tulip_clean_tx_ring(struct tulip_private *tp) 694 static void tulip_clean_tx_ring(struct tulip_private *tp)
695 { 695 {
696 unsigned int dirty_tx; 696 unsigned int dirty_tx;
697 697
698 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0; 698 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
699 dirty_tx++) { 699 dirty_tx++) {
700 int entry = dirty_tx % TX_RING_SIZE; 700 int entry = dirty_tx % TX_RING_SIZE;
701 int status = le32_to_cpu(tp->tx_ring[entry].status); 701 int status = le32_to_cpu(tp->tx_ring[entry].status);
702 702
703 if (status < 0) { 703 if (status < 0) {
704 tp->stats.tx_errors++; /* It wasn't Txed */ 704 tp->stats.tx_errors++; /* It wasn't Txed */
705 tp->tx_ring[entry].status = 0; 705 tp->tx_ring[entry].status = 0;
706 } 706 }
707 707
708 /* Check for Tx filter setup frames. */ 708 /* Check for Tx filter setup frames. */
709 if (tp->tx_buffers[entry].skb == NULL) { 709 if (tp->tx_buffers[entry].skb == NULL) {
710 /* test because dummy frames not mapped */ 710 /* test because dummy frames not mapped */
711 if (tp->tx_buffers[entry].mapping) 711 if (tp->tx_buffers[entry].mapping)
712 pci_unmap_single(tp->pdev, 712 pci_unmap_single(tp->pdev,
713 tp->tx_buffers[entry].mapping, 713 tp->tx_buffers[entry].mapping,
714 sizeof(tp->setup_frame), 714 sizeof(tp->setup_frame),
715 PCI_DMA_TODEVICE); 715 PCI_DMA_TODEVICE);
716 continue; 716 continue;
717 } 717 }
718 718
719 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, 719 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
720 tp->tx_buffers[entry].skb->len, 720 tp->tx_buffers[entry].skb->len,
721 PCI_DMA_TODEVICE); 721 PCI_DMA_TODEVICE);
722 722
723 /* Free the original skb. */ 723 /* Free the original skb. */
724 dev_kfree_skb_irq(tp->tx_buffers[entry].skb); 724 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
725 tp->tx_buffers[entry].skb = NULL; 725 tp->tx_buffers[entry].skb = NULL;
726 tp->tx_buffers[entry].mapping = 0; 726 tp->tx_buffers[entry].mapping = 0;
727 } 727 }
728 } 728 }
729 729
/*
 * Quiesce the interface: stop the media timers, mask interrupts, halt
 * the Tx/Rx engines, reclaim ring buffers, fold the missed-frame count
 * into the stats, and drop the chip into snooze power state.  The
 * shutdown order (timers first, then mask, then engine stop) matters.
 */
static void tulip_down (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	/* Timers must be dead before we tear down state they touch. */
	del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	del_timer_sync (&tp->oom_timer);
#endif
	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32 (0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(dev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	/* 0xffffffff reads back when the chip is absent/powered off;
	 * only trust CSR8 when the device is actually responding. */
	if (ioread32 (ioaddr + CSR6) != 0xffffffff)
		tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore (&tp->lock, flags);

	/* Re-arm (but don't start) the media timer for the next open. */
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	dev->if_port = tp->saved_if_port;

	/* Leave the driver in snooze, not sleep, mode. */
	tulip_set_power_state (tp, 0, 1);
}
768 768
769 769
770 static int tulip_close (struct net_device *dev) 770 static int tulip_close (struct net_device *dev)
771 { 771 {
772 struct tulip_private *tp = netdev_priv(dev); 772 struct tulip_private *tp = netdev_priv(dev);
773 void __iomem *ioaddr = tp->base_addr; 773 void __iomem *ioaddr = tp->base_addr;
774 int i; 774 int i;
775 775
776 netif_stop_queue (dev); 776 netif_stop_queue (dev);
777 777
778 tulip_down (dev); 778 tulip_down (dev);
779 779
780 if (tulip_debug > 1) 780 if (tulip_debug > 1)
781 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 781 printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
782 dev->name, ioread32 (ioaddr + CSR5)); 782 dev->name, ioread32 (ioaddr + CSR5));
783 783
784 free_irq (dev->irq, dev); 784 free_irq (dev->irq, dev);
785 785
786 /* Free all the skbuffs in the Rx queue. */ 786 /* Free all the skbuffs in the Rx queue. */
787 for (i = 0; i < RX_RING_SIZE; i++) { 787 for (i = 0; i < RX_RING_SIZE; i++) {
788 struct sk_buff *skb = tp->rx_buffers[i].skb; 788 struct sk_buff *skb = tp->rx_buffers[i].skb;
789 dma_addr_t mapping = tp->rx_buffers[i].mapping; 789 dma_addr_t mapping = tp->rx_buffers[i].mapping;
790 790
791 tp->rx_buffers[i].skb = NULL; 791 tp->rx_buffers[i].skb = NULL;
792 tp->rx_buffers[i].mapping = 0; 792 tp->rx_buffers[i].mapping = 0;
793 793
794 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */ 794 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
795 tp->rx_ring[i].length = 0; 795 tp->rx_ring[i].length = 0;
796 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */ 796 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
797 if (skb) { 797 if (skb) {
798 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ, 798 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
799 PCI_DMA_FROMDEVICE); 799 PCI_DMA_FROMDEVICE);
800 dev_kfree_skb (skb); 800 dev_kfree_skb (skb);
801 } 801 }
802 } 802 }
803 for (i = 0; i < TX_RING_SIZE; i++) { 803 for (i = 0; i < TX_RING_SIZE; i++) {
804 struct sk_buff *skb = tp->tx_buffers[i].skb; 804 struct sk_buff *skb = tp->tx_buffers[i].skb;
805 805
806 if (skb != NULL) { 806 if (skb != NULL) {
807 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping, 807 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
808 skb->len, PCI_DMA_TODEVICE); 808 skb->len, PCI_DMA_TODEVICE);
809 dev_kfree_skb (skb); 809 dev_kfree_skb (skb);
810 } 810 }
811 tp->tx_buffers[i].skb = NULL; 811 tp->tx_buffers[i].skb = NULL;
812 tp->tx_buffers[i].mapping = 0; 812 tp->tx_buffers[i].mapping = 0;
813 } 813 }
814 814
815 return 0; 815 return 0;
816 } 816 }
817 817
818 static struct net_device_stats *tulip_get_stats(struct net_device *dev) 818 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
819 { 819 {
820 struct tulip_private *tp = netdev_priv(dev); 820 struct tulip_private *tp = netdev_priv(dev);
821 void __iomem *ioaddr = tp->base_addr; 821 void __iomem *ioaddr = tp->base_addr;
822 822
823 if (netif_running(dev)) { 823 if (netif_running(dev)) {
824 unsigned long flags; 824 unsigned long flags;
825 825
826 spin_lock_irqsave (&tp->lock, flags); 826 spin_lock_irqsave (&tp->lock, flags);
827 827
828 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; 828 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
829 829
830 spin_unlock_irqrestore(&tp->lock, flags); 830 spin_unlock_irqrestore(&tp->lock, flags);
831 } 831 }
832 832
833 return &tp->stats; 833 return &tp->stats;
834 } 834 }
835 835
836 836
837 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 837 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
838 { 838 {
839 struct tulip_private *np = netdev_priv(dev); 839 struct tulip_private *np = netdev_priv(dev);
840 strcpy(info->driver, DRV_NAME); 840 strcpy(info->driver, DRV_NAME);
841 strcpy(info->version, DRV_VERSION); 841 strcpy(info->version, DRV_VERSION);
842 strcpy(info->bus_info, pci_name(np->pdev)); 842 strcpy(info->bus_info, pci_name(np->pdev));
843 } 843 }
844 844
/* Minimal ethtool support: only driver identification is implemented. */
static struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo
};
848 848
849 /* Provide ioctl() calls to examine the MII xcvr state. */ 849 /* Provide ioctl() calls to examine the MII xcvr state. */
850 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 850 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
851 { 851 {
852 struct tulip_private *tp = netdev_priv(dev); 852 struct tulip_private *tp = netdev_priv(dev);
853 void __iomem *ioaddr = tp->base_addr; 853 void __iomem *ioaddr = tp->base_addr;
854 struct mii_ioctl_data *data = if_mii(rq); 854 struct mii_ioctl_data *data = if_mii(rq);
855 const unsigned int phy_idx = 0; 855 const unsigned int phy_idx = 0;
856 int phy = tp->phys[phy_idx] & 0x1f; 856 int phy = tp->phys[phy_idx] & 0x1f;
857 unsigned int regnum = data->reg_num; 857 unsigned int regnum = data->reg_num;
858 858
859 switch (cmd) { 859 switch (cmd) {
860 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 860 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
861 if (tp->mii_cnt) 861 if (tp->mii_cnt)
862 data->phy_id = phy; 862 data->phy_id = phy;
863 else if (tp->flags & HAS_NWAY) 863 else if (tp->flags & HAS_NWAY)
864 data->phy_id = 32; 864 data->phy_id = 32;
865 else if (tp->chip_id == COMET) 865 else if (tp->chip_id == COMET)
866 data->phy_id = 1; 866 data->phy_id = 1;
867 else 867 else
868 return -ENODEV; 868 return -ENODEV;
869 869
870 case SIOCGMIIREG: /* Read MII PHY register. */ 870 case SIOCGMIIREG: /* Read MII PHY register. */
871 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { 871 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
872 int csr12 = ioread32 (ioaddr + CSR12); 872 int csr12 = ioread32 (ioaddr + CSR12);
873 int csr14 = ioread32 (ioaddr + CSR14); 873 int csr14 = ioread32 (ioaddr + CSR14);
874 switch (regnum) { 874 switch (regnum) {
875 case 0: 875 case 0:
876 if (((csr14<<5) & 0x1000) || 876 if (((csr14<<5) & 0x1000) ||
877 (dev->if_port == 5 && tp->nwayset)) 877 (dev->if_port == 5 && tp->nwayset))
878 data->val_out = 0x1000; 878 data->val_out = 0x1000;
879 else 879 else
880 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0) 880 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
881 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0); 881 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
882 break; 882 break;
883 case 1: 883 case 1:
884 data->val_out = 884 data->val_out =
885 0x1848 + 885 0x1848 +
886 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) + 886 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
887 ((csr12&0x06) == 6 ? 0 : 4); 887 ((csr12&0x06) == 6 ? 0 : 4);
888 data->val_out |= 0x6048; 888 data->val_out |= 0x6048;
889 break; 889 break;
890 case 4: 890 case 4:
891 /* Advertised value, bogus 10baseTx-FD value from CSR6. */ 891 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
892 data->val_out = 892 data->val_out =
893 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) + 893 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
894 ((csr14 >> 1) & 0x20) + 1; 894 ((csr14 >> 1) & 0x20) + 1;
895 data->val_out |= ((csr14 >> 9) & 0x03C0); 895 data->val_out |= ((csr14 >> 9) & 0x03C0);
896 break; 896 break;
897 case 5: data->val_out = tp->lpar; break; 897 case 5: data->val_out = tp->lpar; break;
898 default: data->val_out = 0; break; 898 default: data->val_out = 0; break;
899 } 899 }
900 } else { 900 } else {
901 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum); 901 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
902 } 902 }
903 return 0; 903 return 0;
904 904
905 case SIOCSMIIREG: /* Write MII PHY register. */ 905 case SIOCSMIIREG: /* Write MII PHY register. */
906 if (!capable (CAP_NET_ADMIN)) 906 if (!capable (CAP_NET_ADMIN))
907 return -EPERM; 907 return -EPERM;
908 if (regnum & ~0x1f) 908 if (regnum & ~0x1f)
909 return -EINVAL; 909 return -EINVAL;
910 if (data->phy_id == phy) { 910 if (data->phy_id == phy) {
911 u16 value = data->val_in; 911 u16 value = data->val_in;
912 switch (regnum) { 912 switch (regnum) {
913 case 0: /* Check for autonegotiation on or reset. */ 913 case 0: /* Check for autonegotiation on or reset. */
914 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1; 914 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
915 if (tp->full_duplex_lock) 915 if (tp->full_duplex_lock)
916 tp->full_duplex = (value & 0x0100) ? 1 : 0; 916 tp->full_duplex = (value & 0x0100) ? 1 : 0;
917 break; 917 break;
918 case 4: 918 case 4:
919 tp->advertising[phy_idx] = 919 tp->advertising[phy_idx] =
920 tp->mii_advertise = data->val_in; 920 tp->mii_advertise = data->val_in;
921 break; 921 break;
922 } 922 }
923 } 923 }
924 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { 924 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
925 u16 value = data->val_in; 925 u16 value = data->val_in;
926 if (regnum == 0) { 926 if (regnum == 0) {
927 if ((value & 0x1200) == 0x1200) { 927 if ((value & 0x1200) == 0x1200) {
928 if (tp->chip_id == PNIC2) { 928 if (tp->chip_id == PNIC2) {
929 pnic2_start_nway (dev); 929 pnic2_start_nway (dev);
930 } else { 930 } else {
931 t21142_start_nway (dev); 931 t21142_start_nway (dev);
932 } 932 }
933 } 933 }
934 } else if (regnum == 4) 934 } else if (regnum == 4)
935 tp->sym_advertise = value; 935 tp->sym_advertise = value;
936 } else { 936 } else {
937 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in); 937 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
938 } 938 }
939 return 0; 939 return 0;
940 default: 940 default:
941 return -EOPNOTSUPP; 941 return -EOPNOTSUPP;
942 } 942 }
943 943
944 return -EOPNOTSUPP; 944 return -EOPNOTSUPP;
945 } 945 }
946 946
947 947
948 /* Set or clear the multicast filter for this adaptor. 948 /* Set or clear the multicast filter for this adaptor.
949 Note that we only use exclusion around actually queueing the 949 Note that we only use exclusion around actually queueing the
950 new frame, not around filling tp->setup_frame. This is non-deterministic 950 new frame, not around filling tp->setup_frame. This is non-deterministic
951 when re-entered but still correct. */ 951 when re-entered but still correct. */
952 952
/* Set bit i in a little-endian bit array viewed as bytes.  A local
 * definition (hence the #undef) used for building the 512-bit hash
 * filter.  The i argument is evaluated twice -- pass only side-effect
 * free expressions. */
#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
955 955
956 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) 956 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
957 { 957 {
958 struct tulip_private *tp = netdev_priv(dev); 958 struct tulip_private *tp = netdev_priv(dev);
959 u16 hash_table[32]; 959 u16 hash_table[32];
960 struct dev_mc_list *mclist; 960 struct dev_mc_list *mclist;
961 int i; 961 int i;
962 u16 *eaddrs; 962 u16 *eaddrs;
963 963
964 memset(hash_table, 0, sizeof(hash_table)); 964 memset(hash_table, 0, sizeof(hash_table));
965 set_bit_le(255, hash_table); /* Broadcast entry */ 965 set_bit_le(255, hash_table); /* Broadcast entry */
966 /* This should work on big-endian machines as well. */ 966 /* This should work on big-endian machines as well. */
967 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 967 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
968 i++, mclist = mclist->next) { 968 i++, mclist = mclist->next) {
969 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; 969 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
970 970
971 set_bit_le(index, hash_table); 971 set_bit_le(index, hash_table);
972 972
973 } 973 }
974 for (i = 0; i < 32; i++) { 974 for (i = 0; i < 32; i++) {
975 *setup_frm++ = hash_table[i]; 975 *setup_frm++ = hash_table[i];
976 *setup_frm++ = hash_table[i]; 976 *setup_frm++ = hash_table[i];
977 } 977 }
978 setup_frm = &tp->setup_frame[13*6]; 978 setup_frm = &tp->setup_frame[13*6];
979 979
980 /* Fill the final entry with our physical address. */ 980 /* Fill the final entry with our physical address. */
981 eaddrs = (u16 *)dev->dev_addr; 981 eaddrs = (u16 *)dev->dev_addr;
982 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; 982 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
983 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; 983 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
984 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; 984 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
985 } 985 }
986 986
987 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) 987 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
988 { 988 {
989 struct tulip_private *tp = netdev_priv(dev); 989 struct tulip_private *tp = netdev_priv(dev);
990 struct dev_mc_list *mclist; 990 struct dev_mc_list *mclist;
991 int i; 991 int i;
992 u16 *eaddrs; 992 u16 *eaddrs;
993 993
994 /* We have <= 14 addresses so we can use the wonderful 994 /* We have <= 14 addresses so we can use the wonderful
995 16 address perfect filtering of the Tulip. */ 995 16 address perfect filtering of the Tulip. */
996 for (i = 0, mclist = dev->mc_list; i < dev->mc_count; 996 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
997 i++, mclist = mclist->next) { 997 i++, mclist = mclist->next) {
998 eaddrs = (u16 *)mclist->dmi_addr; 998 eaddrs = (u16 *)mclist->dmi_addr;
999 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 999 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1000 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1000 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1001 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1001 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1002 } 1002 }
1003 /* Fill the unused entries with the broadcast address. */ 1003 /* Fill the unused entries with the broadcast address. */
1004 memset(setup_frm, 0xff, (15-i)*12); 1004 memset(setup_frm, 0xff, (15-i)*12);
1005 setup_frm = &tp->setup_frame[15*6]; 1005 setup_frm = &tp->setup_frame[15*6];
1006 1006
1007 /* Fill the final entry with our physical address. */ 1007 /* Fill the final entry with our physical address. */
1008 eaddrs = (u16 *)dev->dev_addr; 1008 eaddrs = (u16 *)dev->dev_addr;
1009 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; 1009 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1010 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; 1010 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1011 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; 1011 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1012 } 1012 }
1013 1013
1014 1014
/* Program the chip's receive filtering to match dev->flags and the
 * current multicast list.
 *
 * Four strategies, in priority order:
 *   1. IFF_PROMISC            -> accept everything;
 *   2. huge list / IFF_ALLMULTI -> accept all multicasts;
 *   3. MC_HASH_ONLY chips      -> program the 64-bit hash filter;
 *   4. otherwise               -> queue a setup frame on the Tx ring
 *      (perfect filter for <= 14 addresses, hash frame above that).
 * The recomputed CSR6 mode bits are written back at the very end.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	/* Start from the live CSR6 with the receive-mode bits (0x00D5)
	 * cleared; the same mask is cleared in the cached tp->csr6. */
	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
		/* Unconditionally log net taps. */
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
	} else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else if (tp->flags & MC_HASH_ONLY) {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct dev_mc_list *mclist;
		int i;
		if (dev->mc_count > 64) {	/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
			int filterbit;
			for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			     i++, mclist = mclist->next) {
				/* COMET-style MACs hash on the little-endian CRC;
				 * everyone else uses the top 6 bits of the CRC. */
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
				else
					filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2) {
					printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
					       "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
					       mclist->dmi_addr[0], mclist->dmi_addr[1],
					       mclist->dmi_addr[2], mclist->dmi_addr[3],
					       mclist->dmi_addr[4], mclist->dmi_addr[5],
					       ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
				}
			}
			/* Skip the register writes if the hash is unchanged. */
			if (mc_filter[0] == tp->mc_filter[0] &&
			    mc_filter[1] == tp->mc_filter[1])
				;				/* No change. */
			else if (tp->flags & IS_ASIX) {
				/* ASIX parts load the two hash words through the
				 * CSR13 (index) / CSR14 (data) pair. */
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				/* COMET parts expose the hash at fixed offsets. */
				iowrite32(mc_filter[0], ioaddr + 0xAC);
				iowrite32(mc_filter[1], ioaddr + 0xB0);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		/* Note that only the low-address shortword of setup_frame is valid!
		   The values are doubled for big-endian architectures. */
		if (dev->mc_count > 14) { /* Must use a multicast hash table. */
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, we need not add it. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* Now add this frame to the Tx list. */

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry. */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;

			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				pci_map_single(tp->pdev, tp->setup_frame,
					       sizeof(tp->setup_frame),
					       PCI_DMA_TODEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;	/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			/* Hand the descriptors to the chip only after they are
			 * fully filled in: real entry first, then the dummy. */
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}
1140 1140
#ifdef CONFIG_TULIP_MWI
/* Choose cache-related CSR0 settings (MRM/MWI/MRL, cache alignment,
 * burst length) based on the PCI cache line size, enabling or disabling
 * PCI Memory-Write-and-Invalidate to match.  The result is stored in
 * tp->csr0.
 */
static void __devinit tulip_mwi_config (struct pci_dev *pdev,
					struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cacheline;
	u16 pci_command;
	u32 csr0 = 0;

	if (tulip_debug > 3)
		printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));

	tp->csr0 = 0;

	/* if we have any cache line size at all, we can do MRM */
	csr0 |= MRM;

	/* ...and barring hardware bugs, MWI (broken on DC21143 rev 65) */
	if (tp->chip_id != DC21143 || tp->revision != 65)
		csr0 |= MWI;

	/* set or disable MWI in the standard PCI command bit.
	 * Check for the case where mwi is desired but not available
	 */
	if (csr0 & MWI)
		pci_set_mwi(pdev);
	else
		pci_clear_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && !(pci_command & PCI_COMMAND_INVALIDATE))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cacheline);
	if ((csr0 & MWI) && cacheline == 0) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	if (cacheline == 8)
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
	else if (cacheline == 16)
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
	else if (cacheline == 32)
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
	else
		cacheline = 0;	/* unusable cache line size */

	if (!cacheline) {
		/* we don't have a good csr0 or cache line size: drop MWI
		 * and fall back to sane burst/alignment defaults
		 * (originally from the de4x5 driver) */
		if (csr0 & MWI) {
			pci_clear_mwi(pdev);
			csr0 &= ~MWI;
		}
		csr0 |= (8 << BurstLenShift) | (1 << CALShift);
	}

	tp->csr0 = csr0;
	if (tulip_debug > 2)
		printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
		       pci_name(pdev), cacheline, csr0);
}
#endif
1222 1222
1223 /* 1223 /*
1224 * Chips that have the MRM/reserved bit quirk and the burst quirk. That 1224 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1225 * is the DM910X and the on chip ULi devices 1225 * is the DM910X and the on chip ULi devices
1226 */ 1226 */
1227 1227
1228 static int tulip_uli_dm_quirk(struct pci_dev *pdev) 1228 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1229 { 1229 {
1230 if (pdev->vendor == 0x1282 && pdev->device == 0x9102) 1230 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1231 return 1; 1231 return 1;
1232 return 0; 1232 return 0;
1233 } 1233 }
1234 1234
1235 static int __devinit tulip_init_one (struct pci_dev *pdev, 1235 static int __devinit tulip_init_one (struct pci_dev *pdev,
1236 const struct pci_device_id *ent) 1236 const struct pci_device_id *ent)
1237 { 1237 {
1238 struct tulip_private *tp; 1238 struct tulip_private *tp;
1239 /* See note below on the multiport cards. */ 1239 /* See note below on the multiport cards. */
1240 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'}; 1240 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1241 static struct pci_device_id early_486_chipsets[] = { 1241 static struct pci_device_id early_486_chipsets[] = {
1242 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) }, 1242 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1243 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) }, 1243 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1244 { }, 1244 { },
1245 }; 1245 };
1246 static int last_irq; 1246 static int last_irq;
1247 static int multiport_cnt; /* For four-port boards w/one EEPROM */ 1247 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1248 u8 chip_rev; 1248 u8 chip_rev;
1249 int i, irq; 1249 int i, irq;
1250 unsigned short sum; 1250 unsigned short sum;
1251 unsigned char *ee_data; 1251 unsigned char *ee_data;
1252 struct net_device *dev; 1252 struct net_device *dev;
1253 void __iomem *ioaddr; 1253 void __iomem *ioaddr;
1254 static int board_idx = -1; 1254 static int board_idx = -1;
1255 int chip_idx = ent->driver_data; 1255 int chip_idx = ent->driver_data;
1256 const char *chip_name = tulip_tbl[chip_idx].chip_name; 1256 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1257 unsigned int eeprom_missing = 0; 1257 unsigned int eeprom_missing = 0;
1258 unsigned int force_csr0 = 0; 1258 unsigned int force_csr0 = 0;
1259 1259
1260 #ifndef MODULE 1260 #ifndef MODULE
1261 static int did_version; /* Already printed version info. */ 1261 static int did_version; /* Already printed version info. */
1262 if (tulip_debug > 0 && did_version++ == 0) 1262 if (tulip_debug > 0 && did_version++ == 0)
1263 printk (KERN_INFO "%s", version); 1263 printk (KERN_INFO "%s", version);
1264 #endif 1264 #endif
1265 1265
1266 board_idx++; 1266 board_idx++;
1267 1267
1268 /* 1268 /*
1269 * Lan media wire a tulip chip to a wan interface. Needs a very 1269 * Lan media wire a tulip chip to a wan interface. Needs a very
1270 * different driver (lmc driver) 1270 * different driver (lmc driver)
1271 */ 1271 */
1272 1272
1273 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { 1273 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1274 printk (KERN_ERR PFX "skipping LMC card.\n"); 1274 printk (KERN_ERR PFX "skipping LMC card.\n");
1275 return -ENODEV; 1275 return -ENODEV;
1276 } 1276 }
1277 1277
1278 /* 1278 /*
1279 * Early DM9100's need software CRC and the DMFE driver 1279 * Early DM9100's need software CRC and the DMFE driver
1280 */ 1280 */
1281 1281
1282 if (pdev->vendor == 0x1282 && pdev->device == 0x9100) 1282 if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
1283 { 1283 {
1284 u32 dev_rev; 1284 u32 dev_rev;
1285 /* Read Chip revision */ 1285 /* Read Chip revision */
1286 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev); 1286 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
1287 if(dev_rev < 0x02000030) 1287 if(dev_rev < 0x02000030)
1288 { 1288 {
1289 printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); 1289 printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1290 return -ENODEV; 1290 return -ENODEV;
1291 } 1291 }
1292 } 1292 }
1293 1293
1294 /* 1294 /*
1295 * Looks for early PCI chipsets where people report hangs 1295 * Looks for early PCI chipsets where people report hangs
1296 * without the workarounds being on. 1296 * without the workarounds being on.
1297 */ 1297 */
1298 1298
1299 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache 1299 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1300 aligned. Aries might need this too. The Saturn errata are not 1300 aligned. Aries might need this too. The Saturn errata are not
1301 pretty reading but thankfully it's an old 486 chipset. 1301 pretty reading but thankfully it's an old 486 chipset.
1302 1302
1303 2. The dreaded SiS496 486 chipset. Same workaround as Intel 1303 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1304 Saturn. 1304 Saturn.
1305 */ 1305 */
1306 1306
1307 if (pci_dev_present(early_486_chipsets)) { 1307 if (pci_dev_present(early_486_chipsets)) {
1308 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift); 1308 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1309 force_csr0 = 1; 1309 force_csr0 = 1;
1310 } 1310 }
1311 1311
1312 /* bugfix: the ASIX must have a burst limit or horrible things happen. */ 1312 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1313 if (chip_idx == AX88140) { 1313 if (chip_idx == AX88140) {
1314 if ((csr0 & 0x3f00) == 0) 1314 if ((csr0 & 0x3f00) == 0)
1315 csr0 |= 0x2000; 1315 csr0 |= 0x2000;
1316 } 1316 }
1317 1317
1318 /* PNIC doesn't have MWI/MRL/MRM... */ 1318 /* PNIC doesn't have MWI/MRL/MRM... */
1319 if (chip_idx == LC82C168) 1319 if (chip_idx == LC82C168)
1320 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */ 1320 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1321 1321
1322 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ 1322 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1323 if (tulip_uli_dm_quirk(pdev)) { 1323 if (tulip_uli_dm_quirk(pdev)) {
1324 csr0 &= ~0x01f100ff; 1324 csr0 &= ~0x01f100ff;
1325 #if defined(__sparc__) 1325 #if defined(__sparc__)
1326 csr0 = (csr0 & ~0xff00) | 0xe000; 1326 csr0 = (csr0 & ~0xff00) | 0xe000;
1327 #endif 1327 #endif
1328 } 1328 }
1329 /* 1329 /*
1330 * And back to business 1330 * And back to business
1331 */ 1331 */
1332 1332
1333 i = pci_enable_device(pdev); 1333 i = pci_enable_device(pdev);
1334 if (i) { 1334 if (i) {
1335 printk (KERN_ERR PFX 1335 printk (KERN_ERR PFX
1336 "Cannot enable tulip board #%d, aborting\n", 1336 "Cannot enable tulip board #%d, aborting\n",
1337 board_idx); 1337 board_idx);
1338 return i; 1338 return i;
1339 } 1339 }
1340 1340
1341 irq = pdev->irq; 1341 irq = pdev->irq;
1342 1342
1343 /* alloc_etherdev ensures aligned and zeroed private structures */ 1343 /* alloc_etherdev ensures aligned and zeroed private structures */
1344 dev = alloc_etherdev (sizeof (*tp)); 1344 dev = alloc_etherdev (sizeof (*tp));
1345 if (!dev) { 1345 if (!dev) {
1346 printk (KERN_ERR PFX "ether device alloc failed, aborting\n"); 1346 printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
1347 return -ENOMEM; 1347 return -ENOMEM;
1348 } 1348 }
1349 1349
1350 SET_MODULE_OWNER(dev); 1350 SET_MODULE_OWNER(dev);
1351 SET_NETDEV_DEV(dev, &pdev->dev); 1351 SET_NETDEV_DEV(dev, &pdev->dev);
1352 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { 1352 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1353 printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, " 1353 printk (KERN_ERR PFX "%s: I/O region (0x%lx@0x%lx) too small, "
1354 "aborting\n", pci_name(pdev), 1354 "aborting\n", pci_name(pdev),
1355 pci_resource_len (pdev, 0), 1355 pci_resource_len (pdev, 0),
1356 pci_resource_start (pdev, 0)); 1356 pci_resource_start (pdev, 0));
1357 goto err_out_free_netdev; 1357 goto err_out_free_netdev;
1358 } 1358 }
1359 1359
1360 /* grab all resources from both PIO and MMIO regions, as we 1360 /* grab all resources from both PIO and MMIO regions, as we
1361 * don't want anyone else messing around with our hardware */ 1361 * don't want anyone else messing around with our hardware */
1362 if (pci_request_regions (pdev, "tulip")) 1362 if (pci_request_regions (pdev, "tulip"))
1363 goto err_out_free_netdev; 1363 goto err_out_free_netdev;
1364 1364
1365 #ifndef USE_IO_OPS 1365 #ifndef USE_IO_OPS
1366 ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size); 1366 ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size);
1367 #else 1367 #else
1368 ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size); 1368 ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
1369 #endif 1369 #endif
1370 if (!ioaddr) 1370 if (!ioaddr)
1371 goto err_out_free_res; 1371 goto err_out_free_res;
1372 1372
1373 pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev); 1373 pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
1374 1374
1375 /* 1375 /*
1376 * initialize private data structure 'tp' 1376 * initialize private data structure 'tp'
1377 * it is zeroed and aligned in alloc_etherdev 1377 * it is zeroed and aligned in alloc_etherdev
1378 */ 1378 */
1379 tp = netdev_priv(dev); 1379 tp = netdev_priv(dev);
1380 1380
1381 tp->rx_ring = pci_alloc_consistent(pdev, 1381 tp->rx_ring = pci_alloc_consistent(pdev,
1382 sizeof(struct tulip_rx_desc) * RX_RING_SIZE + 1382 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1383 sizeof(struct tulip_tx_desc) * TX_RING_SIZE, 1383 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1384 &tp->rx_ring_dma); 1384 &tp->rx_ring_dma);
1385 if (!tp->rx_ring) 1385 if (!tp->rx_ring)
1386 goto err_out_mtable; 1386 goto err_out_mtable;
1387 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE); 1387 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1388 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE; 1388 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1389 1389
1390 tp->chip_id = chip_idx; 1390 tp->chip_id = chip_idx;
1391 tp->flags = tulip_tbl[chip_idx].flags; 1391 tp->flags = tulip_tbl[chip_idx].flags;
1392 tp->pdev = pdev; 1392 tp->pdev = pdev;
1393 tp->base_addr = ioaddr; 1393 tp->base_addr = ioaddr;
1394 tp->revision = chip_rev; 1394 tp->revision = chip_rev;
1395 tp->csr0 = csr0; 1395 tp->csr0 = csr0;
1396 spin_lock_init(&tp->lock); 1396 spin_lock_init(&tp->lock);
1397 spin_lock_init(&tp->mii_lock); 1397 spin_lock_init(&tp->mii_lock);
1398 init_timer(&tp->timer); 1398 init_timer(&tp->timer);
1399 tp->timer.data = (unsigned long)dev; 1399 tp->timer.data = (unsigned long)dev;
1400 tp->timer.function = tulip_tbl[tp->chip_id].media_timer; 1400 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1401 1401
1402 dev->base_addr = (unsigned long)ioaddr; 1402 dev->base_addr = (unsigned long)ioaddr;
1403 1403
1404 #ifdef CONFIG_TULIP_MWI 1404 #ifdef CONFIG_TULIP_MWI
1405 if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) 1405 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1406 tulip_mwi_config (pdev, dev); 1406 tulip_mwi_config (pdev, dev);
1407 #else 1407 #else
1408 /* MWI is broken for DC21143 rev 65... */ 1408 /* MWI is broken for DC21143 rev 65... */
1409 if (chip_idx == DC21143 && chip_rev == 65) 1409 if (chip_idx == DC21143 && chip_rev == 65)
1410 tp->csr0 &= ~MWI; 1410 tp->csr0 &= ~MWI;
1411 #endif 1411 #endif
1412 1412
1413 /* Stop the chip's Tx and Rx processes. */ 1413 /* Stop the chip's Tx and Rx processes. */
1414 tulip_stop_rxtx(tp); 1414 tulip_stop_rxtx(tp);
1415 1415
1416 pci_set_master(pdev); 1416 pci_set_master(pdev);
1417 1417
1418 #ifdef CONFIG_GSC 1418 #ifdef CONFIG_GSC
1419 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) { 1419 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1420 switch (pdev->subsystem_device) { 1420 switch (pdev->subsystem_device) {
1421 default: 1421 default:
1422 break; 1422 break;
1423 case 0x1061: 1423 case 0x1061:
1424 case 0x1062: 1424 case 0x1062:
1425 case 0x1063: 1425 case 0x1063:
1426 case 0x1098: 1426 case 0x1098:
1427 case 0x1099: 1427 case 0x1099:
1428 case 0x10EE: 1428 case 0x10EE:
1429 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE; 1429 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1430 chip_name = "GSC DS21140 Tulip"; 1430 chip_name = "GSC DS21140 Tulip";
1431 } 1431 }
1432 } 1432 }
1433 #endif 1433 #endif
1434 1434
1435 /* Clear the missed-packet counter. */ 1435 /* Clear the missed-packet counter. */
1436 ioread32(ioaddr + CSR8); 1436 ioread32(ioaddr + CSR8);
1437 1437
1438 /* The station address ROM is read byte serially. The register must 1438 /* The station address ROM is read byte serially. The register must
1439 be polled, waiting for the value to be read bit serially from the 1439 be polled, waiting for the value to be read bit serially from the
1440 EEPROM. 1440 EEPROM.
1441 */ 1441 */
1442 ee_data = tp->eeprom; 1442 ee_data = tp->eeprom;
1443 sum = 0; 1443 sum = 0;
1444 if (chip_idx == LC82C168) { 1444 if (chip_idx == LC82C168) {
1445 for (i = 0; i < 3; i++) { 1445 for (i = 0; i < 3; i++) {
1446 int value, boguscnt = 100000; 1446 int value, boguscnt = 100000;
1447 iowrite32(0x600 | i, ioaddr + 0x98); 1447 iowrite32(0x600 | i, ioaddr + 0x98);
1448 do 1448 do
1449 value = ioread32(ioaddr + CSR9); 1449 value = ioread32(ioaddr + CSR9);
1450 while (value < 0 && --boguscnt > 0); 1450 while (value < 0 && --boguscnt > 0);
1451 put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i); 1451 put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i);
1452 sum += value & 0xffff; 1452 sum += value & 0xffff;
1453 } 1453 }
1454 } else if (chip_idx == COMET) { 1454 } else if (chip_idx == COMET) {
1455 /* No need to read the EEPROM. */ 1455 /* No need to read the EEPROM. */
1456 put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (u32 *)dev->dev_addr); 1456 put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
1457 put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (u16 *)(dev->dev_addr + 4)); 1457 put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (u16 *)(dev->dev_addr + 4));
1458 for (i = 0; i < 6; i ++) 1458 for (i = 0; i < 6; i ++)
1459 sum += dev->dev_addr[i]; 1459 sum += dev->dev_addr[i];
1460 } else { 1460 } else {
1461 /* A serial EEPROM interface, we read now and sort it out later. */ 1461 /* A serial EEPROM interface, we read now and sort it out later. */
1462 int sa_offset = 0; 1462 int sa_offset = 0;
1463 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6; 1463 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1464 1464
1465 for (i = 0; i < sizeof(tp->eeprom); i+=2) { 1465 for (i = 0; i < sizeof(tp->eeprom); i+=2) {
1466 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size); 1466 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1467 ee_data[i] = data & 0xff; 1467 ee_data[i] = data & 0xff;
1468 ee_data[i + 1] = data >> 8; 1468 ee_data[i + 1] = data >> 8;
1469 } 1469 }
1470 1470
1471 /* DEC now has a specification (see Notes) but early board makers 1471 /* DEC now has a specification (see Notes) but early board makers
1472 just put the address in the first EEPROM locations. */ 1472 just put the address in the first EEPROM locations. */
1473 /* This does memcmp(ee_data, ee_data+16, 8) */ 1473 /* This does memcmp(ee_data, ee_data+16, 8) */
1474 for (i = 0; i < 8; i ++) 1474 for (i = 0; i < 8; i ++)
1475 if (ee_data[i] != ee_data[16+i]) 1475 if (ee_data[i] != ee_data[16+i])
1476 sa_offset = 20; 1476 sa_offset = 20;
1477 if (chip_idx == CONEXANT) { 1477 if (chip_idx == CONEXANT) {
1478 /* Check that the tuple type and length is correct. */ 1478 /* Check that the tuple type and length is correct. */
1479 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6) 1479 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1480 sa_offset = 0x19A; 1480 sa_offset = 0x19A;
1481 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && 1481 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1482 ee_data[2] == 0) { 1482 ee_data[2] == 0) {
1483 sa_offset = 2; /* Grrr, damn Matrox boards. */ 1483 sa_offset = 2; /* Grrr, damn Matrox boards. */
1484 multiport_cnt = 4; 1484 multiport_cnt = 4;
1485 } 1485 }
1486 #ifdef CONFIG_DDB5477 1486 #ifdef CONFIG_DDB5477
1487 if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) { 1487 if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
1488 /* DDB5477 MAC address in first EEPROM locations. */ 1488 /* DDB5477 MAC address in first EEPROM locations. */
1489 sa_offset = 0; 1489 sa_offset = 0;
1490 /* No media table either */ 1490 /* No media table either */
1491 tp->flags &= ~HAS_MEDIA_TABLE; 1491 tp->flags &= ~HAS_MEDIA_TABLE;
1492 } 1492 }
1493 #endif 1493 #endif
1494 #ifdef CONFIG_MIPS_COBALT 1494 #ifdef CONFIG_MIPS_COBALT
1495 if ((pdev->bus->number == 0) && 1495 if ((pdev->bus->number == 0) &&
1496 ((PCI_SLOT(pdev->devfn) == 7) || 1496 ((PCI_SLOT(pdev->devfn) == 7) ||
1497 (PCI_SLOT(pdev->devfn) == 12))) { 1497 (PCI_SLOT(pdev->devfn) == 12))) {
1498 /* Cobalt MAC address in first EEPROM locations. */ 1498 /* Cobalt MAC address in first EEPROM locations. */
1499 sa_offset = 0; 1499 sa_offset = 0;
1500 /* Ensure our media table fixup get's applied */ 1500 /* Ensure our media table fixup get's applied */
1501 memcpy(ee_data + 16, ee_data, 8); 1501 memcpy(ee_data + 16, ee_data, 8);
1502 } 1502 }
1503 #endif 1503 #endif
1504 #ifdef CONFIG_GSC 1504 #ifdef CONFIG_GSC
1505 /* Check to see if we have a broken srom */ 1505 /* Check to see if we have a broken srom */
1506 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) { 1506 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1507 /* pci_vendor_id and subsystem_id are swapped */ 1507 /* pci_vendor_id and subsystem_id are swapped */
1508 ee_data[0] = ee_data[2]; 1508 ee_data[0] = ee_data[2];
1509 ee_data[1] = ee_data[3]; 1509 ee_data[1] = ee_data[3];
1510 ee_data[2] = 0x61; 1510 ee_data[2] = 0x61;
1511 ee_data[3] = 0x10; 1511 ee_data[3] = 0x10;
1512 1512
1513 /* HSC-PCI boards need to be byte-swaped and shifted 1513 /* HSC-PCI boards need to be byte-swaped and shifted
1514 * up 1 word. This shift needs to happen at the end 1514 * up 1 word. This shift needs to happen at the end
1515 * of the MAC first because of the 2 byte overlap. 1515 * of the MAC first because of the 2 byte overlap.
1516 */ 1516 */
1517 for (i = 4; i >= 0; i -= 2) { 1517 for (i = 4; i >= 0; i -= 2) {
1518 ee_data[17 + i + 3] = ee_data[17 + i]; 1518 ee_data[17 + i + 3] = ee_data[17 + i];
1519 ee_data[16 + i + 5] = ee_data[16 + i]; 1519 ee_data[16 + i + 5] = ee_data[16 + i];
1520 } 1520 }
1521 } 1521 }
1522 #endif 1522 #endif
1523 1523
1524 for (i = 0; i < 6; i ++) { 1524 for (i = 0; i < 6; i ++) {
1525 dev->dev_addr[i] = ee_data[i + sa_offset]; 1525 dev->dev_addr[i] = ee_data[i + sa_offset];
1526 sum += ee_data[i + sa_offset]; 1526 sum += ee_data[i + sa_offset];
1527 } 1527 }
1528 } 1528 }
1529 /* Lite-On boards have the address byte-swapped. */ 1529 /* Lite-On boards have the address byte-swapped. */
1530 if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02) 1530 if ((dev->dev_addr[0] == 0xA0 || dev->dev_addr[0] == 0xC0 || dev->dev_addr[0] == 0x02)
1531 && dev->dev_addr[1] == 0x00) 1531 && dev->dev_addr[1] == 0x00)
1532 for (i = 0; i < 6; i+=2) { 1532 for (i = 0; i < 6; i+=2) {
1533 char tmp = dev->dev_addr[i]; 1533 char tmp = dev->dev_addr[i];
1534 dev->dev_addr[i] = dev->dev_addr[i+1]; 1534 dev->dev_addr[i] = dev->dev_addr[i+1];
1535 dev->dev_addr[i+1] = tmp; 1535 dev->dev_addr[i+1] = tmp;
1536 } 1536 }
1537 /* On the Zynx 315 Etherarray and other multiport boards only the 1537 /* On the Zynx 315 Etherarray and other multiport boards only the
1538 first Tulip has an EEPROM. 1538 first Tulip has an EEPROM.
1539 On Sparc systems the mac address is held in the OBP property 1539 On Sparc systems the mac address is held in the OBP property
1540 "local-mac-address". 1540 "local-mac-address".
1541 The addresses of the subsequent ports are derived from the first. 1541 The addresses of the subsequent ports are derived from the first.
1542 Many PCI BIOSes also incorrectly report the IRQ line, so we correct 1542 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1543 that here as well. */ 1543 that here as well. */
1544 if (sum == 0 || sum == 6*0xff) { 1544 if (sum == 0 || sum == 6*0xff) {
1545 #if defined(__sparc__) 1545 #if defined(__sparc__)
1546 struct pcidev_cookie *pcp = pdev->sysdata; 1546 struct pcidev_cookie *pcp = pdev->sysdata;
1547 #endif 1547 #endif
1548 eeprom_missing = 1; 1548 eeprom_missing = 1;
1549 for (i = 0; i < 5; i++) 1549 for (i = 0; i < 5; i++)
1550 dev->dev_addr[i] = last_phys_addr[i]; 1550 dev->dev_addr[i] = last_phys_addr[i];
1551 dev->dev_addr[i] = last_phys_addr[i] + 1; 1551 dev->dev_addr[i] = last_phys_addr[i] + 1;
1552 #if defined(__sparc__) 1552 #if defined(__sparc__)
1553 if ((pcp != NULL) && prom_getproplen(pcp->prom_node, 1553 if (pcp) {
1554 "local-mac-address") == 6) { 1554 unsigned char *addr;
1555 prom_getproperty(pcp->prom_node, "local-mac-address", 1555 int len;
1556 dev->dev_addr, 6); 1556
1557 addr = of_get_property(pcp->prom_node,
1558 "local-mac-address", &len);
1559 if (addr && len == 6)
1560 memcpy(dev->dev_addr, addr, 6);
1557 } 1561 }
1558 #endif 1562 #endif
1559 #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ 1563 #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1560 if (last_irq) 1564 if (last_irq)
1561 irq = last_irq; 1565 irq = last_irq;
1562 #endif 1566 #endif
1563 } 1567 }
1564 1568
1565 for (i = 0; i < 6; i++) 1569 for (i = 0; i < 6; i++)
1566 last_phys_addr[i] = dev->dev_addr[i]; 1570 last_phys_addr[i] = dev->dev_addr[i];
1567 last_irq = irq; 1571 last_irq = irq;
1568 dev->irq = irq; 1572 dev->irq = irq;
1569 1573
1570 /* The lower four bits are the media type. */ 1574 /* The lower four bits are the media type. */
1571 if (board_idx >= 0 && board_idx < MAX_UNITS) { 1575 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1572 if (options[board_idx] & MEDIA_MASK) 1576 if (options[board_idx] & MEDIA_MASK)
1573 tp->default_port = options[board_idx] & MEDIA_MASK; 1577 tp->default_port = options[board_idx] & MEDIA_MASK;
1574 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0) 1578 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1575 tp->full_duplex = 1; 1579 tp->full_duplex = 1;
1576 if (mtu[board_idx] > 0) 1580 if (mtu[board_idx] > 0)
1577 dev->mtu = mtu[board_idx]; 1581 dev->mtu = mtu[board_idx];
1578 } 1582 }
1579 if (dev->mem_start & MEDIA_MASK) 1583 if (dev->mem_start & MEDIA_MASK)
1580 tp->default_port = dev->mem_start & MEDIA_MASK; 1584 tp->default_port = dev->mem_start & MEDIA_MASK;
1581 if (tp->default_port) { 1585 if (tp->default_port) {
1582 printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n", 1586 printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
1583 board_idx, medianame[tp->default_port & MEDIA_MASK]); 1587 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1584 tp->medialock = 1; 1588 tp->medialock = 1;
1585 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD) 1589 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1586 tp->full_duplex = 1; 1590 tp->full_duplex = 1;
1587 } 1591 }
1588 if (tp->full_duplex) 1592 if (tp->full_duplex)
1589 tp->full_duplex_lock = 1; 1593 tp->full_duplex_lock = 1;
1590 1594
1591 if (tulip_media_cap[tp->default_port] & MediaIsMII) { 1595 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1592 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 }; 1596 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
1593 tp->mii_advertise = media2advert[tp->default_port - 9]; 1597 tp->mii_advertise = media2advert[tp->default_port - 9];
1594 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */ 1598 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1595 } 1599 }
1596 1600
1597 if (tp->flags & HAS_MEDIA_TABLE) { 1601 if (tp->flags & HAS_MEDIA_TABLE) {
1598 sprintf(dev->name, "tulip%d", board_idx); /* hack */ 1602 sprintf(dev->name, "tulip%d", board_idx); /* hack */
1599 tulip_parse_eeprom(dev); 1603 tulip_parse_eeprom(dev);
1600 strcpy(dev->name, "eth%d"); /* un-hack */ 1604 strcpy(dev->name, "eth%d"); /* un-hack */
1601 } 1605 }
1602 1606
1603 if ((tp->flags & ALWAYS_CHECK_MII) || 1607 if ((tp->flags & ALWAYS_CHECK_MII) ||
1604 (tp->mtable && tp->mtable->has_mii) || 1608 (tp->mtable && tp->mtable->has_mii) ||
1605 ( ! tp->mtable && (tp->flags & HAS_MII))) { 1609 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1606 if (tp->mtable && tp->mtable->has_mii) { 1610 if (tp->mtable && tp->mtable->has_mii) {
1607 for (i = 0; i < tp->mtable->leafcount; i++) 1611 for (i = 0; i < tp->mtable->leafcount; i++)
1608 if (tp->mtable->mleaf[i].media == 11) { 1612 if (tp->mtable->mleaf[i].media == 11) {
1609 tp->cur_index = i; 1613 tp->cur_index = i;
1610 tp->saved_if_port = dev->if_port; 1614 tp->saved_if_port = dev->if_port;
1611 tulip_select_media(dev, 2); 1615 tulip_select_media(dev, 2);
1612 dev->if_port = tp->saved_if_port; 1616 dev->if_port = tp->saved_if_port;
1613 break; 1617 break;
1614 } 1618 }
1615 } 1619 }
1616 1620
1617 /* Find the connected MII xcvrs. 1621 /* Find the connected MII xcvrs.
1618 Doing this in open() would allow detecting external xcvrs 1622 Doing this in open() would allow detecting external xcvrs
1619 later, but takes much time. */ 1623 later, but takes much time. */
1620 tulip_find_mii (dev, board_idx); 1624 tulip_find_mii (dev, board_idx);
1621 } 1625 }
1622 1626
1623 /* The Tulip-specific entries in the device structure. */ 1627 /* The Tulip-specific entries in the device structure. */
1624 dev->open = tulip_open; 1628 dev->open = tulip_open;
1625 dev->hard_start_xmit = tulip_start_xmit; 1629 dev->hard_start_xmit = tulip_start_xmit;
1626 dev->tx_timeout = tulip_tx_timeout; 1630 dev->tx_timeout = tulip_tx_timeout;
1627 dev->watchdog_timeo = TX_TIMEOUT; 1631 dev->watchdog_timeo = TX_TIMEOUT;
1628 #ifdef CONFIG_TULIP_NAPI 1632 #ifdef CONFIG_TULIP_NAPI
1629 dev->poll = tulip_poll; 1633 dev->poll = tulip_poll;
1630 dev->weight = 16; 1634 dev->weight = 16;
1631 #endif 1635 #endif
1632 dev->stop = tulip_close; 1636 dev->stop = tulip_close;
1633 dev->get_stats = tulip_get_stats; 1637 dev->get_stats = tulip_get_stats;
1634 dev->do_ioctl = private_ioctl; 1638 dev->do_ioctl = private_ioctl;
1635 dev->set_multicast_list = set_rx_mode; 1639 dev->set_multicast_list = set_rx_mode;
1636 #ifdef CONFIG_NET_POLL_CONTROLLER 1640 #ifdef CONFIG_NET_POLL_CONTROLLER
1637 dev->poll_controller = &poll_tulip; 1641 dev->poll_controller = &poll_tulip;
1638 #endif 1642 #endif
1639 SET_ETHTOOL_OPS(dev, &ops); 1643 SET_ETHTOOL_OPS(dev, &ops);
1640 1644
1641 if (register_netdev(dev)) 1645 if (register_netdev(dev))
1642 goto err_out_free_ring; 1646 goto err_out_free_ring;
1643 1647
1644 printk(KERN_INFO "%s: %s rev %d at %p,", 1648 printk(KERN_INFO "%s: %s rev %d at %p,",
1645 dev->name, chip_name, chip_rev, ioaddr); 1649 dev->name, chip_name, chip_rev, ioaddr);
1646 pci_set_drvdata(pdev, dev); 1650 pci_set_drvdata(pdev, dev);
1647 1651
1648 if (eeprom_missing) 1652 if (eeprom_missing)
1649 printk(" EEPROM not present,"); 1653 printk(" EEPROM not present,");
1650 for (i = 0; i < 6; i++) 1654 for (i = 0; i < 6; i++)
1651 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]); 1655 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
1652 printk(", IRQ %d.\n", irq); 1656 printk(", IRQ %d.\n", irq);
1653 1657
1654 if (tp->chip_id == PNIC2) 1658 if (tp->chip_id == PNIC2)
1655 tp->link_change = pnic2_lnk_change; 1659 tp->link_change = pnic2_lnk_change;
1656 else if (tp->flags & HAS_NWAY) 1660 else if (tp->flags & HAS_NWAY)
1657 tp->link_change = t21142_lnk_change; 1661 tp->link_change = t21142_lnk_change;
1658 else if (tp->flags & HAS_PNICNWAY) 1662 else if (tp->flags & HAS_PNICNWAY)
1659 tp->link_change = pnic_lnk_change; 1663 tp->link_change = pnic_lnk_change;
1660 1664
1661 /* Reset the xcvr interface and turn on heartbeat. */ 1665 /* Reset the xcvr interface and turn on heartbeat. */
1662 switch (chip_idx) { 1666 switch (chip_idx) {
1663 case DC21140: 1667 case DC21140:
1664 case DM910X: 1668 case DM910X:
1665 default: 1669 default:
1666 if (tp->mtable) 1670 if (tp->mtable)
1667 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); 1671 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1668 break; 1672 break;
1669 case DC21142: 1673 case DC21142:
1670 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) { 1674 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1671 iowrite32(csr6_mask_defstate, ioaddr + CSR6); 1675 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1672 iowrite32(0x0000, ioaddr + CSR13); 1676 iowrite32(0x0000, ioaddr + CSR13);
1673 iowrite32(0x0000, ioaddr + CSR14); 1677 iowrite32(0x0000, ioaddr + CSR14);
1674 iowrite32(csr6_mask_hdcap, ioaddr + CSR6); 1678 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1675 } else 1679 } else
1676 t21142_start_nway(dev); 1680 t21142_start_nway(dev);
1677 break; 1681 break;
1678 case PNIC2: 1682 case PNIC2:
1679 /* just do a reset for sanity sake */ 1683 /* just do a reset for sanity sake */
1680 iowrite32(0x0000, ioaddr + CSR13); 1684 iowrite32(0x0000, ioaddr + CSR13);
1681 iowrite32(0x0000, ioaddr + CSR14); 1685 iowrite32(0x0000, ioaddr + CSR14);
1682 break; 1686 break;
1683 case LC82C168: 1687 case LC82C168:
1684 if ( ! tp->mii_cnt) { 1688 if ( ! tp->mii_cnt) {
1685 tp->nway = 1; 1689 tp->nway = 1;
1686 tp->nwayset = 0; 1690 tp->nwayset = 0;
1687 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6); 1691 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1688 iowrite32(0x30, ioaddr + CSR12); 1692 iowrite32(0x30, ioaddr + CSR12);
1689 iowrite32(0x0001F078, ioaddr + CSR6); 1693 iowrite32(0x0001F078, ioaddr + CSR6);
1690 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */ 1694 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1691 } 1695 }
1692 break; 1696 break;
1693 case MX98713: 1697 case MX98713:
1694 case COMPEX9881: 1698 case COMPEX9881:
1695 iowrite32(0x00000000, ioaddr + CSR6); 1699 iowrite32(0x00000000, ioaddr + CSR6);
1696 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */ 1700 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1697 iowrite32(0x00000001, ioaddr + CSR13); 1701 iowrite32(0x00000001, ioaddr + CSR13);
1698 break; 1702 break;
1699 case MX98715: 1703 case MX98715:
1700 case MX98725: 1704 case MX98725:
1701 iowrite32(0x01a80000, ioaddr + CSR6); 1705 iowrite32(0x01a80000, ioaddr + CSR6);
1702 iowrite32(0xFFFFFFFF, ioaddr + CSR14); 1706 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1703 iowrite32(0x00001000, ioaddr + CSR12); 1707 iowrite32(0x00001000, ioaddr + CSR12);
1704 break; 1708 break;
1705 case COMET: 1709 case COMET:
1706 /* No initialization necessary. */ 1710 /* No initialization necessary. */
1707 break; 1711 break;
1708 } 1712 }
1709 1713
1710 /* put the chip in snooze mode until opened */ 1714 /* put the chip in snooze mode until opened */
1711 tulip_set_power_state (tp, 0, 1); 1715 tulip_set_power_state (tp, 0, 1);
1712 1716
1713 return 0; 1717 return 0;
1714 1718
1715 err_out_free_ring: 1719 err_out_free_ring:
1716 pci_free_consistent (pdev, 1720 pci_free_consistent (pdev,
1717 sizeof (struct tulip_rx_desc) * RX_RING_SIZE + 1721 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1718 sizeof (struct tulip_tx_desc) * TX_RING_SIZE, 1722 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1719 tp->rx_ring, tp->rx_ring_dma); 1723 tp->rx_ring, tp->rx_ring_dma);
1720 1724
1721 err_out_mtable: 1725 err_out_mtable:
1722 kfree (tp->mtable); 1726 kfree (tp->mtable);
1723 pci_iounmap(pdev, ioaddr); 1727 pci_iounmap(pdev, ioaddr);
1724 1728
1725 err_out_free_res: 1729 err_out_free_res:
1726 pci_release_regions (pdev); 1730 pci_release_regions (pdev);
1727 1731
1728 err_out_free_netdev: 1732 err_out_free_netdev:
1729 free_netdev (dev); 1733 free_netdev (dev);
1730 return -ENODEV; 1734 return -ENODEV;
1731 } 1735 }
1732 1736
1733 1737
1734 #ifdef CONFIG_PM 1738 #ifdef CONFIG_PM
1735 1739
1736 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) 1740 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1737 { 1741 {
1738 struct net_device *dev = pci_get_drvdata(pdev); 1742 struct net_device *dev = pci_get_drvdata(pdev);
1739 1743
1740 if (!dev) 1744 if (!dev)
1741 return -EINVAL; 1745 return -EINVAL;
1742 1746
1743 if (netif_running(dev)) 1747 if (netif_running(dev))
1744 tulip_down(dev); 1748 tulip_down(dev);
1745 1749
1746 netif_device_detach(dev); 1750 netif_device_detach(dev);
1747 free_irq(dev->irq, dev); 1751 free_irq(dev->irq, dev);
1748 1752
1749 pci_save_state(pdev); 1753 pci_save_state(pdev);
1750 pci_disable_device(pdev); 1754 pci_disable_device(pdev);
1751 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1755 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1752 1756
1753 return 0; 1757 return 0;
1754 } 1758 }
1755 1759
1756 1760
1757 static int tulip_resume(struct pci_dev *pdev) 1761 static int tulip_resume(struct pci_dev *pdev)
1758 { 1762 {
1759 struct net_device *dev = pci_get_drvdata(pdev); 1763 struct net_device *dev = pci_get_drvdata(pdev);
1760 int retval; 1764 int retval;
1761 1765
1762 if (!dev) 1766 if (!dev)
1763 return -EINVAL; 1767 return -EINVAL;
1764 1768
1765 pci_set_power_state(pdev, PCI_D0); 1769 pci_set_power_state(pdev, PCI_D0);
1766 pci_restore_state(pdev); 1770 pci_restore_state(pdev);
1767 1771
1768 pci_enable_device(pdev); 1772 pci_enable_device(pdev);
1769 1773
1770 if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev))) { 1774 if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev))) {
1771 printk (KERN_ERR "tulip: request_irq failed in resume\n"); 1775 printk (KERN_ERR "tulip: request_irq failed in resume\n");
1772 return retval; 1776 return retval;
1773 } 1777 }
1774 1778
1775 netif_device_attach(dev); 1779 netif_device_attach(dev);
1776 1780
1777 if (netif_running(dev)) 1781 if (netif_running(dev))
1778 tulip_up(dev); 1782 tulip_up(dev);
1779 1783
1780 return 0; 1784 return 0;
1781 } 1785 }
1782 1786
1783 #endif /* CONFIG_PM */ 1787 #endif /* CONFIG_PM */
1784 1788
1785 1789
1786 static void __devexit tulip_remove_one (struct pci_dev *pdev) 1790 static void __devexit tulip_remove_one (struct pci_dev *pdev)
1787 { 1791 {
1788 struct net_device *dev = pci_get_drvdata (pdev); 1792 struct net_device *dev = pci_get_drvdata (pdev);
1789 struct tulip_private *tp; 1793 struct tulip_private *tp;
1790 1794
1791 if (!dev) 1795 if (!dev)
1792 return; 1796 return;
1793 1797
1794 tp = netdev_priv(dev); 1798 tp = netdev_priv(dev);
1795 unregister_netdev(dev); 1799 unregister_netdev(dev);
1796 pci_free_consistent (pdev, 1800 pci_free_consistent (pdev,
1797 sizeof (struct tulip_rx_desc) * RX_RING_SIZE + 1801 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1798 sizeof (struct tulip_tx_desc) * TX_RING_SIZE, 1802 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1799 tp->rx_ring, tp->rx_ring_dma); 1803 tp->rx_ring, tp->rx_ring_dma);
1800 kfree (tp->mtable); 1804 kfree (tp->mtable);
1801 pci_iounmap(pdev, tp->base_addr); 1805 pci_iounmap(pdev, tp->base_addr);
1802 free_netdev (dev); 1806 free_netdev (dev);
1803 pci_release_regions (pdev); 1807 pci_release_regions (pdev);
1804 pci_set_drvdata (pdev, NULL); 1808 pci_set_drvdata (pdev, NULL);
1805 1809
1806 /* pci_power_off (pdev, -1); */ 1810 /* pci_power_off (pdev, -1); */
1807 } 1811 }
1808 1812
1809 #ifdef CONFIG_NET_POLL_CONTROLLER 1813 #ifdef CONFIG_NET_POLL_CONTROLLER
1810 /* 1814 /*
1811 * Polling 'interrupt' - used by things like netconsole to send skbs 1815 * Polling 'interrupt' - used by things like netconsole to send skbs
1812 * without having to re-enable interrupts. It's not called while 1816 * without having to re-enable interrupts. It's not called while
1813 * the interrupt routine is executing. 1817 * the interrupt routine is executing.
1814 */ 1818 */
1815 1819
1816 static void poll_tulip (struct net_device *dev) 1820 static void poll_tulip (struct net_device *dev)
1817 { 1821 {
1818 /* disable_irq here is not very nice, but with the lockless 1822 /* disable_irq here is not very nice, but with the lockless
1819 interrupt handler we have no other choice. */ 1823 interrupt handler we have no other choice. */
1820 disable_irq(dev->irq); 1824 disable_irq(dev->irq);
1821 tulip_interrupt (dev->irq, dev, NULL); 1825 tulip_interrupt (dev->irq, dev, NULL);
1822 enable_irq(dev->irq); 1826 enable_irq(dev->irq);
1823 } 1827 }
1824 #endif 1828 #endif
1825 1829
1826 static struct pci_driver tulip_driver = { 1830 static struct pci_driver tulip_driver = {
1827 .name = DRV_NAME, 1831 .name = DRV_NAME,
1828 .id_table = tulip_pci_tbl, 1832 .id_table = tulip_pci_tbl,
1829 .probe = tulip_init_one, 1833 .probe = tulip_init_one,
1830 .remove = __devexit_p(tulip_remove_one), 1834 .remove = __devexit_p(tulip_remove_one),
1831 #ifdef CONFIG_PM 1835 #ifdef CONFIG_PM
1832 .suspend = tulip_suspend, 1836 .suspend = tulip_suspend,
1833 .resume = tulip_resume, 1837 .resume = tulip_resume,
1834 #endif /* CONFIG_PM */ 1838 #endif /* CONFIG_PM */
1835 }; 1839 };
1836 1840
1837 1841
1838 static int __init tulip_init (void) 1842 static int __init tulip_init (void)
1839 { 1843 {
1840 #ifdef MODULE 1844 #ifdef MODULE
1841 printk (KERN_INFO "%s", version); 1845 printk (KERN_INFO "%s", version);
1842 #endif 1846 #endif
1843 1847
1844 /* copy module parms into globals */ 1848 /* copy module parms into globals */
1845 tulip_rx_copybreak = rx_copybreak; 1849 tulip_rx_copybreak = rx_copybreak;
1846 tulip_max_interrupt_work = max_interrupt_work; 1850 tulip_max_interrupt_work = max_interrupt_work;
1847 1851
1848 /* probe for and init boards */ 1852 /* probe for and init boards */
1849 return pci_module_init (&tulip_driver); 1853 return pci_module_init (&tulip_driver);
1850 } 1854 }
1851 1855
1852 1856
1853 static void __exit tulip_cleanup (void) 1857 static void __exit tulip_cleanup (void)
1854 { 1858 {
1855 pci_unregister_driver (&tulip_driver); 1859 pci_unregister_driver (&tulip_driver);
1856 } 1860 }
1857 1861
1858 1862
1859 module_init(tulip_init); 1863 module_init(tulip_init);
1860 module_exit(tulip_cleanup); 1864 module_exit(tulip_cleanup);
1861 1865
drivers/sbus/char/openprom.c
1 /* 1 /*
2 * Linux/SPARC PROM Configuration Driver 2 * Linux/SPARC PROM Configuration Driver
3 * Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu) 3 * Copyright (C) 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu)
4 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 4 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
5 * 5 *
6 * This character device driver allows user programs to access the 6 * This character device driver allows user programs to access the
7 * PROM device tree. It is compatible with the SunOS /dev/openprom 7 * PROM device tree. It is compatible with the SunOS /dev/openprom
8 * driver and the NetBSD /dev/openprom driver. The SunOS eeprom 8 * driver and the NetBSD /dev/openprom driver. The SunOS eeprom
9 * utility works without any modifications. 9 * utility works without any modifications.
10 * 10 *
11 * The driver uses a minor number under the misc device major. The 11 * The driver uses a minor number under the misc device major. The
12 * file read/write mode determines the type of access to the PROM. 12 * file read/write mode determines the type of access to the PROM.
13 * Interrupts are disabled whenever the driver calls into the PROM for 13 * Interrupts are disabled whenever the driver calls into the PROM for
14 * sanity's sake. 14 * sanity's sake.
15 */ 15 */
16 16
17 /* This program is free software; you can redistribute it and/or 17 /* This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as 18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of the 19 * published by the Free Software Foundation; either version 2 of the
20 * License, or (at your option) any later version. 20 * License, or (at your option) any later version.
21 * 21 *
22 * This program is distributed in the hope that it will be useful, but 22 * This program is distributed in the hope that it will be useful, but
23 * WITHOUT ANY WARRANTY; without even the implied warranty of 23 * WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 * General Public License for more details. 25 * General Public License for more details.
26 * 26 *
27 * You should have received a copy of the GNU General Public License 27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software 28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */ 30 */
31 31
32 #define PROMLIB_INTERNAL 32 #define PROMLIB_INTERNAL
33 33
34 #include <linux/config.h> 34 #include <linux/config.h>
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <linux/kernel.h> 36 #include <linux/kernel.h>
37 #include <linux/sched.h> 37 #include <linux/sched.h>
38 #include <linux/errno.h> 38 #include <linux/errno.h>
39 #include <linux/slab.h> 39 #include <linux/slab.h>
40 #include <linux/string.h> 40 #include <linux/string.h>
41 #include <linux/miscdevice.h> 41 #include <linux/miscdevice.h>
42 #include <linux/smp_lock.h> 42 #include <linux/smp_lock.h>
43 #include <linux/init.h> 43 #include <linux/init.h>
44 #include <linux/fs.h> 44 #include <linux/fs.h>
45 #include <asm/oplib.h> 45 #include <asm/oplib.h>
46 #include <asm/system.h> 46 #include <asm/system.h>
47 #include <asm/uaccess.h> 47 #include <asm/uaccess.h>
48 #include <asm/openpromio.h> 48 #include <asm/openpromio.h>
49 #ifdef CONFIG_PCI 49 #ifdef CONFIG_PCI
50 #include <linux/pci.h> 50 #include <linux/pci.h>
51 #include <asm/pbm.h> 51 #include <asm/pbm.h>
52 #endif 52 #endif
53 53
54 /* Private data kept by the driver for each descriptor. */ 54 /* Private data kept by the driver for each descriptor. */
55 typedef struct openprom_private_data 55 typedef struct openprom_private_data
56 { 56 {
57 int current_node; /* Current node for SunOS ioctls. */ 57 int current_node; /* Current node for SunOS ioctls. */
58 int lastnode; /* Last valid node used by BSD ioctls. */ 58 int lastnode; /* Last valid node used by BSD ioctls. */
59 } DATA; 59 } DATA;
60 60
61 /* ID of the PROM node containing all of the EEPROM options. */ 61 /* ID of the PROM node containing all of the EEPROM options. */
62 static int options_node = 0; 62 static int options_node = 0;
63 63
64 /* 64 /*
65 * Copy an openpromio structure into kernel space from user space. 65 * Copy an openpromio structure into kernel space from user space.
66 * This routine does error checking to make sure that all memory 66 * This routine does error checking to make sure that all memory
67 * accesses are within bounds. A pointer to the allocated openpromio 67 * accesses are within bounds. A pointer to the allocated openpromio
68 * structure will be placed in "*opp_p". Return value is the length 68 * structure will be placed in "*opp_p". Return value is the length
69 * of the user supplied buffer. 69 * of the user supplied buffer.
70 */ 70 */
71 static int copyin(struct openpromio __user *info, struct openpromio **opp_p) 71 static int copyin(struct openpromio __user *info, struct openpromio **opp_p)
72 { 72 {
73 unsigned int bufsize; 73 unsigned int bufsize;
74 74
75 if (!info || !opp_p) 75 if (!info || !opp_p)
76 return -EFAULT; 76 return -EFAULT;
77 77
78 if (get_user(bufsize, &info->oprom_size)) 78 if (get_user(bufsize, &info->oprom_size))
79 return -EFAULT; 79 return -EFAULT;
80 80
81 if (bufsize == 0) 81 if (bufsize == 0)
82 return -EINVAL; 82 return -EINVAL;
83 83
84 /* If the bufsize is too large, just limit it. 84 /* If the bufsize is too large, just limit it.
85 * Fix from Jason Rappleye. 85 * Fix from Jason Rappleye.
86 */ 86 */
87 if (bufsize > OPROMMAXPARAM) 87 if (bufsize > OPROMMAXPARAM)
88 bufsize = OPROMMAXPARAM; 88 bufsize = OPROMMAXPARAM;
89 89
90 if (!(*opp_p = kmalloc(sizeof(int) + bufsize + 1, GFP_KERNEL))) 90 if (!(*opp_p = kmalloc(sizeof(int) + bufsize + 1, GFP_KERNEL)))
91 return -ENOMEM; 91 return -ENOMEM;
92 memset(*opp_p, 0, sizeof(int) + bufsize + 1); 92 memset(*opp_p, 0, sizeof(int) + bufsize + 1);
93 93
94 if (copy_from_user(&(*opp_p)->oprom_array, 94 if (copy_from_user(&(*opp_p)->oprom_array,
95 &info->oprom_array, bufsize)) { 95 &info->oprom_array, bufsize)) {
96 kfree(*opp_p); 96 kfree(*opp_p);
97 return -EFAULT; 97 return -EFAULT;
98 } 98 }
99 return bufsize; 99 return bufsize;
100 } 100 }
101 101
102 static int getstrings(struct openpromio __user *info, struct openpromio **opp_p) 102 static int getstrings(struct openpromio __user *info, struct openpromio **opp_p)
103 { 103 {
104 int n, bufsize; 104 int n, bufsize;
105 char c; 105 char c;
106 106
107 if (!info || !opp_p) 107 if (!info || !opp_p)
108 return -EFAULT; 108 return -EFAULT;
109 109
110 if (!(*opp_p = kmalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL))) 110 if (!(*opp_p = kmalloc(sizeof(int) + OPROMMAXPARAM + 1, GFP_KERNEL)))
111 return -ENOMEM; 111 return -ENOMEM;
112 112
113 memset(*opp_p, 0, sizeof(int) + OPROMMAXPARAM + 1); 113 memset(*opp_p, 0, sizeof(int) + OPROMMAXPARAM + 1);
114 (*opp_p)->oprom_size = 0; 114 (*opp_p)->oprom_size = 0;
115 115
116 n = bufsize = 0; 116 n = bufsize = 0;
117 while ((n < 2) && (bufsize < OPROMMAXPARAM)) { 117 while ((n < 2) && (bufsize < OPROMMAXPARAM)) {
118 if (get_user(c, &info->oprom_array[bufsize])) { 118 if (get_user(c, &info->oprom_array[bufsize])) {
119 kfree(*opp_p); 119 kfree(*opp_p);
120 return -EFAULT; 120 return -EFAULT;
121 } 121 }
122 if (c == '\0') 122 if (c == '\0')
123 n++; 123 n++;
124 (*opp_p)->oprom_array[bufsize++] = c; 124 (*opp_p)->oprom_array[bufsize++] = c;
125 } 125 }
126 if (!n) { 126 if (!n) {
127 kfree(*opp_p); 127 kfree(*opp_p);
128 return -EINVAL; 128 return -EINVAL;
129 } 129 }
130 return bufsize; 130 return bufsize;
131 } 131 }
132 132
133 /* 133 /*
134 * Copy an openpromio structure in kernel space back to user space. 134 * Copy an openpromio structure in kernel space back to user space.
135 */ 135 */
136 static int copyout(void __user *info, struct openpromio *opp, int len) 136 static int copyout(void __user *info, struct openpromio *opp, int len)
137 { 137 {
138 if (copy_to_user(info, opp, len)) 138 if (copy_to_user(info, opp, len))
139 return -EFAULT; 139 return -EFAULT;
140 return 0; 140 return 0;
141 } 141 }
142 142
143 /* 143 /*
144 * SunOS and Solaris /dev/openprom ioctl calls. 144 * SunOS and Solaris /dev/openprom ioctl calls.
145 */ 145 */
146 static int openprom_sunos_ioctl(struct inode * inode, struct file * file, 146 static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
147 unsigned int cmd, unsigned long arg, int node) 147 unsigned int cmd, unsigned long arg, int node)
148 { 148 {
149 DATA *data = (DATA *) file->private_data; 149 DATA *data = (DATA *) file->private_data;
150 char buffer[OPROMMAXPARAM+1], *buf; 150 char buffer[OPROMMAXPARAM+1], *buf;
151 struct openpromio *opp; 151 struct openpromio *opp;
152 int bufsize, len, error = 0; 152 int bufsize, len, error = 0;
153 static int cnt; 153 static int cnt;
154 void __user *argp = (void __user *)arg; 154 void __user *argp = (void __user *)arg;
155 155
156 if (cmd == OPROMSETOPT) 156 if (cmd == OPROMSETOPT)
157 bufsize = getstrings(argp, &opp); 157 bufsize = getstrings(argp, &opp);
158 else 158 else
159 bufsize = copyin(argp, &opp); 159 bufsize = copyin(argp, &opp);
160 160
161 if (bufsize < 0) 161 if (bufsize < 0)
162 return bufsize; 162 return bufsize;
163 163
164 switch (cmd) { 164 switch (cmd) {
165 case OPROMGETOPT: 165 case OPROMGETOPT:
166 case OPROMGETPROP: 166 case OPROMGETPROP:
167 len = prom_getproplen(node, opp->oprom_array); 167 len = prom_getproplen(node, opp->oprom_array);
168 168
169 if (len <= 0 || len > bufsize) { 169 if (len <= 0 || len > bufsize) {
170 error = copyout(argp, opp, sizeof(int)); 170 error = copyout(argp, opp, sizeof(int));
171 break; 171 break;
172 } 172 }
173 173
174 len = prom_getproperty(node, opp->oprom_array, buffer, bufsize); 174 len = prom_getproperty(node, opp->oprom_array, buffer, bufsize);
175 175
176 memcpy(opp->oprom_array, buffer, len); 176 memcpy(opp->oprom_array, buffer, len);
177 opp->oprom_array[len] = '\0'; 177 opp->oprom_array[len] = '\0';
178 opp->oprom_size = len; 178 opp->oprom_size = len;
179 179
180 error = copyout(argp, opp, sizeof(int) + bufsize); 180 error = copyout(argp, opp, sizeof(int) + bufsize);
181 break; 181 break;
182 182
183 case OPROMNXTOPT: 183 case OPROMNXTOPT:
184 case OPROMNXTPROP: 184 case OPROMNXTPROP:
185 buf = prom_nextprop(node, opp->oprom_array, buffer); 185 buf = prom_nextprop(node, opp->oprom_array, buffer);
186 186
187 len = strlen(buf); 187 len = strlen(buf);
188 if (len == 0 || len + 1 > bufsize) { 188 if (len == 0 || len + 1 > bufsize) {
189 error = copyout(argp, opp, sizeof(int)); 189 error = copyout(argp, opp, sizeof(int));
190 break; 190 break;
191 } 191 }
192 192
193 memcpy(opp->oprom_array, buf, len); 193 memcpy(opp->oprom_array, buf, len);
194 opp->oprom_array[len] = '\0'; 194 opp->oprom_array[len] = '\0';
195 opp->oprom_size = ++len; 195 opp->oprom_size = ++len;
196 196
197 error = copyout(argp, opp, sizeof(int) + bufsize); 197 error = copyout(argp, opp, sizeof(int) + bufsize);
198 break; 198 break;
199 199
200 case OPROMSETOPT: 200 case OPROMSETOPT:
201 case OPROMSETOPT2: 201 case OPROMSETOPT2:
202 buf = opp->oprom_array + strlen(opp->oprom_array) + 1; 202 buf = opp->oprom_array + strlen(opp->oprom_array) + 1;
203 len = opp->oprom_array + bufsize - buf; 203 len = opp->oprom_array + bufsize - buf;
204 204
205 error = prom_setprop(options_node, opp->oprom_array, 205 error = prom_setprop(options_node, opp->oprom_array,
206 buf, len); 206 buf, len);
207 207
208 if (error < 0) 208 if (error < 0)
209 error = -EINVAL; 209 error = -EINVAL;
210 break; 210 break;
211 211
212 case OPROMNEXT: 212 case OPROMNEXT:
213 case OPROMCHILD: 213 case OPROMCHILD:
214 case OPROMSETCUR: 214 case OPROMSETCUR:
215 if (bufsize < sizeof(int)) { 215 if (bufsize < sizeof(int)) {
216 error = -EINVAL; 216 error = -EINVAL;
217 break; 217 break;
218 } 218 }
219 219
220 node = *((int *) opp->oprom_array); 220 node = *((int *) opp->oprom_array);
221 221
222 switch (cmd) { 222 switch (cmd) {
223 case OPROMNEXT: node = __prom_getsibling(node); break; 223 case OPROMNEXT: node = __prom_getsibling(node); break;
224 case OPROMCHILD: node = __prom_getchild(node); break; 224 case OPROMCHILD: node = __prom_getchild(node); break;
225 case OPROMSETCUR: break; 225 case OPROMSETCUR: break;
226 } 226 }
227 227
228 data->current_node = node; 228 data->current_node = node;
229 *((int *)opp->oprom_array) = node; 229 *((int *)opp->oprom_array) = node;
230 opp->oprom_size = sizeof(int); 230 opp->oprom_size = sizeof(int);
231 231
232 error = copyout(argp, opp, bufsize + sizeof(int)); 232 error = copyout(argp, opp, bufsize + sizeof(int));
233 break; 233 break;
234 234
235 case OPROMPCI2NODE: 235 case OPROMPCI2NODE:
236 error = -EINVAL; 236 error = -EINVAL;
237 237
238 if (bufsize >= 2*sizeof(int)) { 238 if (bufsize >= 2*sizeof(int)) {
239 #ifdef CONFIG_PCI 239 #ifdef CONFIG_PCI
240 struct pci_dev *pdev; 240 struct pci_dev *pdev;
241 struct pcidev_cookie *pcp; 241 struct pcidev_cookie *pcp;
242 pdev = pci_find_slot (((int *) opp->oprom_array)[0], 242 pdev = pci_find_slot (((int *) opp->oprom_array)[0],
243 ((int *) opp->oprom_array)[1]); 243 ((int *) opp->oprom_array)[1]);
244 244
245 pcp = pdev->sysdata; 245 pcp = pdev->sysdata;
246 if (pcp != NULL && pcp->prom_node != -1 && pcp->prom_node) { 246 if (pcp != NULL) {
247 node = pcp->prom_node; 247 node = pcp->prom_node->node;
248 data->current_node = node; 248 data->current_node = node;
249 *((int *)opp->oprom_array) = node; 249 *((int *)opp->oprom_array) = node;
250 opp->oprom_size = sizeof(int); 250 opp->oprom_size = sizeof(int);
251 error = copyout(argp, opp, bufsize + sizeof(int)); 251 error = copyout(argp, opp, bufsize + sizeof(int));
252 } 252 }
253 #endif 253 #endif
254 } 254 }
255 break; 255 break;
256 256
257 case OPROMPATH2NODE: 257 case OPROMPATH2NODE:
258 node = prom_finddevice(opp->oprom_array); 258 node = prom_finddevice(opp->oprom_array);
259 data->current_node = node; 259 data->current_node = node;
260 *((int *)opp->oprom_array) = node; 260 *((int *)opp->oprom_array) = node;
261 opp->oprom_size = sizeof(int); 261 opp->oprom_size = sizeof(int);
262 262
263 error = copyout(argp, opp, bufsize + sizeof(int)); 263 error = copyout(argp, opp, bufsize + sizeof(int));
264 break; 264 break;
265 265
266 case OPROMGETBOOTARGS: 266 case OPROMGETBOOTARGS:
267 buf = saved_command_line; 267 buf = saved_command_line;
268 268
269 len = strlen(buf); 269 len = strlen(buf);
270 270
271 if (len > bufsize) { 271 if (len > bufsize) {
272 error = -EINVAL; 272 error = -EINVAL;
273 break; 273 break;
274 } 274 }
275 275
276 strcpy(opp->oprom_array, buf); 276 strcpy(opp->oprom_array, buf);
277 opp->oprom_size = len; 277 opp->oprom_size = len;
278 278
279 error = copyout(argp, opp, bufsize + sizeof(int)); 279 error = copyout(argp, opp, bufsize + sizeof(int));
280 break; 280 break;
281 281
282 case OPROMU2P: 282 case OPROMU2P:
283 case OPROMGETCONS: 283 case OPROMGETCONS:
284 case OPROMGETFBNAME: 284 case OPROMGETFBNAME:
285 if (cnt++ < 10) 285 if (cnt++ < 10)
286 printk(KERN_INFO "openprom_sunos_ioctl: unimplemented ioctl\n"); 286 printk(KERN_INFO "openprom_sunos_ioctl: unimplemented ioctl\n");
287 error = -EINVAL; 287 error = -EINVAL;
288 break; 288 break;
289 default: 289 default:
290 if (cnt++ < 10) 290 if (cnt++ < 10)
291 printk(KERN_INFO "openprom_sunos_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg); 291 printk(KERN_INFO "openprom_sunos_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg);
292 error = -EINVAL; 292 error = -EINVAL;
293 break; 293 break;
294 } 294 }
295 295
296 kfree(opp); 296 kfree(opp);
297 return error; 297 return error;
298 } 298 }
299 299
300 300
301 /* Return nonzero if a specific node is in the PROM device tree. */ 301 /* Return nonzero if a specific node is in the PROM device tree. */
302 static int intree(int root, int node) 302 static int intree(int root, int node)
303 { 303 {
304 for (; root != 0; root = prom_getsibling(root)) 304 for (; root != 0; root = prom_getsibling(root))
305 if (root == node || intree(prom_getchild(root),node)) 305 if (root == node || intree(prom_getchild(root),node))
306 return 1; 306 return 1;
307 return 0; 307 return 0;
308 } 308 }
309 309
310 /* Return nonzero if a specific node is "valid". */ 310 /* Return nonzero if a specific node is "valid". */
311 static int goodnode(int n, DATA *data) 311 static int goodnode(int n, DATA *data)
312 { 312 {
313 if (n == data->lastnode || n == prom_root_node || n == options_node) 313 if (n == data->lastnode || n == prom_root_node || n == options_node)
314 return 1; 314 return 1;
315 if (n == 0 || n == -1 || !intree(prom_root_node,n)) 315 if (n == 0 || n == -1 || !intree(prom_root_node,n))
316 return 0; 316 return 0;
317 data->lastnode = n; 317 data->lastnode = n;
318 return 1; 318 return 1;
319 } 319 }
320 320
321 /* Copy in a whole string from userspace into kernelspace. */ 321 /* Copy in a whole string from userspace into kernelspace. */
322 static int copyin_string(char __user *user, size_t len, char **ptr) 322 static int copyin_string(char __user *user, size_t len, char **ptr)
323 { 323 {
324 char *tmp; 324 char *tmp;
325 325
326 if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0) 326 if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0)
327 return -EINVAL; 327 return -EINVAL;
328 328
329 tmp = kmalloc(len + 1, GFP_KERNEL); 329 tmp = kmalloc(len + 1, GFP_KERNEL);
330 if (!tmp) 330 if (!tmp)
331 return -ENOMEM; 331 return -ENOMEM;
332 332
333 if(copy_from_user(tmp, user, len)) { 333 if(copy_from_user(tmp, user, len)) {
334 kfree(tmp); 334 kfree(tmp);
335 return -EFAULT; 335 return -EFAULT;
336 } 336 }
337 337
338 tmp[len] = '\0'; 338 tmp[len] = '\0';
339 339
340 *ptr = tmp; 340 *ptr = tmp;
341 341
342 return 0; 342 return 0;
343 } 343 }
344 344
345 /* 345 /*
346 * NetBSD /dev/openprom ioctl calls. 346 * NetBSD /dev/openprom ioctl calls.
347 */ 347 */
348 static int openprom_bsd_ioctl(struct inode * inode, struct file * file, 348 static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
349 unsigned int cmd, unsigned long arg) 349 unsigned int cmd, unsigned long arg)
350 { 350 {
351 DATA *data = (DATA *) file->private_data; 351 DATA *data = (DATA *) file->private_data;
352 void __user *argp = (void __user *)arg; 352 void __user *argp = (void __user *)arg;
353 struct opiocdesc op; 353 struct opiocdesc op;
354 int error, node, len; 354 int error, node, len;
355 char *str, *tmp; 355 char *str, *tmp;
356 char buffer[64]; 356 char buffer[64];
357 static int cnt; 357 static int cnt;
358 358
359 switch (cmd) { 359 switch (cmd) {
360 case OPIOCGET: 360 case OPIOCGET:
361 if (copy_from_user(&op, argp, sizeof(op))) 361 if (copy_from_user(&op, argp, sizeof(op)))
362 return -EFAULT; 362 return -EFAULT;
363 363
364 if (!goodnode(op.op_nodeid,data)) 364 if (!goodnode(op.op_nodeid,data))
365 return -EINVAL; 365 return -EINVAL;
366 366
367 error = copyin_string(op.op_name, op.op_namelen, &str); 367 error = copyin_string(op.op_name, op.op_namelen, &str);
368 if (error) 368 if (error)
369 return error; 369 return error;
370 370
371 len = prom_getproplen(op.op_nodeid,str); 371 len = prom_getproplen(op.op_nodeid,str);
372 372
373 if (len > op.op_buflen) { 373 if (len > op.op_buflen) {
374 kfree(str); 374 kfree(str);
375 return -ENOMEM; 375 return -ENOMEM;
376 } 376 }
377 377
378 op.op_buflen = len; 378 op.op_buflen = len;
379 379
380 if (len <= 0) { 380 if (len <= 0) {
381 kfree(str); 381 kfree(str);
382 /* Verified by the above copy_from_user */ 382 /* Verified by the above copy_from_user */
383 if (__copy_to_user(argp, &op, 383 if (__copy_to_user(argp, &op,
384 sizeof(op))) 384 sizeof(op)))
385 return -EFAULT; 385 return -EFAULT;
386 return 0; 386 return 0;
387 } 387 }
388 388
389 tmp = kmalloc(len + 1, GFP_KERNEL); 389 tmp = kmalloc(len + 1, GFP_KERNEL);
390 if (!tmp) { 390 if (!tmp) {
391 kfree(str); 391 kfree(str);
392 return -ENOMEM; 392 return -ENOMEM;
393 } 393 }
394 394
395 cnt = prom_getproperty(op.op_nodeid, str, tmp, len); 395 cnt = prom_getproperty(op.op_nodeid, str, tmp, len);
396 if (cnt <= 0) { 396 if (cnt <= 0) {
397 error = -EINVAL; 397 error = -EINVAL;
398 } else { 398 } else {
399 tmp[len] = '\0'; 399 tmp[len] = '\0';
400 400
401 if (__copy_to_user(argp, &op, sizeof(op)) != 0 || 401 if (__copy_to_user(argp, &op, sizeof(op)) != 0 ||
402 copy_to_user(op.op_buf, tmp, len) != 0) 402 copy_to_user(op.op_buf, tmp, len) != 0)
403 error = -EFAULT; 403 error = -EFAULT;
404 } 404 }
405 405
406 kfree(tmp); 406 kfree(tmp);
407 kfree(str); 407 kfree(str);
408 408
409 return error; 409 return error;
410 410
411 case OPIOCNEXTPROP: 411 case OPIOCNEXTPROP:
412 if (copy_from_user(&op, argp, sizeof(op))) 412 if (copy_from_user(&op, argp, sizeof(op)))
413 return -EFAULT; 413 return -EFAULT;
414 414
415 if (!goodnode(op.op_nodeid,data)) 415 if (!goodnode(op.op_nodeid,data))
416 return -EINVAL; 416 return -EINVAL;
417 417
418 error = copyin_string(op.op_name, op.op_namelen, &str); 418 error = copyin_string(op.op_name, op.op_namelen, &str);
419 if (error) 419 if (error)
420 return error; 420 return error;
421 421
422 tmp = prom_nextprop(op.op_nodeid,str,buffer); 422 tmp = prom_nextprop(op.op_nodeid,str,buffer);
423 423
424 if (tmp) { 424 if (tmp) {
425 len = strlen(tmp); 425 len = strlen(tmp);
426 if (len > op.op_buflen) 426 if (len > op.op_buflen)
427 len = op.op_buflen; 427 len = op.op_buflen;
428 else 428 else
429 op.op_buflen = len; 429 op.op_buflen = len;
430 } else { 430 } else {
431 len = op.op_buflen = 0; 431 len = op.op_buflen = 0;
432 } 432 }
433 433
434 if (!access_ok(VERIFY_WRITE, argp, sizeof(op))) { 434 if (!access_ok(VERIFY_WRITE, argp, sizeof(op))) {
435 kfree(str); 435 kfree(str);
436 return -EFAULT; 436 return -EFAULT;
437 } 437 }
438 438
439 if (!access_ok(VERIFY_WRITE, op.op_buf, len)) { 439 if (!access_ok(VERIFY_WRITE, op.op_buf, len)) {
440 kfree(str); 440 kfree(str);
441 return -EFAULT; 441 return -EFAULT;
442 } 442 }
443 443
444 error = __copy_to_user(argp, &op, sizeof(op)); 444 error = __copy_to_user(argp, &op, sizeof(op));
445 if (!error) error = __copy_to_user(op.op_buf, tmp, len); 445 if (!error) error = __copy_to_user(op.op_buf, tmp, len);
446 446
447 kfree(str); 447 kfree(str);
448 448
449 return error; 449 return error;
450 450
451 case OPIOCSET: 451 case OPIOCSET:
452 if (copy_from_user(&op, argp, sizeof(op))) 452 if (copy_from_user(&op, argp, sizeof(op)))
453 return -EFAULT; 453 return -EFAULT;
454 454
455 if (!goodnode(op.op_nodeid,data)) 455 if (!goodnode(op.op_nodeid,data))
456 return -EINVAL; 456 return -EINVAL;
457 457
458 error = copyin_string(op.op_name, op.op_namelen, &str); 458 error = copyin_string(op.op_name, op.op_namelen, &str);
459 if (error) 459 if (error)
460 return error; 460 return error;
461 461
462 error = copyin_string(op.op_buf, op.op_buflen, &tmp); 462 error = copyin_string(op.op_buf, op.op_buflen, &tmp);
463 if (error) { 463 if (error) {
464 kfree(str); 464 kfree(str);
465 return error; 465 return error;
466 } 466 }
467 467
468 len = prom_setprop(op.op_nodeid,str,tmp,op.op_buflen+1); 468 len = prom_setprop(op.op_nodeid,str,tmp,op.op_buflen+1);
469 469
470 if (len != op.op_buflen) 470 if (len != op.op_buflen)
471 return -EINVAL; 471 return -EINVAL;
472 472
473 kfree(str); 473 kfree(str);
474 kfree(tmp); 474 kfree(tmp);
475 475
476 return 0; 476 return 0;
477 477
478 case OPIOCGETOPTNODE: 478 case OPIOCGETOPTNODE:
479 if (copy_to_user(argp, &options_node, sizeof(int))) 479 if (copy_to_user(argp, &options_node, sizeof(int)))
480 return -EFAULT; 480 return -EFAULT;
481 return 0; 481 return 0;
482 482
483 case OPIOCGETNEXT: 483 case OPIOCGETNEXT:
484 case OPIOCGETCHILD: 484 case OPIOCGETCHILD:
485 if (copy_from_user(&node, argp, sizeof(int))) 485 if (copy_from_user(&node, argp, sizeof(int)))
486 return -EFAULT; 486 return -EFAULT;
487 487
488 if (cmd == OPIOCGETNEXT) 488 if (cmd == OPIOCGETNEXT)
489 node = __prom_getsibling(node); 489 node = __prom_getsibling(node);
490 else 490 else
491 node = __prom_getchild(node); 491 node = __prom_getchild(node);
492 492
493 if (__copy_to_user(argp, &node, sizeof(int))) 493 if (__copy_to_user(argp, &node, sizeof(int)))
494 return -EFAULT; 494 return -EFAULT;
495 495
496 return 0; 496 return 0;
497 497
498 default: 498 default:
499 if (cnt++ < 10) 499 if (cnt++ < 10)
500 printk(KERN_INFO "openprom_bsd_ioctl: cmd 0x%X\n", cmd); 500 printk(KERN_INFO "openprom_bsd_ioctl: cmd 0x%X\n", cmd);
501 return -EINVAL; 501 return -EINVAL;
502 502
503 } 503 }
504 } 504 }
505 505
506 506
507 /* 507 /*
508 * Handoff control to the correct ioctl handler. 508 * Handoff control to the correct ioctl handler.
509 */ 509 */
510 static int openprom_ioctl(struct inode * inode, struct file * file, 510 static int openprom_ioctl(struct inode * inode, struct file * file,
511 unsigned int cmd, unsigned long arg) 511 unsigned int cmd, unsigned long arg)
512 { 512 {
513 DATA *data = (DATA *) file->private_data; 513 DATA *data = (DATA *) file->private_data;
514 static int cnt; 514 static int cnt;
515 515
516 switch (cmd) { 516 switch (cmd) {
517 case OPROMGETOPT: 517 case OPROMGETOPT:
518 case OPROMNXTOPT: 518 case OPROMNXTOPT:
519 if ((file->f_mode & FMODE_READ) == 0) 519 if ((file->f_mode & FMODE_READ) == 0)
520 return -EPERM; 520 return -EPERM;
521 return openprom_sunos_ioctl(inode, file, cmd, arg, 521 return openprom_sunos_ioctl(inode, file, cmd, arg,
522 options_node); 522 options_node);
523 523
524 case OPROMSETOPT: 524 case OPROMSETOPT:
525 case OPROMSETOPT2: 525 case OPROMSETOPT2:
526 if ((file->f_mode & FMODE_WRITE) == 0) 526 if ((file->f_mode & FMODE_WRITE) == 0)
527 return -EPERM; 527 return -EPERM;
528 return openprom_sunos_ioctl(inode, file, cmd, arg, 528 return openprom_sunos_ioctl(inode, file, cmd, arg,
529 options_node); 529 options_node);
530 530
531 case OPROMNEXT: 531 case OPROMNEXT:
532 case OPROMCHILD: 532 case OPROMCHILD:
533 case OPROMGETPROP: 533 case OPROMGETPROP:
534 case OPROMNXTPROP: 534 case OPROMNXTPROP:
535 if ((file->f_mode & FMODE_READ) == 0) 535 if ((file->f_mode & FMODE_READ) == 0)
536 return -EPERM; 536 return -EPERM;
537 return openprom_sunos_ioctl(inode, file, cmd, arg, 537 return openprom_sunos_ioctl(inode, file, cmd, arg,
538 data->current_node); 538 data->current_node);
539 539
540 case OPROMU2P: 540 case OPROMU2P:
541 case OPROMGETCONS: 541 case OPROMGETCONS:
542 case OPROMGETFBNAME: 542 case OPROMGETFBNAME:
543 case OPROMGETBOOTARGS: 543 case OPROMGETBOOTARGS:
544 case OPROMSETCUR: 544 case OPROMSETCUR:
545 case OPROMPCI2NODE: 545 case OPROMPCI2NODE:
546 case OPROMPATH2NODE: 546 case OPROMPATH2NODE:
547 if ((file->f_mode & FMODE_READ) == 0) 547 if ((file->f_mode & FMODE_READ) == 0)
548 return -EPERM; 548 return -EPERM;
549 return openprom_sunos_ioctl(inode, file, cmd, arg, 0); 549 return openprom_sunos_ioctl(inode, file, cmd, arg, 0);
550 550
551 case OPIOCGET: 551 case OPIOCGET:
552 case OPIOCNEXTPROP: 552 case OPIOCNEXTPROP:
553 case OPIOCGETOPTNODE: 553 case OPIOCGETOPTNODE:
554 case OPIOCGETNEXT: 554 case OPIOCGETNEXT:
555 case OPIOCGETCHILD: 555 case OPIOCGETCHILD:
556 if ((file->f_mode & FMODE_READ) == 0) 556 if ((file->f_mode & FMODE_READ) == 0)
557 return -EBADF; 557 return -EBADF;
558 return openprom_bsd_ioctl(inode,file,cmd,arg); 558 return openprom_bsd_ioctl(inode,file,cmd,arg);
559 559
560 case OPIOCSET: 560 case OPIOCSET:
561 if ((file->f_mode & FMODE_WRITE) == 0) 561 if ((file->f_mode & FMODE_WRITE) == 0)
562 return -EBADF; 562 return -EBADF;
563 return openprom_bsd_ioctl(inode,file,cmd,arg); 563 return openprom_bsd_ioctl(inode,file,cmd,arg);
564 564
565 default: 565 default:
566 if (cnt++ < 10) 566 if (cnt++ < 10)
567 printk("openprom_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg); 567 printk("openprom_ioctl: cmd 0x%X, arg 0x%lX\n", cmd, arg);
568 return -EINVAL; 568 return -EINVAL;
569 } 569 }
570 } 570 }
571 571
572 static long openprom_compat_ioctl(struct file *file, unsigned int cmd, 572 static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
573 unsigned long arg) 573 unsigned long arg)
574 { 574 {
575 long rval = -ENOTTY; 575 long rval = -ENOTTY;
576 576
577 /* 577 /*
578 * SunOS/Solaris only, the NetBSD one's have embedded pointers in 578 * SunOS/Solaris only, the NetBSD one's have embedded pointers in
579 * the arg which we'd need to clean up... 579 * the arg which we'd need to clean up...
580 */ 580 */
581 switch (cmd) { 581 switch (cmd) {
582 case OPROMGETOPT: 582 case OPROMGETOPT:
583 case OPROMSETOPT: 583 case OPROMSETOPT:
584 case OPROMNXTOPT: 584 case OPROMNXTOPT:
585 case OPROMSETOPT2: 585 case OPROMSETOPT2:
586 case OPROMNEXT: 586 case OPROMNEXT:
587 case OPROMCHILD: 587 case OPROMCHILD:
588 case OPROMGETPROP: 588 case OPROMGETPROP:
589 case OPROMNXTPROP: 589 case OPROMNXTPROP:
590 case OPROMU2P: 590 case OPROMU2P:
591 case OPROMGETCONS: 591 case OPROMGETCONS:
592 case OPROMGETFBNAME: 592 case OPROMGETFBNAME:
593 case OPROMGETBOOTARGS: 593 case OPROMGETBOOTARGS:
594 case OPROMSETCUR: 594 case OPROMSETCUR:
595 case OPROMPCI2NODE: 595 case OPROMPCI2NODE:
596 case OPROMPATH2NODE: 596 case OPROMPATH2NODE:
597 lock_kernel(); 597 lock_kernel();
598 rval = openprom_ioctl(file->f_dentry->d_inode, file, cmd, arg); 598 rval = openprom_ioctl(file->f_dentry->d_inode, file, cmd, arg);
599 lock_kernel(); 599 lock_kernel();
600 break; 600 break;
601 } 601 }
602 602
603 return rval; 603 return rval;
604 } 604 }
605 605
606 static int openprom_open(struct inode * inode, struct file * file) 606 static int openprom_open(struct inode * inode, struct file * file)
607 { 607 {
608 DATA *data; 608 DATA *data;
609 609
610 data = (DATA *) kmalloc(sizeof(DATA), GFP_KERNEL); 610 data = (DATA *) kmalloc(sizeof(DATA), GFP_KERNEL);
611 if (!data) 611 if (!data)
612 return -ENOMEM; 612 return -ENOMEM;
613 613
614 data->current_node = prom_root_node; 614 data->current_node = prom_root_node;
615 data->lastnode = prom_root_node; 615 data->lastnode = prom_root_node;
616 file->private_data = (void *)data; 616 file->private_data = (void *)data;
617 617
618 return 0; 618 return 0;
619 } 619 }
620 620
621 static int openprom_release(struct inode * inode, struct file * file) 621 static int openprom_release(struct inode * inode, struct file * file)
622 { 622 {
623 kfree(file->private_data); 623 kfree(file->private_data);
624 return 0; 624 return 0;
625 } 625 }
626 626
627 static struct file_operations openprom_fops = { 627 static struct file_operations openprom_fops = {
628 .owner = THIS_MODULE, 628 .owner = THIS_MODULE,
629 .llseek = no_llseek, 629 .llseek = no_llseek,
630 .ioctl = openprom_ioctl, 630 .ioctl = openprom_ioctl,
631 .compat_ioctl = openprom_compat_ioctl, 631 .compat_ioctl = openprom_compat_ioctl,
632 .open = openprom_open, 632 .open = openprom_open,
633 .release = openprom_release, 633 .release = openprom_release,
634 }; 634 };
635 635
636 static struct miscdevice openprom_dev = { 636 static struct miscdevice openprom_dev = {
637 SUN_OPENPROM_MINOR, "openprom", &openprom_fops 637 SUN_OPENPROM_MINOR, "openprom", &openprom_fops
638 }; 638 };
639 639
640 static int __init openprom_init(void) 640 static int __init openprom_init(void)
641 { 641 {
642 int error; 642 int error;
643 643
644 error = misc_register(&openprom_dev); 644 error = misc_register(&openprom_dev);
645 if (error) { 645 if (error) {
646 printk(KERN_ERR "openprom: unable to get misc minor\n"); 646 printk(KERN_ERR "openprom: unable to get misc minor\n");
647 return error; 647 return error;
648 } 648 }
649 649
650 options_node = prom_getchild(prom_root_node); 650 options_node = prom_getchild(prom_root_node);
651 options_node = prom_searchsiblings(options_node,"options"); 651 options_node = prom_searchsiblings(options_node,"options");
652 652
653 if (options_node == 0 || options_node == -1) { 653 if (options_node == 0 || options_node == -1) {
654 printk(KERN_ERR "openprom: unable to find options node\n"); 654 printk(KERN_ERR "openprom: unable to find options node\n");
655 misc_deregister(&openprom_dev); 655 misc_deregister(&openprom_dev);
656 return -EIO; 656 return -EIO;
657 } 657 }
658 658
659 return 0; 659 return 0;
660 } 660 }
661 661
662 static void __exit openprom_cleanup(void) 662 static void __exit openprom_cleanup(void)
663 { 663 {
664 misc_deregister(&openprom_dev); 664 misc_deregister(&openprom_dev);
665 } 665 }
666 666
667 module_init(openprom_init); 667 module_init(openprom_init);
668 module_exit(openprom_cleanup); 668 module_exit(openprom_cleanup);
669 MODULE_LICENSE("GPL"); 669 MODULE_LICENSE("GPL");
670 670
drivers/video/aty/atyfb_base.c
1 /* 1 /*
2 * ATI Frame Buffer Device Driver Core 2 * ATI Frame Buffer Device Driver Core
3 * 3 *
4 * Copyright (C) 2004 Alex Kern <alex.kern@gmx.de> 4 * Copyright (C) 2004 Alex Kern <alex.kern@gmx.de>
5 * Copyright (C) 1997-2001 Geert Uytterhoeven 5 * Copyright (C) 1997-2001 Geert Uytterhoeven
6 * Copyright (C) 1998 Bernd Harries 6 * Copyright (C) 1998 Bernd Harries
7 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) 7 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
8 * 8 *
9 * This driver supports the following ATI graphics chips: 9 * This driver supports the following ATI graphics chips:
10 * - ATI Mach64 10 * - ATI Mach64
11 * 11 *
12 * To do: add support for 12 * To do: add support for
13 * - ATI Rage128 (from aty128fb.c) 13 * - ATI Rage128 (from aty128fb.c)
14 * - ATI Radeon (from radeonfb.c) 14 * - ATI Radeon (from radeonfb.c)
15 * 15 *
16 * This driver is partly based on the PowerMac console driver: 16 * This driver is partly based on the PowerMac console driver:
17 * 17 *
18 * Copyright (C) 1996 Paul Mackerras 18 * Copyright (C) 1996 Paul Mackerras
19 * 19 *
20 * and on the PowerMac ATI/mach64 display driver: 20 * and on the PowerMac ATI/mach64 display driver:
21 * 21 *
22 * Copyright (C) 1997 Michael AK Tesch 22 * Copyright (C) 1997 Michael AK Tesch
23 * 23 *
24 * with work by Jon Howell 24 * with work by Jon Howell
25 * Harry AC Eaton 25 * Harry AC Eaton
26 * Anthony Tong <atong@uiuc.edu> 26 * Anthony Tong <atong@uiuc.edu>
27 * 27 *
28 * Generic LCD support written by Daniel Mantione, ported from 2.4.20 by Alex Kern 28 * Generic LCD support written by Daniel Mantione, ported from 2.4.20 by Alex Kern
29 * Many Thanks to Ville Syrjรคlรค for patches and fixing nasting 16 bit color bug. 29 * Many Thanks to Ville Syrjรคlรค for patches and fixing nasting 16 bit color bug.
30 * 30 *
31 * This file is subject to the terms and conditions of the GNU General Public 31 * This file is subject to the terms and conditions of the GNU General Public
32 * License. See the file COPYING in the main directory of this archive for 32 * License. See the file COPYING in the main directory of this archive for
33 * more details. 33 * more details.
34 * 34 *
35 * Many thanks to Nitya from ATI devrel for support and patience ! 35 * Many thanks to Nitya from ATI devrel for support and patience !
36 */ 36 */
37 37
38 /****************************************************************************** 38 /******************************************************************************
39 39
40 TODO: 40 TODO:
41 41
42 - cursor support on all cards and all ramdacs. 42 - cursor support on all cards and all ramdacs.
43 - cursor parameters controlable via ioctl()s. 43 - cursor parameters controlable via ioctl()s.
44 - guess PLL and MCLK based on the original PLL register values initialized 44 - guess PLL and MCLK based on the original PLL register values initialized
45 by Open Firmware (if they are initialized). BIOS is done 45 by Open Firmware (if they are initialized). BIOS is done
46 46
47 (Anyone with Mac to help with this?) 47 (Anyone with Mac to help with this?)
48 48
49 ******************************************************************************/ 49 ******************************************************************************/
50 50
51 51
52 #include <linux/config.h> 52 #include <linux/config.h>
53 #include <linux/module.h> 53 #include <linux/module.h>
54 #include <linux/moduleparam.h> 54 #include <linux/moduleparam.h>
55 #include <linux/kernel.h> 55 #include <linux/kernel.h>
56 #include <linux/errno.h> 56 #include <linux/errno.h>
57 #include <linux/string.h> 57 #include <linux/string.h>
58 #include <linux/mm.h> 58 #include <linux/mm.h>
59 #include <linux/slab.h> 59 #include <linux/slab.h>
60 #include <linux/vmalloc.h> 60 #include <linux/vmalloc.h>
61 #include <linux/delay.h> 61 #include <linux/delay.h>
62 #include <linux/console.h> 62 #include <linux/console.h>
63 #include <linux/fb.h> 63 #include <linux/fb.h>
64 #include <linux/init.h> 64 #include <linux/init.h>
65 #include <linux/pci.h> 65 #include <linux/pci.h>
66 #include <linux/interrupt.h> 66 #include <linux/interrupt.h>
67 #include <linux/spinlock.h> 67 #include <linux/spinlock.h>
68 #include <linux/wait.h> 68 #include <linux/wait.h>
69 69
70 #include <asm/io.h> 70 #include <asm/io.h>
71 #include <asm/uaccess.h> 71 #include <asm/uaccess.h>
72 72
73 #include <video/mach64.h> 73 #include <video/mach64.h>
74 #include "atyfb.h" 74 #include "atyfb.h"
75 #include "ati_ids.h" 75 #include "ati_ids.h"
76 76
77 #ifdef __powerpc__ 77 #ifdef __powerpc__
78 #include <asm/machdep.h> 78 #include <asm/machdep.h>
79 #include <asm/prom.h> 79 #include <asm/prom.h>
80 #include "../macmodes.h" 80 #include "../macmodes.h"
81 #endif 81 #endif
82 #ifdef __sparc__ 82 #ifdef __sparc__
83 #include <asm/pbm.h> 83 #include <asm/pbm.h>
84 #include <asm/fbio.h> 84 #include <asm/fbio.h>
85 #endif 85 #endif
86 86
87 #ifdef CONFIG_ADB_PMU 87 #ifdef CONFIG_ADB_PMU
88 #include <linux/adb.h> 88 #include <linux/adb.h>
89 #include <linux/pmu.h> 89 #include <linux/pmu.h>
90 #endif 90 #endif
91 #ifdef CONFIG_BOOTX_TEXT 91 #ifdef CONFIG_BOOTX_TEXT
92 #include <asm/btext.h> 92 #include <asm/btext.h>
93 #endif 93 #endif
94 #ifdef CONFIG_PMAC_BACKLIGHT 94 #ifdef CONFIG_PMAC_BACKLIGHT
95 #include <asm/backlight.h> 95 #include <asm/backlight.h>
96 #endif 96 #endif
97 #ifdef CONFIG_MTRR 97 #ifdef CONFIG_MTRR
98 #include <asm/mtrr.h> 98 #include <asm/mtrr.h>
99 #endif 99 #endif
100 100
101 /* 101 /*
102 * Debug flags. 102 * Debug flags.
103 */ 103 */
104 #undef DEBUG 104 #undef DEBUG
105 /*#define DEBUG*/ 105 /*#define DEBUG*/
106 106
107 /* Make sure n * PAGE_SIZE is protected at end of Aperture for GUI-regs */ 107 /* Make sure n * PAGE_SIZE is protected at end of Aperture for GUI-regs */
108 /* - must be large enough to catch all GUI-Regs */ 108 /* - must be large enough to catch all GUI-Regs */
109 /* - must be aligned to a PAGE boundary */ 109 /* - must be aligned to a PAGE boundary */
110 #define GUI_RESERVE (1 * PAGE_SIZE) 110 #define GUI_RESERVE (1 * PAGE_SIZE)
111 111
112 /* FIXME: remove the FAIL definition */ 112 /* FIXME: remove the FAIL definition */
113 #define FAIL(msg) do { \ 113 #define FAIL(msg) do { \
114 if (!(var->activate & FB_ACTIVATE_TEST)) \ 114 if (!(var->activate & FB_ACTIVATE_TEST)) \
115 printk(KERN_CRIT "atyfb: " msg "\n"); \ 115 printk(KERN_CRIT "atyfb: " msg "\n"); \
116 return -EINVAL; \ 116 return -EINVAL; \
117 } while (0) 117 } while (0)
118 #define FAIL_MAX(msg, x, _max_) do { \ 118 #define FAIL_MAX(msg, x, _max_) do { \
119 if (x > _max_) { \ 119 if (x > _max_) { \
120 if (!(var->activate & FB_ACTIVATE_TEST)) \ 120 if (!(var->activate & FB_ACTIVATE_TEST)) \
121 printk(KERN_CRIT "atyfb: " msg " %x(%x)\n", x, _max_); \ 121 printk(KERN_CRIT "atyfb: " msg " %x(%x)\n", x, _max_); \
122 return -EINVAL; \ 122 return -EINVAL; \
123 } \ 123 } \
124 } while (0) 124 } while (0)
125 #ifdef DEBUG 125 #ifdef DEBUG
126 #define DPRINTK(fmt, args...) printk(KERN_DEBUG "atyfb: " fmt, ## args) 126 #define DPRINTK(fmt, args...) printk(KERN_DEBUG "atyfb: " fmt, ## args)
127 #else 127 #else
128 #define DPRINTK(fmt, args...) 128 #define DPRINTK(fmt, args...)
129 #endif 129 #endif
130 130
131 #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args) 131 #define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
132 #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args) 132 #define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
133 133
134 #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) 134 #if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD)
135 static const u32 lt_lcd_regs[] = { 135 static const u32 lt_lcd_regs[] = {
136 CONFIG_PANEL_LG, 136 CONFIG_PANEL_LG,
137 LCD_GEN_CNTL_LG, 137 LCD_GEN_CNTL_LG,
138 DSTN_CONTROL_LG, 138 DSTN_CONTROL_LG,
139 HFB_PITCH_ADDR_LG, 139 HFB_PITCH_ADDR_LG,
140 HORZ_STRETCHING_LG, 140 HORZ_STRETCHING_LG,
141 VERT_STRETCHING_LG, 141 VERT_STRETCHING_LG,
142 0, /* EXT_VERT_STRETCH */ 142 0, /* EXT_VERT_STRETCH */
143 LT_GIO_LG, 143 LT_GIO_LG,
144 POWER_MANAGEMENT_LG 144 POWER_MANAGEMENT_LG
145 }; 145 };
146 146
147 void aty_st_lcd(int index, u32 val, const struct atyfb_par *par) 147 void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
148 { 148 {
149 if (M64_HAS(LT_LCD_REGS)) { 149 if (M64_HAS(LT_LCD_REGS)) {
150 aty_st_le32(lt_lcd_regs[index], val, par); 150 aty_st_le32(lt_lcd_regs[index], val, par);
151 } else { 151 } else {
152 unsigned long temp; 152 unsigned long temp;
153 153
154 /* write addr byte */ 154 /* write addr byte */
155 temp = aty_ld_le32(LCD_INDEX, par); 155 temp = aty_ld_le32(LCD_INDEX, par);
156 aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par); 156 aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
157 /* write the register value */ 157 /* write the register value */
158 aty_st_le32(LCD_DATA, val, par); 158 aty_st_le32(LCD_DATA, val, par);
159 } 159 }
160 } 160 }
161 161
162 u32 aty_ld_lcd(int index, const struct atyfb_par *par) 162 u32 aty_ld_lcd(int index, const struct atyfb_par *par)
163 { 163 {
164 if (M64_HAS(LT_LCD_REGS)) { 164 if (M64_HAS(LT_LCD_REGS)) {
165 return aty_ld_le32(lt_lcd_regs[index], par); 165 return aty_ld_le32(lt_lcd_regs[index], par);
166 } else { 166 } else {
167 unsigned long temp; 167 unsigned long temp;
168 168
169 /* write addr byte */ 169 /* write addr byte */
170 temp = aty_ld_le32(LCD_INDEX, par); 170 temp = aty_ld_le32(LCD_INDEX, par);
171 aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par); 171 aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
172 /* read the register value */ 172 /* read the register value */
173 return aty_ld_le32(LCD_DATA, par); 173 return aty_ld_le32(LCD_DATA, par);
174 } 174 }
175 } 175 }
176 #endif /* defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */ 176 #endif /* defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */
177 177
178 #ifdef CONFIG_FB_ATY_GENERIC_LCD 178 #ifdef CONFIG_FB_ATY_GENERIC_LCD
179 /* 179 /*
180 * ATIReduceRatio -- 180 * ATIReduceRatio --
181 * 181 *
182 * Reduce a fraction by factoring out the largest common divider of the 182 * Reduce a fraction by factoring out the largest common divider of the
183 * fraction's numerator and denominator. 183 * fraction's numerator and denominator.
184 */ 184 */
/*
 * ATIReduceRatio --
 *
 * Reduce a fraction to lowest terms: compute the greatest common
 * divisor of numerator and denominator with Euclid's algorithm and
 * divide both by it.  Both values are updated in place.
 */
static void ATIReduceRatio(int *Numerator, int *Denominator)
{
	int a = *Numerator;
	int b = *Denominator;
	int r;

	/* classic Euclid: on exit b == gcd(*Numerator, *Denominator) */
	while ((r = a % b) != 0) {
		a = b;
		b = r;
	}

	*Numerator /= b;
	*Denominator /= b;
}
201 #endif 201 #endif
202 /* 202 /*
203 * The Hardware parameters for each card 203 * The Hardware parameters for each card
204 */ 204 */
205 205
/*
 * Byte layout of the colour-map (palette) register bank; field order
 * must match the hardware register order, so do not reorder.
 * NOTE(review): individual register roles inferred from the names
 * (windex/rindex select the palette entry for write/read access, lut
 * is the data port, mask the pixel read mask) -- confirm against the
 * Mach64 register reference before relying on them.
 */
struct aty_cmap_regs {
	u8 windex;
	u8 lut;
	u8 mask;
	u8 rindex;
	u8 cntl;
};
213 213
/*
 * One entry of an mmap() translation table.
 * NOTE(review): field roles inferred from the names -- voff/poff look
 * like virtual/physical offsets, size the span length, and prot_flag/
 * prot_mask the page-protection bits to force on/off; verify against
 * the atyfb_mmap() implementation that consumes this table.
 */
struct pci_mmap_map {
	unsigned long voff;
	unsigned long poff;
	unsigned long size;
	unsigned long prot_flag;
	unsigned long prot_mask;
};
221 221
/*
 * Template for the driver's fixed framebuffer parameters: packed-pixel
 * pseudocolor, panning in 8-pixel steps horizontally and single lines
 * vertically.  The remaining fields (apertures, line length, ...) are
 * presumably filled in at probe time -- confirm in aty_init().
 */
static struct fb_fix_screeninfo atyfb_fix __devinitdata = {
	.id = "ATY Mach64",
	.type = FB_TYPE_PACKED_PIXELS,
	.visual = FB_VISUAL_PSEUDOCOLOR,
	.xpanstep = 8,
	.ypanstep = 1,
};
229 229
230 /* 230 /*
231 * Frame buffer device API 231 * Frame buffer device API
232 */ 232 */
233 233
/* fb_ops entry points implemented in this file */
static int atyfb_open(struct fb_info *info, int user);
static int atyfb_release(struct fb_info *info, int user);
static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
static int atyfb_set_par(struct fb_info *info);
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
	u_int transp, struct fb_info *info);
static int atyfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
static int atyfb_blank(int blank, struct fb_info *info);
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg);
/* accelerated drawing ops provided by another translation unit */
extern void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void atyfb_imageblit(struct fb_info *info, const struct fb_image *image);
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
#endif
static int atyfb_sync(struct fb_info *info);

/*
 * Internal routines
 */

static int aty_init(struct fb_info *info, const char *name);
#ifdef CONFIG_ATARI
static int store_video_par(char *videopar, unsigned char m64_num);
#endif

/* hardware CRTC/PLL state saved for later restore (see users below) */
static struct crtc saved_crtc;
static union aty_pll saved_pll;
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);

static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
static int aty_var_to_crtc(const struct fb_info *info, const struct fb_var_screeninfo *var, struct crtc *crtc);
static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *var);
static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
#ifdef CONFIG_PPC
static int read_aty_sense(const struct atyfb_par *par);
#endif
271 271
272 272
273 /* 273 /*
274 * Interface used by the world 274 * Interface used by the world
275 */ 275 */
276 276
/* Fallback display mode, presumably used when no explicit mode is
 * requested -- confirm at the point of use in aty_init(). */
static struct fb_var_screeninfo default_var = {
	/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
	640, 480, 640, 480, 0, 0, 8, 0,
	{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
	0, 0, -1, -1, 0, 39722, 48, 16, 33, 10, 96, 2,
	0, FB_VMODE_NONINTERLACED
};

/* Same fallback expressed as an fb_videomode timing entry. */
static struct fb_videomode defmode = {
	/* 640x480 @ 60 Hz, 31.5 kHz hsync */
	NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
	0, FB_VMODE_NONINTERLACED
};
290 290
/* fbdev operations table wiring the entry points declared above into
 * the framebuffer core; mmap is only overridden on sparc. */
static struct fb_ops atyfb_ops = {
	.owner = THIS_MODULE,
	.fb_open = atyfb_open,
	.fb_release = atyfb_release,
	.fb_check_var = atyfb_check_var,
	.fb_set_par = atyfb_set_par,
	.fb_setcolreg = atyfb_setcolreg,
	.fb_pan_display = atyfb_pan_display,
	.fb_blank = atyfb_blank,
	.fb_ioctl = atyfb_ioctl,
	.fb_fillrect = atyfb_fillrect,
	.fb_copyarea = atyfb_copyarea,
	.fb_imageblit = atyfb_imageblit,
#ifdef __sparc__
	.fb_mmap = atyfb_mmap,
#endif
	.fb_sync = atyfb_sync,
};
309 309
/*
 * Driver options.  Only vmode/cmode are visibly declared as module
 * parameters here; the rest are presumably filled in from the
 * "video=atyfb:..." option string -- confirm in the option parser
 * elsewhere in this file.
 */
static int noaccel;
#ifdef CONFIG_MTRR
static int nomtrr;
#endif
static int vram;
static int pll;
static int mclk;
static int xclk;
static int comp_sync __initdata = -1;	/* -1: not specified */
static char *mode;

#ifdef CONFIG_PPC
static int default_vmode __initdata = VMODE_CHOOSE;
static int default_cmode __initdata = CMODE_CHOOSE;

module_param_named(vmode, default_vmode, int, 0);
MODULE_PARM_DESC(vmode, "int: video mode for mac");
module_param_named(cmode, default_cmode, int, 0);
MODULE_PARM_DESC(cmode, "int: color mode for mac");
#endif

#ifdef CONFIG_ATARI
/* per-card physical resources passed in on Atari (one slot per fb) */
static unsigned int mach64_count __initdata = 0;
static unsigned long phys_vmembase[FB_MAX] __initdata = { 0, };
static unsigned long phys_size[FB_MAX] __initdata = { 0, };
static unsigned long phys_guiregbase[FB_MAX] __initdata = { 0, };
#endif
337 337
/*
 * Per-chip-generation M64F_* feature-flag sets, consumed by the
 * aty_chips[] table below and by correct_chipset().
 */
/* top -> down is an evolution of mach64 chipset, any corrections? */
#define ATI_CHIP_88800GX (M64F_GX)
#define ATI_CHIP_88800CX (M64F_GX)

#define ATI_CHIP_264CT (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264ET (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)

#define ATI_CHIP_264VT (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264GT (M64F_GT | M64F_INTEGRATED | M64F_MAGIC_FIFO | M64F_EXTRA_BRIGHT)

#define ATI_CHIP_264VTB (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP)
#define ATI_CHIP_264VT3 (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP | M64F_SDRAM_MAGIC_PLL)
#define ATI_CHIP_264VT4 (M64F_VT | M64F_INTEGRATED | M64F_GTB_DSP)

/* FIXME what is this chip? */
#define ATI_CHIP_264LT (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP)

/* make sets shorter */
#define ATI_MODERN_SET (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP | M64F_EXTRA_BRIGHT)

#define ATI_CHIP_264GTB (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL)
/*#define ATI_CHIP_264GTDVD ?*/
#define ATI_CHIP_264LTG (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL)

#define ATI_CHIP_264GT2C (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE)
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)

#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4)
#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS)
368 368
/*
 * Supported-chip table, keyed by PCI device ID: printable name,
 * default pll/mclk/xclk/ecp_max limits (apparently in MHz -- TODO
 * confirm units against the pll code) and the M64F_* feature mask.
 * correct_chipset() scans this table and may further override the
 * values per silicon revision.
 */
static struct {
	u16 pci_id;
	const char *name;
	int pll, mclk, xclk, ecp_max;
	u32 features;
} aty_chips[] __devinitdata = {
#ifdef CONFIG_FB_ATY_GX
	/* Mach64 GX */
	{ PCI_CHIP_MACH64GX, "ATI888GX00 (Mach64 GX)", 135, 50, 50, 0, ATI_CHIP_88800GX },
	{ PCI_CHIP_MACH64CX, "ATI888CX00 (Mach64 CX)", 135, 50, 50, 0, ATI_CHIP_88800CX },
#endif /* CONFIG_FB_ATY_GX */

#ifdef CONFIG_FB_ATY_CT
	{ PCI_CHIP_MACH64CT, "ATI264CT (Mach64 CT)", 135, 60, 60, 0, ATI_CHIP_264CT },
	{ PCI_CHIP_MACH64ET, "ATI264ET (Mach64 ET)", 135, 60, 60, 0, ATI_CHIP_264ET },

	/* FIXME what is this chip? */
	{ PCI_CHIP_MACH64LT, "ATI264LT (Mach64 LT)", 135, 63, 63, 0, ATI_CHIP_264LT },

	{ PCI_CHIP_MACH64VT, "ATI264VT (Mach64 VT)", 170, 67, 67, 80, ATI_CHIP_264VT },
	{ PCI_CHIP_MACH64GT, "3D RAGE (Mach64 GT)", 135, 63, 63, 80, ATI_CHIP_264GT },

	{ PCI_CHIP_MACH64VU, "ATI264VT3 (Mach64 VU)", 200, 67, 67, 80, ATI_CHIP_264VT3 },
	{ PCI_CHIP_MACH64GU, "3D RAGE II+ (Mach64 GU)", 200, 67, 67, 100, ATI_CHIP_264GTB },

	{ PCI_CHIP_MACH64LG, "3D RAGE LT (Mach64 LG)", 230, 63, 63, 100, ATI_CHIP_264LTG | M64F_LT_LCD_REGS | M64F_G3_PB_1024x768 },

	{ PCI_CHIP_MACH64VV, "ATI264VT4 (Mach64 VV)", 230, 83, 83, 100, ATI_CHIP_264VT4 },

	{ PCI_CHIP_MACH64GV, "3D RAGE IIC (Mach64 GV, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
	{ PCI_CHIP_MACH64GW, "3D RAGE IIC (Mach64 GW, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
	{ PCI_CHIP_MACH64GY, "3D RAGE IIC (Mach64 GY, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
	{ PCI_CHIP_MACH64GZ, "3D RAGE IIC (Mach64 GZ, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },

	{ PCI_CHIP_MACH64GB, "3D RAGE PRO (Mach64 GB, BGA, AGP)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
	{ PCI_CHIP_MACH64GD, "3D RAGE PRO (Mach64 GD, BGA, AGP 1x)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
	{ PCI_CHIP_MACH64GI, "3D RAGE PRO (Mach64 GI, BGA, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO | M64F_MAGIC_VRAM_SIZE },
	{ PCI_CHIP_MACH64GP, "3D RAGE PRO (Mach64 GP, PQFP, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
	{ PCI_CHIP_MACH64GQ, "3D RAGE PRO (Mach64 GQ, PQFP, PCI, limited 3D)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },

	{ PCI_CHIP_MACH64LB, "3D RAGE LT PRO (Mach64 LB, AGP)", 236, 75, 100, 135, ATI_CHIP_264LTPRO },
	{ PCI_CHIP_MACH64LD, "3D RAGE LT PRO (Mach64 LD, AGP)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
	{ PCI_CHIP_MACH64LI, "3D RAGE LT PRO (Mach64 LI, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1_1 | M64F_G3_PB_1024x768 },
	{ PCI_CHIP_MACH64LP, "3D RAGE LT PRO (Mach64 LP, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
	{ PCI_CHIP_MACH64LQ, "3D RAGE LT PRO (Mach64 LQ, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },

	{ PCI_CHIP_MACH64GM, "3D RAGE XL (Mach64 GM, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
	{ PCI_CHIP_MACH64GN, "3D RAGE XC (Mach64 GN, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
	{ PCI_CHIP_MACH64GO, "3D RAGE XL (Mach64 GO, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
	{ PCI_CHIP_MACH64GL, "3D RAGE XC (Mach64 GL, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
	{ PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL | M64F_SDRAM_MAGIC_PLL },
	{ PCI_CHIP_MACH64GS, "3D RAGE XC (Mach64 GS, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL },

	{ PCI_CHIP_MACH64LM, "3D RAGE Mobility P/M (Mach64 LM, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
	{ PCI_CHIP_MACH64LN, "3D RAGE Mobility L (Mach64 LN, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
	{ PCI_CHIP_MACH64LR, "3D RAGE Mobility P/M (Mach64 LR, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
	{ PCI_CHIP_MACH64LS, "3D RAGE Mobility L (Mach64 LS, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
#endif /* CONFIG_FB_ATY_CT */
};
428 428
429 /* can not fail */ 429 /* can not fail */
430 static int __devinit correct_chipset(struct atyfb_par *par) 430 static int __devinit correct_chipset(struct atyfb_par *par)
431 { 431 {
432 u8 rev; 432 u8 rev;
433 u16 type; 433 u16 type;
434 u32 chip_id; 434 u32 chip_id;
435 const char *name; 435 const char *name;
436 int i; 436 int i;
437 437
438 for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--) 438 for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
439 if (par->pci_id == aty_chips[i].pci_id) 439 if (par->pci_id == aty_chips[i].pci_id)
440 break; 440 break;
441 441
442 name = aty_chips[i].name; 442 name = aty_chips[i].name;
443 par->pll_limits.pll_max = aty_chips[i].pll; 443 par->pll_limits.pll_max = aty_chips[i].pll;
444 par->pll_limits.mclk = aty_chips[i].mclk; 444 par->pll_limits.mclk = aty_chips[i].mclk;
445 par->pll_limits.xclk = aty_chips[i].xclk; 445 par->pll_limits.xclk = aty_chips[i].xclk;
446 par->pll_limits.ecp_max = aty_chips[i].ecp_max; 446 par->pll_limits.ecp_max = aty_chips[i].ecp_max;
447 par->features = aty_chips[i].features; 447 par->features = aty_chips[i].features;
448 448
449 chip_id = aty_ld_le32(CONFIG_CHIP_ID, par); 449 chip_id = aty_ld_le32(CONFIG_CHIP_ID, par);
450 type = chip_id & CFG_CHIP_TYPE; 450 type = chip_id & CFG_CHIP_TYPE;
451 rev = (chip_id & CFG_CHIP_REV) >> 24; 451 rev = (chip_id & CFG_CHIP_REV) >> 24;
452 452
453 switch(par->pci_id) { 453 switch(par->pci_id) {
454 #ifdef CONFIG_FB_ATY_GX 454 #ifdef CONFIG_FB_ATY_GX
455 case PCI_CHIP_MACH64GX: 455 case PCI_CHIP_MACH64GX:
456 if(type != 0x00d7) 456 if(type != 0x00d7)
457 return -ENODEV; 457 return -ENODEV;
458 break; 458 break;
459 case PCI_CHIP_MACH64CX: 459 case PCI_CHIP_MACH64CX:
460 if(type != 0x0057) 460 if(type != 0x0057)
461 return -ENODEV; 461 return -ENODEV;
462 break; 462 break;
463 #endif 463 #endif
464 #ifdef CONFIG_FB_ATY_CT 464 #ifdef CONFIG_FB_ATY_CT
465 case PCI_CHIP_MACH64VT: 465 case PCI_CHIP_MACH64VT:
466 switch (rev & 0x07) { 466 switch (rev & 0x07) {
467 case 0x00: 467 case 0x00:
468 switch (rev & 0xc0) { 468 switch (rev & 0xc0) {
469 case 0x00: 469 case 0x00:
470 name = "ATI264VT (A3) (Mach64 VT)"; 470 name = "ATI264VT (A3) (Mach64 VT)";
471 par->pll_limits.pll_max = 170; 471 par->pll_limits.pll_max = 170;
472 par->pll_limits.mclk = 67; 472 par->pll_limits.mclk = 67;
473 par->pll_limits.xclk = 67; 473 par->pll_limits.xclk = 67;
474 par->pll_limits.ecp_max = 80; 474 par->pll_limits.ecp_max = 80;
475 par->features = ATI_CHIP_264VT; 475 par->features = ATI_CHIP_264VT;
476 break; 476 break;
477 case 0x40: 477 case 0x40:
478 name = "ATI264VT2 (A4) (Mach64 VT)"; 478 name = "ATI264VT2 (A4) (Mach64 VT)";
479 par->pll_limits.pll_max = 200; 479 par->pll_limits.pll_max = 200;
480 par->pll_limits.mclk = 67; 480 par->pll_limits.mclk = 67;
481 par->pll_limits.xclk = 67; 481 par->pll_limits.xclk = 67;
482 par->pll_limits.ecp_max = 80; 482 par->pll_limits.ecp_max = 80;
483 par->features = ATI_CHIP_264VT | M64F_MAGIC_POSTDIV; 483 par->features = ATI_CHIP_264VT | M64F_MAGIC_POSTDIV;
484 break; 484 break;
485 } 485 }
486 break; 486 break;
487 case 0x01: 487 case 0x01:
488 name = "ATI264VT3 (B1) (Mach64 VT)"; 488 name = "ATI264VT3 (B1) (Mach64 VT)";
489 par->pll_limits.pll_max = 200; 489 par->pll_limits.pll_max = 200;
490 par->pll_limits.mclk = 67; 490 par->pll_limits.mclk = 67;
491 par->pll_limits.xclk = 67; 491 par->pll_limits.xclk = 67;
492 par->pll_limits.ecp_max = 80; 492 par->pll_limits.ecp_max = 80;
493 par->features = ATI_CHIP_264VTB; 493 par->features = ATI_CHIP_264VTB;
494 break; 494 break;
495 case 0x02: 495 case 0x02:
496 name = "ATI264VT3 (B2) (Mach64 VT)"; 496 name = "ATI264VT3 (B2) (Mach64 VT)";
497 par->pll_limits.pll_max = 200; 497 par->pll_limits.pll_max = 200;
498 par->pll_limits.mclk = 67; 498 par->pll_limits.mclk = 67;
499 par->pll_limits.xclk = 67; 499 par->pll_limits.xclk = 67;
500 par->pll_limits.ecp_max = 80; 500 par->pll_limits.ecp_max = 80;
501 par->features = ATI_CHIP_264VT3; 501 par->features = ATI_CHIP_264VT3;
502 break; 502 break;
503 } 503 }
504 break; 504 break;
505 case PCI_CHIP_MACH64GT: 505 case PCI_CHIP_MACH64GT:
506 switch (rev & 0x07) { 506 switch (rev & 0x07) {
507 case 0x01: 507 case 0x01:
508 name = "3D RAGE II (Mach64 GT)"; 508 name = "3D RAGE II (Mach64 GT)";
509 par->pll_limits.pll_max = 170; 509 par->pll_limits.pll_max = 170;
510 par->pll_limits.mclk = 67; 510 par->pll_limits.mclk = 67;
511 par->pll_limits.xclk = 67; 511 par->pll_limits.xclk = 67;
512 par->pll_limits.ecp_max = 80; 512 par->pll_limits.ecp_max = 80;
513 par->features = ATI_CHIP_264GTB; 513 par->features = ATI_CHIP_264GTB;
514 break; 514 break;
515 case 0x02: 515 case 0x02:
516 name = "3D RAGE II+ (Mach64 GT)"; 516 name = "3D RAGE II+ (Mach64 GT)";
517 par->pll_limits.pll_max = 200; 517 par->pll_limits.pll_max = 200;
518 par->pll_limits.mclk = 67; 518 par->pll_limits.mclk = 67;
519 par->pll_limits.xclk = 67; 519 par->pll_limits.xclk = 67;
520 par->pll_limits.ecp_max = 100; 520 par->pll_limits.ecp_max = 100;
521 par->features = ATI_CHIP_264GTB; 521 par->features = ATI_CHIP_264GTB;
522 break; 522 break;
523 } 523 }
524 break; 524 break;
525 #endif 525 #endif
526 } 526 }
527 527
528 PRINTKI("%s [0x%04x rev 0x%02x]\n", name, type, rev); 528 PRINTKI("%s [0x%04x rev 0x%02x]\n", name, type, rev);
529 return 0; 529 return 0;
530 } 530 }
531 531
/*
 * Printable RAM-type names.  aty_gx_ram[] / aty_ct_ram[] below map the
 * 3-bit memory-type code (presumably read from a config/status
 * register during probe -- TODO confirm which) to these strings.
 */
static char ram_dram[] __devinitdata = "DRAM";
static char ram_resv[] __devinitdata = "RESV";
#ifdef CONFIG_FB_ATY_GX
static char ram_vram[] __devinitdata = "VRAM";
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */


/* 16 palette entries plus one spare, as conventionally used by the
 * fbdev console pseudo-palette */
static u32 pseudo_palette[17];

#ifdef CONFIG_FB_ATY_GX
static char *aty_gx_ram[8] __devinitdata = {
	ram_dram, ram_vram, ram_vram, ram_dram,
	ram_dram, ram_vram, ram_vram, ram_resv
};
#endif /* CONFIG_FB_ATY_GX */

#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
	ram_off, ram_dram, ram_edo, ram_edo,
	ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
561 561
562 static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *par) 562 static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var, struct atyfb_par *par)
563 { 563 {
564 u32 pixclock = var->pixclock; 564 u32 pixclock = var->pixclock;
565 #ifdef CONFIG_FB_ATY_GENERIC_LCD 565 #ifdef CONFIG_FB_ATY_GENERIC_LCD
566 u32 lcd_on_off; 566 u32 lcd_on_off;
567 par->pll.ct.xres = 0; 567 par->pll.ct.xres = 0;
568 if (par->lcd_table != 0) { 568 if (par->lcd_table != 0) {
569 lcd_on_off = aty_ld_lcd(LCD_GEN_CNTL, par); 569 lcd_on_off = aty_ld_lcd(LCD_GEN_CNTL, par);
570 if(lcd_on_off & LCD_ON) { 570 if(lcd_on_off & LCD_ON) {
571 par->pll.ct.xres = var->xres; 571 par->pll.ct.xres = var->xres;
572 pixclock = par->lcd_pixclock; 572 pixclock = par->lcd_pixclock;
573 } 573 }
574 } 574 }
575 #endif 575 #endif
576 return pixclock; 576 return pixclock;
577 } 577 }
578 578
579 #if defined(CONFIG_PPC) 579 #if defined(CONFIG_PPC)
580 580
581 /* 581 /*
582 * Apple monitor sense 582 * Apple monitor sense
583 */ 583 */
584 584
/*
 * Probe the Apple monitor sense lines wired to the card's GP_IO pins.
 *
 * The sequence drives the sense outputs, releases them, then pulls
 * each of the three lines (A, B, C) low in turn while reading back the
 * other two, with settle delays between every step; the bit shifts
 * pack the primary value and the three extended readbacks into one
 * composite sense code for the monitor-detection logic.  The exact
 * GP_IO bit assignments are taken on faith from the constants below.
 */
static int __init read_aty_sense(const struct atyfb_par *par)
{
	int sense, i;

	aty_st_le32(GP_IO, 0x31003100, par); /* drive outputs high */
	__delay(200);
	aty_st_le32(GP_IO, 0, par); /* turn off outputs */
	__delay(2000);
	i = aty_ld_le32(GP_IO, par); /* get primary sense value */
	sense = ((i & 0x3000) >> 3) | (i & 0x100);

	/* drive each sense line low in turn and collect the other 2 */
	aty_st_le32(GP_IO, 0x20000000, par); /* drive A low */
	__delay(2000);
	i = aty_ld_le32(GP_IO, par);
	sense |= ((i & 0x1000) >> 7) | ((i & 0x100) >> 4);
	aty_st_le32(GP_IO, 0x20002000, par); /* drive A high again */
	__delay(200);

	aty_st_le32(GP_IO, 0x10000000, par); /* drive B low */
	__delay(2000);
	i = aty_ld_le32(GP_IO, par);
	sense |= ((i & 0x2000) >> 10) | ((i & 0x100) >> 6);
	aty_st_le32(GP_IO, 0x10001000, par); /* drive B high again */
	__delay(200);

	aty_st_le32(GP_IO, 0x01000000, par); /* drive C low */
	__delay(2000);
	sense |= (aty_ld_le32(GP_IO, par) & 0x3000) >> 12;
	aty_st_le32(GP_IO, 0, par); /* turn off outputs */
	return sense;
}
617 617
618 #endif /* defined(CONFIG_PPC) */ 618 #endif /* defined(CONFIG_PPC) */
619 619
620 /* ------------------------------------------------------------------------- */ 620 /* ------------------------------------------------------------------------- */
621 621
622 /* 622 /*
623 * CRTC programming 623 * CRTC programming
624 */ 624 */
625 625
626 static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc) 626 static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc)
627 { 627 {
628 #ifdef CONFIG_FB_ATY_GENERIC_LCD 628 #ifdef CONFIG_FB_ATY_GENERIC_LCD
629 if (par->lcd_table != 0) { 629 if (par->lcd_table != 0) {
630 if(!M64_HAS(LT_LCD_REGS)) { 630 if(!M64_HAS(LT_LCD_REGS)) {
631 crtc->lcd_index = aty_ld_le32(LCD_INDEX, par); 631 crtc->lcd_index = aty_ld_le32(LCD_INDEX, par);
632 aty_st_le32(LCD_INDEX, crtc->lcd_index, par); 632 aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
633 } 633 }
634 crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par); 634 crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par);
635 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par); 635 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par);
636 636
637 637
638 /* switch to non shadow registers */ 638 /* switch to non shadow registers */
639 aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl & 639 aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
640 ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par); 640 ~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
641 641
642 /* save stretching */ 642 /* save stretching */
643 crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par); 643 crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
644 crtc->vert_stretching = aty_ld_lcd(VERT_STRETCHING, par); 644 crtc->vert_stretching = aty_ld_lcd(VERT_STRETCHING, par);
645 if (!M64_HAS(LT_LCD_REGS)) 645 if (!M64_HAS(LT_LCD_REGS))
646 crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par); 646 crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par);
647 } 647 }
648 #endif 648 #endif
649 crtc->h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par); 649 crtc->h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
650 crtc->h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par); 650 crtc->h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
651 crtc->v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par); 651 crtc->v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
652 crtc->v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par); 652 crtc->v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
653 crtc->vline_crnt_vline = aty_ld_le32(CRTC_VLINE_CRNT_VLINE, par); 653 crtc->vline_crnt_vline = aty_ld_le32(CRTC_VLINE_CRNT_VLINE, par);
654 crtc->off_pitch = aty_ld_le32(CRTC_OFF_PITCH, par); 654 crtc->off_pitch = aty_ld_le32(CRTC_OFF_PITCH, par);
655 crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par); 655 crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
656 656
657 #ifdef CONFIG_FB_ATY_GENERIC_LCD 657 #ifdef CONFIG_FB_ATY_GENERIC_LCD
658 if (par->lcd_table != 0) { 658 if (par->lcd_table != 0) {
659 /* switch to shadow registers */ 659 /* switch to shadow registers */
660 aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) | 660 aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
661 SHADOW_EN | SHADOW_RW_EN, par); 661 SHADOW_EN | SHADOW_RW_EN, par);
662 662
663 crtc->shadow_h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par); 663 crtc->shadow_h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
664 crtc->shadow_h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par); 664 crtc->shadow_h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
665 crtc->shadow_v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par); 665 crtc->shadow_v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
666 crtc->shadow_v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par); 666 crtc->shadow_v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
667 667
668 aty_st_le32(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par); 668 aty_st_le32(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);
669 } 669 }
670 #endif /* CONFIG_FB_ATY_GENERIC_LCD */ 670 #endif /* CONFIG_FB_ATY_GENERIC_LCD */
671 } 671 }
672 672
/*
 * Program the hardware from a fully computed *crtc state (the inverse of
 * aty_get_crtc()).  The write ordering here is deliberate and must not be
 * changed: on LCD chips the CRTC is stopped and stretching disabled before
 * the primary timings are loaded, and the shadow timings plus stretching
 * are only restored afterwards.  All LCD controller registers are written
 * through aty_st_lcd() (indexed access); CRTC registers through
 * aty_st_le32() (direct MMIO).
 */
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
{
#ifdef CONFIG_FB_ATY_GENERIC_LCD
	if (par->lcd_table != 0) {
		/* stop CRTC */
		aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~(CRTC_EXT_DISP_EN | CRTC_EN), par);

		/* update non-shadow registers first */
		aty_st_lcd(CONFIG_PANEL, crtc->lcd_config_panel, par);
		aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
			~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);

		/* temporarily disable stretching so the new timings are not
		 * blended/stretched while being loaded */
		aty_st_lcd(HORZ_STRETCHING,
			crtc->horz_stretching &
			~(HORZ_STRETCH_MODE | HORZ_STRETCH_EN), par);
		aty_st_lcd(VERT_STRETCHING,
			crtc->vert_stretching &
			~(VERT_STRETCH_RATIO1 | VERT_STRETCH_RATIO2 |
			VERT_STRETCH_USE0 | VERT_STRETCH_EN), par);
	}
#endif
	/* turn off CRT */
	aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~CRTC_EN, par);

	DPRINTK("setting up CRTC\n");
	DPRINTK("set primary CRT to %ix%i %c%c composite %c\n",
	    ((((crtc->h_tot_disp>>16) & 0xff) + 1)<<3), (((crtc->v_tot_disp>>16) & 0x7ff) + 1),
	    (crtc->h_sync_strt_wid & 0x200000)?'N':'P', (crtc->v_sync_strt_wid & 0x200000)?'N':'P',
	    (crtc->gen_cntl & CRTC_CSYNC_EN)?'P':'N');

	DPRINTK("CRTC_H_TOTAL_DISP: %x\n",crtc->h_tot_disp);
	DPRINTK("CRTC_H_SYNC_STRT_WID: %x\n",crtc->h_sync_strt_wid);
	DPRINTK("CRTC_V_TOTAL_DISP: %x\n",crtc->v_tot_disp);
	DPRINTK("CRTC_V_SYNC_STRT_WID: %x\n",crtc->v_sync_strt_wid);
	DPRINTK("CRTC_OFF_PITCH: %x\n", crtc->off_pitch);
	DPRINTK("CRTC_VLINE_CRNT_VLINE: %x\n", crtc->vline_crnt_vline);
	DPRINTK("CRTC_GEN_CNTL: %x\n",crtc->gen_cntl);

	/* load primary CRTC timings; CRTC_GEN_CNTL last since it re-enables
	 * the CRTC (CRTC_EN) */
	aty_st_le32(CRTC_H_TOTAL_DISP, crtc->h_tot_disp, par);
	aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->h_sync_strt_wid, par);
	aty_st_le32(CRTC_V_TOTAL_DISP, crtc->v_tot_disp, par);
	aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->v_sync_strt_wid, par);
	aty_st_le32(CRTC_OFF_PITCH, crtc->off_pitch, par);
	aty_st_le32(CRTC_VLINE_CRNT_VLINE, crtc->vline_crnt_vline, par);

	aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl, par);
#if 0
	FIXME
	if (par->accel_flags & FB_ACCELF_TEXT)
		aty_init_engine(par, info);
#endif
#ifdef CONFIG_FB_ATY_GENERIC_LCD
	/* after setting the CRTC registers we should set the LCD registers. */
	if (par->lcd_table != 0) {
		/* switch to shadow registers */
		aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
			(SHADOW_EN | SHADOW_RW_EN), par);

		DPRINTK("set shadow CRT to %ix%i %c%c\n",
		    ((((crtc->shadow_h_tot_disp>>16) & 0xff) + 1)<<3), (((crtc->shadow_v_tot_disp>>16) & 0x7ff) + 1),
		    (crtc->shadow_h_sync_strt_wid & 0x200000)?'N':'P', (crtc->shadow_v_sync_strt_wid & 0x200000)?'N':'P');

		DPRINTK("SHADOW CRTC_H_TOTAL_DISP: %x\n", crtc->shadow_h_tot_disp);
		DPRINTK("SHADOW CRTC_H_SYNC_STRT_WID: %x\n", crtc->shadow_h_sync_strt_wid);
		DPRINTK("SHADOW CRTC_V_TOTAL_DISP: %x\n", crtc->shadow_v_tot_disp);
		DPRINTK("SHADOW CRTC_V_SYNC_STRT_WID: %x\n", crtc->shadow_v_sync_strt_wid);

		aty_st_le32(CRTC_H_TOTAL_DISP, crtc->shadow_h_tot_disp, par);
		aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->shadow_h_sync_strt_wid, par);
		aty_st_le32(CRTC_V_TOTAL_DISP, crtc->shadow_v_tot_disp, par);
		aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->shadow_v_sync_strt_wid, par);

		/* restore CRTC selection & shadow state and enable stretching */
		DPRINTK("LCD_GEN_CNTL: %x\n", crtc->lcd_gen_cntl);
		DPRINTK("HORZ_STRETCHING: %x\n", crtc->horz_stretching);
		DPRINTK("VERT_STRETCHING: %x\n", crtc->vert_stretching);
		if(!M64_HAS(LT_LCD_REGS))
			DPRINTK("EXT_VERT_STRETCH: %x\n", crtc->ext_vert_stretch);

		aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);
		aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching, par);
		aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching, par);
		if(!M64_HAS(LT_LCD_REGS)) {
			aty_st_lcd(EXT_VERT_STRETCH, crtc->ext_vert_stretch, par);
			/* restore the LCD_INDEX value saved by aty_get_crtc();
			 * the read latches the index register first */
			aty_ld_le32(LCD_INDEX, par);
			aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
		}
	}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
764 764
765 static int aty_var_to_crtc(const struct fb_info *info, 765 static int aty_var_to_crtc(const struct fb_info *info,
766 const struct fb_var_screeninfo *var, struct crtc *crtc) 766 const struct fb_var_screeninfo *var, struct crtc *crtc)
767 { 767 {
768 struct atyfb_par *par = (struct atyfb_par *) info->par; 768 struct atyfb_par *par = (struct atyfb_par *) info->par;
769 u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp; 769 u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp;
770 u32 sync, vmode, vdisplay; 770 u32 sync, vmode, vdisplay;
771 u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; 771 u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
772 u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; 772 u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
773 u32 pix_width, dp_pix_width, dp_chain_mask; 773 u32 pix_width, dp_pix_width, dp_chain_mask;
774 774
775 /* input */ 775 /* input */
776 xres = var->xres; 776 xres = var->xres;
777 yres = var->yres; 777 yres = var->yres;
778 vxres = var->xres_virtual; 778 vxres = var->xres_virtual;
779 vyres = var->yres_virtual; 779 vyres = var->yres_virtual;
780 xoffset = var->xoffset; 780 xoffset = var->xoffset;
781 yoffset = var->yoffset; 781 yoffset = var->yoffset;
782 bpp = var->bits_per_pixel; 782 bpp = var->bits_per_pixel;
783 if (bpp == 16) 783 if (bpp == 16)
784 bpp = (var->green.length == 5) ? 15 : 16; 784 bpp = (var->green.length == 5) ? 15 : 16;
785 sync = var->sync; 785 sync = var->sync;
786 vmode = var->vmode; 786 vmode = var->vmode;
787 787
788 /* convert (and round up) and validate */ 788 /* convert (and round up) and validate */
789 if (vxres < xres + xoffset) 789 if (vxres < xres + xoffset)
790 vxres = xres + xoffset; 790 vxres = xres + xoffset;
791 h_disp = xres; 791 h_disp = xres;
792 792
793 if (vyres < yres + yoffset) 793 if (vyres < yres + yoffset)
794 vyres = yres + yoffset; 794 vyres = yres + yoffset;
795 v_disp = yres; 795 v_disp = yres;
796 796
797 if (bpp <= 8) { 797 if (bpp <= 8) {
798 bpp = 8; 798 bpp = 8;
799 pix_width = CRTC_PIX_WIDTH_8BPP; 799 pix_width = CRTC_PIX_WIDTH_8BPP;
800 dp_pix_width = 800 dp_pix_width =
801 HOST_8BPP | SRC_8BPP | DST_8BPP | 801 HOST_8BPP | SRC_8BPP | DST_8BPP |
802 BYTE_ORDER_LSB_TO_MSB; 802 BYTE_ORDER_LSB_TO_MSB;
803 dp_chain_mask = DP_CHAIN_8BPP; 803 dp_chain_mask = DP_CHAIN_8BPP;
804 } else if (bpp <= 15) { 804 } else if (bpp <= 15) {
805 bpp = 16; 805 bpp = 16;
806 pix_width = CRTC_PIX_WIDTH_15BPP; 806 pix_width = CRTC_PIX_WIDTH_15BPP;
807 dp_pix_width = HOST_15BPP | SRC_15BPP | DST_15BPP | 807 dp_pix_width = HOST_15BPP | SRC_15BPP | DST_15BPP |
808 BYTE_ORDER_LSB_TO_MSB; 808 BYTE_ORDER_LSB_TO_MSB;
809 dp_chain_mask = DP_CHAIN_15BPP; 809 dp_chain_mask = DP_CHAIN_15BPP;
810 } else if (bpp <= 16) { 810 } else if (bpp <= 16) {
811 bpp = 16; 811 bpp = 16;
812 pix_width = CRTC_PIX_WIDTH_16BPP; 812 pix_width = CRTC_PIX_WIDTH_16BPP;
813 dp_pix_width = HOST_16BPP | SRC_16BPP | DST_16BPP | 813 dp_pix_width = HOST_16BPP | SRC_16BPP | DST_16BPP |
814 BYTE_ORDER_LSB_TO_MSB; 814 BYTE_ORDER_LSB_TO_MSB;
815 dp_chain_mask = DP_CHAIN_16BPP; 815 dp_chain_mask = DP_CHAIN_16BPP;
816 } else if (bpp <= 24 && M64_HAS(INTEGRATED)) { 816 } else if (bpp <= 24 && M64_HAS(INTEGRATED)) {
817 bpp = 24; 817 bpp = 24;
818 pix_width = CRTC_PIX_WIDTH_24BPP; 818 pix_width = CRTC_PIX_WIDTH_24BPP;
819 dp_pix_width = 819 dp_pix_width =
820 HOST_8BPP | SRC_8BPP | DST_8BPP | 820 HOST_8BPP | SRC_8BPP | DST_8BPP |
821 BYTE_ORDER_LSB_TO_MSB; 821 BYTE_ORDER_LSB_TO_MSB;
822 dp_chain_mask = DP_CHAIN_24BPP; 822 dp_chain_mask = DP_CHAIN_24BPP;
823 } else if (bpp <= 32) { 823 } else if (bpp <= 32) {
824 bpp = 32; 824 bpp = 32;
825 pix_width = CRTC_PIX_WIDTH_32BPP; 825 pix_width = CRTC_PIX_WIDTH_32BPP;
826 dp_pix_width = HOST_32BPP | SRC_32BPP | DST_32BPP | 826 dp_pix_width = HOST_32BPP | SRC_32BPP | DST_32BPP |
827 BYTE_ORDER_LSB_TO_MSB; 827 BYTE_ORDER_LSB_TO_MSB;
828 dp_chain_mask = DP_CHAIN_32BPP; 828 dp_chain_mask = DP_CHAIN_32BPP;
829 } else 829 } else
830 FAIL("invalid bpp"); 830 FAIL("invalid bpp");
831 831
832 if (vxres * vyres * bpp / 8 > info->fix.smem_len) 832 if (vxres * vyres * bpp / 8 > info->fix.smem_len)
833 FAIL("not enough video RAM"); 833 FAIL("not enough video RAM");
834 834
835 h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1; 835 h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
836 v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1; 836 v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
837 837
838 if((xres > 1600) || (yres > 1200)) { 838 if((xres > 1600) || (yres > 1200)) {
839 FAIL("MACH64 chips are designed for max 1600x1200\n" 839 FAIL("MACH64 chips are designed for max 1600x1200\n"
840 "select anoter resolution."); 840 "select anoter resolution.");
841 } 841 }
842 h_sync_strt = h_disp + var->right_margin; 842 h_sync_strt = h_disp + var->right_margin;
843 h_sync_end = h_sync_strt + var->hsync_len; 843 h_sync_end = h_sync_strt + var->hsync_len;
844 h_sync_dly = var->right_margin & 7; 844 h_sync_dly = var->right_margin & 7;
845 h_total = h_sync_end + h_sync_dly + var->left_margin; 845 h_total = h_sync_end + h_sync_dly + var->left_margin;
846 846
847 v_sync_strt = v_disp + var->lower_margin; 847 v_sync_strt = v_disp + var->lower_margin;
848 v_sync_end = v_sync_strt + var->vsync_len; 848 v_sync_end = v_sync_strt + var->vsync_len;
849 v_total = v_sync_end + var->upper_margin; 849 v_total = v_sync_end + var->upper_margin;
850 850
851 #ifdef CONFIG_FB_ATY_GENERIC_LCD 851 #ifdef CONFIG_FB_ATY_GENERIC_LCD
852 if (par->lcd_table != 0) { 852 if (par->lcd_table != 0) {
853 if(!M64_HAS(LT_LCD_REGS)) { 853 if(!M64_HAS(LT_LCD_REGS)) {
854 u32 lcd_index = aty_ld_le32(LCD_INDEX, par); 854 u32 lcd_index = aty_ld_le32(LCD_INDEX, par);
855 crtc->lcd_index = lcd_index & 855 crtc->lcd_index = lcd_index &
856 ~(LCD_INDEX_MASK | LCD_DISPLAY_DIS | LCD_SRC_SEL | CRTC2_DISPLAY_DIS); 856 ~(LCD_INDEX_MASK | LCD_DISPLAY_DIS | LCD_SRC_SEL | CRTC2_DISPLAY_DIS);
857 aty_st_le32(LCD_INDEX, lcd_index, par); 857 aty_st_le32(LCD_INDEX, lcd_index, par);
858 } 858 }
859 859
860 if (!M64_HAS(MOBIL_BUS)) 860 if (!M64_HAS(MOBIL_BUS))
861 crtc->lcd_index |= CRTC2_DISPLAY_DIS; 861 crtc->lcd_index |= CRTC2_DISPLAY_DIS;
862 862
863 crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par) | 0x4000; 863 crtc->lcd_config_panel = aty_ld_lcd(CONFIG_PANEL, par) | 0x4000;
864 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par) & ~CRTC_RW_SELECT; 864 crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par) & ~CRTC_RW_SELECT;
865 865
866 crtc->lcd_gen_cntl &= 866 crtc->lcd_gen_cntl &=
867 ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 | TVCLK_PM_EN | 867 ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 | TVCLK_PM_EN |
868 /*VCLK_DAC_PM_EN | USE_SHADOWED_VEND |*/ 868 /*VCLK_DAC_PM_EN | USE_SHADOWED_VEND |*/
869 USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN); 869 USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
870 crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR | LOCK_8DOT; 870 crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR | LOCK_8DOT;
871 871
872 if((crtc->lcd_gen_cntl & LCD_ON) && 872 if((crtc->lcd_gen_cntl & LCD_ON) &&
873 ((xres > par->lcd_width) || (yres > par->lcd_height))) { 873 ((xres > par->lcd_width) || (yres > par->lcd_height))) {
874 /* We cannot display the mode on the LCD. If the CRT is enabled 874 /* We cannot display the mode on the LCD. If the CRT is enabled
875 we can turn off the LCD. 875 we can turn off the LCD.
876 If the CRT is off, it isn't a good idea to switch it on; we don't 876 If the CRT is off, it isn't a good idea to switch it on; we don't
877 know if one is connected. So it's better to fail then. 877 know if one is connected. So it's better to fail then.
878 */ 878 */
879 if (crtc->lcd_gen_cntl & CRT_ON) { 879 if (crtc->lcd_gen_cntl & CRT_ON) {
880 if (!(var->activate & FB_ACTIVATE_TEST)) 880 if (!(var->activate & FB_ACTIVATE_TEST))
881 PRINTKI("Disable LCD panel, because video mode does not fit.\n"); 881 PRINTKI("Disable LCD panel, because video mode does not fit.\n");
882 crtc->lcd_gen_cntl &= ~LCD_ON; 882 crtc->lcd_gen_cntl &= ~LCD_ON;
883 /*aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);*/ 883 /*aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);*/
884 } else { 884 } else {
885 if (!(var->activate & FB_ACTIVATE_TEST)) 885 if (!(var->activate & FB_ACTIVATE_TEST))
886 PRINTKE("Video mode exceeds size of LCD panel.\nConnect this computer to a conventional monitor if you really need this mode.\n"); 886 PRINTKE("Video mode exceeds size of LCD panel.\nConnect this computer to a conventional monitor if you really need this mode.\n");
887 return -EINVAL; 887 return -EINVAL;
888 } 888 }
889 } 889 }
890 } 890 }
891 891
892 if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON)) { 892 if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON)) {
893 int VScan = 1; 893 int VScan = 1;
894 /* bpp -> bytespp, 1,4 -> 0; 8 -> 2; 15,16 -> 1; 24 -> 6; 32 -> 5 894 /* bpp -> bytespp, 1,4 -> 0; 8 -> 2; 15,16 -> 1; 24 -> 6; 32 -> 5
895 const u8 DFP_h_sync_dly_LT[] = { 0, 2, 1, 6, 5 }; 895 const u8 DFP_h_sync_dly_LT[] = { 0, 2, 1, 6, 5 };
896 const u8 ADD_to_strt_wid_and_dly_LT_DAC[] = { 0, 5, 6, 9, 9, 12, 12 }; */ 896 const u8 ADD_to_strt_wid_and_dly_LT_DAC[] = { 0, 5, 6, 9, 9, 12, 12 }; */
897 897
898 vmode &= ~(FB_VMODE_DOUBLE | FB_VMODE_INTERLACED); 898 vmode &= ~(FB_VMODE_DOUBLE | FB_VMODE_INTERLACED);
899 899
900 /* This is horror! When we simulate, say 640x480 on an 800x600 900 /* This is horror! When we simulate, say 640x480 on an 800x600
901 LCD monitor, the CRTC should be programmed 800x600 values for 901 LCD monitor, the CRTC should be programmed 800x600 values for
902 the non visible part, but 640x480 for the visible part. 902 the non visible part, but 640x480 for the visible part.
903 This code has been tested on a laptop with it's 1400x1050 LCD 903 This code has been tested on a laptop with it's 1400x1050 LCD
904 monitor and a conventional monitor both switched on. 904 monitor and a conventional monitor both switched on.
905 Tested modes: 1280x1024, 1152x864, 1024x768, 800x600, 905 Tested modes: 1280x1024, 1152x864, 1024x768, 800x600,
906 works with little glitches also with DOUBLESCAN modes 906 works with little glitches also with DOUBLESCAN modes
907 */ 907 */
908 if (yres < par->lcd_height) { 908 if (yres < par->lcd_height) {
909 VScan = par->lcd_height / yres; 909 VScan = par->lcd_height / yres;
910 if(VScan > 1) { 910 if(VScan > 1) {
911 VScan = 2; 911 VScan = 2;
912 vmode |= FB_VMODE_DOUBLE; 912 vmode |= FB_VMODE_DOUBLE;
913 } 913 }
914 } 914 }
915 915
916 h_sync_strt = h_disp + par->lcd_right_margin; 916 h_sync_strt = h_disp + par->lcd_right_margin;
917 h_sync_end = h_sync_strt + par->lcd_hsync_len; 917 h_sync_end = h_sync_strt + par->lcd_hsync_len;
918 h_sync_dly = /*DFP_h_sync_dly[ ( bpp + 1 ) / 3 ]; */par->lcd_hsync_dly; 918 h_sync_dly = /*DFP_h_sync_dly[ ( bpp + 1 ) / 3 ]; */par->lcd_hsync_dly;
919 h_total = h_disp + par->lcd_hblank_len; 919 h_total = h_disp + par->lcd_hblank_len;
920 920
921 v_sync_strt = v_disp + par->lcd_lower_margin / VScan; 921 v_sync_strt = v_disp + par->lcd_lower_margin / VScan;
922 v_sync_end = v_sync_strt + par->lcd_vsync_len / VScan; 922 v_sync_end = v_sync_strt + par->lcd_vsync_len / VScan;
923 v_total = v_disp + par->lcd_vblank_len / VScan; 923 v_total = v_disp + par->lcd_vblank_len / VScan;
924 } 924 }
925 #endif /* CONFIG_FB_ATY_GENERIC_LCD */ 925 #endif /* CONFIG_FB_ATY_GENERIC_LCD */
926 926
927 h_disp = (h_disp >> 3) - 1; 927 h_disp = (h_disp >> 3) - 1;
928 h_sync_strt = (h_sync_strt >> 3) - 1; 928 h_sync_strt = (h_sync_strt >> 3) - 1;
929 h_sync_end = (h_sync_end >> 3) - 1; 929 h_sync_end = (h_sync_end >> 3) - 1;
930 h_total = (h_total >> 3) - 1; 930 h_total = (h_total >> 3) - 1;
931 h_sync_wid = h_sync_end - h_sync_strt; 931 h_sync_wid = h_sync_end - h_sync_strt;
932 932
933 FAIL_MAX("h_disp too large", h_disp, 0xff); 933 FAIL_MAX("h_disp too large", h_disp, 0xff);
934 FAIL_MAX("h_sync_strt too large", h_sync_strt, 0x1ff); 934 FAIL_MAX("h_sync_strt too large", h_sync_strt, 0x1ff);
935 /*FAIL_MAX("h_sync_wid too large", h_sync_wid, 0x1f);*/ 935 /*FAIL_MAX("h_sync_wid too large", h_sync_wid, 0x1f);*/
936 if(h_sync_wid > 0x1f) 936 if(h_sync_wid > 0x1f)
937 h_sync_wid = 0x1f; 937 h_sync_wid = 0x1f;
938 FAIL_MAX("h_total too large", h_total, 0x1ff); 938 FAIL_MAX("h_total too large", h_total, 0x1ff);
939 939
940 if (vmode & FB_VMODE_DOUBLE) { 940 if (vmode & FB_VMODE_DOUBLE) {
941 v_disp <<= 1; 941 v_disp <<= 1;
942 v_sync_strt <<= 1; 942 v_sync_strt <<= 1;
943 v_sync_end <<= 1; 943 v_sync_end <<= 1;
944 v_total <<= 1; 944 v_total <<= 1;
945 } 945 }
946 946
947 vdisplay = yres; 947 vdisplay = yres;
948 #ifdef CONFIG_FB_ATY_GENERIC_LCD 948 #ifdef CONFIG_FB_ATY_GENERIC_LCD
949 if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON)) 949 if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON))
950 vdisplay = par->lcd_height; 950 vdisplay = par->lcd_height;
951 #endif 951 #endif
952 952
953 v_disp--; 953 v_disp--;
954 v_sync_strt--; 954 v_sync_strt--;
955 v_sync_end--; 955 v_sync_end--;
956 v_total--; 956 v_total--;
957 v_sync_wid = v_sync_end - v_sync_strt; 957 v_sync_wid = v_sync_end - v_sync_strt;
958 958
959 FAIL_MAX("v_disp too large", v_disp, 0x7ff); 959 FAIL_MAX("v_disp too large", v_disp, 0x7ff);
960 FAIL_MAX("v_sync_stsrt too large", v_sync_strt, 0x7ff); 960 FAIL_MAX("v_sync_stsrt too large", v_sync_strt, 0x7ff);
961 /*FAIL_MAX("v_sync_wid too large", v_sync_wid, 0x1f);*/ 961 /*FAIL_MAX("v_sync_wid too large", v_sync_wid, 0x1f);*/
962 if(v_sync_wid > 0x1f) 962 if(v_sync_wid > 0x1f)
963 v_sync_wid = 0x1f; 963 v_sync_wid = 0x1f;
964 FAIL_MAX("v_total too large", v_total, 0x7ff); 964 FAIL_MAX("v_total too large", v_total, 0x7ff);
965 965
966 c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? CRTC_CSYNC_EN : 0; 966 c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? CRTC_CSYNC_EN : 0;
967 967
968 /* output */ 968 /* output */
969 crtc->vxres = vxres; 969 crtc->vxres = vxres;
970 crtc->vyres = vyres; 970 crtc->vyres = vyres;
971 crtc->xoffset = xoffset; 971 crtc->xoffset = xoffset;
972 crtc->yoffset = yoffset; 972 crtc->yoffset = yoffset;
973 crtc->bpp = bpp; 973 crtc->bpp = bpp;
974 crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19); 974 crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19);
975 crtc->vline_crnt_vline = 0; 975 crtc->vline_crnt_vline = 0;
976 976
977 crtc->h_tot_disp = h_total | (h_disp<<16); 977 crtc->h_tot_disp = h_total | (h_disp<<16);
978 crtc->h_sync_strt_wid = (h_sync_strt & 0xff) | (h_sync_dly<<8) | 978 crtc->h_sync_strt_wid = (h_sync_strt & 0xff) | (h_sync_dly<<8) |
979 ((h_sync_strt & 0x100)<<4) | (h_sync_wid<<16) | (h_sync_pol<<21); 979 ((h_sync_strt & 0x100)<<4) | (h_sync_wid<<16) | (h_sync_pol<<21);
980 crtc->v_tot_disp = v_total | (v_disp<<16); 980 crtc->v_tot_disp = v_total | (v_disp<<16);
981 crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid<<16) | (v_sync_pol<<21); 981 crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid<<16) | (v_sync_pol<<21);
982 982
983 /* crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_PRESERVED_MASK; */ 983 /* crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_PRESERVED_MASK; */
984 crtc->gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN | pix_width | c_sync; 984 crtc->gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN | pix_width | c_sync;
985 crtc->gen_cntl |= CRTC_VGA_LINEAR; 985 crtc->gen_cntl |= CRTC_VGA_LINEAR;
986 986
987 /* Enable doublescan mode if requested */ 987 /* Enable doublescan mode if requested */
988 if (vmode & FB_VMODE_DOUBLE) 988 if (vmode & FB_VMODE_DOUBLE)
989 crtc->gen_cntl |= CRTC_DBL_SCAN_EN; 989 crtc->gen_cntl |= CRTC_DBL_SCAN_EN;
990 /* Enable interlaced mode if requested */ 990 /* Enable interlaced mode if requested */
991 if (vmode & FB_VMODE_INTERLACED) 991 if (vmode & FB_VMODE_INTERLACED)
992 crtc->gen_cntl |= CRTC_INTERLACE_EN; 992 crtc->gen_cntl |= CRTC_INTERLACE_EN;
993 #ifdef CONFIG_FB_ATY_GENERIC_LCD 993 #ifdef CONFIG_FB_ATY_GENERIC_LCD
994 if (par->lcd_table != 0) { 994 if (par->lcd_table != 0) {
995 vdisplay = yres; 995 vdisplay = yres;
996 if(vmode & FB_VMODE_DOUBLE) 996 if(vmode & FB_VMODE_DOUBLE)
997 vdisplay <<= 1; 997 vdisplay <<= 1;
998 crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH); 998 crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH);
999 crtc->lcd_gen_cntl &= ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 | 999 crtc->lcd_gen_cntl &= ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 |
1000 /*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/ 1000 /*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/
1001 USE_SHADOWED_VEND | USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN); 1001 USE_SHADOWED_VEND | USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
1002 crtc->lcd_gen_cntl |= (DONT_SHADOW_VPAR/* | LOCK_8DOT*/); 1002 crtc->lcd_gen_cntl |= (DONT_SHADOW_VPAR/* | LOCK_8DOT*/);
1003 1003
1004 /* MOBILITY M1 tested, FIXME: LT */ 1004 /* MOBILITY M1 tested, FIXME: LT */
1005 crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par); 1005 crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
1006 if (!M64_HAS(LT_LCD_REGS)) 1006 if (!M64_HAS(LT_LCD_REGS))
1007 crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par) & 1007 crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par) &
1008 ~(AUTO_VERT_RATIO | VERT_STRETCH_MODE | VERT_STRETCH_RATIO3); 1008 ~(AUTO_VERT_RATIO | VERT_STRETCH_MODE | VERT_STRETCH_RATIO3);
1009 1009
1010 crtc->horz_stretching &= 1010 crtc->horz_stretching &=
1011 ~(HORZ_STRETCH_RATIO | HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO | 1011 ~(HORZ_STRETCH_RATIO | HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO |
1012 HORZ_STRETCH_MODE | HORZ_STRETCH_EN); 1012 HORZ_STRETCH_MODE | HORZ_STRETCH_EN);
1013 if (xres < par->lcd_width && crtc->lcd_gen_cntl & LCD_ON) { 1013 if (xres < par->lcd_width && crtc->lcd_gen_cntl & LCD_ON) {
1014 do { 1014 do {
1015 /* 1015 /*
1016 * The horizontal blender misbehaves when HDisplay is less than a 1016 * The horizontal blender misbehaves when HDisplay is less than a
1017 * a certain threshold (440 for a 1024-wide panel). It doesn't 1017 * a certain threshold (440 for a 1024-wide panel). It doesn't
1018 * stretch such modes enough. Use pixel replication instead of 1018 * stretch such modes enough. Use pixel replication instead of
1019 * blending to stretch modes that can be made to exactly fit the 1019 * blending to stretch modes that can be made to exactly fit the
1020 * panel width. The undocumented "NoLCDBlend" option allows the 1020 * panel width. The undocumented "NoLCDBlend" option allows the
1021 * pixel-replicated mode to be slightly wider or narrower than the 1021 * pixel-replicated mode to be slightly wider or narrower than the
1022 * panel width. It also causes a mode that is exactly half as wide 1022 * panel width. It also causes a mode that is exactly half as wide
1023 * as the panel to be pixel-replicated, rather than blended. 1023 * as the panel to be pixel-replicated, rather than blended.
1024 */ 1024 */
1025 int HDisplay = xres & ~7; 1025 int HDisplay = xres & ~7;
1026 int nStretch = par->lcd_width / HDisplay; 1026 int nStretch = par->lcd_width / HDisplay;
1027 int Remainder = par->lcd_width % HDisplay; 1027 int Remainder = par->lcd_width % HDisplay;
1028 1028
1029 if ((!Remainder && ((nStretch > 2))) || 1029 if ((!Remainder && ((nStretch > 2))) ||
1030 (((HDisplay * 16) / par->lcd_width) < 7)) { 1030 (((HDisplay * 16) / par->lcd_width) < 7)) {
1031 static const char StretchLoops[] = {10, 12, 13, 15, 16}; 1031 static const char StretchLoops[] = {10, 12, 13, 15, 16};
1032 int horz_stretch_loop = -1, BestRemainder; 1032 int horz_stretch_loop = -1, BestRemainder;
1033 int Numerator = HDisplay, Denominator = par->lcd_width; 1033 int Numerator = HDisplay, Denominator = par->lcd_width;
1034 int Index = 5; 1034 int Index = 5;
1035 ATIReduceRatio(&Numerator, &Denominator); 1035 ATIReduceRatio(&Numerator, &Denominator);
1036 1036
1037 BestRemainder = (Numerator * 16) / Denominator; 1037 BestRemainder = (Numerator * 16) / Denominator;
1038 while (--Index >= 0) { 1038 while (--Index >= 0) {
1039 Remainder = ((Denominator - Numerator) * StretchLoops[Index]) % 1039 Remainder = ((Denominator - Numerator) * StretchLoops[Index]) %
1040 Denominator; 1040 Denominator;
1041 if (Remainder < BestRemainder) { 1041 if (Remainder < BestRemainder) {
1042 horz_stretch_loop = Index; 1042 horz_stretch_loop = Index;
1043 if (!(BestRemainder = Remainder)) 1043 if (!(BestRemainder = Remainder))
1044 break; 1044 break;
1045 } 1045 }
1046 } 1046 }
1047 1047
1048 if ((horz_stretch_loop >= 0) && !BestRemainder) { 1048 if ((horz_stretch_loop >= 0) && !BestRemainder) {
1049 int horz_stretch_ratio = 0, Accumulator = 0; 1049 int horz_stretch_ratio = 0, Accumulator = 0;
1050 int reuse_previous = 1; 1050 int reuse_previous = 1;
1051 1051
1052 Index = StretchLoops[horz_stretch_loop]; 1052 Index = StretchLoops[horz_stretch_loop];
1053 1053
1054 while (--Index >= 0) { 1054 while (--Index >= 0) {
1055 if (Accumulator > 0) 1055 if (Accumulator > 0)
1056 horz_stretch_ratio |= reuse_previous; 1056 horz_stretch_ratio |= reuse_previous;
1057 else 1057 else
1058 Accumulator += Denominator; 1058 Accumulator += Denominator;
1059 Accumulator -= Numerator; 1059 Accumulator -= Numerator;
1060 reuse_previous <<= 1; 1060 reuse_previous <<= 1;
1061 } 1061 }
1062 1062
1063 crtc->horz_stretching |= (HORZ_STRETCH_EN | 1063 crtc->horz_stretching |= (HORZ_STRETCH_EN |
1064 ((horz_stretch_loop & HORZ_STRETCH_LOOP) << 16) | 1064 ((horz_stretch_loop & HORZ_STRETCH_LOOP) << 16) |
1065 (horz_stretch_ratio & HORZ_STRETCH_RATIO)); 1065 (horz_stretch_ratio & HORZ_STRETCH_RATIO));
1066 break; /* Out of the do { ... } while (0) */ 1066 break; /* Out of the do { ... } while (0) */
1067 } 1067 }
1068 } 1068 }
1069 1069
1070 crtc->horz_stretching |= (HORZ_STRETCH_MODE | HORZ_STRETCH_EN | 1070 crtc->horz_stretching |= (HORZ_STRETCH_MODE | HORZ_STRETCH_EN |
1071 (((HDisplay * (HORZ_STRETCH_BLEND + 1)) / par->lcd_width) & HORZ_STRETCH_BLEND)); 1071 (((HDisplay * (HORZ_STRETCH_BLEND + 1)) / par->lcd_width) & HORZ_STRETCH_BLEND));
1072 } while (0); 1072 } while (0);
1073 } 1073 }
1074 1074
1075 if (vdisplay < par->lcd_height && crtc->lcd_gen_cntl & LCD_ON) { 1075 if (vdisplay < par->lcd_height && crtc->lcd_gen_cntl & LCD_ON) {
1076 crtc->vert_stretching = (VERT_STRETCH_USE0 | VERT_STRETCH_EN | 1076 crtc->vert_stretching = (VERT_STRETCH_USE0 | VERT_STRETCH_EN |
1077 (((vdisplay * (VERT_STRETCH_RATIO0 + 1)) / par->lcd_height) & VERT_STRETCH_RATIO0)); 1077 (((vdisplay * (VERT_STRETCH_RATIO0 + 1)) / par->lcd_height) & VERT_STRETCH_RATIO0));
1078 1078
1079 if (!M64_HAS(LT_LCD_REGS) && 1079 if (!M64_HAS(LT_LCD_REGS) &&
1080 xres <= (M64_HAS(MOBIL_BUS)?1024:800)) 1080 xres <= (M64_HAS(MOBIL_BUS)?1024:800))
1081 crtc->ext_vert_stretch |= VERT_STRETCH_MODE; 1081 crtc->ext_vert_stretch |= VERT_STRETCH_MODE;
1082 } else { 1082 } else {
1083 /* 1083 /*
1084 * Don't use vertical blending if the mode is too wide or not 1084 * Don't use vertical blending if the mode is too wide or not
1085 * vertically stretched. 1085 * vertically stretched.
1086 */ 1086 */
1087 crtc->vert_stretching = 0; 1087 crtc->vert_stretching = 0;
1088 } 1088 }
1089 /* copy to shadow crtc */ 1089 /* copy to shadow crtc */
1090 crtc->shadow_h_tot_disp = crtc->h_tot_disp; 1090 crtc->shadow_h_tot_disp = crtc->h_tot_disp;
1091 crtc->shadow_h_sync_strt_wid = crtc->h_sync_strt_wid; 1091 crtc->shadow_h_sync_strt_wid = crtc->h_sync_strt_wid;
1092 crtc->shadow_v_tot_disp = crtc->v_tot_disp; 1092 crtc->shadow_v_tot_disp = crtc->v_tot_disp;
1093 crtc->shadow_v_sync_strt_wid = crtc->v_sync_strt_wid; 1093 crtc->shadow_v_sync_strt_wid = crtc->v_sync_strt_wid;
1094 } 1094 }
1095 #endif /* CONFIG_FB_ATY_GENERIC_LCD */ 1095 #endif /* CONFIG_FB_ATY_GENERIC_LCD */
1096 1096
1097 if (M64_HAS(MAGIC_FIFO)) { 1097 if (M64_HAS(MAGIC_FIFO)) {
1098 /* FIXME: display FIFO low watermark values */ 1098 /* FIXME: display FIFO low watermark values */
1099 crtc->gen_cntl |= (aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_FIFO_LWM); 1099 crtc->gen_cntl |= (aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_FIFO_LWM);
1100 } 1100 }
1101 crtc->dp_pix_width = dp_pix_width; 1101 crtc->dp_pix_width = dp_pix_width;
1102 crtc->dp_chain_mask = dp_chain_mask; 1102 crtc->dp_chain_mask = dp_chain_mask;
1103 1103
1104 return 0; 1104 return 0;
1105 } 1105 }
1106 1106
/*
 * Decode a programmed CRTC register set back into generic fb timings.
 *
 * Inverse of aty_var_to_crtc(): unpacks the packed Mach64 CRTC register
 * images held in @crtc (totals, sync start/width, pixel width, scan
 * flags) and fills in @var with the equivalent fb_var_screeninfo
 * geometry, margins, sync polarity and RGB bitfield layout.
 *
 * Returns 0 on success, -EINVAL if the pixel-width field holds a value
 * this driver does not handle.
 */
static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *var)
{
	u32 xres, yres, bpp, left, right, upper, lower, hslen, vslen, sync;
	u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid,
	    h_sync_pol;
	u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync;
	u32 pix_width;
	u32 double_scan, interlace;

	/* input: pull the individual bitfields out of the register images.
	 * Horizontal quantities are in character-clock (8-pixel) units,
	 * vertical quantities in scanlines; all stored as value-1. */
	h_total = crtc->h_tot_disp & 0x1ff;
	h_disp = (crtc->h_tot_disp >> 16) & 0xff;
	/* bit 8 of hsync start lives at bit 12 of the register */
	h_sync_strt = (crtc->h_sync_strt_wid & 0xff) | ((crtc->h_sync_strt_wid >> 4) & 0x100);
	h_sync_dly = (crtc->h_sync_strt_wid >> 8) & 0x7;
	h_sync_wid = (crtc->h_sync_strt_wid >> 16) & 0x1f;
	h_sync_pol = (crtc->h_sync_strt_wid >> 21) & 0x1;
	v_total = crtc->v_tot_disp & 0x7ff;
	v_disp = (crtc->v_tot_disp >> 16) & 0x7ff;
	v_sync_strt = crtc->v_sync_strt_wid & 0x7ff;
	v_sync_wid = (crtc->v_sync_strt_wid >> 16) & 0x1f;
	v_sync_pol = (crtc->v_sync_strt_wid >> 21) & 0x1;
	c_sync = crtc->gen_cntl & CRTC_CSYNC_EN ? 1 : 0;
	pix_width = crtc->gen_cntl & CRTC_PIX_WIDTH_MASK;
	double_scan = crtc->gen_cntl & CRTC_DBL_SCAN_EN;
	interlace = crtc->gen_cntl & CRTC_INTERLACE_EN;

	/* convert: hardware units -> fbdev geometry (pixels/lines).
	 * The h_sync_dly fine-delay shifts pixels between the right and
	 * left margins. */
	xres = (h_disp + 1) * 8;
	yres = v_disp + 1;
	left = (h_total - h_sync_strt - h_sync_wid) * 8 - h_sync_dly;
	right = (h_sync_strt - h_disp) * 8 + h_sync_dly;
	hslen = h_sync_wid * 8;
	upper = v_total - v_sync_strt - v_sync_wid;
	lower = v_sync_strt - v_disp;
	vslen = v_sync_wid;
	/* hardware polarity bit set means active-low sync */
	sync = (h_sync_pol ? 0 : FB_SYNC_HOR_HIGH_ACT) |
	    (v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) |
	    (c_sync ? FB_SYNC_COMP_HIGH_ACT : 0);

	/* Map the CRTC pixel-width code to bpp + RGB bitfield layout */
	switch (pix_width) {
#if 0
	case CRTC_PIX_WIDTH_4BPP:
		bpp = 4;
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
#endif
	case CRTC_PIX_WIDTH_8BPP:
		bpp = 8;
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_15BPP:	/* RGB 555 */
		bpp = 16;
		var->red.offset = 10;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->blue.offset = 0;
		var->blue.length = 5;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_16BPP:	/* RGB 565 */
		bpp = 16;
		var->red.offset = 11;
		var->red.length = 5;
		var->green.offset = 5;
		var->green.length = 6;
		var->blue.offset = 0;
		var->blue.length = 5;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_24BPP:	/* RGB 888 */
		bpp = 24;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case CRTC_PIX_WIDTH_32BPP:	/* ARGB 8888 */
		bpp = 32;
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
		break;
	default:
		PRINTKE("Invalid pixel width\n");
		return -EINVAL;
	}

	/* output */
	var->xres = xres;
	var->yres = yres;
	var->xres_virtual = crtc->vxres;
	var->yres_virtual = crtc->vyres;
	var->bits_per_pixel = bpp;
	var->left_margin = left;
	var->right_margin = right;
	var->upper_margin = upper;
	var->lower_margin = lower;
	var->hsync_len = hslen;
	var->vsync_len = vslen;
	var->sync = sync;
	var->vmode = FB_VMODE_NONINTERLACED;
	/* In double scan mode, the vertical parameters are doubled, so we need to
	   half them to get the right values.
	   In interlaced mode the values are already correct, so no correction is
	   necessary.
	 */
	if (interlace)
		var->vmode = FB_VMODE_INTERLACED;

	if (double_scan) {
		var->vmode = FB_VMODE_DOUBLE;
		var->yres>>=1;
		var->upper_margin>>=1;
		var->lower_margin>>=1;
		var->vsync_len>>=1;
	}

	return 0;
}
1252 1252
1253 /* ------------------------------------------------------------------------- */ 1253 /* ------------------------------------------------------------------------- */
1254 1254
/*
 * fb_ops.fb_set_par: program the hardware with the mode described by
 * info->var.
 *
 * Converts var to CRTC register values, picks and programs the PLL and
 * DAC, sets memory-controller pixel-width bits (different layouts for
 * integrated vs. discrete Mach64 parts), and re-initializes the
 * acceleration engine if FB_ACCELF_TEXT is requested.  Does nothing
 * while the chip is asleep (par->asleep).
 *
 * Returns 0 on success or a negative errno from the var->CRTC/PLL
 * conversion steps.
 */
static int atyfb_set_par(struct fb_info *info)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	struct fb_var_screeninfo *var = &info->var;
	u32 tmp, pixclock;
	int err;
#ifdef DEBUG
	struct fb_var_screeninfo debug;
	u32 pixclock_in_ps;
#endif
	/* During suspend the registers must not be touched */
	if (par->asleep)
		return 0;

	if ((err = aty_var_to_crtc(info, var, &par->crtc)))
		return err;

	pixclock = atyfb_get_pixclock(var, par);

	if (pixclock == 0) {
		PRINTKE("Invalid pixclock\n");
		return -EINVAL;
	} else {
		if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &par->pll)))
			return err;
	}

	par->accel_flags = var->accel_flags; /* hack */

	/* Let any queued blits drain before reprogramming the chip */
	if (par->blitter_may_be_busy)
		wait_for_idle(par);

	aty_set_crtc(par, &par->crtc);
	par->dac_ops->set_dac(info, &par->pll, var->bits_per_pixel, par->accel_flags);
	par->pll_ops->set_pll(info, &par->pll);

#ifdef DEBUG
	/* Round-trip the programmed registers back to fb timings and log
	 * the resulting dot clock / sync rates for verification. */
	if(par->pll_ops && par->pll_ops->pll_to_var)
		pixclock_in_ps = par->pll_ops->pll_to_var(info, &(par->pll));
	else
		pixclock_in_ps = 0;

	if(0 == pixclock_in_ps) {
		PRINTKE("ALERT ops->pll_to_var get 0\n");
		pixclock_in_ps = pixclock;
	}

	memset(&debug, 0, sizeof(debug));
	if(!aty_crtc_to_var(&(par->crtc), &debug)) {
		u32 hSync, vRefresh;
		u32 h_disp, h_sync_strt, h_sync_end, h_total;
		u32 v_disp, v_sync_strt, v_sync_end, v_total;

		h_disp = debug.xres;
		h_sync_strt = h_disp + debug.right_margin;
		h_sync_end = h_sync_strt + debug.hsync_len;
		h_total = h_sync_end + debug.left_margin;
		v_disp = debug.yres;
		v_sync_strt = v_disp + debug.lower_margin;
		v_sync_end = v_sync_strt + debug.vsync_len;
		v_total = v_sync_end + debug.upper_margin;

		hSync = 1000000000 / (pixclock_in_ps * h_total);
		vRefresh = (hSync * 1000) / v_total;
		if (par->crtc.gen_cntl & CRTC_INTERLACE_EN)
			vRefresh *= 2;
		if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
			vRefresh /= 2;

		DPRINTK("atyfb_set_par\n");
		DPRINTK(" Set Visible Mode to %ix%i-%i\n", var->xres, var->yres, var->bits_per_pixel);
		DPRINTK(" Virtual resolution %ix%i, pixclock_in_ps %i (calculated %i)\n",
			var->xres_virtual, var->yres_virtual, pixclock, pixclock_in_ps);
		DPRINTK(" Dot clock: %i MHz\n", 1000000 / pixclock_in_ps);
		DPRINTK(" Horizontal sync: %i kHz\n", hSync);
		DPRINTK(" Vertical refresh: %i Hz\n", vRefresh);
		DPRINTK(" x style: %i.%03i %i %i %i %i %i %i %i %i\n",
			1000000 / pixclock_in_ps, 1000000 % pixclock_in_ps,
			h_disp, h_sync_strt, h_sync_end, h_total,
			v_disp, v_sync_strt, v_sync_end, v_total);
		DPRINTK(" fb style: %i %i %i %i %i %i %i %i %i\n",
			pixclock_in_ps,
			debug.left_margin, h_disp, debug.right_margin, debug.hsync_len,
			debug.upper_margin, v_disp, debug.lower_margin, debug.vsync_len);
	}
#endif /* DEBUG */

	if (!M64_HAS(INTEGRATED)) {
		/* Don't forget MEM_CNTL */
		/* Discrete parts: pixel width lives in MEM_CNTL[27:24] */
		tmp = aty_ld_le32(MEM_CNTL, par) & 0xf0ffffff;
		switch (var->bits_per_pixel) {
		case 8:
			tmp |= 0x02000000;
			break;
		case 16:
			tmp |= 0x03000000;
			break;
		case 32:
			tmp |= 0x06000000;
			break;
		}
		aty_st_le32(MEM_CNTL, tmp, par);
	} else {
		/* Integrated parts: pixel width in MEM_CNTL[27:20], plus
		 * per-bus-type DAC/BUS control setup. */
		tmp = aty_ld_le32(MEM_CNTL, par) & 0xf00fffff;
		if (!M64_HAS(MAGIC_POSTDIV))
			tmp |= par->mem_refresh_rate << 20;
		switch (var->bits_per_pixel) {
		case 8:
		case 24:
			tmp |= 0x00000000;
			break;
		case 16:
			tmp |= 0x04000000;
			break;
		case 32:
			tmp |= 0x08000000;
			break;
		}
		if (M64_HAS(CT_BUS)) {
			aty_st_le32(DAC_CNTL, 0x87010184, par);
			aty_st_le32(BUS_CNTL, 0x680000f9, par);
		} else if (M64_HAS(VT_BUS)) {
			aty_st_le32(DAC_CNTL, 0x87010184, par);
			aty_st_le32(BUS_CNTL, 0x680000f9, par);
		} else if (M64_HAS(MOBIL_BUS)) {
			aty_st_le32(DAC_CNTL, 0x80010102, par);
			aty_st_le32(BUS_CNTL, 0x7b33a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par);
		} else {
			/* GT */
			aty_st_le32(DAC_CNTL, 0x86010102, par);
			aty_st_le32(BUS_CNTL, 0x7b23a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par);
			aty_st_le32(EXT_MEM_CNTL, aty_ld_le32(EXT_MEM_CNTL, par) | 0x5000001, par);
		}
		aty_st_le32(MEM_CNTL, tmp, par);
	}
	aty_st_8(DAC_MASK, 0xff, par);

	info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8;
	info->fix.visual = var->bits_per_pixel <= 8 ?
		FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;

	/* Initialize the graphics engine */
	if (par->accel_flags & FB_ACCELF_TEXT)
		aty_init_engine(par, info);

#ifdef CONFIG_BOOTX_TEXT
	/* Keep the boot-time text console in sync with the new geometry */
	btext_update_display(info->fix.smem_start,
		(((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8,
		((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1,
		var->bits_per_pixel,
		par->crtc.vxres * var->bits_per_pixel / 8);
#endif /* CONFIG_BOOTX_TEXT */
#if 0
	/* switch to accelerator mode */
	if (!(par->crtc.gen_cntl & CRTC_EXT_DISP_EN))
		aty_st_le32(CRTC_GEN_CNTL, par->crtc.gen_cntl | CRTC_EXT_DISP_EN, par);
#endif
#ifdef DEBUG
	{
		/* dump non shadow CRTC, pll, LCD registers */
		int i; u32 base;

		/* CRTC registers */
		base = 0x2000;
		printk("debug atyfb: Mach64 non-shadow register values:");
		for (i = 0; i < 256; i = i+4) {
			if(i%16 == 0) printk("\ndebug atyfb: 0x%04X: ", base + i);
			printk(" %08X", aty_ld_le32(i, par));
		}
		printk("\n\n");

#ifdef CONFIG_FB_ATY_CT
		/* PLL registers */
		base = 0x00;
		printk("debug atyfb: Mach64 PLL register values:");
		for (i = 0; i < 64; i++) {
			if(i%16 == 0) printk("\ndebug atyfb: 0x%02X: ", base + i);
			if(i%4 == 0) printk(" ");
			printk("%02X", aty_ld_pll_ct(i, par));
		}
		printk("\n\n");
#endif	/* CONFIG_FB_ATY_CT */

#ifdef CONFIG_FB_ATY_GENERIC_LCD
		if (par->lcd_table != 0) {
			/* LCD registers */
			base = 0x00;
			printk("debug atyfb: LCD register values:");
			if(M64_HAS(LT_LCD_REGS)) {
				for(i = 0; i <= POWER_MANAGEMENT; i++) {
					if(i == EXT_VERT_STRETCH)
						continue;
					printk("\ndebug atyfb: 0x%04X: ", lt_lcd_regs[i]);
					printk(" %08X", aty_ld_lcd(i, par));
				}

			} else {
				for (i = 0; i < 64; i++) {
					if(i%4 == 0) printk("\ndebug atyfb: 0x%02X: ", base + i);
					printk(" %08X", aty_ld_lcd(i, par));
				}
			}
			printk("\n\n");
		}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
	}
#endif /* DEBUG */
	return 0;
}
1463 1463
1464 static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 1464 static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
1465 { 1465 {
1466 struct atyfb_par *par = (struct atyfb_par *) info->par; 1466 struct atyfb_par *par = (struct atyfb_par *) info->par;
1467 int err; 1467 int err;
1468 struct crtc crtc; 1468 struct crtc crtc;
1469 union aty_pll pll; 1469 union aty_pll pll;
1470 u32 pixclock; 1470 u32 pixclock;
1471 1471
1472 memcpy(&pll, &(par->pll), sizeof(pll)); 1472 memcpy(&pll, &(par->pll), sizeof(pll));
1473 1473
1474 if((err = aty_var_to_crtc(info, var, &crtc))) 1474 if((err = aty_var_to_crtc(info, var, &crtc)))
1475 return err; 1475 return err;
1476 1476
1477 pixclock = atyfb_get_pixclock(var, par); 1477 pixclock = atyfb_get_pixclock(var, par);
1478 1478
1479 if (pixclock == 0) { 1479 if (pixclock == 0) {
1480 if (!(var->activate & FB_ACTIVATE_TEST)) 1480 if (!(var->activate & FB_ACTIVATE_TEST))
1481 PRINTKE("Invalid pixclock\n"); 1481 PRINTKE("Invalid pixclock\n");
1482 return -EINVAL; 1482 return -EINVAL;
1483 } else { 1483 } else {
1484 if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &pll))) 1484 if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &pll)))
1485 return err; 1485 return err;
1486 } 1486 }
1487 1487
1488 if (var->accel_flags & FB_ACCELF_TEXT) 1488 if (var->accel_flags & FB_ACCELF_TEXT)
1489 info->var.accel_flags = FB_ACCELF_TEXT; 1489 info->var.accel_flags = FB_ACCELF_TEXT;
1490 else 1490 else
1491 info->var.accel_flags = 0; 1491 info->var.accel_flags = 0;
1492 1492
1493 #if 0 /* fbmon is not done. uncomment for 2.5.x -brad */ 1493 #if 0 /* fbmon is not done. uncomment for 2.5.x -brad */
1494 if (!fbmon_valid_timings(pixclock, htotal, vtotal, info)) 1494 if (!fbmon_valid_timings(pixclock, htotal, vtotal, info))
1495 return -EINVAL; 1495 return -EINVAL;
1496 #endif 1496 #endif
1497 aty_crtc_to_var(&crtc, var); 1497 aty_crtc_to_var(&crtc, var);
1498 var->pixclock = par->pll_ops->pll_to_var(info, &pll); 1498 var->pixclock = par->pll_ops->pll_to_var(info, &pll);
1499 return 0; 1499 return 0;
1500 } 1500 }
1501 1501
1502 static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info) 1502 static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
1503 { 1503 {
1504 u32 xoffset = info->var.xoffset; 1504 u32 xoffset = info->var.xoffset;
1505 u32 yoffset = info->var.yoffset; 1505 u32 yoffset = info->var.yoffset;
1506 u32 vxres = par->crtc.vxres; 1506 u32 vxres = par->crtc.vxres;
1507 u32 bpp = info->var.bits_per_pixel; 1507 u32 bpp = info->var.bits_per_pixel;
1508 1508
1509 par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19); 1509 par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19);
1510 } 1510 }
1511 1511
1512 1512
1513 /* 1513 /*
1514 * Open/Release the frame buffer device 1514 * Open/Release the frame buffer device
1515 */ 1515 */
1516 1516
1517 static int atyfb_open(struct fb_info *info, int user) 1517 static int atyfb_open(struct fb_info *info, int user)
1518 { 1518 {
1519 struct atyfb_par *par = (struct atyfb_par *) info->par; 1519 struct atyfb_par *par = (struct atyfb_par *) info->par;
1520 1520
1521 if (user) { 1521 if (user) {
1522 par->open++; 1522 par->open++;
1523 #ifdef __sparc__ 1523 #ifdef __sparc__
1524 par->mmaped = 0; 1524 par->mmaped = 0;
1525 #endif 1525 #endif
1526 } 1526 }
1527 return (0); 1527 return (0);
1528 } 1528 }
1529 1529
/*
 * Shared interrupt handler for the Mach64 VBLANK interrupt.
 *
 * Under par->int_lock: reads CRTC_INT_CNTL and, if a vertical-blank
 * interrupt is pending, acknowledges it, counts it, performs any
 * deferred pan (writing CRTC_OFF_PITCH during blanking avoids visible
 * tearing) and wakes waiters on par->vblank.wait.
 *
 * Returns IRQ_HANDLED when a VBLANK was serviced, IRQ_NONE otherwise
 * (the line is shared, so unclaimed interrupts must be reported).
 */
static irqreturn_t aty_irq(int irq, void *dev_id, struct pt_regs *fp)
{
	struct atyfb_par *par = dev_id;
	int handled = 0;
	u32 int_cntl;

	spin_lock(&par->int_lock);

	int_cntl = aty_ld_le32(CRTC_INT_CNTL, par);

	if (int_cntl & CRTC_VBLANK_INT) {
		/* clear interrupt */
		/* keep only the enable bits and set the ack bit */
		aty_st_le32(CRTC_INT_CNTL, (int_cntl & CRTC_INT_EN_MASK) | CRTC_VBLANK_INT_AK, par);
		par->vblank.count++;
		if (par->vblank.pan_display) {
			par->vblank.pan_display = 0;
			aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
		}
		wake_up_interruptible(&par->vblank.wait);
		handled = 1;
	}

	spin_unlock(&par->int_lock);

	return IRQ_RETVAL(handled);
}
1556 1556
/*
 * Install the shared IRQ handler (first caller only, guarded by bit 0
 * of par->irq_flags) and enable the VBLANK interrupt in CRTC_INT_CNTL.
 *
 * If the handler is already installed and @reenable is set, check that
 * the VBLANK enable bit is still on and turn it back on if something
 * else cleared it.
 *
 * Returns 0 on success, -EINVAL if request_irq() fails.
 */
static int aty_enable_irq(struct atyfb_par *par, int reenable)
{
	u32 int_cntl;

	/* atomically claim "handler installed"; only the winner registers */
	if (!test_and_set_bit(0, &par->irq_flags)) {
		if (request_irq(par->irq, aty_irq, SA_SHIRQ, "atyfb", par)) {
			/* registration failed: release the claim */
			clear_bit(0, &par->irq_flags);
			return -EINVAL;
		}
		spin_lock_irq(&par->int_lock);
		int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
		/* clear interrupt */
		aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_AK, par);
		/* enable interrupt */
		aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_EN, par);
		spin_unlock_irq(&par->int_lock);
	} else if (reenable) {
		spin_lock_irq(&par->int_lock);
		int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
		if (!(int_cntl & CRTC_VBLANK_INT_EN)) {
			printk("atyfb: someone disabled IRQ [%08x]\n", int_cntl);
			/* re-enable interrupt */
			aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_EN, par );
		}
		spin_unlock_irq(&par->int_lock);
	}

	return 0;
}
1586 1586
/*
 * Disable the vblank interrupt and release the IRQ line.
 *
 * If a deferred pan is still pending (vblank.pan_display), apply it
 * directly before tearing down the interrupt so the panning request is
 * not lost.  The enable bit is cleared under int_lock, then the shared
 * IRQ is freed.  No-op if the handler was never installed.
 *
 * Always returns 0.
 */
static int aty_disable_irq(struct atyfb_par *par)
{
	u32 int_cntl;

	if (test_and_clear_bit(0, &par->irq_flags)) {
		if (par->vblank.pan_display) {
			/* flush the pan the IRQ handler would have done */
			par->vblank.pan_display = 0;
			aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
		}
		spin_lock_irq(&par->int_lock);
		int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
		/* disable interrupt */
		aty_st_le32(CRTC_INT_CNTL, int_cntl & ~CRTC_VBLANK_INT_EN, par );
		spin_unlock_irq(&par->int_lock);
		free_irq(par->irq, par);
	}

	return 0;
}
1606 1606
/*
 * fb_ops release hook: called on each close of the device node.
 *
 * Drops the user open count; on the last close it waits for the engine
 * to idle and tears down the vblank IRQ.  On sparc, if user space had
 * mmap'ed the chip, a default display configuration is recomputed
 * since the mmap'ing program may have left the hardware in an
 * arbitrary state.
 */
static int atyfb_release(struct fb_info *info, int user)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	if (user) {
		par->open--;
		mdelay(1);
		wait_for_idle(par);
		if (!par->open) {
#ifdef __sparc__
			int was_mmaped = par->mmaped;

			par->mmaped = 0;

			if (was_mmaped) {
				struct fb_var_screeninfo var;

				/* Now reset the default display config, we have no
				 * idea what the program(s) which mmap'd the chip did
				 * to the configuration, nor whether it restored it
				 * correctly.
				 */
				var = default_var;
				if (noaccel)
					var.accel_flags &= ~FB_ACCELF_TEXT;
				else
					var.accel_flags |= FB_ACCELF_TEXT;
				if (var.yres == var.yres_virtual) {
					/* reserve a little VRAM (4 pages) for the cursor etc. */
					u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
					var.yres_virtual = ((videoram * 8) / var.bits_per_pixel) / var.xres_virtual;
					if (var.yres_virtual < var.yres)
						var.yres_virtual = var.yres;
				}
				/* NOTE(review): 'var' is built up here but never
				 * applied to the hardware in this function -
				 * presumably a set_var call went missing; verify
				 * against the driver history. */
			}
#endif
			aty_disable_irq(par);
		}
	}
	return (0);
}
1646 1646
1647 /* 1647 /*
1648 * Pan or Wrap the Display 1648 * Pan or Wrap the Display
1649 * 1649 *
1650 * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag 1650 * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
1651 */ 1651 */
1652 1652
/*
 * fb_ops pan hook: move the visible window within the virtual screen.
 *
 * The visible resolution is decoded from the programmed CRTC totals
 * rather than info->var, and the x offset is rounded up to the 8-pixel
 * granularity the hardware pans in.  If FB_ACTIVATE_VBL is requested
 * and the vblank IRQ can be enabled, the register update is deferred
 * to the interrupt handler; otherwise CRTC_OFF_PITCH is written
 * immediately.  While asleep only the software state is updated.
 *
 * Returns 0 on success, -EINVAL if the offset would scroll past the
 * virtual screen.
 */
static int atyfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	u32 xres, yres, xoffset, yoffset;

	/* visible size as currently programmed into the CRTC */
	xres = (((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8;
	yres = ((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1;
	if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
		yres >>= 1;
	/* hardware pans in 8-pixel steps */
	xoffset = (var->xoffset + 7) & ~7;
	yoffset = var->yoffset;
	if (xoffset + xres > par->crtc.vxres || yoffset + yres > par->crtc.vyres)
		return -EINVAL;
	info->var.xoffset = xoffset;
	info->var.yoffset = yoffset;
	if (par->asleep)
		return 0;	/* chip is powered down; don't touch registers */

	set_off_pitch(par, info);
	if ((var->activate & FB_ACTIVATE_VBL) && !aty_enable_irq(par, 0)) {
		/* defer the register write to the vblank IRQ handler */
		par->vblank.pan_display = 1;
	} else {
		par->vblank.pan_display = 0;
		aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
	}

	return 0;
}
1681 1681
1682 static int aty_waitforvblank(struct atyfb_par *par, u32 crtc) 1682 static int aty_waitforvblank(struct atyfb_par *par, u32 crtc)
1683 { 1683 {
1684 struct aty_interrupt *vbl; 1684 struct aty_interrupt *vbl;
1685 unsigned int count; 1685 unsigned int count;
1686 int ret; 1686 int ret;
1687 1687
1688 switch (crtc) { 1688 switch (crtc) {
1689 case 0: 1689 case 0:
1690 vbl = &par->vblank; 1690 vbl = &par->vblank;
1691 break; 1691 break;
1692 default: 1692 default:
1693 return -ENODEV; 1693 return -ENODEV;
1694 } 1694 }
1695 1695
1696 ret = aty_enable_irq(par, 0); 1696 ret = aty_enable_irq(par, 0);
1697 if (ret) 1697 if (ret)
1698 return ret; 1698 return ret;
1699 1699
1700 count = vbl->count; 1700 count = vbl->count;
1701 ret = wait_event_interruptible_timeout(vbl->wait, count != vbl->count, HZ/10); 1701 ret = wait_event_interruptible_timeout(vbl->wait, count != vbl->count, HZ/10);
1702 if (ret < 0) { 1702 if (ret < 0) {
1703 return ret; 1703 return ret;
1704 } 1704 }
1705 if (ret == 0) { 1705 if (ret == 0) {
1706 aty_enable_irq(par, 1); 1706 aty_enable_irq(par, 1);
1707 return -ETIMEDOUT; 1707 return -ETIMEDOUT;
1708 } 1708 }
1709 1709
1710 return 0; 1710 return 0;
1711 } 1711 }
1712 1712
1713 1713
#ifdef DEBUG
/* Debug-only ioctl numbers for raw clock register access ("ATY\00".."\03"). */
#define ATYIO_CLKR 0x41545900 /* ATY\00 */
#define ATYIO_CLKW 0x41545901 /* ATY\01 */

/*
 * User-space image of the Mach64 CT PLL/DSP programming, exchanged
 * verbatim with the ATYIO_CLKR/ATYIO_CLKW debug ioctls.
 */
struct atyclk {
	u32 ref_clk_per;
	u8 pll_ref_div;
	u8 mclk_fb_div;
	u8 mclk_post_div; /* 1,2,3,4,8 */
	u8 mclk_fb_mult; /* 2 or 4 */
	u8 xclk_post_div; /* 1,2,3,4,8 */
	u8 vclk_fb_div;
	u8 vclk_post_div; /* 1,2,3,4,6,8,12 */
	u32 dsp_xclks_per_row; /* 0-16383 */
	u32 dsp_loop_latency; /* 0-15 */
	u32 dsp_precision; /* 0-7 */
	u32 dsp_on; /* 0-2047 */
	u32 dsp_off; /* 0-2047 */
};

#define ATYIO_FEATR 0x41545902 /* ATY\02 */
#define ATYIO_FEATW 0x41545903 /* ATY\03 */
#endif

/* Fallback definition for kernels whose fb.h lacks this ioctl. */
#ifndef FBIO_WAITFORVSYNC
#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
#endif
1741 1741
/*
 * fb_ops ioctl hook.
 *
 * Supported commands:
 *  - FBIOGTYPE (sparc only): report geometry in the SunOS-compatible
 *    struct fbtype.
 *  - FBIO_WAITFORVSYNC: block until the next vertical blank on the
 *    CRTC index read from user space.
 *  - ATYIO_CLKR/CLKW, ATYIO_FEATR/FEATW (DEBUG + CONFIG_FB_ATY_CT,
 *    integrated chips only): raw read/write of the PLL/DSP state and
 *    the features word, for debugging.
 *
 * Returns 0 on success, -EFAULT on bad user pointers, -EINVAL for
 * unknown commands or debug commands on non-integrated chips.
 */
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
#ifdef __sparc__
	struct fbtype fbtyp;
#endif

	switch (cmd) {
#ifdef __sparc__
	case FBIOGTYPE:
		fbtyp.fb_type = FBTYPE_PCI_GENERIC;
		fbtyp.fb_width = par->crtc.vxres;
		fbtyp.fb_height = par->crtc.vyres;
		fbtyp.fb_depth = info->var.bits_per_pixel;
		fbtyp.fb_cmsize = info->cmap.len;
		fbtyp.fb_size = info->fix.smem_len;
		if (copy_to_user((struct fbtype __user *) arg, &fbtyp, sizeof(fbtyp)))
			return -EFAULT;
		break;
#endif /* __sparc__ */

	case FBIO_WAITFORVSYNC:
		{
			u32 crtc;

			if (get_user(crtc, (__u32 __user *) arg))
				return -EFAULT;

			return aty_waitforvblank(par, crtc);
		}
		break;

#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
	case ATYIO_CLKR:
		/* snapshot the cached PLL/DSP programming for user space */
		if (M64_HAS(INTEGRATED)) {
			struct atyclk clk;
			union aty_pll *pll = &(par->pll);
			u32 dsp_config = pll->ct.dsp_config;
			u32 dsp_on_off = pll->ct.dsp_on_off;
			clk.ref_clk_per = par->ref_clk_per;
			clk.pll_ref_div = pll->ct.pll_ref_div;
			clk.mclk_fb_div = pll->ct.mclk_fb_div;
			clk.mclk_post_div = pll->ct.mclk_post_div_real;
			clk.mclk_fb_mult = pll->ct.mclk_fb_mult;
			clk.xclk_post_div = pll->ct.xclk_post_div_real;
			clk.vclk_fb_div = pll->ct.vclk_fb_div;
			clk.vclk_post_div = pll->ct.vclk_post_div_real;
			/* unpack the packed dsp_config / dsp_on_off words */
			clk.dsp_xclks_per_row = dsp_config & 0x3fff;
			clk.dsp_loop_latency = (dsp_config >> 16) & 0xf;
			clk.dsp_precision = (dsp_config >> 20) & 7;
			clk.dsp_off = dsp_on_off & 0x7ff;
			clk.dsp_on = (dsp_on_off >> 16) & 0x7ff;
			if (copy_to_user((struct atyclk __user *) arg, &clk,
					 sizeof(clk)))
				return -EFAULT;
		} else
			return -EINVAL;
		break;
	case ATYIO_CLKW:
		/* debug-only: write user-supplied PLL values straight to
		 * the chip; no validation is performed on the fields */
		if (M64_HAS(INTEGRATED)) {
			struct atyclk clk;
			union aty_pll *pll = &(par->pll);
			if (copy_from_user(&clk, (struct atyclk __user *) arg, sizeof(clk)))
				return -EFAULT;
			par->ref_clk_per = clk.ref_clk_per;
			pll->ct.pll_ref_div = clk.pll_ref_div;
			pll->ct.mclk_fb_div = clk.mclk_fb_div;
			pll->ct.mclk_post_div_real = clk.mclk_post_div;
			pll->ct.mclk_fb_mult = clk.mclk_fb_mult;
			pll->ct.xclk_post_div_real = clk.xclk_post_div;
			pll->ct.vclk_fb_div = clk.vclk_fb_div;
			pll->ct.vclk_post_div_real = clk.vclk_post_div;
			pll->ct.dsp_config = (clk.dsp_xclks_per_row & 0x3fff) |
				((clk.dsp_loop_latency & 0xf)<<16)| ((clk.dsp_precision & 7)<<20);
			pll->ct.dsp_on_off = (clk.dsp_off & 0x7ff) | ((clk.dsp_on & 0x7ff)<<16);
			/*aty_calc_pll_ct(info, &pll->ct);*/
			aty_set_pll_ct(info, pll);
		} else
			return -EINVAL;
		break;
	case ATYIO_FEATR:
		if (get_user(par->features, (u32 __user *) arg))
			return -EFAULT;
		break;
	case ATYIO_FEATW:
		if (put_user(par->features, (u32 __user *) arg))
			return -EFAULT;
		break;
#endif /* DEBUG && CONFIG_FB_ATY_CT */
	default:
		return -EINVAL;
	}
	return 0;
}
1836 1836
1837 static int atyfb_sync(struct fb_info *info) 1837 static int atyfb_sync(struct fb_info *info)
1838 { 1838 {
1839 struct atyfb_par *par = (struct atyfb_par *) info->par; 1839 struct atyfb_par *par = (struct atyfb_par *) info->par;
1840 1840
1841 if (par->blitter_may_be_busy) 1841 if (par->blitter_may_be_busy)
1842 wait_for_idle(par); 1842 wait_for_idle(par);
1843 return 0; 1843 return 0;
1844 } 1844 }
1845 1845
1846 #ifdef __sparc__ 1846 #ifdef __sparc__
/*
 * sparc-only mmap hook: map framebuffer / register apertures into user
 * space using the per-card mmap_map translation table.
 *
 * The requested range is walked page by page; for each page the first
 * mmap_map entry whose virtual-offset window covers it supplies the
 * physical offset and page-protection bits, and remap_pfn_range()
 * installs that piece of the mapping.  Pages not covered by any entry
 * are silently skipped.
 *
 * Returns 0 on success, -ENXIO without a map table, -EINVAL for a
 * bogus offset or a range that matched no map entry at all, -EAGAIN if
 * remap_pfn_range() fails.
 */
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	unsigned int size, page, map_size = 0;
	unsigned long map_offset = 0;
	unsigned long off;
	int i;

	if (!par->mmap_map)
		return -ENXIO;

	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	off = vma->vm_pgoff << PAGE_SHIFT;
	size = vma->vm_end - vma->vm_start;

	/* To stop the swapper from even considering these pages. */
	vma->vm_flags |= (VM_IO | VM_RESERVED);

	/* NOTE(review): the high-bit bias apparently redirects a plain
	 * framebuffer-sized (or one-page) request into a dedicated
	 * mmap_map window - confirm against the mmap_map setup code. */
	if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) ||
	    ((off == info->fix.smem_len) && (size == PAGE_SIZE)))
		off += 0x8000000000000000UL;

	vma->vm_pgoff = off >> PAGE_SHIFT;	/* propagate off changes */

	/* Each page, see which map applies */
	for (page = 0; page < size;) {
		map_size = 0;
		for (i = 0; par->mmap_map[i].size; i++) {
			unsigned long start = par->mmap_map[i].voff;
			unsigned long end = start + par->mmap_map[i].size;
			unsigned long offset = off + page;

			if (start > offset)
				continue;
			if (offset >= end)
				continue;

			map_size = par->mmap_map[i].size - (offset - start);
			map_offset =
			    par->mmap_map[i].poff + (offset - start);
			break;
		}
		if (!map_size) {
			/* no entry covers this page; skip it */
			page += PAGE_SIZE;
			continue;
		}
		if (page + map_size > size)
			map_size = size - page;

		/* apply the entry's protection overrides */
		pgprot_val(vma->vm_page_prot) &=
		    ~(par->mmap_map[i].prot_mask);
		pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;

		if (remap_pfn_range(vma, vma->vm_start + page,
			map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
			return -EAGAIN;

		page += map_size;
	}

	if (!map_size)
		return -EINVAL;

	if (!par->mmaped)
		par->mmaped = 1;
	return 0;
}
1916 1916
/* Saved state for PROM console switching: the pan offset plus two
 * banks (enter/exit) of the 256-entry RGB palette. */
static struct {
	u32 yoffset;
	u8 r[2][256];
	u8 g[2][256];
	u8 b[2][256];
} atyfb_save;
1923 1923
/*
 * Swap the DAC palette with the bank saved for the other console
 * state: each entry is read into atyfb_save bank @enter while the
 * previously saved bank (1 - enter) is written back to the hardware.
 *
 * The three consecutive reads/writes of the lut register fetch/store
 * R, G and B in turn - presumably the DAC auto-increments through the
 * components; confirm against the Mach64 DAC documentation.
 */
static void atyfb_save_palette(struct atyfb_par *par, int enter)
{
	int i, tmp;

	for (i = 0; i < 256; i++) {
		tmp = aty_ld_8(DAC_CNTL, par) & 0xfc;
		if (M64_HAS(EXTRA_BRIGHT))
			tmp |= 0x2;
		aty_st_8(DAC_CNTL, tmp, par);
		aty_st_8(DAC_MASK, 0xff, par);

		writeb(i, &par->aty_cmap_regs->rindex);
		atyfb_save.r[enter][i] = readb(&par->aty_cmap_regs->lut);
		atyfb_save.g[enter][i] = readb(&par->aty_cmap_regs->lut);
		atyfb_save.b[enter][i] = readb(&par->aty_cmap_regs->lut);
		writeb(i, &par->aty_cmap_regs->windex);
		writeb(atyfb_save.r[1 - enter][i],
		       &par->aty_cmap_regs->lut);
		writeb(atyfb_save.g[1 - enter][i],
		       &par->aty_cmap_regs->lut);
		writeb(atyfb_save.b[1 - enter][i],
		       &par->aty_cmap_regs->lut);
	}
}
1948 1948
/*
 * Console enter/exit callback (sparc): find the first registered
 * framebuffer driven by this driver, swap its palette banks via
 * atyfb_save_palette(), and save (on enter) or restore (on exit) the
 * vertical pan offset so the other console's display is intact.
 * Only the first matching fb_info is handled.
 */
static void atyfb_palette(int enter)
{
	struct atyfb_par *par;
	struct fb_info *info;
	int i;

	for (i = 0; i < FB_MAX; i++) {
		info = registered_fb[i];
		if (info && info->fbops == &atyfb_ops) {
			par = (struct atyfb_par *) info->par;

			atyfb_save_palette(par, enter);
			if (enter) {
				/* entering the firmware console: un-pan */
				atyfb_save.yoffset = info->var.yoffset;
				info->var.yoffset = 0;
				set_off_pitch(par, info);
			} else {
				/* leaving: restore the saved pan offset */
				info->var.yoffset = atyfb_save.yoffset;
				set_off_pitch(par, info);
			}
			aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
			break;
		}
	}
}
1974 #endif /* __sparc__ */ 1974 #endif /* __sparc__ */
1975 1975
1976 1976
1977 1977
1978 #if defined(CONFIG_PM) && defined(CONFIG_PCI) 1978 #if defined(CONFIG_PM) && defined(CONFIG_PCI)
1979 1979
1980 /* Power management routines. Those are used for PowerBook sleep. 1980 /* Power management routines. Those are used for PowerBook sleep.
1981 */ 1981 */
/*
 * Move the chip into (@sleep != 0) or out of the LCD controller's
 * "suspend" power state by sequencing the POWER_MANAGEMENT LCD
 * register, then poll its status field - up to 2000 iterations of
 * 1 ms - until the requested state is reported.
 *
 * Each write is followed by a read-back of the register; presumably
 * this flushes the posted write before the next step - TODO confirm
 * against the Rage LT Pro documentation.
 *
 * Returns 0 on success, -EIO if the state change timed out.
 */
static int aty_power_mgmt(int sleep, struct atyfb_par *par)
{
	u32 pm;
	int timeout;

	/* select register-driven (not pin-driven) power management */
	pm = aty_ld_lcd(POWER_MANAGEMENT, par);
	pm = (pm & ~PWR_MGT_MODE_MASK) | PWR_MGT_MODE_REG;
	aty_st_lcd(POWER_MANAGEMENT, pm, par);
	pm = aty_ld_lcd(POWER_MANAGEMENT, par);

	timeout = 2000;
	if (sleep) {
		/* Sleep */
		pm &= ~PWR_MGT_ON;
		aty_st_lcd(POWER_MANAGEMENT, pm, par);
		pm = aty_ld_lcd(POWER_MANAGEMENT, par);
		udelay(10);
		pm &= ~(PWR_BLON | AUTO_PWR_UP);
		pm |= SUSPEND_NOW;
		aty_st_lcd(POWER_MANAGEMENT, pm, par);
		pm = aty_ld_lcd(POWER_MANAGEMENT, par);
		udelay(10);
		pm |= PWR_MGT_ON;
		aty_st_lcd(POWER_MANAGEMENT, pm, par);
		do {
			/* wait for the status field to report SUSPEND */
			pm = aty_ld_lcd(POWER_MANAGEMENT, par);
			mdelay(1);
			if ((--timeout) == 0)
				break;
		} while ((pm & PWR_MGT_STATUS_MASK) != PWR_MGT_STATUS_SUSPEND);
	} else {
		/* Wakeup */
		pm &= ~PWR_MGT_ON;
		aty_st_lcd(POWER_MANAGEMENT, pm, par);
		pm = aty_ld_lcd(POWER_MANAGEMENT, par);
		udelay(10);
		pm &= ~SUSPEND_NOW;
		pm |= (PWR_BLON | AUTO_PWR_UP);
		aty_st_lcd(POWER_MANAGEMENT, pm, par);
		pm = aty_ld_lcd(POWER_MANAGEMENT, par);
		udelay(10);
		pm |= PWR_MGT_ON;
		aty_st_lcd(POWER_MANAGEMENT, pm, par);
		do {
			/* wait for the status field to clear */
			pm = aty_ld_lcd(POWER_MANAGEMENT, par);
			mdelay(1);
			if ((--timeout) == 0)
				break;
		} while ((pm & PWR_MGT_STATUS_MASK) != 0);
	}
	mdelay(500);

	return timeout ? 0 : -EIO;
}
2036 2036
/*
 * PCI suspend hook.
 *
 * On non-PowerMac kernels this returns 0 immediately (see the HACK
 * ALERT below), so everything past the #ifndef block is only reached
 * with CONFIG_PPC_PMAC.  There the engine is idled and reset, the
 * display blanked, and the chip put into its suspend power state via
 * aty_power_mgmt(); on failure all of that is rolled back and -EIO is
 * returned.
 */
static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct fb_info *info = pci_get_drvdata(pdev);
	struct atyfb_par *par = (struct atyfb_par *) info->par;

#ifndef CONFIG_PPC_PMAC
	/* HACK ALERT ! Once I find a proper way to say to each driver
	 * individually what will happen with it's PCI slot, I'll change
	 * that. On laptops, the AGP slot is just unclocked, so D2 is
	 * expected, while on desktops, the card is powered off
	 */
	return 0;
#endif /* CONFIG_PPC_PMAC */

	if (state.event == pdev->dev.power.power_state.event)
		return 0;	/* already in the requested state */

	acquire_console_sem();

	fb_set_suspend(info, 1);

	/* Idle & reset engine */
	wait_for_idle(par);
	aty_reset_engine(par);

	/* Blank display and LCD */
	atyfb_blank(FB_BLANK_POWERDOWN, info);

	par->asleep = 1;
	par->lock_blank = 1;

	/* Set chip to "suspend" mode */
	if (aty_power_mgmt(1, par)) {
		/* suspend failed: undo the blanking and report an error */
		par->asleep = 0;
		par->lock_blank = 0;
		atyfb_blank(FB_BLANK_UNBLANK, info);
		fb_set_suspend(info, 0);
		release_console_sem();
		return -EIO;
	}

	release_console_sem();

	pdev->dev.power.power_state = state;

	return 0;
}
2084 2084
/*
 * PCI resume hook: wake the chip (if it was put into the suspend
 * power state), reprogram the display mode, and unblank.
 */
static int atyfb_pci_resume(struct pci_dev *pdev)
{
	struct fb_info *info = pci_get_drvdata(pdev);
	struct atyfb_par *par = (struct atyfb_par *) info->par;

	if (pdev->dev.power.power_state.event == PM_EVENT_ON)
		return 0;	/* never suspended; nothing to do */

	acquire_console_sem();

	/* NOTE(review): magic 2 - presumably PM_EVENT_SUSPEND; verify
	 * against the pm_message_t event values for this kernel. */
	if (pdev->dev.power.power_state.event == 2)
		aty_power_mgmt(0, par);
	par->asleep = 0;

	/* Restore display */
	atyfb_set_par(info);

	/* Refresh */
	fb_set_suspend(info, 0);

	/* Unblank */
	par->lock_blank = 0;
	atyfb_blank(FB_BLANK_UNBLANK, info);

	release_console_sem();

	pdev->dev.power.power_state = PMSG_ON;

	return 0;
}
2115 2115
2116 #endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */ 2116 #endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */
2117 2117
2118 #ifdef CONFIG_PMAC_BACKLIGHT 2118 #ifdef CONFIG_PMAC_BACKLIGHT
2119 2119
2120 /* 2120 /*
2121 * LCD backlight control 2121 * LCD backlight control
2122 */ 2122 */
2123 2123
/* Maps the 16 pmac backlight levels (0..15) to the hardware's
 * bias-modulation duty values; index 0 is fully off. */
static int backlight_conv[] = {
	0x00, 0x3f, 0x4c, 0x59, 0x66, 0x73, 0x80, 0x8d,
	0x9a, 0xa7, 0xb4, 0xc1, 0xcf, 0xdc, 0xe9, 0xff
};
2128 2128
2129 static int aty_set_backlight_enable(int on, int level, void *data) 2129 static int aty_set_backlight_enable(int on, int level, void *data)
2130 { 2130 {
2131 struct fb_info *info = (struct fb_info *) data; 2131 struct fb_info *info = (struct fb_info *) data;
2132 struct atyfb_par *par = (struct atyfb_par *) info->par; 2132 struct atyfb_par *par = (struct atyfb_par *) info->par;
2133 unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par); 2133 unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
2134 2134
2135 reg |= (BLMOD_EN | BIASMOD_EN); 2135 reg |= (BLMOD_EN | BIASMOD_EN);
2136 if (on && level > BACKLIGHT_OFF) { 2136 if (on && level > BACKLIGHT_OFF) {
2137 reg &= ~BIAS_MOD_LEVEL_MASK; 2137 reg &= ~BIAS_MOD_LEVEL_MASK;
2138 reg |= (backlight_conv[level] << BIAS_MOD_LEVEL_SHIFT); 2138 reg |= (backlight_conv[level] << BIAS_MOD_LEVEL_SHIFT);
2139 } else { 2139 } else {
2140 reg &= ~BIAS_MOD_LEVEL_MASK; 2140 reg &= ~BIAS_MOD_LEVEL_MASK;
2141 reg |= (backlight_conv[0] << BIAS_MOD_LEVEL_SHIFT); 2141 reg |= (backlight_conv[0] << BIAS_MOD_LEVEL_SHIFT);
2142 } 2142 }
2143 aty_st_lcd(LCD_MISC_CNTL, reg, par); 2143 aty_st_lcd(LCD_MISC_CNTL, reg, par);
2144 return 0; 2144 return 0;
2145 } 2145 }
2146 2146
/* Backlight-controller "set level" hook: setting a level implies
 * enabling the backlight, so delegate with on=1. */
static int aty_set_backlight_level(int level, void *data)
{
	return aty_set_backlight_enable(1, level, data);
}
2151 2151
/* Callback pair handed to register_backlight_controller() for mobile
 * (MOBIL_BUS) chips; first entry is the enable hook, second the
 * level hook. */
static struct backlight_controller aty_backlight_controller = {
	aty_set_backlight_enable,
	aty_set_backlight_level
};
2156 #endif /* CONFIG_PMAC_BACKLIGHT */ 2156 #endif /* CONFIG_PMAC_BACKLIGHT */
2157 2157
2158 static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk) 2158 static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
2159 { 2159 {
2160 const int ragepro_tbl[] = { 2160 const int ragepro_tbl[] = {
2161 44, 50, 55, 66, 75, 80, 100 2161 44, 50, 55, 66, 75, 80, 100
2162 }; 2162 };
2163 const int ragexl_tbl[] = { 2163 const int ragexl_tbl[] = {
2164 50, 66, 75, 83, 90, 95, 100, 105, 2164 50, 66, 75, 83, 90, 95, 100, 105,
2165 110, 115, 120, 125, 133, 143, 166 2165 110, 115, 120, 125, 133, 143, 166
2166 }; 2166 };
2167 const int *refresh_tbl; 2167 const int *refresh_tbl;
2168 int i, size; 2168 int i, size;
2169 2169
2170 if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) { 2170 if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) {
2171 refresh_tbl = ragexl_tbl; 2171 refresh_tbl = ragexl_tbl;
2172 size = ARRAY_SIZE(ragexl_tbl); 2172 size = ARRAY_SIZE(ragexl_tbl);
2173 } else { 2173 } else {
2174 refresh_tbl = ragepro_tbl; 2174 refresh_tbl = ragepro_tbl;
2175 size = ARRAY_SIZE(ragepro_tbl); 2175 size = ARRAY_SIZE(ragepro_tbl);
2176 } 2176 }
2177 2177
2178 for (i=0; i < size; i++) { 2178 for (i=0; i < size; i++) {
2179 if (xclk < refresh_tbl[i]) 2179 if (xclk < refresh_tbl[i])
2180 break; 2180 break;
2181 } 2181 }
2182 par->mem_refresh_rate = i; 2182 par->mem_refresh_rate = i;
2183 } 2183 }
2184 2184
2185 /* 2185 /*
2186 * Initialisation 2186 * Initialisation
2187 */ 2187 */
2188 2188
/* Most recently registered fb_info (assigned at the end of aty_init()).
 * NOTE(review): despite the name, this holds a single pointer and is
 * overwritten on each successful init in the code visible here. */
static struct fb_info *fb_list = NULL;
2190 2190
2191 #if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) 2191 #if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD)
2192 static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par, 2192 static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par,
2193 struct fb_var_screeninfo *var) 2193 struct fb_var_screeninfo *var)
2194 { 2194 {
2195 int ret = -EINVAL; 2195 int ret = -EINVAL;
2196 2196
2197 if (par->lcd_table != 0 && (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2197 if (par->lcd_table != 0 && (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
2198 *var = default_var; 2198 *var = default_var;
2199 var->xres = var->xres_virtual = par->lcd_hdisp; 2199 var->xres = var->xres_virtual = par->lcd_hdisp;
2200 var->right_margin = par->lcd_right_margin; 2200 var->right_margin = par->lcd_right_margin;
2201 var->left_margin = par->lcd_hblank_len - 2201 var->left_margin = par->lcd_hblank_len -
2202 (par->lcd_right_margin + par->lcd_hsync_dly + 2202 (par->lcd_right_margin + par->lcd_hsync_dly +
2203 par->lcd_hsync_len); 2203 par->lcd_hsync_len);
2204 var->hsync_len = par->lcd_hsync_len + par->lcd_hsync_dly; 2204 var->hsync_len = par->lcd_hsync_len + par->lcd_hsync_dly;
2205 var->yres = var->yres_virtual = par->lcd_vdisp; 2205 var->yres = var->yres_virtual = par->lcd_vdisp;
2206 var->lower_margin = par->lcd_lower_margin; 2206 var->lower_margin = par->lcd_lower_margin;
2207 var->upper_margin = par->lcd_vblank_len - 2207 var->upper_margin = par->lcd_vblank_len -
2208 (par->lcd_lower_margin + par->lcd_vsync_len); 2208 (par->lcd_lower_margin + par->lcd_vsync_len);
2209 var->vsync_len = par->lcd_vsync_len; 2209 var->vsync_len = par->lcd_vsync_len;
2210 var->pixclock = par->lcd_pixclock; 2210 var->pixclock = par->lcd_pixclock;
2211 ret = 0; 2211 ret = 0;
2212 } 2212 }
2213 2213
2214 return ret; 2214 return ret;
2215 } 2215 }
2216 #endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */ 2216 #endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */
2217 2217
/*
 * One-time initialisation of a mach64 adapter: probe DAC/PLL/RAM,
 * size video memory, choose an initial video mode and register the
 * framebuffer.
 *
 * @info: fb_info whose ->par (struct atyfb_par) already has the
 *        register aperture and PLL limits filled in by the bus probe.
 * @name: bus name, used only in the final info printk.
 *
 * Returns 0 on success, -1 on failure; on failure the video mode that
 * was active on entry is restored and any MTRRs added here are freed.
 */
static int __init aty_init(struct fb_info *info, const char *name)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;
	const char *ramname = NULL, *xtal;
	int gtb_memsize, has_var = 0;
	struct fb_var_screeninfo var;
	u8 pll_ref_div;
	u32 i;
#if defined(CONFIG_PPC)
	int sense;
#endif

	init_waitqueue_head(&par->vblank.wait);
	spin_lock_init(&par->int_lock);

	/* palette (CMAP) registers sit at a fixed 0xc0 offset into the
	 * register aperture */
	par->aty_cmap_regs =
	    (struct aty_cmap_regs __iomem *) (par->ati_regbase + 0xc0);

#ifdef CONFIG_PPC_PMAC
	/* The Apple iBook1 uses non-standard memory frequencies. We detect it
	 * and set the frequency manually. */
	if (machine_is_compatible("PowerBook2,1")) {
		par->pll_limits.mclk = 70;
		par->pll_limits.xclk = 53;
	}
#endif
	/* boot/module parameters override the probed clock limits */
	if (pll)
		par->pll_limits.pll_max = pll;
	if (mclk)
		par->pll_limits.mclk = mclk;
	if (xclk)
		par->pll_limits.xclk = xclk;

	aty_calc_mem_refresh(par, par->pll_limits.xclk);
	/* convert MHz limits into the period representation used by the
	 * mode-setting code */
	par->pll_per = 1000000/par->pll_limits.pll_max;
	par->mclk_per = 1000000/par->pll_limits.mclk;
	par->xclk_per = 1000000/par->pll_limits.xclk;

	/* default reference crystal: 14.31818 MHz; may be switched to
	 * 29.498928 MHz below for GTB-DSP parts */
	par->ref_clk_per = 1000000000000ULL / 14318180;
	xtal = "14.31818";

#ifdef CONFIG_FB_ATY_GX
	/* discrete (non-integrated) mach64 GX: external DAC and clock chip */
	if (!M64_HAS(INTEGRATED)) {
		u32 stat0;
		u8 dac_type, dac_subtype, clk_type;
		stat0 = aty_ld_le32(CONFIG_STAT0, par);
		par->bus_type = (stat0 >> 0) & 0x07;
		par->ram_type = (stat0 >> 3) & 0x07;
		ramname = aty_gx_ram[par->ram_type];
		/* FIXME: clockchip/RAMDAC probing? */
		dac_type = (aty_ld_le32(DAC_CNTL, par) >> 16) & 0x07;
#ifdef CONFIG_ATARI
		clk_type = CLK_ATI18818_1;
		dac_type = (stat0 >> 9) & 0x07;
		if (dac_type == 0x07)
			dac_subtype = DAC_ATT20C408;
		else
			dac_subtype = (aty_ld_8(SCRATCH_REG1 + 1, par) & 0xF0) | dac_type;
#else
		dac_type = DAC_IBMRGB514;
		dac_subtype = DAC_IBMRGB514;
		clk_type = CLK_IBMRGB514;
#endif
		switch (dac_subtype) {
		case DAC_IBMRGB514:
			par->dac_ops = &aty_dac_ibm514;
			break;
		case DAC_ATI68860_B:
		case DAC_ATI68860_C:
			par->dac_ops = &aty_dac_ati68860b;
			break;
		case DAC_ATT20C408:
		case DAC_ATT21C498:
			par->dac_ops = &aty_dac_att21c498;
			break;
		default:
			PRINTKI("aty_init: DAC type not implemented yet!\n");
			par->dac_ops = &aty_dac_unsupported;
			break;
		}
		switch (clk_type) {
		case CLK_ATI18818_1:
			par->pll_ops = &aty_pll_ati18818_1;
			break;
		case CLK_IBMRGB514:
			par->pll_ops = &aty_pll_ibm514;
			break;
#if 0 /* dead code */
		case CLK_STG1703:
			par->pll_ops = &aty_pll_stg1703;
			break;
		case CLK_CH8398:
			par->pll_ops = &aty_pll_ch8398;
			break;
		case CLK_ATT20C408:
			par->pll_ops = &aty_pll_att20c408;
			break;
#endif
		default:
			PRINTKI("aty_init: CLK type not implemented yet!");
			par->pll_ops = &aty_pll_unsupported;
			break;
		}
	}
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
	/* integrated (CT-class) parts: on-chip DAC and PLL */
	if (M64_HAS(INTEGRATED)) {
		par->dac_ops = &aty_dac_ct;
		par->pll_ops = &aty_pll_ct;
		par->bus_type = PCI;
		par->ram_type = (aty_ld_le32(CONFIG_STAT0, par) & 0x07);
		ramname = aty_ct_ram[par->ram_type];
		/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
		if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
			par->pll_limits.mclk = 63;
	}

	/* infer the reference crystal from PLL_REF_DIV: whichever of the
	 * two known crystals yields a max VCO closer to pll_max wins */
	if (M64_HAS(GTB_DSP)
	    && (pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par))) {
		int diff1, diff2;
		diff1 = 510 * 14 / pll_ref_div - par->pll_limits.pll_max;
		diff2 = 510 * 29 / pll_ref_div - par->pll_limits.pll_max;
		if (diff1 < 0)
			diff1 = -diff1;
		if (diff2 < 0)
			diff2 = -diff2;
		if (diff2 < diff1) {
			par->ref_clk_per = 1000000000000ULL / 29498928;
			xtal = "29.498928";
		}
	}
#endif /* CONFIG_FB_ATY_CT */

	/* save previous video mode (restored on the failure path below) */
	aty_get_crtc(par, &saved_crtc);
	if(par->pll_ops->get_pll)
		par->pll_ops->get_pll(info, &saved_pll);

	/* decode video memory size from MEM_CNTL; GTB-DSP parts use a
	 * wider size field than older chips */
	i = aty_ld_le32(MEM_CNTL, par);
	gtb_memsize = M64_HAS(GTB_DSP);
	if (gtb_memsize)
		switch (i & 0xF) {	/* 0xF used instead of MEM_SIZE_ALIAS */
		case MEM_SIZE_512K:
			info->fix.smem_len = 0x80000;
			break;
		case MEM_SIZE_1M:
			info->fix.smem_len = 0x100000;
			break;
		case MEM_SIZE_2M_GTB:
			info->fix.smem_len = 0x200000;
			break;
		case MEM_SIZE_4M_GTB:
			info->fix.smem_len = 0x400000;
			break;
		case MEM_SIZE_6M_GTB:
			info->fix.smem_len = 0x600000;
			break;
		case MEM_SIZE_8M_GTB:
			info->fix.smem_len = 0x800000;
			break;
		default:
			info->fix.smem_len = 0x80000;
	} else
		switch (i & MEM_SIZE_ALIAS) {
		case MEM_SIZE_512K:
			info->fix.smem_len = 0x80000;
			break;
		case MEM_SIZE_1M:
			info->fix.smem_len = 0x100000;
			break;
		case MEM_SIZE_2M:
			info->fix.smem_len = 0x200000;
			break;
		case MEM_SIZE_4M:
			info->fix.smem_len = 0x400000;
			break;
		case MEM_SIZE_6M:
			info->fix.smem_len = 0x600000;
			break;
		case MEM_SIZE_8M:
			info->fix.smem_len = 0x800000;
			break;
		default:
			info->fix.smem_len = 0x80000;
		}

	if (M64_HAS(MAGIC_VRAM_SIZE)) {
		if (aty_ld_le32(CONFIG_STAT1, par) & 0x40000000)
			info->fix.smem_len += 0x400000;
	}

	/* the "vram" parameter overrides detection; write the matching
	 * size code back into MEM_CNTL */
	if (vram) {
		info->fix.smem_len = vram * 1024;
		i = i & ~(gtb_memsize ? 0xF : MEM_SIZE_ALIAS);
		if (info->fix.smem_len <= 0x80000)
			i |= MEM_SIZE_512K;
		else if (info->fix.smem_len <= 0x100000)
			i |= MEM_SIZE_1M;
		else if (info->fix.smem_len <= 0x200000)
			i |= gtb_memsize ? MEM_SIZE_2M_GTB : MEM_SIZE_2M;
		else if (info->fix.smem_len <= 0x400000)
			i |= gtb_memsize ? MEM_SIZE_4M_GTB : MEM_SIZE_4M;
		else if (info->fix.smem_len <= 0x600000)
			i |= gtb_memsize ? MEM_SIZE_6M_GTB : MEM_SIZE_6M;
		else
			i |= gtb_memsize ? MEM_SIZE_8M_GTB : MEM_SIZE_8M;
		aty_st_le32(MEM_CNTL, i, par);
	}

	/*
	 * Reg Block 0 (CT-compatible block) is at mmio_start
	 * Reg Block 1 (multimedia extensions) is at mmio_start - 0x400
	 */
	if (M64_HAS(GX)) {
		info->fix.mmio_len = 0x400;
		info->fix.accel = FB_ACCEL_ATI_MACH64GX;
	} else if (M64_HAS(CT)) {
		info->fix.mmio_len = 0x400;
		info->fix.accel = FB_ACCEL_ATI_MACH64CT;
	} else if (M64_HAS(VT)) {
		info->fix.mmio_start -= 0x400;
		info->fix.mmio_len = 0x800;
		info->fix.accel = FB_ACCEL_ATI_MACH64VT;
	} else {/* GT */
		info->fix.mmio_start -= 0x400;
		info->fix.mmio_len = 0x800;
		info->fix.accel = FB_ACCEL_ATI_MACH64GT;
	}

	PRINTKI("%d%c %s, %s MHz XTAL, %d MHz PLL, %d Mhz MCLK, %d MHz XCLK\n",
	       info->fix.smem_len == 0x80000 ? 512 : (info->fix.smem_len >> 20),
	       info->fix.smem_len == 0x80000 ? 'K' : 'M', ramname, xtal, par->pll_limits.pll_max,
	       par->pll_limits.mclk, par->pll_limits.xclk);

/* NOTE(review): CONFIG_ATY_CT looks like a typo for CONFIG_FB_ATY_CT,
 * which would make this debug dump always compiled out — verify. */
#if defined(DEBUG) && defined(CONFIG_ATY_CT)
	if (M64_HAS(INTEGRATED)) {
		int i;
		printk("debug atyfb: BUS_CNTL DAC_CNTL MEM_CNTL EXT_MEM_CNTL CRTC_GEN_CNTL "
		       "DSP_CONFIG DSP_ON_OFF CLOCK_CNTL\n"
		       "debug atyfb: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		       "debug atyfb: PLL",
			aty_ld_le32(BUS_CNTL, par), aty_ld_le32(DAC_CNTL, par),
			aty_ld_le32(MEM_CNTL, par), aty_ld_le32(EXT_MEM_CNTL, par),
			aty_ld_le32(CRTC_GEN_CNTL, par), aty_ld_le32(DSP_CONFIG, par),
			aty_ld_le32(DSP_ON_OFF, par), aty_ld_le32(CLOCK_CNTL, par));
		for (i = 0; i < 40; i++)
			printk(" %02x", aty_ld_pll_ct(i, par));
		printk("\n");
	}
#endif
	if(par->pll_ops->init_pll)
		par->pll_ops->init_pll(info, &par->pll);

	/*
	 * Last page of 8 MB (4 MB on ISA) aperture is MMIO
	 * FIXME: we should use the auxiliary aperture instead so we can access
	 * the full 8 MB of video RAM on 8 MB boards
	 */

	if (!par->aux_start &&
	    (info->fix.smem_len == 0x800000 || (par->bus_type == ISA && info->fix.smem_len == 0x400000)))
		info->fix.smem_len -= GUI_RESERVE;

	/*
	 * Disable register access through the linear aperture
	 * if the auxiliary aperture is used so we can access
	 * the full 8 MB of video RAM on 8 MB boards.
	 */
	if (par->aux_start)
		aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par);

#ifdef CONFIG_MTRR
	par->mtrr_aper = -1;
	par->mtrr_reg = -1;
	if (!nomtrr) {
		/* Cover the whole resource. */
		par->mtrr_aper = mtrr_add(par->res_start, par->res_size, MTRR_TYPE_WRCOMB, 1);
		if (par->mtrr_aper >= 0 && !par->aux_start) {
			/* Make a hole for mmio. */
			par->mtrr_reg = mtrr_add(par->res_start + 0x800000 - GUI_RESERVE,
						 GUI_RESERVE, MTRR_TYPE_UNCACHABLE, 1);
			if (par->mtrr_reg < 0) {
				mtrr_del(par->mtrr_aper, 0, 0);
				par->mtrr_aper = -1;
			}
		}
	}
#endif

	info->fbops = &atyfb_ops;
	info->pseudo_palette = pseudo_palette;
	info->flags = FBINFO_FLAG_DEFAULT;

#ifdef CONFIG_PMAC_BACKLIGHT
	if (M64_HAS(G3_PB_1_1) && machine_is_compatible("PowerBook1,1")) {
		/* these bits let the 101 powerbook wake up from sleep -- paulus */
		aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par)
			   | (USE_F32KHZ | TRISTATE_MEM_EN), par);
	} else if (M64_HAS(MOBIL_BUS))
		register_backlight_controller(&aty_backlight_controller, info, "ati");
#endif /* CONFIG_PMAC_BACKLIGHT */

	/* pick the initial video mode: Mac NVRAM/sense logic, then panel
	 * timings, then the "mode" option, finally default_var */
	memset(&var, 0, sizeof(var));
#ifdef CONFIG_PPC
	if (machine_is(powermac)) {
		/*
		 * FIXME: The NVRAM stuff should be put in a Mac-specific file, as it
		 * applies to all Mac video cards
		 */
		if (mode) {
			if (mac_find_mode(&var, info, mode, 8))
				has_var = 1;
		} else {
			if (default_vmode == VMODE_CHOOSE) {
				if (M64_HAS(G3_PB_1024x768))
					/* G3 PowerBook with 1024x768 LCD */
					default_vmode = VMODE_1024_768_60;
				else if (machine_is_compatible("iMac"))
					default_vmode = VMODE_1024_768_75;
				else if (machine_is_compatible
					 ("PowerBook2,1"))
					/* iBook with 800x600 LCD */
					default_vmode = VMODE_800_600_60;
				else
					default_vmode = VMODE_640_480_67;
				sense = read_aty_sense(par);
				PRINTKI("monitor sense=%x, mode %d\n",
					sense, mac_map_monitor_sense(sense));
			}
			if (default_vmode <= 0 || default_vmode > VMODE_MAX)
				default_vmode = VMODE_640_480_60;
			if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
				default_cmode = CMODE_8;
			if (!mac_vmode_to_var(default_vmode, default_cmode,
					      &var))
				has_var = 1;
		}
	}

#endif /* CONFIG_PPC */

#if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD)
	if (!atyfb_get_timings_from_lcd(par, &var))
		has_var = 1;
#endif

	if (mode && fb_find_mode(&var, info, mode, NULL, 0, &defmode, 8))
		has_var = 1;

	if (!has_var)
		var = default_var;

	if (noaccel)
		var.accel_flags &= ~FB_ACCELF_TEXT;
	else
		var.accel_flags |= FB_ACCELF_TEXT;

	if (comp_sync != -1) {
		if (!comp_sync)
			var.sync &= ~FB_SYNC_COMP_HIGH_ACT;
		else
			var.sync |= FB_SYNC_COMP_HIGH_ACT;
	}

	/* grow the virtual y resolution to use all of video memory
	 * (minus four pages) for panning */
	if (var.yres == var.yres_virtual) {
		u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
		var.yres_virtual = ((videoram * 8) / var.bits_per_pixel) / var.xres_virtual;
		if (var.yres_virtual < var.yres)
			var.yres_virtual = var.yres;
	}

	if (atyfb_check_var(&var, info)) {
		PRINTKE("can't set default video mode\n");
		goto aty_init_exit;
	}

#ifdef __sparc__
	atyfb_save_palette(par, 0);
#endif

#ifdef CONFIG_FB_ATY_CT
	if (!noaccel && M64_HAS(INTEGRATED))
		aty_init_cursor(info);
#endif /* CONFIG_FB_ATY_CT */
	info->var = var;

	/* NOTE(review): fb_alloc_cmap() return value is ignored — an
	 * allocation failure here goes unreported. */
	fb_alloc_cmap(&info->cmap, 256, 0);

	if (register_framebuffer(info) < 0)
		goto aty_init_exit;

	fb_list = info;

	PRINTKI("fb%d: %s frame buffer device on %s\n",
	       info->node, info->fix.id, name);
	return 0;

aty_init_exit:
	/* restore video mode */
	aty_set_crtc(par, &saved_crtc);
	par->pll_ops->set_pll(info, &saved_pll);

#ifdef CONFIG_MTRR
	if (par->mtrr_reg >= 0) {
		mtrr_del(par->mtrr_reg, 0, 0);
		par->mtrr_reg = -1;
	}
	if (par->mtrr_aper >= 0) {
		mtrr_del(par->mtrr_aper, 0, 0);
		par->mtrr_aper = -1;
	}
#endif
	return -1;
}
2632 2632
2633 #ifdef CONFIG_ATARI 2633 #ifdef CONFIG_ATARI
2634 static int __init store_video_par(char *video_str, unsigned char m64_num) 2634 static int __init store_video_par(char *video_str, unsigned char m64_num)
2635 { 2635 {
2636 char *p; 2636 char *p;
2637 unsigned long vmembase, size, guiregbase; 2637 unsigned long vmembase, size, guiregbase;
2638 2638
2639 PRINTKI("store_video_par() '%s' \n", video_str); 2639 PRINTKI("store_video_par() '%s' \n", video_str);
2640 2640
2641 if (!(p = strsep(&video_str, ";")) || !*p) 2641 if (!(p = strsep(&video_str, ";")) || !*p)
2642 goto mach64_invalid; 2642 goto mach64_invalid;
2643 vmembase = simple_strtoul(p, NULL, 0); 2643 vmembase = simple_strtoul(p, NULL, 0);
2644 if (!(p = strsep(&video_str, ";")) || !*p) 2644 if (!(p = strsep(&video_str, ";")) || !*p)
2645 goto mach64_invalid; 2645 goto mach64_invalid;
2646 size = simple_strtoul(p, NULL, 0); 2646 size = simple_strtoul(p, NULL, 0);
2647 if (!(p = strsep(&video_str, ";")) || !*p) 2647 if (!(p = strsep(&video_str, ";")) || !*p)
2648 goto mach64_invalid; 2648 goto mach64_invalid;
2649 guiregbase = simple_strtoul(p, NULL, 0); 2649 guiregbase = simple_strtoul(p, NULL, 0);
2650 2650
2651 phys_vmembase[m64_num] = vmembase; 2651 phys_vmembase[m64_num] = vmembase;
2652 phys_size[m64_num] = size; 2652 phys_size[m64_num] = size;
2653 phys_guiregbase[m64_num] = guiregbase; 2653 phys_guiregbase[m64_num] = guiregbase;
2654 PRINTKI("stored them all: $%08lX $%08lX $%08lX \n", vmembase, size, 2654 PRINTKI("stored them all: $%08lX $%08lX $%08lX \n", vmembase, size,
2655 guiregbase); 2655 guiregbase);
2656 return 0; 2656 return 0;
2657 2657
2658 mach64_invalid: 2658 mach64_invalid:
2659 phys_vmembase[m64_num] = 0; 2659 phys_vmembase[m64_num] = 0;
2660 return -1; 2660 return -1;
2661 } 2661 }
2662 #endif /* CONFIG_ATARI */ 2662 #endif /* CONFIG_ATARI */
2663 2663
2664 /* 2664 /*
2665 * Blank the display. 2665 * Blank the display.
2666 */ 2666 */
2667 2667
2668 static int atyfb_blank(int blank, struct fb_info *info) 2668 static int atyfb_blank(int blank, struct fb_info *info)
2669 { 2669 {
2670 struct atyfb_par *par = (struct atyfb_par *) info->par; 2670 struct atyfb_par *par = (struct atyfb_par *) info->par;
2671 u32 gen_cntl; 2671 u32 gen_cntl;
2672 2672
2673 if (par->lock_blank || par->asleep) 2673 if (par->lock_blank || par->asleep)
2674 return 0; 2674 return 0;
2675 2675
2676 #ifdef CONFIG_PMAC_BACKLIGHT 2676 #ifdef CONFIG_PMAC_BACKLIGHT
2677 if (machine_is(powermac) && blank > FB_BLANK_NORMAL) 2677 if (machine_is(powermac) && blank > FB_BLANK_NORMAL)
2678 set_backlight_enable(0); 2678 set_backlight_enable(0);
2679 #elif defined(CONFIG_FB_ATY_GENERIC_LCD) 2679 #elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2680 if (par->lcd_table && blank > FB_BLANK_NORMAL && 2680 if (par->lcd_table && blank > FB_BLANK_NORMAL &&
2681 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2681 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
2682 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); 2682 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
2683 pm &= ~PWR_BLON; 2683 pm &= ~PWR_BLON;
2684 aty_st_lcd(POWER_MANAGEMENT, pm, par); 2684 aty_st_lcd(POWER_MANAGEMENT, pm, par);
2685 } 2685 }
2686 #endif 2686 #endif
2687 2687
2688 gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par); 2688 gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
2689 switch (blank) { 2689 switch (blank) {
2690 case FB_BLANK_UNBLANK: 2690 case FB_BLANK_UNBLANK:
2691 gen_cntl &= ~0x400004c; 2691 gen_cntl &= ~0x400004c;
2692 break; 2692 break;
2693 case FB_BLANK_NORMAL: 2693 case FB_BLANK_NORMAL:
2694 gen_cntl |= 0x4000040; 2694 gen_cntl |= 0x4000040;
2695 break; 2695 break;
2696 case FB_BLANK_VSYNC_SUSPEND: 2696 case FB_BLANK_VSYNC_SUSPEND:
2697 gen_cntl |= 0x4000048; 2697 gen_cntl |= 0x4000048;
2698 break; 2698 break;
2699 case FB_BLANK_HSYNC_SUSPEND: 2699 case FB_BLANK_HSYNC_SUSPEND:
2700 gen_cntl |= 0x4000044; 2700 gen_cntl |= 0x4000044;
2701 break; 2701 break;
2702 case FB_BLANK_POWERDOWN: 2702 case FB_BLANK_POWERDOWN:
2703 gen_cntl |= 0x400004c; 2703 gen_cntl |= 0x400004c;
2704 break; 2704 break;
2705 } 2705 }
2706 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par); 2706 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
2707 2707
2708 #ifdef CONFIG_PMAC_BACKLIGHT 2708 #ifdef CONFIG_PMAC_BACKLIGHT
2709 if (machine_is(powermac) && blank <= FB_BLANK_NORMAL) 2709 if (machine_is(powermac) && blank <= FB_BLANK_NORMAL)
2710 set_backlight_enable(1); 2710 set_backlight_enable(1);
2711 #elif defined(CONFIG_FB_ATY_GENERIC_LCD) 2711 #elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2712 if (par->lcd_table && blank <= FB_BLANK_NORMAL && 2712 if (par->lcd_table && blank <= FB_BLANK_NORMAL &&
2713 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2713 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
2714 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); 2714 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
2715 pm |= PWR_BLON; 2715 pm |= PWR_BLON;
2716 aty_st_lcd(POWER_MANAGEMENT, pm, par); 2716 aty_st_lcd(POWER_MANAGEMENT, pm, par);
2717 } 2717 }
2718 #endif 2718 #endif
2719 2719
2720 return 0; 2720 return 0;
2721 } 2721 }
2722 2722
2723 static void aty_st_pal(u_int regno, u_int red, u_int green, u_int blue, 2723 static void aty_st_pal(u_int regno, u_int red, u_int green, u_int blue,
2724 const struct atyfb_par *par) 2724 const struct atyfb_par *par)
2725 { 2725 {
2726 #ifdef CONFIG_ATARI 2726 #ifdef CONFIG_ATARI
2727 out_8(&par->aty_cmap_regs->windex, regno); 2727 out_8(&par->aty_cmap_regs->windex, regno);
2728 out_8(&par->aty_cmap_regs->lut, red); 2728 out_8(&par->aty_cmap_regs->lut, red);
2729 out_8(&par->aty_cmap_regs->lut, green); 2729 out_8(&par->aty_cmap_regs->lut, green);
2730 out_8(&par->aty_cmap_regs->lut, blue); 2730 out_8(&par->aty_cmap_regs->lut, blue);
2731 #else 2731 #else
2732 writeb(regno, &par->aty_cmap_regs->windex); 2732 writeb(regno, &par->aty_cmap_regs->windex);
2733 writeb(red, &par->aty_cmap_regs->lut); 2733 writeb(red, &par->aty_cmap_regs->lut);
2734 writeb(green, &par->aty_cmap_regs->lut); 2734 writeb(green, &par->aty_cmap_regs->lut);
2735 writeb(blue, &par->aty_cmap_regs->lut); 2735 writeb(blue, &par->aty_cmap_regs->lut);
2736 #endif 2736 #endif
2737 } 2737 }
2738 2738

/*
 * Set a single color register. The values supplied are already
 * rounded down to the hardware's capabilities (according to the
 * entries in the var structure). Return != 0 for invalid regno.
 * !! 4 & 8 = PSEUDO, > 8 = DIRECTCOLOR
 */

2746 static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, 2746 static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
2747 u_int transp, struct fb_info *info) 2747 u_int transp, struct fb_info *info)
2748 { 2748 {
2749 struct atyfb_par *par = (struct atyfb_par *) info->par; 2749 struct atyfb_par *par = (struct atyfb_par *) info->par;
2750 int i, depth; 2750 int i, depth;
2751 u32 *pal = info->pseudo_palette; 2751 u32 *pal = info->pseudo_palette;
2752 2752
2753 depth = info->var.bits_per_pixel; 2753 depth = info->var.bits_per_pixel;
2754 if (depth == 16) 2754 if (depth == 16)
2755 depth = (info->var.green.length == 5) ? 15 : 16; 2755 depth = (info->var.green.length == 5) ? 15 : 16;
2756 2756
2757 if (par->asleep) 2757 if (par->asleep)
2758 return 0; 2758 return 0;
2759 2759
2760 if (regno > 255 || 2760 if (regno > 255 ||
2761 (depth == 16 && regno > 63) || 2761 (depth == 16 && regno > 63) ||
2762 (depth == 15 && regno > 31)) 2762 (depth == 15 && regno > 31))
2763 return 1; 2763 return 1;
2764 2764
2765 red >>= 8; 2765 red >>= 8;
2766 green >>= 8; 2766 green >>= 8;
2767 blue >>= 8; 2767 blue >>= 8;
2768 2768
2769 par->palette[regno].red = red; 2769 par->palette[regno].red = red;
2770 par->palette[regno].green = green; 2770 par->palette[regno].green = green;
2771 par->palette[regno].blue = blue; 2771 par->palette[regno].blue = blue;
2772 2772
2773 if (regno < 16) { 2773 if (regno < 16) {
2774 switch (depth) { 2774 switch (depth) {
2775 case 15: 2775 case 15:
2776 pal[regno] = (regno << 10) | (regno << 5) | regno; 2776 pal[regno] = (regno << 10) | (regno << 5) | regno;
2777 break; 2777 break;
2778 case 16: 2778 case 16:
2779 pal[regno] = (regno << 11) | (regno << 5) | regno; 2779 pal[regno] = (regno << 11) | (regno << 5) | regno;
2780 break; 2780 break;
2781 case 24: 2781 case 24:
2782 pal[regno] = (regno << 16) | (regno << 8) | regno; 2782 pal[regno] = (regno << 16) | (regno << 8) | regno;
2783 break; 2783 break;
2784 case 32: 2784 case 32:
2785 i = (regno << 8) | regno; 2785 i = (regno << 8) | regno;
2786 pal[regno] = (i << 16) | i; 2786 pal[regno] = (i << 16) | i;
2787 break; 2787 break;
2788 } 2788 }
2789 } 2789 }
2790 2790
2791 i = aty_ld_8(DAC_CNTL, par) & 0xfc; 2791 i = aty_ld_8(DAC_CNTL, par) & 0xfc;
2792 if (M64_HAS(EXTRA_BRIGHT)) 2792 if (M64_HAS(EXTRA_BRIGHT))
2793 i |= 0x2; /* DAC_CNTL | 0x2 turns off the extra brightness for gt */ 2793 i |= 0x2; /* DAC_CNTL | 0x2 turns off the extra brightness for gt */
2794 aty_st_8(DAC_CNTL, i, par); 2794 aty_st_8(DAC_CNTL, i, par);
2795 aty_st_8(DAC_MASK, 0xff, par); 2795 aty_st_8(DAC_MASK, 0xff, par);
2796 2796
2797 if (M64_HAS(INTEGRATED)) { 2797 if (M64_HAS(INTEGRATED)) {
2798 if (depth == 16) { 2798 if (depth == 16) {
2799 if (regno < 32) 2799 if (regno < 32)
2800 aty_st_pal(regno << 3, red, 2800 aty_st_pal(regno << 3, red,
2801 par->palette[regno<<1].green, 2801 par->palette[regno<<1].green,
2802 blue, par); 2802 blue, par);
2803 red = par->palette[regno>>1].red; 2803 red = par->palette[regno>>1].red;
2804 blue = par->palette[regno>>1].blue; 2804 blue = par->palette[regno>>1].blue;
2805 regno <<= 2; 2805 regno <<= 2;
2806 } else if (depth == 15) { 2806 } else if (depth == 15) {
2807 regno <<= 3; 2807 regno <<= 3;
2808 for(i = 0; i < 8; i++) { 2808 for(i = 0; i < 8; i++) {
2809 aty_st_pal(regno + i, red, green, blue, par); 2809 aty_st_pal(regno + i, red, green, blue, par);
2810 } 2810 }
2811 } 2811 }
2812 } 2812 }
2813 aty_st_pal(regno, red, green, blue, par); 2813 aty_st_pal(regno, red, green, blue, par);
2814 2814
2815 return 0; 2815 return 0;
2816 } 2816 }
2817 2817
2818 #ifdef CONFIG_PCI 2818 #ifdef CONFIG_PCI
2819 2819
2820 #ifdef __sparc__ 2820 #ifdef __sparc__
2821 2821
2822 extern void (*prom_palette) (int); 2822 extern void (*prom_palette) (int);
2823 2823
2824 static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, 2824 static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
2825 struct fb_info *info, unsigned long addr) 2825 struct fb_info *info, unsigned long addr)
2826 { 2826 {
2827 extern int con_is_present(void); 2827 extern int con_is_present(void);
2828 2828
2829 struct atyfb_par *par = info->par; 2829 struct atyfb_par *par = info->par;
2830 struct pcidev_cookie *pcp; 2830 struct pcidev_cookie *pcp;
2831 char prop[128]; 2831 char prop[128];
2832 int node, len, i, j, ret; 2832 int node, len, i, j, ret;
2833 u32 mem, chip_id; 2833 u32 mem, chip_id;
2834 2834
2835 /* Do not attach when we have a serial console. */ 2835 /* Do not attach when we have a serial console. */
2836 if (!con_is_present()) 2836 if (!con_is_present())
2837 return -ENXIO; 2837 return -ENXIO;
2838 2838
2839 /* 2839 /*
2840 * Map memory-mapped registers. 2840 * Map memory-mapped registers.
2841 */ 2841 */
2842 par->ati_regbase = (void *)addr + 0x7ffc00UL; 2842 par->ati_regbase = (void *)addr + 0x7ffc00UL;
2843 info->fix.mmio_start = addr + 0x7ffc00UL; 2843 info->fix.mmio_start = addr + 0x7ffc00UL;
2844 2844
2845 /* 2845 /*
2846 * Map in big-endian aperture. 2846 * Map in big-endian aperture.
2847 */ 2847 */
2848 info->screen_base = (char *) (addr + 0x800000UL); 2848 info->screen_base = (char *) (addr + 0x800000UL);
2849 info->fix.smem_start = addr + 0x800000UL; 2849 info->fix.smem_start = addr + 0x800000UL;
2850 2850
2851 /* 2851 /*
2852 * Figure mmap addresses from PCI config space. 2852 * Figure mmap addresses from PCI config space.
2853 * Split Framebuffer in big- and little-endian halfs. 2853 * Split Framebuffer in big- and little-endian halfs.
2854 */ 2854 */
2855 for (i = 0; i < 6 && pdev->resource[i].start; i++) 2855 for (i = 0; i < 6 && pdev->resource[i].start; i++)
2856 /* nothing */ ; 2856 /* nothing */ ;
2857 j = i + 4; 2857 j = i + 4;
2858 2858
2859 par->mmap_map = kmalloc(j * sizeof(*par->mmap_map), GFP_ATOMIC); 2859 par->mmap_map = kmalloc(j * sizeof(*par->mmap_map), GFP_ATOMIC);
2860 if (!par->mmap_map) { 2860 if (!par->mmap_map) {
2861 PRINTKE("atyfb_setup_sparc() can't alloc mmap_map\n"); 2861 PRINTKE("atyfb_setup_sparc() can't alloc mmap_map\n");
2862 return -ENOMEM; 2862 return -ENOMEM;
2863 } 2863 }
2864 memset(par->mmap_map, 0, j * sizeof(*par->mmap_map)); 2864 memset(par->mmap_map, 0, j * sizeof(*par->mmap_map));
2865 2865
2866 for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) { 2866 for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) {
2867 struct resource *rp = &pdev->resource[i]; 2867 struct resource *rp = &pdev->resource[i];
2868 int io, breg = PCI_BASE_ADDRESS_0 + (i << 2); 2868 int io, breg = PCI_BASE_ADDRESS_0 + (i << 2);
2869 unsigned long base; 2869 unsigned long base;
2870 u32 size, pbase; 2870 u32 size, pbase;
2871 2871
2872 base = rp->start; 2872 base = rp->start;
2873 2873
2874 io = (rp->flags & IORESOURCE_IO); 2874 io = (rp->flags & IORESOURCE_IO);
2875 2875
2876 size = rp->end - base + 1; 2876 size = rp->end - base + 1;
2877 2877
2878 pci_read_config_dword(pdev, breg, &pbase); 2878 pci_read_config_dword(pdev, breg, &pbase);
2879 2879
2880 if (io) 2880 if (io)
2881 size &= ~1; 2881 size &= ~1;
2882 2882
2883 /* 2883 /*
2884 * Map the framebuffer a second time, this time without 2884 * Map the framebuffer a second time, this time without
2885 * the braindead _PAGE_IE setting. This is used by the 2885 * the braindead _PAGE_IE setting. This is used by the
2886 * fixed Xserver, but we need to maintain the old mapping 2886 * fixed Xserver, but we need to maintain the old mapping
2887 * to stay compatible with older ones... 2887 * to stay compatible with older ones...
2888 */ 2888 */
2889 if (base == addr) { 2889 if (base == addr) {
2890 par->mmap_map[j].voff = (pbase + 0x10000000) & PAGE_MASK; 2890 par->mmap_map[j].voff = (pbase + 0x10000000) & PAGE_MASK;
2891 par->mmap_map[j].poff = base & PAGE_MASK; 2891 par->mmap_map[j].poff = base & PAGE_MASK;
2892 par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK; 2892 par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK;
2893 par->mmap_map[j].prot_mask = _PAGE_CACHE; 2893 par->mmap_map[j].prot_mask = _PAGE_CACHE;
2894 par->mmap_map[j].prot_flag = _PAGE_E; 2894 par->mmap_map[j].prot_flag = _PAGE_E;
2895 j++; 2895 j++;
2896 } 2896 }
2897 2897
2898 /* 2898 /*
2899 * Here comes the old framebuffer mapping with _PAGE_IE 2899 * Here comes the old framebuffer mapping with _PAGE_IE
2900 * set for the big endian half of the framebuffer... 2900 * set for the big endian half of the framebuffer...
2901 */ 2901 */
2902 if (base == addr) { 2902 if (base == addr) {
2903 par->mmap_map[j].voff = (pbase + 0x800000) & PAGE_MASK; 2903 par->mmap_map[j].voff = (pbase + 0x800000) & PAGE_MASK;
2904 par->mmap_map[j].poff = (base + 0x800000) & PAGE_MASK; 2904 par->mmap_map[j].poff = (base + 0x800000) & PAGE_MASK;
2905 par->mmap_map[j].size = 0x800000; 2905 par->mmap_map[j].size = 0x800000;
2906 par->mmap_map[j].prot_mask = _PAGE_CACHE; 2906 par->mmap_map[j].prot_mask = _PAGE_CACHE;
2907 par->mmap_map[j].prot_flag = _PAGE_E | _PAGE_IE; 2907 par->mmap_map[j].prot_flag = _PAGE_E | _PAGE_IE;
2908 size -= 0x800000; 2908 size -= 0x800000;
2909 j++; 2909 j++;
2910 } 2910 }
2911 2911
2912 par->mmap_map[j].voff = pbase & PAGE_MASK; 2912 par->mmap_map[j].voff = pbase & PAGE_MASK;
2913 par->mmap_map[j].poff = base & PAGE_MASK; 2913 par->mmap_map[j].poff = base & PAGE_MASK;
2914 par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK; 2914 par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK;
2915 par->mmap_map[j].prot_mask = _PAGE_CACHE; 2915 par->mmap_map[j].prot_mask = _PAGE_CACHE;
2916 par->mmap_map[j].prot_flag = _PAGE_E; 2916 par->mmap_map[j].prot_flag = _PAGE_E;
2917 j++; 2917 j++;
2918 } 2918 }
2919 2919
2920 if((ret = correct_chipset(par))) 2920 if((ret = correct_chipset(par)))
2921 return ret; 2921 return ret;
2922 2922
2923 if (IS_XL(pdev->device)) { 2923 if (IS_XL(pdev->device)) {
2924 /* 2924 /*
2925 * Fix PROMs idea of MEM_CNTL settings... 2925 * Fix PROMs idea of MEM_CNTL settings...
2926 */ 2926 */
2927 mem = aty_ld_le32(MEM_CNTL, par); 2927 mem = aty_ld_le32(MEM_CNTL, par);
2928 chip_id = aty_ld_le32(CONFIG_CHIP_ID, par); 2928 chip_id = aty_ld_le32(CONFIG_CHIP_ID, par);
2929 if (((chip_id & CFG_CHIP_TYPE) == VT_CHIP_ID) && !((chip_id >> 24) & 1)) { 2929 if (((chip_id & CFG_CHIP_TYPE) == VT_CHIP_ID) && !((chip_id >> 24) & 1)) {
2930 switch (mem & 0x0f) { 2930 switch (mem & 0x0f) {
2931 case 3: 2931 case 3:
2932 mem = (mem & ~(0x0f)) | 2; 2932 mem = (mem & ~(0x0f)) | 2;
2933 break; 2933 break;
2934 case 7: 2934 case 7:
2935 mem = (mem & ~(0x0f)) | 3; 2935 mem = (mem & ~(0x0f)) | 3;
2936 break; 2936 break;
2937 case 9: 2937 case 9:
2938 mem = (mem & ~(0x0f)) | 4; 2938 mem = (mem & ~(0x0f)) | 4;
2939 break; 2939 break;
2940 case 11: 2940 case 11:
2941 mem = (mem & ~(0x0f)) | 5; 2941 mem = (mem & ~(0x0f)) | 5;
2942 break; 2942 break;
2943 default: 2943 default:
2944 break; 2944 break;
2945 } 2945 }
2946 if ((aty_ld_le32(CONFIG_STAT0, par) & 7) >= SDRAM) 2946 if ((aty_ld_le32(CONFIG_STAT0, par) & 7) >= SDRAM)
2947 mem &= ~(0x00700000); 2947 mem &= ~(0x00700000);
2948 } 2948 }
2949 mem &= ~(0xcf80e000); /* Turn off all undocumented bits. */ 2949 mem &= ~(0xcf80e000); /* Turn off all undocumented bits. */
2950 aty_st_le32(MEM_CNTL, mem, par); 2950 aty_st_le32(MEM_CNTL, mem, par);
2951 } 2951 }
2952 2952
2953 /* 2953 /*
2954 * If this is the console device, we will set default video 2954 * If this is the console device, we will set default video
2955 * settings to what the PROM left us with. 2955 * settings to what the PROM left us with.
2956 */ 2956 */
2957 node = prom_getchild(prom_root_node); 2957 node = prom_getchild(prom_root_node);
2958 node = prom_searchsiblings(node, "aliases"); 2958 node = prom_searchsiblings(node, "aliases");
2959 if (node) { 2959 if (node) {
2960 len = prom_getproperty(node, "screen", prop, sizeof(prop)); 2960 len = prom_getproperty(node, "screen", prop, sizeof(prop));
2961 if (len > 0) { 2961 if (len > 0) {
2962 prop[len] = '\0'; 2962 prop[len] = '\0';
2963 node = prom_finddevice(prop); 2963 node = prom_finddevice(prop);
2964 } else 2964 } else
2965 node = 0; 2965 node = 0;
2966 } 2966 }
2967 2967
2968 pcp = pdev->sysdata; 2968 pcp = pdev->sysdata;
2969 if (node == pcp->prom_node) { 2969 if (node == pcp->prom_node->node) {
2970 struct fb_var_screeninfo *var = &default_var; 2970 struct fb_var_screeninfo *var = &default_var;
2971 unsigned int N, P, Q, M, T, R; 2971 unsigned int N, P, Q, M, T, R;
2972 u32 v_total, h_total; 2972 u32 v_total, h_total;
2973 struct crtc crtc; 2973 struct crtc crtc;
2974 u8 pll_regs[16]; 2974 u8 pll_regs[16];
2975 u8 clock_cntl; 2975 u8 clock_cntl;
2976 2976
2977 crtc.vxres = prom_getintdefault(node, "width", 1024); 2977 crtc.vxres = prom_getintdefault(node, "width", 1024);
2978 crtc.vyres = prom_getintdefault(node, "height", 768); 2978 crtc.vyres = prom_getintdefault(node, "height", 768);
2979 var->bits_per_pixel = prom_getintdefault(node, "depth", 8); 2979 var->bits_per_pixel = prom_getintdefault(node, "depth", 8);
2980 var->xoffset = var->yoffset = 0; 2980 var->xoffset = var->yoffset = 0;
2981 crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par); 2981 crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
2982 crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par); 2982 crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
2983 crtc.v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par); 2983 crtc.v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
2984 crtc.v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par); 2984 crtc.v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
2985 crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par); 2985 crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
2986 aty_crtc_to_var(&crtc, var); 2986 aty_crtc_to_var(&crtc, var);
2987 2987
2988 h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin; 2988 h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
2989 v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin; 2989 v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
2990 2990
2991 /* 2991 /*
2992 * Read the PLL to figure actual Refresh Rate. 2992 * Read the PLL to figure actual Refresh Rate.
2993 */ 2993 */
2994 clock_cntl = aty_ld_8(CLOCK_CNTL, par); 2994 clock_cntl = aty_ld_8(CLOCK_CNTL, par);
2995 /* DPRINTK("CLOCK_CNTL %02x\n", clock_cntl); */ 2995 /* DPRINTK("CLOCK_CNTL %02x\n", clock_cntl); */
2996 for (i = 0; i < 16; i++) 2996 for (i = 0; i < 16; i++)
2997 pll_regs[i] = aty_ld_pll_ct(i, par); 2997 pll_regs[i] = aty_ld_pll_ct(i, par);
2998 2998
2999 /* 2999 /*
3000 * PLL Reference Divider M: 3000 * PLL Reference Divider M:
3001 */ 3001 */
3002 M = pll_regs[2]; 3002 M = pll_regs[2];
3003 3003
3004 /* 3004 /*
3005 * PLL Feedback Divider N (Dependant on CLOCK_CNTL): 3005 * PLL Feedback Divider N (Dependant on CLOCK_CNTL):
3006 */ 3006 */
3007 N = pll_regs[7 + (clock_cntl & 3)]; 3007 N = pll_regs[7 + (clock_cntl & 3)];
3008 3008
3009 /* 3009 /*
3010 * PLL Post Divider P (Dependant on CLOCK_CNTL): 3010 * PLL Post Divider P (Dependant on CLOCK_CNTL):
3011 */ 3011 */
3012 P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1)); 3012 P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
3013 3013
3014 /* 3014 /*
3015 * PLL Divider Q: 3015 * PLL Divider Q:
3016 */ 3016 */
3017 Q = N / P; 3017 Q = N / P;
3018 3018
3019 /* 3019 /*
3020 * Target Frequency: 3020 * Target Frequency:
3021 * 3021 *
3022 * T * M 3022 * T * M
3023 * Q = ------- 3023 * Q = -------
3024 * 2 * R 3024 * 2 * R
3025 * 3025 *
3026 * where R is XTALIN (= 14318 or 29498 kHz). 3026 * where R is XTALIN (= 14318 or 29498 kHz).
3027 */ 3027 */
3028 if (IS_XL(pdev->device)) 3028 if (IS_XL(pdev->device))
3029 R = 29498; 3029 R = 29498;
3030 else 3030 else
3031 R = 14318; 3031 R = 14318;
3032 3032
3033 T = 2 * Q * R / M; 3033 T = 2 * Q * R / M;
3034 3034
3035 default_var.pixclock = 1000000000 / T; 3035 default_var.pixclock = 1000000000 / T;
3036 } 3036 }
3037 3037
3038 return 0; 3038 return 0;
3039 } 3039 }
3040 3040
3041 #else /* __sparc__ */ 3041 #else /* __sparc__ */
3042 3042
3043 #ifdef __i386__ 3043 #ifdef __i386__
3044 #ifdef CONFIG_FB_ATY_GENERIC_LCD 3044 #ifdef CONFIG_FB_ATY_GENERIC_LCD
3045 static void aty_init_lcd(struct atyfb_par *par, u32 bios_base) 3045 static void aty_init_lcd(struct atyfb_par *par, u32 bios_base)
3046 { 3046 {
3047 u32 driv_inf_tab, sig; 3047 u32 driv_inf_tab, sig;
3048 u16 lcd_ofs; 3048 u16 lcd_ofs;
3049 3049
3050 /* To support an LCD panel, we should know it's dimensions and 3050 /* To support an LCD panel, we should know it's dimensions and
3051 * it's desired pixel clock. 3051 * it's desired pixel clock.
3052 * There are two ways to do it: 3052 * There are two ways to do it:
3053 * - Check the startup video mode and calculate the panel 3053 * - Check the startup video mode and calculate the panel
3054 * size from it. This is unreliable. 3054 * size from it. This is unreliable.
3055 * - Read it from the driver information table in the video BIOS. 3055 * - Read it from the driver information table in the video BIOS.
3056 */ 3056 */
3057 /* Address of driver information table is at offset 0x78. */ 3057 /* Address of driver information table is at offset 0x78. */
3058 driv_inf_tab = bios_base + *((u16 *)(bios_base+0x78)); 3058 driv_inf_tab = bios_base + *((u16 *)(bios_base+0x78));
3059 3059
3060 /* Check for the driver information table signature. */ 3060 /* Check for the driver information table signature. */
3061 sig = (*(u32 *)driv_inf_tab); 3061 sig = (*(u32 *)driv_inf_tab);
3062 if ((sig == 0x54504c24) || /* Rage LT pro */ 3062 if ((sig == 0x54504c24) || /* Rage LT pro */
3063 (sig == 0x544d5224) || /* Rage mobility */ 3063 (sig == 0x544d5224) || /* Rage mobility */
3064 (sig == 0x54435824) || /* Rage XC */ 3064 (sig == 0x54435824) || /* Rage XC */
3065 (sig == 0x544c5824)) { /* Rage XL */ 3065 (sig == 0x544c5824)) { /* Rage XL */
3066 PRINTKI("BIOS contains driver information table.\n"); 3066 PRINTKI("BIOS contains driver information table.\n");
3067 lcd_ofs = (*(u16 *)(driv_inf_tab + 10)); 3067 lcd_ofs = (*(u16 *)(driv_inf_tab + 10));
3068 par->lcd_table = 0; 3068 par->lcd_table = 0;
3069 if (lcd_ofs != 0) { 3069 if (lcd_ofs != 0) {
3070 par->lcd_table = bios_base + lcd_ofs; 3070 par->lcd_table = bios_base + lcd_ofs;
3071 } 3071 }
3072 } 3072 }
3073 3073
3074 if (par->lcd_table != 0) { 3074 if (par->lcd_table != 0) {
3075 char model[24]; 3075 char model[24];
3076 char strbuf[16]; 3076 char strbuf[16];
3077 char refresh_rates_buf[100]; 3077 char refresh_rates_buf[100];
3078 int id, tech, f, i, m, default_refresh_rate; 3078 int id, tech, f, i, m, default_refresh_rate;
3079 char *txtcolour; 3079 char *txtcolour;
3080 char *txtmonitor; 3080 char *txtmonitor;
3081 char *txtdual; 3081 char *txtdual;
3082 char *txtformat; 3082 char *txtformat;
3083 u16 width, height, panel_type, refresh_rates; 3083 u16 width, height, panel_type, refresh_rates;
3084 u16 *lcdmodeptr; 3084 u16 *lcdmodeptr;
3085 u32 format; 3085 u32 format;
3086 u8 lcd_refresh_rates[16] = {50,56,60,67,70,72,75,76,85,90,100,120,140,150,160,200}; 3086 u8 lcd_refresh_rates[16] = {50,56,60,67,70,72,75,76,85,90,100,120,140,150,160,200};
3087 /* The most important information is the panel size at 3087 /* The most important information is the panel size at
3088 * offset 25 and 27, but there's some other nice information 3088 * offset 25 and 27, but there's some other nice information
3089 * which we print to the screen. 3089 * which we print to the screen.
3090 */ 3090 */
3091 id = *(u8 *)par->lcd_table; 3091 id = *(u8 *)par->lcd_table;
3092 strncpy(model,(char *)par->lcd_table+1,24); 3092 strncpy(model,(char *)par->lcd_table+1,24);
3093 model[23]=0; 3093 model[23]=0;
3094 3094
3095 width = par->lcd_width = *(u16 *)(par->lcd_table+25); 3095 width = par->lcd_width = *(u16 *)(par->lcd_table+25);
3096 height = par->lcd_height = *(u16 *)(par->lcd_table+27); 3096 height = par->lcd_height = *(u16 *)(par->lcd_table+27);
3097 panel_type = *(u16 *)(par->lcd_table+29); 3097 panel_type = *(u16 *)(par->lcd_table+29);
3098 if (panel_type & 1) 3098 if (panel_type & 1)
3099 txtcolour = "colour"; 3099 txtcolour = "colour";
3100 else 3100 else
3101 txtcolour = "monochrome"; 3101 txtcolour = "monochrome";
3102 if (panel_type & 2) 3102 if (panel_type & 2)
3103 txtdual = "dual (split) "; 3103 txtdual = "dual (split) ";
3104 else 3104 else
3105 txtdual = ""; 3105 txtdual = "";
3106 tech = (panel_type>>2) & 63; 3106 tech = (panel_type>>2) & 63;
3107 switch (tech) { 3107 switch (tech) {
3108 case 0: 3108 case 0:
3109 txtmonitor = "passive matrix"; 3109 txtmonitor = "passive matrix";
3110 break; 3110 break;
3111 case 1: 3111 case 1:
3112 txtmonitor = "active matrix"; 3112 txtmonitor = "active matrix";
3113 break; 3113 break;
3114 case 2: 3114 case 2:
3115 txtmonitor = "active addressed STN"; 3115 txtmonitor = "active addressed STN";
3116 break; 3116 break;
3117 case 3: 3117 case 3:
3118 txtmonitor = "EL"; 3118 txtmonitor = "EL";
3119 break; 3119 break;
3120 case 4: 3120 case 4:
3121 txtmonitor = "plasma"; 3121 txtmonitor = "plasma";
3122 break; 3122 break;
3123 default: 3123 default:
3124 txtmonitor = "unknown"; 3124 txtmonitor = "unknown";
3125 } 3125 }
3126 format = *(u32 *)(par->lcd_table+57); 3126 format = *(u32 *)(par->lcd_table+57);
3127 if (tech == 0 || tech == 2) { 3127 if (tech == 0 || tech == 2) {
3128 switch (format & 7) { 3128 switch (format & 7) {
3129 case 0: 3129 case 0:
3130 txtformat = "12 bit interface"; 3130 txtformat = "12 bit interface";
3131 break; 3131 break;
3132 case 1: 3132 case 1:
3133 txtformat = "16 bit interface"; 3133 txtformat = "16 bit interface";
3134 break; 3134 break;
3135 case 2: 3135 case 2:
3136 txtformat = "24 bit interface"; 3136 txtformat = "24 bit interface";
3137 break; 3137 break;
3138 default: 3138 default:
3139 txtformat = "unkown format"; 3139 txtformat = "unkown format";
3140 } 3140 }
3141 } else { 3141 } else {
3142 switch (format & 7) { 3142 switch (format & 7) {
3143 case 0: 3143 case 0:
3144 txtformat = "8 colours"; 3144 txtformat = "8 colours";
3145 break; 3145 break;
3146 case 1: 3146 case 1:
3147 txtformat = "512 colours"; 3147 txtformat = "512 colours";
3148 break; 3148 break;
3149 case 2: 3149 case 2:
3150 txtformat = "4096 colours"; 3150 txtformat = "4096 colours";
3151 break; 3151 break;
3152 case 4: 3152 case 4:
3153 txtformat = "262144 colours (LT mode)"; 3153 txtformat = "262144 colours (LT mode)";
3154 break; 3154 break;
3155 case 5: 3155 case 5:
3156 txtformat = "16777216 colours"; 3156 txtformat = "16777216 colours";
3157 break; 3157 break;
3158 case 6: 3158 case 6:
3159 txtformat = "262144 colours (FDPI-2 mode)"; 3159 txtformat = "262144 colours (FDPI-2 mode)";
3160 break; 3160 break;
3161 default: 3161 default:
3162 txtformat = "unkown format"; 3162 txtformat = "unkown format";
3163 } 3163 }
3164 } 3164 }
3165 PRINTKI("%s%s %s monitor detected: %s\n", 3165 PRINTKI("%s%s %s monitor detected: %s\n",
3166 txtdual ,txtcolour, txtmonitor, model); 3166 txtdual ,txtcolour, txtmonitor, model);
3167 PRINTKI(" id=%d, %dx%d pixels, %s\n", 3167 PRINTKI(" id=%d, %dx%d pixels, %s\n",
3168 id, width, height, txtformat); 3168 id, width, height, txtformat);
3169 refresh_rates_buf[0] = 0; 3169 refresh_rates_buf[0] = 0;
3170 refresh_rates = *(u16 *)(par->lcd_table+62); 3170 refresh_rates = *(u16 *)(par->lcd_table+62);
3171 m = 1; 3171 m = 1;
3172 f = 0; 3172 f = 0;
3173 for (i=0;i<16;i++) { 3173 for (i=0;i<16;i++) {
3174 if (refresh_rates & m) { 3174 if (refresh_rates & m) {
3175 if (f == 0) { 3175 if (f == 0) {
3176 sprintf(strbuf, "%d", lcd_refresh_rates[i]); 3176 sprintf(strbuf, "%d", lcd_refresh_rates[i]);
3177 f++; 3177 f++;
3178 } else { 3178 } else {
3179 sprintf(strbuf, ",%d", lcd_refresh_rates[i]); 3179 sprintf(strbuf, ",%d", lcd_refresh_rates[i]);
3180 } 3180 }
3181 strcat(refresh_rates_buf,strbuf); 3181 strcat(refresh_rates_buf,strbuf);
3182 } 3182 }
3183 m = m << 1; 3183 m = m << 1;
3184 } 3184 }
3185 default_refresh_rate = (*(u8 *)(par->lcd_table+61) & 0xf0) >> 4; 3185 default_refresh_rate = (*(u8 *)(par->lcd_table+61) & 0xf0) >> 4;
3186 PRINTKI(" supports refresh rates [%s], default %d Hz\n", 3186 PRINTKI(" supports refresh rates [%s], default %d Hz\n",
3187 refresh_rates_buf, lcd_refresh_rates[default_refresh_rate]); 3187 refresh_rates_buf, lcd_refresh_rates[default_refresh_rate]);
3188 par->lcd_refreshrate = lcd_refresh_rates[default_refresh_rate]; 3188 par->lcd_refreshrate = lcd_refresh_rates[default_refresh_rate];
3189 /* We now need to determine the crtc parameters for the 3189 /* We now need to determine the crtc parameters for the
3190 * LCD monitor. This is tricky, because they are not stored 3190 * LCD monitor. This is tricky, because they are not stored
3191 * individually in the BIOS. Instead, the BIOS contains a 3191 * individually in the BIOS. Instead, the BIOS contains a
3192 * table of display modes that work for this monitor. 3192 * table of display modes that work for this monitor.
3193 * 3193 *
3194 * The idea is that we search for a mode of the same dimensions 3194 * The idea is that we search for a mode of the same dimensions
3195 * as the dimensions of the LCD monitor. Say our LCD monitor 3195 * as the dimensions of the LCD monitor. Say our LCD monitor
3196 * is 800x600 pixels, we search for a 800x600 monitor. 3196 * is 800x600 pixels, we search for a 800x600 monitor.
3197 * The CRTC parameters we find here are the ones that we need 3197 * The CRTC parameters we find here are the ones that we need
3198 * to use to simulate other resolutions on the LCD screen. 3198 * to use to simulate other resolutions on the LCD screen.
3199 */ 3199 */
3200 lcdmodeptr = (u16 *)(par->lcd_table + 64); 3200 lcdmodeptr = (u16 *)(par->lcd_table + 64);
3201 while (*lcdmodeptr != 0) { 3201 while (*lcdmodeptr != 0) {
3202 u32 modeptr; 3202 u32 modeptr;
3203 u16 mwidth, mheight, lcd_hsync_start, lcd_vsync_start; 3203 u16 mwidth, mheight, lcd_hsync_start, lcd_vsync_start;
3204 modeptr = bios_base + *lcdmodeptr; 3204 modeptr = bios_base + *lcdmodeptr;
3205 3205
3206 mwidth = *((u16 *)(modeptr+0)); 3206 mwidth = *((u16 *)(modeptr+0));
3207 mheight = *((u16 *)(modeptr+2)); 3207 mheight = *((u16 *)(modeptr+2));
3208 3208
3209 if (mwidth == width && mheight == height) { 3209 if (mwidth == width && mheight == height) {
3210 par->lcd_pixclock = 100000000 / *((u16 *)(modeptr+9)); 3210 par->lcd_pixclock = 100000000 / *((u16 *)(modeptr+9));
3211 par->lcd_htotal = *((u16 *)(modeptr+17)) & 511; 3211 par->lcd_htotal = *((u16 *)(modeptr+17)) & 511;
3212 par->lcd_hdisp = *((u16 *)(modeptr+19)) & 511; 3212 par->lcd_hdisp = *((u16 *)(modeptr+19)) & 511;
3213 lcd_hsync_start = *((u16 *)(modeptr+21)) & 511; 3213 lcd_hsync_start = *((u16 *)(modeptr+21)) & 511;
3214 par->lcd_hsync_dly = (*((u16 *)(modeptr+21)) >> 9) & 7; 3214 par->lcd_hsync_dly = (*((u16 *)(modeptr+21)) >> 9) & 7;
3215 par->lcd_hsync_len = *((u8 *)(modeptr+23)) & 63; 3215 par->lcd_hsync_len = *((u8 *)(modeptr+23)) & 63;
3216 3216
3217 par->lcd_vtotal = *((u16 *)(modeptr+24)) & 2047; 3217 par->lcd_vtotal = *((u16 *)(modeptr+24)) & 2047;
3218 par->lcd_vdisp = *((u16 *)(modeptr+26)) & 2047; 3218 par->lcd_vdisp = *((u16 *)(modeptr+26)) & 2047;
3219 lcd_vsync_start = *((u16 *)(modeptr+28)) & 2047; 3219 lcd_vsync_start = *((u16 *)(modeptr+28)) & 2047;
3220 par->lcd_vsync_len = (*((u16 *)(modeptr+28)) >> 11) & 31; 3220 par->lcd_vsync_len = (*((u16 *)(modeptr+28)) >> 11) & 31;
3221 3221
3222 par->lcd_htotal = (par->lcd_htotal + 1) * 8; 3222 par->lcd_htotal = (par->lcd_htotal + 1) * 8;
3223 par->lcd_hdisp = (par->lcd_hdisp + 1) * 8; 3223 par->lcd_hdisp = (par->lcd_hdisp + 1) * 8;
3224 lcd_hsync_start = (lcd_hsync_start + 1) * 8; 3224 lcd_hsync_start = (lcd_hsync_start + 1) * 8;
3225 par->lcd_hsync_len = par->lcd_hsync_len * 8; 3225 par->lcd_hsync_len = par->lcd_hsync_len * 8;
3226 3226
3227 par->lcd_vtotal++; 3227 par->lcd_vtotal++;
3228 par->lcd_vdisp++; 3228 par->lcd_vdisp++;
3229 lcd_vsync_start++; 3229 lcd_vsync_start++;
3230 3230
3231 par->lcd_right_margin = lcd_hsync_start - par->lcd_hdisp; 3231 par->lcd_right_margin = lcd_hsync_start - par->lcd_hdisp;
3232 par->lcd_lower_margin = lcd_vsync_start - par->lcd_vdisp; 3232 par->lcd_lower_margin = lcd_vsync_start - par->lcd_vdisp;
3233 par->lcd_hblank_len = par->lcd_htotal - par->lcd_hdisp; 3233 par->lcd_hblank_len = par->lcd_htotal - par->lcd_hdisp;
3234 par->lcd_vblank_len = par->lcd_vtotal - par->lcd_vdisp; 3234 par->lcd_vblank_len = par->lcd_vtotal - par->lcd_vdisp;
3235 break; 3235 break;
3236 } 3236 }
3237 3237
3238 lcdmodeptr++; 3238 lcdmodeptr++;
3239 } 3239 }
3240 if (*lcdmodeptr == 0) { 3240 if (*lcdmodeptr == 0) {
3241 PRINTKE("LCD monitor CRTC parameters not found!!!\n"); 3241 PRINTKE("LCD monitor CRTC parameters not found!!!\n");
3242 /* To do: Switch to CRT if possible. */ 3242 /* To do: Switch to CRT if possible. */
3243 } else { 3243 } else {
3244 PRINTKI(" LCD CRTC parameters: %d.%d %d %d %d %d %d %d %d %d\n", 3244 PRINTKI(" LCD CRTC parameters: %d.%d %d %d %d %d %d %d %d %d\n",
3245 1000000 / par->lcd_pixclock, 1000000 % par->lcd_pixclock, 3245 1000000 / par->lcd_pixclock, 1000000 % par->lcd_pixclock,
3246 par->lcd_hdisp, 3246 par->lcd_hdisp,
3247 par->lcd_hdisp + par->lcd_right_margin, 3247 par->lcd_hdisp + par->lcd_right_margin,
3248 par->lcd_hdisp + par->lcd_right_margin 3248 par->lcd_hdisp + par->lcd_right_margin
3249 + par->lcd_hsync_dly + par->lcd_hsync_len, 3249 + par->lcd_hsync_dly + par->lcd_hsync_len,
3250 par->lcd_htotal, 3250 par->lcd_htotal,
3251 par->lcd_vdisp, 3251 par->lcd_vdisp,
3252 par->lcd_vdisp + par->lcd_lower_margin, 3252 par->lcd_vdisp + par->lcd_lower_margin,
3253 par->lcd_vdisp + par->lcd_lower_margin + par->lcd_vsync_len, 3253 par->lcd_vdisp + par->lcd_lower_margin + par->lcd_vsync_len,
3254 par->lcd_vtotal); 3254 par->lcd_vtotal);
3255 PRINTKI(" : %d %d %d %d %d %d %d %d %d\n", 3255 PRINTKI(" : %d %d %d %d %d %d %d %d %d\n",
3256 par->lcd_pixclock, 3256 par->lcd_pixclock,
3257 par->lcd_hblank_len - (par->lcd_right_margin + 3257 par->lcd_hblank_len - (par->lcd_right_margin +
3258 par->lcd_hsync_dly + par->lcd_hsync_len), 3258 par->lcd_hsync_dly + par->lcd_hsync_len),
3259 par->lcd_hdisp, 3259 par->lcd_hdisp,
3260 par->lcd_right_margin, 3260 par->lcd_right_margin,
3261 par->lcd_hsync_len, 3261 par->lcd_hsync_len,
3262 par->lcd_vblank_len - (par->lcd_lower_margin + par->lcd_vsync_len), 3262 par->lcd_vblank_len - (par->lcd_lower_margin + par->lcd_vsync_len),
3263 par->lcd_vdisp, 3263 par->lcd_vdisp,
3264 par->lcd_lower_margin, 3264 par->lcd_lower_margin,
3265 par->lcd_vsync_len); 3265 par->lcd_vsync_len);
3266 } 3266 }
3267 } 3267 }
3268 } 3268 }
3269 #endif /* CONFIG_FB_ATY_GENERIC_LCD */ 3269 #endif /* CONFIG_FB_ATY_GENERIC_LCD */
3270 3270
3271 static int __devinit init_from_bios(struct atyfb_par *par) 3271 static int __devinit init_from_bios(struct atyfb_par *par)
3272 { 3272 {
3273 u32 bios_base, rom_addr; 3273 u32 bios_base, rom_addr;
3274 int ret; 3274 int ret;
3275 3275
3276 rom_addr = 0xc0000 + ((aty_ld_le32(SCRATCH_REG1, par) & 0x7f) << 11); 3276 rom_addr = 0xc0000 + ((aty_ld_le32(SCRATCH_REG1, par) & 0x7f) << 11);
3277 bios_base = (unsigned long)ioremap(rom_addr, 0x10000); 3277 bios_base = (unsigned long)ioremap(rom_addr, 0x10000);
3278 3278
3279 /* The BIOS starts with 0xaa55. */ 3279 /* The BIOS starts with 0xaa55. */
3280 if (*((u16 *)bios_base) == 0xaa55) { 3280 if (*((u16 *)bios_base) == 0xaa55) {
3281 3281
3282 u8 *bios_ptr; 3282 u8 *bios_ptr;
3283 u16 rom_table_offset, freq_table_offset; 3283 u16 rom_table_offset, freq_table_offset;
3284 PLL_BLOCK_MACH64 pll_block; 3284 PLL_BLOCK_MACH64 pll_block;
3285 3285
3286 PRINTKI("Mach64 BIOS is located at %x, mapped at %x.\n", rom_addr, bios_base); 3286 PRINTKI("Mach64 BIOS is located at %x, mapped at %x.\n", rom_addr, bios_base);
3287 3287
3288 /* check for frequncy table */ 3288 /* check for frequncy table */
3289 bios_ptr = (u8*)bios_base; 3289 bios_ptr = (u8*)bios_base;
3290 rom_table_offset = (u16)(bios_ptr[0x48] | (bios_ptr[0x49] << 8)); 3290 rom_table_offset = (u16)(bios_ptr[0x48] | (bios_ptr[0x49] << 8));
3291 freq_table_offset = bios_ptr[rom_table_offset + 16] | (bios_ptr[rom_table_offset + 17] << 8); 3291 freq_table_offset = bios_ptr[rom_table_offset + 16] | (bios_ptr[rom_table_offset + 17] << 8);
3292 memcpy(&pll_block, bios_ptr + freq_table_offset, sizeof(PLL_BLOCK_MACH64)); 3292 memcpy(&pll_block, bios_ptr + freq_table_offset, sizeof(PLL_BLOCK_MACH64));
3293 3293
3294 PRINTKI("BIOS frequency table:\n"); 3294 PRINTKI("BIOS frequency table:\n");
3295 PRINTKI("PCLK_min_freq %d, PCLK_max_freq %d, ref_freq %d, ref_divider %d\n", 3295 PRINTKI("PCLK_min_freq %d, PCLK_max_freq %d, ref_freq %d, ref_divider %d\n",
3296 pll_block.PCLK_min_freq, pll_block.PCLK_max_freq, 3296 pll_block.PCLK_min_freq, pll_block.PCLK_max_freq,
3297 pll_block.ref_freq, pll_block.ref_divider); 3297 pll_block.ref_freq, pll_block.ref_divider);
3298 PRINTKI("MCLK_pwd %d, MCLK_max_freq %d, XCLK_max_freq %d, SCLK_freq %d\n", 3298 PRINTKI("MCLK_pwd %d, MCLK_max_freq %d, XCLK_max_freq %d, SCLK_freq %d\n",
3299 pll_block.MCLK_pwd, pll_block.MCLK_max_freq, 3299 pll_block.MCLK_pwd, pll_block.MCLK_max_freq,
3300 pll_block.XCLK_max_freq, pll_block.SCLK_freq); 3300 pll_block.XCLK_max_freq, pll_block.SCLK_freq);
3301 3301
3302 par->pll_limits.pll_min = pll_block.PCLK_min_freq/100; 3302 par->pll_limits.pll_min = pll_block.PCLK_min_freq/100;
3303 par->pll_limits.pll_max = pll_block.PCLK_max_freq/100; 3303 par->pll_limits.pll_max = pll_block.PCLK_max_freq/100;
3304 par->pll_limits.ref_clk = pll_block.ref_freq/100; 3304 par->pll_limits.ref_clk = pll_block.ref_freq/100;
3305 par->pll_limits.ref_div = pll_block.ref_divider; 3305 par->pll_limits.ref_div = pll_block.ref_divider;
3306 par->pll_limits.sclk = pll_block.SCLK_freq/100; 3306 par->pll_limits.sclk = pll_block.SCLK_freq/100;
3307 par->pll_limits.mclk = pll_block.MCLK_max_freq/100; 3307 par->pll_limits.mclk = pll_block.MCLK_max_freq/100;
3308 par->pll_limits.mclk_pm = pll_block.MCLK_pwd/100; 3308 par->pll_limits.mclk_pm = pll_block.MCLK_pwd/100;
3309 par->pll_limits.xclk = pll_block.XCLK_max_freq/100; 3309 par->pll_limits.xclk = pll_block.XCLK_max_freq/100;
3310 #ifdef CONFIG_FB_ATY_GENERIC_LCD 3310 #ifdef CONFIG_FB_ATY_GENERIC_LCD
3311 aty_init_lcd(par, bios_base); 3311 aty_init_lcd(par, bios_base);
3312 #endif 3312 #endif
3313 ret = 0; 3313 ret = 0;
3314 } else { 3314 } else {
3315 PRINTKE("no BIOS frequency table found, use parameters\n"); 3315 PRINTKE("no BIOS frequency table found, use parameters\n");
3316 ret = -ENXIO; 3316 ret = -ENXIO;
3317 } 3317 }
3318 iounmap((void* __iomem )bios_base); 3318 iounmap((void* __iomem )bios_base);
3319 3319
3320 return ret; 3320 return ret;
3321 } 3321 }
3322 #endif /* __i386__ */ 3322 #endif /* __i386__ */
3323 3323
3324 static int __devinit atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info, unsigned long addr) 3324 static int __devinit atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info, unsigned long addr)
3325 { 3325 {
3326 struct atyfb_par *par = info->par; 3326 struct atyfb_par *par = info->par;
3327 u16 tmp; 3327 u16 tmp;
3328 unsigned long raddr; 3328 unsigned long raddr;
3329 struct resource *rrp; 3329 struct resource *rrp;
3330 int ret = 0; 3330 int ret = 0;
3331 3331
3332 raddr = addr + 0x7ff000UL; 3332 raddr = addr + 0x7ff000UL;
3333 rrp = &pdev->resource[2]; 3333 rrp = &pdev->resource[2];
3334 if ((rrp->flags & IORESOURCE_MEM) && request_mem_region(rrp->start, rrp->end - rrp->start + 1, "atyfb")) { 3334 if ((rrp->flags & IORESOURCE_MEM) && request_mem_region(rrp->start, rrp->end - rrp->start + 1, "atyfb")) {
3335 par->aux_start = rrp->start; 3335 par->aux_start = rrp->start;
3336 par->aux_size = rrp->end - rrp->start + 1; 3336 par->aux_size = rrp->end - rrp->start + 1;
3337 raddr = rrp->start; 3337 raddr = rrp->start;
3338 PRINTKI("using auxiliary register aperture\n"); 3338 PRINTKI("using auxiliary register aperture\n");
3339 } 3339 }
3340 3340
3341 info->fix.mmio_start = raddr; 3341 info->fix.mmio_start = raddr;
3342 par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000); 3342 par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
3343 if (par->ati_regbase == 0) 3343 if (par->ati_regbase == 0)
3344 return -ENOMEM; 3344 return -ENOMEM;
3345 3345
3346 info->fix.mmio_start += par->aux_start ? 0x400 : 0xc00; 3346 info->fix.mmio_start += par->aux_start ? 0x400 : 0xc00;
3347 par->ati_regbase += par->aux_start ? 0x400 : 0xc00; 3347 par->ati_regbase += par->aux_start ? 0x400 : 0xc00;
3348 3348
3349 /* 3349 /*
3350 * Enable memory-space accesses using config-space 3350 * Enable memory-space accesses using config-space
3351 * command register. 3351 * command register.
3352 */ 3352 */
3353 pci_read_config_word(pdev, PCI_COMMAND, &tmp); 3353 pci_read_config_word(pdev, PCI_COMMAND, &tmp);
3354 if (!(tmp & PCI_COMMAND_MEMORY)) { 3354 if (!(tmp & PCI_COMMAND_MEMORY)) {
3355 tmp |= PCI_COMMAND_MEMORY; 3355 tmp |= PCI_COMMAND_MEMORY;
3356 pci_write_config_word(pdev, PCI_COMMAND, tmp); 3356 pci_write_config_word(pdev, PCI_COMMAND, tmp);
3357 } 3357 }
3358 #ifdef __BIG_ENDIAN 3358 #ifdef __BIG_ENDIAN
3359 /* Use the big-endian aperture */ 3359 /* Use the big-endian aperture */
3360 addr += 0x800000; 3360 addr += 0x800000;
3361 #endif 3361 #endif
3362 3362
3363 /* Map in frame buffer */ 3363 /* Map in frame buffer */
3364 info->fix.smem_start = addr; 3364 info->fix.smem_start = addr;
3365 info->screen_base = ioremap(addr, 0x800000); 3365 info->screen_base = ioremap(addr, 0x800000);
3366 if (info->screen_base == NULL) { 3366 if (info->screen_base == NULL) {
3367 ret = -ENOMEM; 3367 ret = -ENOMEM;
3368 goto atyfb_setup_generic_fail; 3368 goto atyfb_setup_generic_fail;
3369 } 3369 }
3370 3370
3371 if((ret = correct_chipset(par))) 3371 if((ret = correct_chipset(par)))
3372 goto atyfb_setup_generic_fail; 3372 goto atyfb_setup_generic_fail;
3373 #ifdef __i386__ 3373 #ifdef __i386__
3374 if((ret = init_from_bios(par))) 3374 if((ret = init_from_bios(par)))
3375 goto atyfb_setup_generic_fail; 3375 goto atyfb_setup_generic_fail;
3376 #endif 3376 #endif
3377 if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN)) 3377 if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
3378 par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2; 3378 par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2;
3379 else 3379 else
3380 par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U; 3380 par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U;
3381 3381
3382 /* according to ATI, we should use clock 3 for acelerated mode */ 3382 /* according to ATI, we should use clock 3 for acelerated mode */
3383 par->clk_wr_offset = 3; 3383 par->clk_wr_offset = 3;
3384 3384
3385 return 0; 3385 return 0;
3386 3386
3387 atyfb_setup_generic_fail: 3387 atyfb_setup_generic_fail:
3388 iounmap(par->ati_regbase); 3388 iounmap(par->ati_regbase);
3389 par->ati_regbase = NULL; 3389 par->ati_regbase = NULL;
3390 return ret; 3390 return ret;
3391 } 3391 }
3392 3392
3393 #endif /* !__sparc__ */ 3393 #endif /* !__sparc__ */
3394 3394
/*
 * atyfb_pci_probe() - PCI probe entry point for Mach64 adapters.
 *
 * Filters the device against the driver's private aty_chips table,
 * enables it, reserves its memory aperture, allocates the paired
 * fb_info/atyfb_par, runs the platform-specific setup and finally
 * registers the framebuffer via aty_init().
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long addr, res_start, res_size;
	struct fb_info *info;
	struct resource *rp;
	struct atyfb_par *par;
	int i, rc = -ENOMEM;

	/* The PCI id table matches any ATI display device; weed out the
	 * chips this driver does not actually support. */
	for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
		if (pdev->device == aty_chips[i].pci_id)
			break;

	if (i < 0)
		return -ENODEV;

	/* Enable device in PCI config */
	if (pci_enable_device(pdev)) {
		PRINTKE("Cannot enable PCI device\n");
		return -ENXIO;
	}

	/* Find which resource to use: skip to BAR1 if BAR0 is I/O space. */
	rp = &pdev->resource[0];
	if (rp->flags & IORESOURCE_IO)
		rp = &pdev->resource[1];
	addr = rp->start;
	if (!addr)
		return -ENXIO;

	/* Reserve space */
	res_start = rp->start;
	res_size = rp->end - rp->start + 1;
	if (!request_mem_region (res_start, res_size, "atyfb"))
		return -EBUSY;

	/* Allocate framebuffer: atyfb_par lives in info->par. */
	info = framebuffer_alloc(sizeof(struct atyfb_par), &pdev->dev);
	if (!info) {
		PRINTKE("atyfb_pci_probe() can't alloc fb_info\n");
		/* NOTE(review): the mem region reserved above is not released
		 * on this path - TODO confirm and fix separately. */
		return -ENOMEM;
	}
	par = info->par;
	info->fix = atyfb_fix;
	info->device = &pdev->dev;
	par->pci_id = aty_chips[i].pci_id;
	par->res_start = res_start;
	par->res_size = res_size;
	par->irq = pdev->irq;

	/* Setup "info" structure */
#ifdef __sparc__
	rc = atyfb_setup_sparc(pdev, info, addr);
#else
	rc = atyfb_setup_generic(pdev, info, addr);
#endif
	if (rc)
		goto err_release_mem;

	pci_set_drvdata(pdev, info);

	/* Init chip & register framebuffer */
	if (aty_init(info, "PCI"))
		goto err_release_io;

#ifdef __sparc__
	if (!prom_palette)
		prom_palette = atyfb_palette;

	/*
	 * Add /dev/fb mmap values.
	 */
	par->mmap_map[0].voff = 0x8000000000000000UL;
	par->mmap_map[0].poff = (unsigned long) info->screen_base & PAGE_MASK;
	par->mmap_map[0].size = info->fix.smem_len;
	par->mmap_map[0].prot_mask = _PAGE_CACHE;
	par->mmap_map[0].prot_flag = _PAGE_E;
	par->mmap_map[1].voff = par->mmap_map[0].voff + info->fix.smem_len;
	par->mmap_map[1].poff = (long)par->ati_regbase & PAGE_MASK;
	par->mmap_map[1].size = PAGE_SIZE;
	par->mmap_map[1].prot_mask = _PAGE_CACHE;
	par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */

	return 0;

	/* Error unwinding: release in reverse order of acquisition. */
err_release_io:
#ifdef __sparc__
	kfree(par->mmap_map);
#else
	if (par->ati_regbase)
		iounmap(par->ati_regbase);
	if (info->screen_base)
		iounmap(info->screen_base);
#endif
err_release_mem:
	if (par->aux_start)
		release_mem_region(par->aux_start, par->aux_size);

	release_mem_region(par->res_start, par->res_size);
	framebuffer_release(info);

	return rc;
}
3498 3498
3499 #endif /* CONFIG_PCI */ 3499 #endif /* CONFIG_PCI */
3500 3500
3501 #ifdef CONFIG_ATARI 3501 #ifdef CONFIG_ATARI
3502 3502
3503 static int __devinit atyfb_atari_probe(void) 3503 static int __devinit atyfb_atari_probe(void)
3504 { 3504 {
3505 struct atyfb_par *par; 3505 struct atyfb_par *par;
3506 struct fb_info *info; 3506 struct fb_info *info;
3507 int m64_num; 3507 int m64_num;
3508 u32 clock_r; 3508 u32 clock_r;
3509 3509
3510 for (m64_num = 0; m64_num < mach64_count; m64_num++) { 3510 for (m64_num = 0; m64_num < mach64_count; m64_num++) {
3511 if (!phys_vmembase[m64_num] || !phys_size[m64_num] || 3511 if (!phys_vmembase[m64_num] || !phys_size[m64_num] ||
3512 !phys_guiregbase[m64_num]) { 3512 !phys_guiregbase[m64_num]) {
3513 PRINTKI("phys_*[%d] parameters not set => returning early. \n", m64_num); 3513 PRINTKI("phys_*[%d] parameters not set => returning early. \n", m64_num);
3514 continue; 3514 continue;
3515 } 3515 }
3516 3516
3517 info = framebuffer_alloc(sizeof(struct atyfb_par), NULL); 3517 info = framebuffer_alloc(sizeof(struct atyfb_par), NULL);
3518 if (!info) { 3518 if (!info) {
3519 PRINTKE("atyfb_atari_probe() can't alloc fb_info\n"); 3519 PRINTKE("atyfb_atari_probe() can't alloc fb_info\n");
3520 return -ENOMEM; 3520 return -ENOMEM;
3521 } 3521 }
3522 par = info->par; 3522 par = info->par;
3523 3523
3524 info->fix = atyfb_fix; 3524 info->fix = atyfb_fix;
3525 3525
3526 par->irq = (unsigned int) -1; /* something invalid */ 3526 par->irq = (unsigned int) -1; /* something invalid */
3527 3527
3528 /* 3528 /*
3529 * Map the video memory (physical address given) to somewhere in the 3529 * Map the video memory (physical address given) to somewhere in the
3530 * kernel address space. 3530 * kernel address space.
3531 */ 3531 */
3532 info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]); 3532 info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]);
3533 info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */ 3533 info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */
3534 par->ati_regbase = ioremap(phys_guiregbase[m64_num], 0x10000) + 3534 par->ati_regbase = ioremap(phys_guiregbase[m64_num], 0x10000) +
3535 0xFC00ul; 3535 0xFC00ul;
3536 info->fix.mmio_start = (unsigned long)par->ati_regbase; /* Fake! */ 3536 info->fix.mmio_start = (unsigned long)par->ati_regbase; /* Fake! */
3537 3537
3538 aty_st_le32(CLOCK_CNTL, 0x12345678, par); 3538 aty_st_le32(CLOCK_CNTL, 0x12345678, par);
3539 clock_r = aty_ld_le32(CLOCK_CNTL, par); 3539 clock_r = aty_ld_le32(CLOCK_CNTL, par);
3540 3540
3541 switch (clock_r & 0x003F) { 3541 switch (clock_r & 0x003F) {
3542 case 0x12: 3542 case 0x12:
3543 par->clk_wr_offset = 3; /* */ 3543 par->clk_wr_offset = 3; /* */
3544 break; 3544 break;
3545 case 0x34: 3545 case 0x34:
3546 par->clk_wr_offset = 2; /* Medusa ST-IO ISA Adapter etc. */ 3546 par->clk_wr_offset = 2; /* Medusa ST-IO ISA Adapter etc. */
3547 break; 3547 break;
3548 case 0x16: 3548 case 0x16:
3549 par->clk_wr_offset = 1; /* */ 3549 par->clk_wr_offset = 1; /* */
3550 break; 3550 break;
3551 case 0x38: 3551 case 0x38:
3552 par->clk_wr_offset = 0; /* Panther 1 ISA Adapter (Gerald) */ 3552 par->clk_wr_offset = 0; /* Panther 1 ISA Adapter (Gerald) */
3553 break; 3553 break;
3554 } 3554 }
3555 3555
3556 if (aty_init(info, "ISA bus")) { 3556 if (aty_init(info, "ISA bus")) {
3557 framebuffer_release(info); 3557 framebuffer_release(info);
3558 /* This is insufficient! kernel_map has added two large chunks!! */ 3558 /* This is insufficient! kernel_map has added two large chunks!! */
3559 return -ENXIO; 3559 return -ENXIO;
3560 } 3560 }
3561 } 3561 }
3562 } 3562 }
3563 3563
3564 #endif /* CONFIG_ATARI */ 3564 #endif /* CONFIG_ATARI */
3565 3565
/*
 * atyfb_remove() - common teardown for a registered Mach64 framebuffer.
 *
 * Restores the video mode saved at init time, unregisters the
 * framebuffer, then releases MTRRs, mappings and memory regions in
 * reverse order of acquisition.  The CRTC/PLL restore deliberately
 * happens before unregister_framebuffer() so the console leaves the
 * hardware in its original state.
 */
static void __devexit atyfb_remove(struct fb_info *info)
{
	struct atyfb_par *par = (struct atyfb_par *) info->par;

	/* restore video mode */
	aty_set_crtc(par, &saved_crtc);
	par->pll_ops->set_pll(info, &saved_pll);

	unregister_framebuffer(info);

#ifdef CONFIG_MTRR
	/* -1 marks "no MTRR allocated"; reset after deleting. */
	if (par->mtrr_reg >= 0) {
		mtrr_del(par->mtrr_reg, 0, 0);
		par->mtrr_reg = -1;
	}
	if (par->mtrr_aper >= 0) {
		mtrr_del(par->mtrr_aper, 0, 0);
		par->mtrr_aper = -1;
	}
#endif
#ifndef __sparc__
	if (par->ati_regbase)
		iounmap(par->ati_regbase);
	if (info->screen_base)
		iounmap(info->screen_base);
#ifdef __BIG_ENDIAN
	if (info->sprite.addr)
		iounmap(info->sprite.addr);
#endif
#endif
#ifdef __sparc__
	/* sparc uses a kmalloc'd mmap translation table instead of ioremap. */
	kfree(par->mmap_map);
#endif
	if (par->aux_start)
		release_mem_region(par->aux_start, par->aux_size);

	if (par->res_start)
		release_mem_region(par->res_start, par->res_size);

	framebuffer_release(info);
}
3607 3607
3608 #ifdef CONFIG_PCI 3608 #ifdef CONFIG_PCI
3609 3609
3610 static void __devexit atyfb_pci_remove(struct pci_dev *pdev) 3610 static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
3611 { 3611 {
3612 struct fb_info *info = pci_get_drvdata(pdev); 3612 struct fb_info *info = pci_get_drvdata(pdev);
3613 3613
3614 atyfb_remove(info); 3614 atyfb_remove(info);
3615 } 3615 }
3616 3616
3617 /* 3617 /*
3618 * This driver uses its own matching table. That will be more difficult 3618 * This driver uses its own matching table. That will be more difficult
3619 * to fix, so for now, we just match against any ATI ID and let the 3619 * to fix, so for now, we just match against any ATI ID and let the
3620 * probe() function find out what's up. That also mean we don't have 3620 * probe() function find out what's up. That also mean we don't have
3621 * a module ID table though. 3621 * a module ID table though.
3622 */ 3622 */
/* Match any ATI device whose base class is DISPLAY; the real chip
 * identification happens against aty_chips[] in atyfb_pci_probe(). */
static struct pci_device_id atyfb_pci_tbl[] = {
	{ PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0 },
	{ 0, }
};
3628 3628
/* PCI driver glue; suspend/resume hooks exist only with CONFIG_PM. */
static struct pci_driver atyfb_driver = {
	.name		= "atyfb",
	.id_table	= atyfb_pci_tbl,
	.probe		= atyfb_pci_probe,
	.remove		= __devexit_p(atyfb_pci_remove),
#ifdef CONFIG_PM
	.suspend	= atyfb_pci_suspend,
	.resume		= atyfb_pci_resume,
#endif /* CONFIG_PM */
};
3639 3639
3640 #endif /* CONFIG_PCI */ 3640 #endif /* CONFIG_PCI */
3641 3641
3642 #ifndef MODULE 3642 #ifndef MODULE
/*
 * atyfb_setup() - parse the "atyfb=" kernel command-line options.
 *
 * Recognized options: noaccel, nomtrr (CONFIG_MTRR), vram:, pll:,
 * mclk:, xclk:, comp_sync:, vmode:/cmode: (PPC), Mach64: (Atari).
 * Any unrecognized token is treated as the video mode string.
 * Always returns 0 (built-in only).
 */
static int __init atyfb_setup(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		/* NOTE: prefix matching via strncmp - longer/more specific
		 * prefixes must not shadow shorter ones; keep order intact. */
		if (!strncmp(this_opt, "noaccel", 7)) {
			noaccel = 1;
#ifdef CONFIG_MTRR
		} else if (!strncmp(this_opt, "nomtrr", 6)) {
			nomtrr = 1;
#endif
		} else if (!strncmp(this_opt, "vram:", 5))
			vram = simple_strtoul(this_opt + 5, NULL, 0);
		else if (!strncmp(this_opt, "pll:", 4))
			pll = simple_strtoul(this_opt + 4, NULL, 0);
		else if (!strncmp(this_opt, "mclk:", 5))
			mclk = simple_strtoul(this_opt + 5, NULL, 0);
		else if (!strncmp(this_opt, "xclk:", 5))
			xclk = simple_strtoul(this_opt+5, NULL, 0);
		else if (!strncmp(this_opt, "comp_sync:", 10))
			comp_sync = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_PPC
		else if (!strncmp(this_opt, "vmode:", 6)) {
			unsigned int vmode =
			    simple_strtoul(this_opt + 6, NULL, 0);
			if (vmode > 0 && vmode <= VMODE_MAX)
				default_vmode = vmode;
		} else if (!strncmp(this_opt, "cmode:", 6)) {
			unsigned int cmode =
			    simple_strtoul(this_opt + 6, NULL, 0);
			/* Map requested bpp onto the supported colour modes. */
			switch (cmode) {
			case 0:
			case 8:
				default_cmode = CMODE_8;
				break;
			case 15:
			case 16:
				default_cmode = CMODE_16;
				break;
			case 24:
			case 32:
				default_cmode = CMODE_32;
				break;
			}
		}
#endif
#ifdef CONFIG_ATARI
		/*
		 * Why do we need this silly Mach64 argument?
		 * We are already here because of mach64= so its redundant.
		 */
		else if (MACH_IS_ATARI
			 && (!strncmp(this_opt, "Mach64:", 7))) {
			static unsigned char m64_num;
			static char mach64_str[80];
			strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
			if (!store_video_par(mach64_str, m64_num)) {
				m64_num++;
				mach64_count = m64_num;
			}
		}
#endif
		else
			mode = this_opt;
	}
	return 0;
}
3713 #endif /* MODULE */ 3713 #endif /* MODULE */
3714 3714
/*
 * atyfb_init() - module/built-in entry point.
 *
 * Parses command-line options (built-in only), then registers the
 * PCI driver and probes Atari boards as configured.  Always returns
 * 0 unless fb_get_options() vetoes the driver.
 */
static int __init atyfb_init(void)
{
#ifndef MODULE
	char *option = NULL;

	if (fb_get_options("atyfb", &option))
		return -ENODEV;
	atyfb_setup(option);
#endif

#ifdef CONFIG_PCI
	pci_register_driver(&atyfb_driver);
#endif
#ifdef CONFIG_ATARI
	atyfb_atari_probe();
#endif
	return 0;
}
3733 3733
/*
 * atyfb_exit() - module unload: unregister the PCI driver, which in
 * turn tears down each bound device via atyfb_pci_remove().
 * (Atari ISA boards have no corresponding teardown here.)
 */
static void __exit atyfb_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&atyfb_driver);
#endif
}
3740 3740
module_init(atyfb_init);
module_exit(atyfb_exit);

/* Module metadata and command-line-settable parameters; the backing
 * variables are defined near the top of this file. */
MODULE_DESCRIPTION("FBDev driver for ATI Mach64 cards");
MODULE_LICENSE("GPL");
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "bool: disable acceleration");
module_param(vram, int, 0);
MODULE_PARM_DESC(vram, "int: override size of video ram");
module_param(pll, int, 0);
MODULE_PARM_DESC(pll, "int: override video clock");
module_param(mclk, int, 0);
MODULE_PARM_DESC(mclk, "int: override memory clock");
module_param(xclk, int, 0);
MODULE_PARM_DESC(xclk, "int: override accelerated engine clock");
module_param(comp_sync, int, 0);
MODULE_PARM_DESC(comp_sync,
		 "Set composite sync signal to low (0) or high (1)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");
#endif
3765 3765
include/asm-sparc64/pbm.h
1 /* $Id: pbm.h,v 1.27 2001/08/12 13:18:23 davem Exp $ 1 /* $Id: pbm.h,v 1.27 2001/08/12 13:18:23 davem Exp $
2 * pbm.h: UltraSparc PCI controller software state. 2 * pbm.h: UltraSparc PCI controller software state.
3 * 3 *
4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com) 4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
5 */ 5 */
6 6
7 #ifndef __SPARC64_PBM_H 7 #ifndef __SPARC64_PBM_H
8 #define __SPARC64_PBM_H 8 #define __SPARC64_PBM_H
9 9
10 #include <linux/types.h> 10 #include <linux/types.h>
11 #include <linux/pci.h> 11 #include <linux/pci.h>
12 #include <linux/ioport.h> 12 #include <linux/ioport.h>
13 #include <linux/spinlock.h> 13 #include <linux/spinlock.h>
14 14
15 #include <asm/io.h> 15 #include <asm/io.h>
16 #include <asm/page.h> 16 #include <asm/page.h>
17 #include <asm/oplib.h> 17 #include <asm/oplib.h>
18 #include <asm/prom.h> 18 #include <asm/prom.h>
19 #include <asm/iommu.h> 19 #include <asm/iommu.h>
20 20
21 /* The abstraction used here is that there are PCI controllers, 21 /* The abstraction used here is that there are PCI controllers,
22 * each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules 22 * each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules
23 * underneath. Each PCI bus module uses an IOMMU (shared by both 23 * underneath. Each PCI bus module uses an IOMMU (shared by both
24 * PBMs of a controller, or per-PBM), and if a streaming buffer 24 * PBMs of a controller, or per-PBM), and if a streaming buffer
25 * is present, each PCI bus module has it's own. (ie. the IOMMU 25 * is present, each PCI bus module has it's own. (ie. the IOMMU
26 * might be shared between PBMs, the STC is never shared) 26 * might be shared between PBMs, the STC is never shared)
27 * Furthermore, each PCI bus module controls it's own autonomous 27 * Furthermore, each PCI bus module controls it's own autonomous
28 * PCI bus. 28 * PCI bus.
29 */ 29 */
30 30
struct pci_controller_info;

/* This contains the software state necessary to drive a PCI
 * controller's IOMMU: a bitmap allocator over the IOMMU page
 * table entries.
 */
struct pci_iommu_arena {
	unsigned long *map;	/* Allocation bitmap, one bit per IOPTE */
	unsigned int hint;	/* Next-fit search start index */
	unsigned int limit;	/* Number of entries covered by the map */
};
41 41
42 struct pci_iommu { 42 struct pci_iommu {
43 /* This protects the controller's IOMMU and all 43 /* This protects the controller's IOMMU and all
44 * streaming buffers underneath. 44 * streaming buffers underneath.
45 */ 45 */
46 spinlock_t lock; 46 spinlock_t lock;
47 47
48 struct pci_iommu_arena arena; 48 struct pci_iommu_arena arena;
49 49
50 /* IOMMU page table, a linear array of ioptes. */ 50 /* IOMMU page table, a linear array of ioptes. */
51 iopte_t *page_table; /* The page table itself. */ 51 iopte_t *page_table; /* The page table itself. */
52 52
53 /* Base PCI memory space address where IOMMU mappings 53 /* Base PCI memory space address where IOMMU mappings
54 * begin. 54 * begin.
55 */ 55 */
56 u32 page_table_map_base; 56 u32 page_table_map_base;
57 57
58 /* IOMMU Controller Registers */ 58 /* IOMMU Controller Registers */
59 unsigned long iommu_control; /* IOMMU control register */ 59 unsigned long iommu_control; /* IOMMU control register */
60 unsigned long iommu_tsbbase; /* IOMMU page table base register */ 60 unsigned long iommu_tsbbase; /* IOMMU page table base register */
61 unsigned long iommu_flush; /* IOMMU page flush register */ 61 unsigned long iommu_flush; /* IOMMU page flush register */
62 unsigned long iommu_ctxflush; /* IOMMU context flush register */ 62 unsigned long iommu_ctxflush; /* IOMMU context flush register */
63 63
64 /* This is a register in the PCI controller, which if 64 /* This is a register in the PCI controller, which if
65 * read will have no side-effects but will guarantee 65 * read will have no side-effects but will guarantee
66 * completion of all previous writes into IOMMU/STC. 66 * completion of all previous writes into IOMMU/STC.
67 */ 67 */
68 unsigned long write_complete_reg; 68 unsigned long write_complete_reg;
69 69
70 /* In order to deal with some buggy third-party PCI bridges that 70 /* In order to deal with some buggy third-party PCI bridges that
71 * do wrong prefetching, we never mark valid mappings as invalid. 71 * do wrong prefetching, we never mark valid mappings as invalid.
72 * Instead we point them at this dummy page. 72 * Instead we point them at this dummy page.
73 */ 73 */
74 unsigned long dummy_page; 74 unsigned long dummy_page;
75 unsigned long dummy_page_pa; 75 unsigned long dummy_page_pa;
76 76
77 /* CTX allocation. */ 77 /* CTX allocation. */
78 unsigned long ctx_lowest_free; 78 unsigned long ctx_lowest_free;
79 unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)]; 79 unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];
80 80
81 /* Here a PCI controller driver describes the areas of 81 /* Here a PCI controller driver describes the areas of
82 * PCI memory space where DMA to/from physical memory 82 * PCI memory space where DMA to/from physical memory
83 * are addressed. Drivers interrogate the PCI layer 83 * are addressed. Drivers interrogate the PCI layer
84 * if their device has addressing limitations. They 84 * if their device has addressing limitations. They
85 * do so via pci_dma_supported, and pass in a mask of 85 * do so via pci_dma_supported, and pass in a mask of
86 * DMA address bits their device can actually drive. 86 * DMA address bits their device can actually drive.
87 * 87 *
88 * The test for being usable is: 88 * The test for being usable is:
89 * (device_mask & dma_addr_mask) == dma_addr_mask 89 * (device_mask & dma_addr_mask) == dma_addr_mask
90 */ 90 */
91 u32 dma_addr_mask; 91 u32 dma_addr_mask;
92 }; 92 };
93 93
94 extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask); 94 extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);
95 95
/* This describes a PCI bus module's streaming buffer. */
struct pci_strbuf {
	int strbuf_enabled;			/* Present and using it? */

	/* Streaming Buffer Control Registers */
	unsigned long strbuf_control;		/* STC control register */
	unsigned long strbuf_pflush;		/* STC page flush register */
	unsigned long strbuf_fsync;		/* STC flush synchronization reg */
	unsigned long strbuf_ctxflush;		/* STC context flush register */
	unsigned long strbuf_ctxmatch_base;	/* STC context flush match reg */
	unsigned long strbuf_flushflag_pa;	/* Physical address of flush flag */
	volatile unsigned long *strbuf_flushflag; /* The flush flag itself */

	/* And this is the actual flush flag area.
	 * We allocate extra because the chips require
	 * a 64-byte aligned area.
	 */
	volatile unsigned long __flushflag_buf[(64 + (64 - 1)) / sizeof(long)];
};

/* Clear the flush flag before starting a flush, then poll
 * PCI_STC_FLUSHFLAG_SET until the hardware writes it non-zero.
 */
#define PCI_STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define PCI_STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

/* There can be quite a few ranges and interrupt maps on a PCI
 * segment.  Thus...
 */
#define PROM_PCIRNG_MAX		64
#define PROM_PCIIMAP_MAX	64
126 126
127 struct pci_pbm_info { 127 struct pci_pbm_info {
128 /* PCI controller we sit under. */ 128 /* PCI controller we sit under. */
129 struct pci_controller_info *parent; 129 struct pci_controller_info *parent;
130 130
131 /* Physical address base of controller registers. */ 131 /* Physical address base of controller registers. */
132 unsigned long controller_regs; 132 unsigned long controller_regs;
133 133
134 /* Physical address base of PBM registers. */ 134 /* Physical address base of PBM registers. */
135 unsigned long pbm_regs; 135 unsigned long pbm_regs;
136 136
137 /* Physical address of DMA sync register, if any. */ 137 /* Physical address of DMA sync register, if any. */
138 unsigned long sync_reg; 138 unsigned long sync_reg;
139 139
140 /* Opaque 32-bit system bus Port ID. */ 140 /* Opaque 32-bit system bus Port ID. */
141 u32 portid; 141 u32 portid;
142 142
143 /* Opaque 32-bit handle used for hypervisor calls. */ 143 /* Opaque 32-bit handle used for hypervisor calls. */
144 u32 devhandle; 144 u32 devhandle;
145 145
146 /* Chipset version information. */ 146 /* Chipset version information. */
147 int chip_type; 147 int chip_type;
148 #define PBM_CHIP_TYPE_SABRE 1 148 #define PBM_CHIP_TYPE_SABRE 1
149 #define PBM_CHIP_TYPE_PSYCHO 2 149 #define PBM_CHIP_TYPE_PSYCHO 2
150 #define PBM_CHIP_TYPE_SCHIZO 3 150 #define PBM_CHIP_TYPE_SCHIZO 3
151 #define PBM_CHIP_TYPE_SCHIZO_PLUS 4 151 #define PBM_CHIP_TYPE_SCHIZO_PLUS 4
152 #define PBM_CHIP_TYPE_TOMATILLO 5 152 #define PBM_CHIP_TYPE_TOMATILLO 5
153 int chip_version; 153 int chip_version;
154 int chip_revision; 154 int chip_revision;
155 155
156 /* Name used for top-level resources. */ 156 /* Name used for top-level resources. */
157 char *name; 157 char *name;
158 158
159 /* OBP specific information. */ 159 /* OBP specific information. */
160 struct device_node *prom_node; 160 struct device_node *prom_node;
161 struct linux_prom_pci_ranges *pbm_ranges; 161 struct linux_prom_pci_ranges *pbm_ranges;
162 int num_pbm_ranges; 162 int num_pbm_ranges;
163 struct linux_prom_pci_intmap *pbm_intmap; 163 struct linux_prom_pci_intmap *pbm_intmap;
164 int num_pbm_intmap; 164 int num_pbm_intmap;
165 struct linux_prom_pci_intmask *pbm_intmask; 165 struct linux_prom_pci_intmask *pbm_intmask;
166 u64 ino_bitmap; 166 u64 ino_bitmap;
167 167
168 /* PBM I/O and Memory space resources. */ 168 /* PBM I/O and Memory space resources. */
169 struct resource io_space; 169 struct resource io_space;
170 struct resource mem_space; 170 struct resource mem_space;
171 171
172 /* Base of PCI Config space, can be per-PBM or shared. */ 172 /* Base of PCI Config space, can be per-PBM or shared. */
173 unsigned long config_space; 173 unsigned long config_space;
174 174
175 /* State of 66MHz capabilities on this PBM. */ 175 /* State of 66MHz capabilities on this PBM. */
176 int is_66mhz_capable; 176 int is_66mhz_capable;
177 int all_devs_66mhz; 177 int all_devs_66mhz;
178 178
179 /* This PBM's streaming buffer. */ 179 /* This PBM's streaming buffer. */
180 struct pci_strbuf stc; 180 struct pci_strbuf stc;
181 181
182 /* IOMMU state, potentially shared by both PBM segments. */ 182 /* IOMMU state, potentially shared by both PBM segments. */
183 struct pci_iommu *iommu; 183 struct pci_iommu *iommu;
184 184
185 /* PCI slot mapping. */ 185 /* PCI slot mapping. */
186 unsigned int pci_first_slot; 186 unsigned int pci_first_slot;
187 187
188 /* Now things for the actual PCI bus probes. */ 188 /* Now things for the actual PCI bus probes. */
189 unsigned int pci_first_busno; 189 unsigned int pci_first_busno;
190 unsigned int pci_last_busno; 190 unsigned int pci_last_busno;
191 struct pci_bus *pci_bus; 191 struct pci_bus *pci_bus;
192 }; 192 };
193 193
194 struct pci_controller_info { 194 struct pci_controller_info {
195 /* List of all PCI controllers. */ 195 /* List of all PCI controllers. */
196 struct pci_controller_info *next; 196 struct pci_controller_info *next;
197 197
198 /* Each controller gets a unique index, used mostly for 198 /* Each controller gets a unique index, used mostly for
199 * error logging purposes. 199 * error logging purposes.
200 */ 200 */
201 int index; 201 int index;
202 202
203 /* Do the PBMs both exist in the same PCI domain? */ 203 /* Do the PBMs both exist in the same PCI domain? */
204 int pbms_same_domain; 204 int pbms_same_domain;
205 205
206 /* The PCI bus modules controlled by us. */ 206 /* The PCI bus modules controlled by us. */
207 struct pci_pbm_info pbm_A; 207 struct pci_pbm_info pbm_A;
208 struct pci_pbm_info pbm_B; 208 struct pci_pbm_info pbm_B;
209 209
210 /* Operations which are controller specific. */ 210 /* Operations which are controller specific. */
211 void (*scan_bus)(struct pci_controller_info *); 211 void (*scan_bus)(struct pci_controller_info *);
212 unsigned int (*irq_build)(struct pci_pbm_info *, struct pci_dev *, unsigned int); 212 unsigned int (*irq_build)(struct pci_pbm_info *, struct pci_dev *, unsigned int);
213 void (*base_address_update)(struct pci_dev *, int); 213 void (*base_address_update)(struct pci_dev *, int);
214 void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *); 214 void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *);
215 215
216 /* Now things for the actual PCI bus probes. */ 216 /* Now things for the actual PCI bus probes. */
217 struct pci_ops *pci_ops; 217 struct pci_ops *pci_ops;
218 unsigned int pci_first_busno; 218 unsigned int pci_first_busno;
219 unsigned int pci_last_busno; 219 unsigned int pci_last_busno;
220 220
221 void *starfire_cookie; 221 void *starfire_cookie;
222 }; 222 };
223 223
224 /* PCI devices which are not bridges have this placed in their pci_dev 224 /* PCI devices which are not bridges have this placed in their pci_dev
225 * sysdata member. This makes OBP aware PCI device drivers easier to 225 * sysdata member. This makes OBP aware PCI device drivers easier to
226 * code. 226 * code.
227 */ 227 */
228 struct pcidev_cookie { 228 struct pcidev_cookie {
229 struct pci_pbm_info *pbm; 229 struct pci_pbm_info *pbm;
230 char prom_name[64]; 230 struct device_node *prom_node;
231 int prom_node;
232 struct linux_prom_pci_registers prom_regs[PROMREG_MAX]; 231 struct linux_prom_pci_registers prom_regs[PROMREG_MAX];
233 int num_prom_regs; 232 int num_prom_regs;
234 struct linux_prom_pci_registers prom_assignments[PROMREG_MAX]; 233 struct linux_prom_pci_registers prom_assignments[PROMREG_MAX];
235 int num_prom_assignments; 234 int num_prom_assignments;
236 }; 235 };
237 236
/* Currently these are the same across all PCI controllers
 * we support.  Someday they may not be...
 */
#define PCI_IRQ_IGN	0x000007c0	/* Interrupt Group Number */
#define PCI_IRQ_INO	0x0000003f	/* Interrupt Number */
243 242
244 #endif /* !(__SPARC64_PBM_H) */ 243 #endif /* !(__SPARC64_PBM_H) */
245 244
include/asm-sparc64/prom.h
1 #ifndef _SPARC64_PROM_H 1 #ifndef _SPARC64_PROM_H
2 #define _SPARC64_PROM_H 2 #define _SPARC64_PROM_H
3 #ifdef __KERNEL__ 3 #ifdef __KERNEL__
4 4
5 5
6 /* 6 /*
7 * Definitions for talking to the Open Firmware PROM on 7 * Definitions for talking to the Open Firmware PROM on
8 * Power Macintosh computers. 8 * Power Macintosh computers.
9 * 9 *
10 * Copyright (C) 1996-2005 Paul Mackerras. 10 * Copyright (C) 1996-2005 Paul Mackerras.
11 * 11 *
12 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. 12 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
13 * Updates for SPARC64 by David S. Miller 13 * Updates for SPARC64 by David S. Miller
14 * 14 *
15 * This program is free software; you can redistribute it and/or 15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License 16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version 17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version. 18 * 2 of the License, or (at your option) any later version.
19 */ 19 */
20 20
21 #include <linux/types.h> 21 #include <linux/types.h>
22 #include <linux/proc_fs.h> 22 #include <linux/proc_fs.h>
23 #include <asm/atomic.h> 23 #include <asm/atomic.h>
24 24
25 typedef u32 phandle; 25 typedef u32 phandle;
26 typedef u32 ihandle; 26 typedef u32 ihandle;
27 27
struct interrupt_info {
	int line;
	int sense;		/* +ve/-ve logic, edge or level, etc. */
};
32 32
/* One OBP property: a named, length-tagged blob, linked into the
 * owning device_node's property list.
 */
struct property {
	char *name;
	int length;
	void *value;
	struct property *next;
};
39 39
40 struct device_node { 40 struct device_node {
41 char *name; 41 char *name;
42 char *type; 42 char *type;
43 phandle node; 43 phandle node;
44 phandle linux_phandle; 44 phandle linux_phandle;
45 int n_intrs; 45 int n_intrs;
46 struct interrupt_info *intrs; 46 struct interrupt_info *intrs;
47 char *path_component_name; 47 char *path_component_name;
48 char *full_name; 48 char *full_name;
49 49
50 struct property *properties; 50 struct property *properties;
51 struct property *deadprops; /* removed properties */ 51 struct property *deadprops; /* removed properties */
52 struct device_node *parent; 52 struct device_node *parent;
53 struct device_node *child; 53 struct device_node *child;
54 struct device_node *sibling; 54 struct device_node *sibling;
55 struct device_node *next; /* next device of same type */ 55 struct device_node *next; /* next device of same type */
56 struct device_node *allnext; /* next in list of all nodes */ 56 struct device_node *allnext; /* next in list of all nodes */
57 struct proc_dir_entry *pde; /* this node's proc directory */ 57 struct proc_dir_entry *pde; /* this node's proc directory */
58 struct kref kref; 58 struct kref kref;
59 unsigned long _flags; 59 unsigned long _flags;
60 void *data; 60 void *data;
61 }; 61 };
62 62
63 static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) 63 static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
64 { 64 {
65 dn->pde = de; 65 dn->pde = de;
66 } 66 }
67 67
68 extern struct device_node *of_find_node_by_name(struct device_node *from, 68 extern struct device_node *of_find_node_by_name(struct device_node *from,
69 const char *name); 69 const char *name);
70 #define for_each_node_by_name(dn, name) \ 70 #define for_each_node_by_name(dn, name) \
71 for (dn = of_find_node_by_name(NULL, name); dn; \ 71 for (dn = of_find_node_by_name(NULL, name); dn; \
72 dn = of_find_node_by_name(dn, name)) 72 dn = of_find_node_by_name(dn, name))
73 extern struct device_node *of_find_node_by_type(struct device_node *from, 73 extern struct device_node *of_find_node_by_type(struct device_node *from,
74 const char *type); 74 const char *type);
75 #define for_each_node_by_type(dn, type) \ 75 #define for_each_node_by_type(dn, type) \
76 for (dn = of_find_node_by_type(NULL, type); dn; \ 76 for (dn = of_find_node_by_type(NULL, type); dn; \
77 dn = of_find_node_by_type(dn, type)) 77 dn = of_find_node_by_type(dn, type))
78 extern struct device_node *of_find_node_by_path(const char *path); 78 extern struct device_node *of_find_node_by_path(const char *path);
79 extern struct device_node *of_find_node_by_phandle(phandle handle);
79 extern struct device_node *of_get_parent(const struct device_node *node); 80 extern struct device_node *of_get_parent(const struct device_node *node);
80 extern struct device_node *of_get_next_child(const struct device_node *node, 81 extern struct device_node *of_get_next_child(const struct device_node *node,
81 struct device_node *prev); 82 struct device_node *prev);
82 extern struct property *of_find_property(struct device_node *np, 83 extern struct property *of_find_property(struct device_node *np,
83 const char *name, 84 const char *name,
84 int *lenp); 85 int *lenp);
86 extern void *of_get_property(struct device_node *node, const char *name,
87 int *lenp);
85 extern int of_getintprop_default(struct device_node *np, 88 extern int of_getintprop_default(struct device_node *np,
86 const char *name, 89 const char *name,
87 int def); 90 int def);
88 91
89 extern void prom_build_devicetree(void); 92 extern void prom_build_devicetree(void);
90 93
91 #endif /* __KERNEL__ */ 94 #endif /* __KERNEL__ */
92 #endif /* _SPARC64_PROM_H */ 95 #endif /* _SPARC64_PROM_H */
93 96