Commit f8de31040d50b7e4c26a5ca4c02b2929dde34a58

Authored by Andy Shevchenko
Committed by David S. Miller
1 parent 8390f81482

atm: he: print MAC via %pM

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 1 changed file with 2 additions and 9 deletions (inline diff view)

1 /* 1 /*
2 2
3 he.c 3 he.c
4 4
5 ForeRunnerHE ATM Adapter driver for ATM on Linux 5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory 6 Copyright (C) 1999-2001 Naval Research Laboratory
7 7
8 This library is free software; you can redistribute it and/or 8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public 9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either 10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version. 11 version 2.1 of the License, or (at your option) any later version.
12 12
13 This library is distributed in the hope that it will be useful, 13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details. 16 Lesser General Public License for more details.
17 17
18 You should have received a copy of the GNU Lesser General Public 18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software 19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 21
22 */ 22 */
23 23
24 /* 24 /*
25 25
26 he.c 26 he.c
27 27
28 ForeRunnerHE ATM Adapter driver for ATM on Linux 28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory 29 Copyright (C) 1999-2001 Naval Research Laboratory
30 30
31 Permission to use, copy, modify and distribute this software and its 31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright 32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software, 33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and 34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation. 35 that both notices appear in supporting documentation.
36 36
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND 37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER 38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE. 39 RESULTING FROM THE USE OF THIS SOFTWARE.
40 40
41 This driver was written using the "Programmer's Reference Manual for 41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98. 42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
43 43
44 AUTHORS: 44 AUTHORS:
45 chas williams <chas@cmf.nrl.navy.mil> 45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil> 46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
47 47
48 NOTES: 48 NOTES:
49 4096 supported 'connections' 49 4096 supported 'connections'
50 group 0 is used for all traffic 50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts 51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com) 52 aal0 support (based on work from ulrich.u.muller@nokia.com)
53 53
54 */ 54 */
55 55
56 #include <linux/module.h> 56 #include <linux/module.h>
57 #include <linux/kernel.h> 57 #include <linux/kernel.h>
58 #include <linux/skbuff.h> 58 #include <linux/skbuff.h>
59 #include <linux/pci.h> 59 #include <linux/pci.h>
60 #include <linux/errno.h> 60 #include <linux/errno.h>
61 #include <linux/types.h> 61 #include <linux/types.h>
62 #include <linux/string.h> 62 #include <linux/string.h>
63 #include <linux/delay.h> 63 #include <linux/delay.h>
64 #include <linux/init.h> 64 #include <linux/init.h>
65 #include <linux/mm.h> 65 #include <linux/mm.h>
66 #include <linux/sched.h> 66 #include <linux/sched.h>
67 #include <linux/timer.h> 67 #include <linux/timer.h>
68 #include <linux/interrupt.h> 68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h> 69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h> 70 #include <linux/bitmap.h>
71 #include <linux/slab.h> 71 #include <linux/slab.h>
72 #include <asm/io.h> 72 #include <asm/io.h>
73 #include <asm/byteorder.h> 73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h> 74 #include <asm/uaccess.h>
75 75
76 #include <linux/atmdev.h> 76 #include <linux/atmdev.h>
77 #include <linux/atm.h> 77 #include <linux/atm.h>
78 #include <linux/sonet.h> 78 #include <linux/sonet.h>
79 79
80 #undef USE_SCATTERGATHER 80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW /* still confused about this */ 81 #undef USE_CHECKSUM_HW /* still confused about this */
82 /* #undef HE_DEBUG */ 82 /* #undef HE_DEBUG */
83 83
84 #include "he.h" 84 #include "he.h"
85 #include "suni.h" 85 #include "suni.h"
86 #include <linux/atm_he.h> 86 #include <linux/atm_he.h>
87 87
88 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args) 88 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
89 89
90 #ifdef HE_DEBUG 90 #ifdef HE_DEBUG
91 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args) 91 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
92 #else /* !HE_DEBUG */ 92 #else /* !HE_DEBUG */
93 #define HPRINTK(fmt,args...) do { } while (0) 93 #define HPRINTK(fmt,args...) do { } while (0)
94 #endif /* HE_DEBUG */ 94 #endif /* HE_DEBUG */
95 95
96 /* declarations */ 96 /* declarations */
97 97
98 static int he_open(struct atm_vcc *vcc); 98 static int he_open(struct atm_vcc *vcc);
99 static void he_close(struct atm_vcc *vcc); 99 static void he_close(struct atm_vcc *vcc);
100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb); 100 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg); 101 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
102 static irqreturn_t he_irq_handler(int irq, void *dev_id); 102 static irqreturn_t he_irq_handler(int irq, void *dev_id);
103 static void he_tasklet(unsigned long data); 103 static void he_tasklet(unsigned long data);
104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page); 104 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
105 static int he_start(struct atm_dev *dev); 105 static int he_start(struct atm_dev *dev);
106 static void he_stop(struct he_dev *dev); 106 static void he_stop(struct he_dev *dev);
107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long); 107 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev *, unsigned long); 108 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
109 109
110 static u8 read_prom_byte(struct he_dev *he_dev, int addr); 110 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
111 111
112 /* globals */ 112 /* globals */
113 113
114 static struct he_dev *he_devs; 114 static struct he_dev *he_devs;
115 static bool disable64; 115 static bool disable64;
116 static short nvpibits = -1; 116 static short nvpibits = -1;
117 static short nvcibits = -1; 117 static short nvcibits = -1;
118 static short rx_skb_reserve = 16; 118 static short rx_skb_reserve = 16;
119 static bool irq_coalesce = 1; 119 static bool irq_coalesce = 1;
120 static bool sdh = 0; 120 static bool sdh = 0;
121 121
122 /* Read from EEPROM = 0000 0011b */ 122 /* Read from EEPROM = 0000 0011b */
123 static unsigned int readtab[] = { 123 static unsigned int readtab[] = {
124 CS_HIGH | CLK_HIGH, 124 CS_HIGH | CLK_HIGH,
125 CS_LOW | CLK_LOW, 125 CS_LOW | CLK_LOW,
126 CLK_HIGH, /* 0 */ 126 CLK_HIGH, /* 0 */
127 CLK_LOW, 127 CLK_LOW,
128 CLK_HIGH, /* 0 */ 128 CLK_HIGH, /* 0 */
129 CLK_LOW, 129 CLK_LOW,
130 CLK_HIGH, /* 0 */ 130 CLK_HIGH, /* 0 */
131 CLK_LOW, 131 CLK_LOW,
132 CLK_HIGH, /* 0 */ 132 CLK_HIGH, /* 0 */
133 CLK_LOW, 133 CLK_LOW,
134 CLK_HIGH, /* 0 */ 134 CLK_HIGH, /* 0 */
135 CLK_LOW, 135 CLK_LOW,
136 CLK_HIGH, /* 0 */ 136 CLK_HIGH, /* 0 */
137 CLK_LOW | SI_HIGH, 137 CLK_LOW | SI_HIGH,
138 CLK_HIGH | SI_HIGH, /* 1 */ 138 CLK_HIGH | SI_HIGH, /* 1 */
139 CLK_LOW | SI_HIGH, 139 CLK_LOW | SI_HIGH,
140 CLK_HIGH | SI_HIGH /* 1 */ 140 CLK_HIGH | SI_HIGH /* 1 */
141 }; 141 };
142 142
143 /* Clock to read from/write to the EEPROM */ 143 /* Clock to read from/write to the EEPROM */
144 static unsigned int clocktab[] = { 144 static unsigned int clocktab[] = {
145 CLK_LOW, 145 CLK_LOW,
146 CLK_HIGH, 146 CLK_HIGH,
147 CLK_LOW, 147 CLK_LOW,
148 CLK_HIGH, 148 CLK_HIGH,
149 CLK_LOW, 149 CLK_LOW,
150 CLK_HIGH, 150 CLK_HIGH,
151 CLK_LOW, 151 CLK_LOW,
152 CLK_HIGH, 152 CLK_HIGH,
153 CLK_LOW, 153 CLK_LOW,
154 CLK_HIGH, 154 CLK_HIGH,
155 CLK_LOW, 155 CLK_LOW,
156 CLK_HIGH, 156 CLK_HIGH,
157 CLK_LOW, 157 CLK_LOW,
158 CLK_HIGH, 158 CLK_HIGH,
159 CLK_LOW, 159 CLK_LOW,
160 CLK_HIGH, 160 CLK_HIGH,
161 CLK_LOW 161 CLK_LOW
162 }; 162 };
163 163
164 static struct atmdev_ops he_ops = 164 static struct atmdev_ops he_ops =
165 { 165 {
166 .open = he_open, 166 .open = he_open,
167 .close = he_close, 167 .close = he_close,
168 .ioctl = he_ioctl, 168 .ioctl = he_ioctl,
169 .send = he_send, 169 .send = he_send,
170 .phy_put = he_phy_put, 170 .phy_put = he_phy_put,
171 .phy_get = he_phy_get, 171 .phy_get = he_phy_get,
172 .proc_read = he_proc_read, 172 .proc_read = he_proc_read,
173 .owner = THIS_MODULE 173 .owner = THIS_MODULE
174 }; 174 };
175 175
176 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0) 176 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg) readl((dev)->membase + (reg)) 177 #define he_readl(dev, reg) readl((dev)->membase + (reg))
178 178
179 /* section 2.12 connection memory access */ 179 /* section 2.12 connection memory access */
180 180
181 static __inline__ void 181 static __inline__ void
182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr, 182 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
183 unsigned flags) 183 unsigned flags)
184 { 184 {
185 he_writel(he_dev, val, CON_DAT); 185 he_writel(he_dev, val, CON_DAT);
186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */ 186 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL); 187 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); 188 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
189 } 189 }
190 190
191 #define he_writel_rcm(dev, val, reg) \ 191 #define he_writel_rcm(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_RCM) 192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
193 193
194 #define he_writel_tcm(dev, val, reg) \ 194 #define he_writel_tcm(dev, val, reg) \
195 he_writel_internal(dev, val, reg, CON_CTL_TCM) 195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
196 196
197 #define he_writel_mbox(dev, val, reg) \ 197 #define he_writel_mbox(dev, val, reg) \
198 he_writel_internal(dev, val, reg, CON_CTL_MBOX) 198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
199 199
200 static unsigned 200 static unsigned
201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags) 201 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
202 { 202 {
203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL); 203 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY); 204 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
205 return he_readl(he_dev, CON_DAT); 205 return he_readl(he_dev, CON_DAT);
206 } 206 }
207 207
208 #define he_readl_rcm(dev, reg) \ 208 #define he_readl_rcm(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_RCM) 209 he_readl_internal(dev, reg, CON_CTL_RCM)
210 210
211 #define he_readl_tcm(dev, reg) \ 211 #define he_readl_tcm(dev, reg) \
212 he_readl_internal(dev, reg, CON_CTL_TCM) 212 he_readl_internal(dev, reg, CON_CTL_TCM)
213 213
214 #define he_readl_mbox(dev, reg) \ 214 #define he_readl_mbox(dev, reg) \
215 he_readl_internal(dev, reg, CON_CTL_MBOX) 215 he_readl_internal(dev, reg, CON_CTL_MBOX)
216 216
217 217
218 /* figure 2.2 connection id */ 218 /* figure 2.2 connection id */
219 219
220 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff) 220 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
221 221
222 /* 2.5.1 per connection transmit state registers */ 222 /* 2.5.1 per connection transmit state registers */
223 223
224 #define he_writel_tsr0(dev, val, cid) \ 224 #define he_writel_tsr0(dev, val, cid) \
225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0) 225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \ 226 #define he_readl_tsr0(dev, cid) \
227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0) 227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
228 228
229 #define he_writel_tsr1(dev, val, cid) \ 229 #define he_writel_tsr1(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1) 230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
231 231
232 #define he_writel_tsr2(dev, val, cid) \ 232 #define he_writel_tsr2(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2) 233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
234 234
235 #define he_writel_tsr3(dev, val, cid) \ 235 #define he_writel_tsr3(dev, val, cid) \
236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3) 236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
237 237
238 #define he_writel_tsr4(dev, val, cid) \ 238 #define he_writel_tsr4(dev, val, cid) \
239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4) 239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
240 240
241 /* from page 2-20 241 /* from page 2-20
242 * 242 *
243 * NOTE While the transmit connection is active, bits 23 through 0 243 * NOTE While the transmit connection is active, bits 23 through 0
244 * of this register must not be written by the host. Byte 244 * of this register must not be written by the host. Byte
245 * enables should be used during normal operation when writing 245 * enables should be used during normal operation when writing
246 * the most significant byte. 246 * the most significant byte.
247 */ 247 */
248 248
249 #define he_writel_tsr4_upper(dev, val, cid) \ 249 #define he_writel_tsr4_upper(dev, val, cid) \
250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \ 250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
251 CON_CTL_TCM \ 251 CON_CTL_TCM \
252 | CON_BYTE_DISABLE_2 \ 252 | CON_BYTE_DISABLE_2 \
253 | CON_BYTE_DISABLE_1 \ 253 | CON_BYTE_DISABLE_1 \
254 | CON_BYTE_DISABLE_0) 254 | CON_BYTE_DISABLE_0)
255 255
256 #define he_readl_tsr4(dev, cid) \ 256 #define he_readl_tsr4(dev, cid) \
257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4) 257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
258 258
259 #define he_writel_tsr5(dev, val, cid) \ 259 #define he_writel_tsr5(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5) 260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
261 261
262 #define he_writel_tsr6(dev, val, cid) \ 262 #define he_writel_tsr6(dev, val, cid) \
263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6) 263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
264 264
265 #define he_writel_tsr7(dev, val, cid) \ 265 #define he_writel_tsr7(dev, val, cid) \
266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7) 266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
267 267
268 268
269 #define he_writel_tsr8(dev, val, cid) \ 269 #define he_writel_tsr8(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0) 270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
271 271
272 #define he_writel_tsr9(dev, val, cid) \ 272 #define he_writel_tsr9(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1) 273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
274 274
275 #define he_writel_tsr10(dev, val, cid) \ 275 #define he_writel_tsr10(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2) 276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
277 277
278 #define he_writel_tsr11(dev, val, cid) \ 278 #define he_writel_tsr11(dev, val, cid) \
279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3) 279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
280 280
281 281
282 #define he_writel_tsr12(dev, val, cid) \ 282 #define he_writel_tsr12(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0) 283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
284 284
285 #define he_writel_tsr13(dev, val, cid) \ 285 #define he_writel_tsr13(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1) 286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
287 287
288 288
289 #define he_writel_tsr14(dev, val, cid) \ 289 #define he_writel_tsr14(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRD | cid) 290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
291 291
292 #define he_writel_tsr14_upper(dev, val, cid) \ 292 #define he_writel_tsr14_upper(dev, val, cid) \
293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \ 293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
294 CON_CTL_TCM \ 294 CON_CTL_TCM \
295 | CON_BYTE_DISABLE_2 \ 295 | CON_BYTE_DISABLE_2 \
296 | CON_BYTE_DISABLE_1 \ 296 | CON_BYTE_DISABLE_1 \
297 | CON_BYTE_DISABLE_0) 297 | CON_BYTE_DISABLE_0)
298 298
299 /* 2.7.1 per connection receive state registers */ 299 /* 2.7.1 per connection receive state registers */
300 300
301 #define he_writel_rsr0(dev, val, cid) \ 301 #define he_writel_rsr0(dev, val, cid) \
302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0) 302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \ 303 #define he_readl_rsr0(dev, cid) \
304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0) 304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
305 305
306 #define he_writel_rsr1(dev, val, cid) \ 306 #define he_writel_rsr1(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1) 307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
308 308
309 #define he_writel_rsr2(dev, val, cid) \ 309 #define he_writel_rsr2(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2) 310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
311 311
312 #define he_writel_rsr3(dev, val, cid) \ 312 #define he_writel_rsr3(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3) 313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
314 314
315 #define he_writel_rsr4(dev, val, cid) \ 315 #define he_writel_rsr4(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4) 316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
317 317
318 #define he_writel_rsr5(dev, val, cid) \ 318 #define he_writel_rsr5(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5) 319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
320 320
321 #define he_writel_rsr6(dev, val, cid) \ 321 #define he_writel_rsr6(dev, val, cid) \
322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6) 322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
323 323
324 #define he_writel_rsr7(dev, val, cid) \ 324 #define he_writel_rsr7(dev, val, cid) \
325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7) 325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
326 326
327 static __inline__ struct atm_vcc* 327 static __inline__ struct atm_vcc*
328 __find_vcc(struct he_dev *he_dev, unsigned cid) 328 __find_vcc(struct he_dev *he_dev, unsigned cid)
329 { 329 {
330 struct hlist_head *head; 330 struct hlist_head *head;
331 struct atm_vcc *vcc; 331 struct atm_vcc *vcc;
332 struct sock *s; 332 struct sock *s;
333 short vpi; 333 short vpi;
334 int vci; 334 int vci;
335 335
336 vpi = cid >> he_dev->vcibits; 336 vpi = cid >> he_dev->vcibits;
337 vci = cid & ((1 << he_dev->vcibits) - 1); 337 vci = cid & ((1 << he_dev->vcibits) - 1);
338 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; 338 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
339 339
340 sk_for_each(s, head) { 340 sk_for_each(s, head) {
341 vcc = atm_sk(s); 341 vcc = atm_sk(s);
342 if (vcc->dev == he_dev->atm_dev && 342 if (vcc->dev == he_dev->atm_dev &&
343 vcc->vci == vci && vcc->vpi == vpi && 343 vcc->vci == vci && vcc->vpi == vpi &&
344 vcc->qos.rxtp.traffic_class != ATM_NONE) { 344 vcc->qos.rxtp.traffic_class != ATM_NONE) {
345 return vcc; 345 return vcc;
346 } 346 }
347 } 347 }
348 return NULL; 348 return NULL;
349 } 349 }
350 350
351 static int he_init_one(struct pci_dev *pci_dev, 351 static int he_init_one(struct pci_dev *pci_dev,
352 const struct pci_device_id *pci_ent) 352 const struct pci_device_id *pci_ent)
353 { 353 {
354 struct atm_dev *atm_dev = NULL; 354 struct atm_dev *atm_dev = NULL;
355 struct he_dev *he_dev = NULL; 355 struct he_dev *he_dev = NULL;
356 int err = 0; 356 int err = 0;
357 357
358 printk(KERN_INFO "ATM he driver\n"); 358 printk(KERN_INFO "ATM he driver\n");
359 359
360 if (pci_enable_device(pci_dev)) 360 if (pci_enable_device(pci_dev))
361 return -EIO; 361 return -EIO;
362 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) { 362 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
363 printk(KERN_WARNING "he: no suitable dma available\n"); 363 printk(KERN_WARNING "he: no suitable dma available\n");
364 err = -EIO; 364 err = -EIO;
365 goto init_one_failure; 365 goto init_one_failure;
366 } 366 }
367 367
368 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL); 368 atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL);
369 if (!atm_dev) { 369 if (!atm_dev) {
370 err = -ENODEV; 370 err = -ENODEV;
371 goto init_one_failure; 371 goto init_one_failure;
372 } 372 }
373 pci_set_drvdata(pci_dev, atm_dev); 373 pci_set_drvdata(pci_dev, atm_dev);
374 374
375 he_dev = kzalloc(sizeof(struct he_dev), 375 he_dev = kzalloc(sizeof(struct he_dev),
376 GFP_KERNEL); 376 GFP_KERNEL);
377 if (!he_dev) { 377 if (!he_dev) {
378 err = -ENOMEM; 378 err = -ENOMEM;
379 goto init_one_failure; 379 goto init_one_failure;
380 } 380 }
381 he_dev->pci_dev = pci_dev; 381 he_dev->pci_dev = pci_dev;
382 he_dev->atm_dev = atm_dev; 382 he_dev->atm_dev = atm_dev;
383 he_dev->atm_dev->dev_data = he_dev; 383 he_dev->atm_dev->dev_data = he_dev;
384 atm_dev->dev_data = he_dev; 384 atm_dev->dev_data = he_dev;
385 he_dev->number = atm_dev->number; 385 he_dev->number = atm_dev->number;
386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev); 386 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
387 spin_lock_init(&he_dev->global_lock); 387 spin_lock_init(&he_dev->global_lock);
388 388
389 if (he_start(atm_dev)) { 389 if (he_start(atm_dev)) {
390 he_stop(he_dev); 390 he_stop(he_dev);
391 err = -ENODEV; 391 err = -ENODEV;
392 goto init_one_failure; 392 goto init_one_failure;
393 } 393 }
394 he_dev->next = NULL; 394 he_dev->next = NULL;
395 if (he_devs) 395 if (he_devs)
396 he_dev->next = he_devs; 396 he_dev->next = he_devs;
397 he_devs = he_dev; 397 he_devs = he_dev;
398 return 0; 398 return 0;
399 399
400 init_one_failure: 400 init_one_failure:
401 if (atm_dev) 401 if (atm_dev)
402 atm_dev_deregister(atm_dev); 402 atm_dev_deregister(atm_dev);
403 kfree(he_dev); 403 kfree(he_dev);
404 pci_disable_device(pci_dev); 404 pci_disable_device(pci_dev);
405 return err; 405 return err;
406 } 406 }
407 407
408 static void he_remove_one(struct pci_dev *pci_dev) 408 static void he_remove_one(struct pci_dev *pci_dev)
409 { 409 {
410 struct atm_dev *atm_dev; 410 struct atm_dev *atm_dev;
411 struct he_dev *he_dev; 411 struct he_dev *he_dev;
412 412
413 atm_dev = pci_get_drvdata(pci_dev); 413 atm_dev = pci_get_drvdata(pci_dev);
414 he_dev = HE_DEV(atm_dev); 414 he_dev = HE_DEV(atm_dev);
415 415
416 /* need to remove from he_devs */ 416 /* need to remove from he_devs */
417 417
418 he_stop(he_dev); 418 he_stop(he_dev);
419 atm_dev_deregister(atm_dev); 419 atm_dev_deregister(atm_dev);
420 kfree(he_dev); 420 kfree(he_dev);
421 421
422 pci_set_drvdata(pci_dev, NULL); 422 pci_set_drvdata(pci_dev, NULL);
423 pci_disable_device(pci_dev); 423 pci_disable_device(pci_dev);
424 } 424 }
425 425
426 426
/* Convert a cell rate (cells per second) into the ATM Forum
 * floating-point rate format: bit 14 = non-zero flag, bits 13:9 =
 * exponent, bits 8:0 = mantissa (the implicit leading one of the
 * normalized value is dropped by the & 0x1ff).  Zero maps to zero.
 */
static unsigned
rate_to_atmf(unsigned rate)             /* cps to atm forum format */
{
#define NONZERO (1 << 14)
        unsigned exp;

        if (!rate)
                return 0;

        /* scale so the normalized value lands in [0x200, 0x3ff] */
        rate <<= 9;
        for (exp = 0; rate > 0x3ff; ++exp)
                rate >>= 1;

        return NONZERO | (exp << 9) | (rate & 0x1ff);
}
445 445
446 static void he_init_rx_lbfp0(struct he_dev *he_dev) 446 static void he_init_rx_lbfp0(struct he_dev *he_dev)
447 { 447 {
448 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; 448 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
449 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; 449 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
450 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; 450 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
451 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row; 451 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
452 452
453 lbufd_index = 0; 453 lbufd_index = 0;
454 lbm_offset = he_readl(he_dev, RCMLBM_BA); 454 lbm_offset = he_readl(he_dev, RCMLBM_BA);
455 455
456 he_writel(he_dev, lbufd_index, RLBF0_H); 456 he_writel(he_dev, lbufd_index, RLBF0_H);
457 457
458 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) { 458 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
459 lbufd_index += 2; 459 lbufd_index += 2;
460 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; 460 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
461 461
462 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); 462 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
463 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); 463 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
464 464
465 if (++lbuf_count == lbufs_per_row) { 465 if (++lbuf_count == lbufs_per_row) {
466 lbuf_count = 0; 466 lbuf_count = 0;
467 row_offset += he_dev->bytes_per_row; 467 row_offset += he_dev->bytes_per_row;
468 } 468 }
469 lbm_offset += 4; 469 lbm_offset += 4;
470 } 470 }
471 471
472 he_writel(he_dev, lbufd_index - 2, RLBF0_T); 472 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
473 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C); 473 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
474 } 474 }
475 475
476 static void he_init_rx_lbfp1(struct he_dev *he_dev) 476 static void he_init_rx_lbfp1(struct he_dev *he_dev)
477 { 477 {
478 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; 478 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
479 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; 479 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
480 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; 480 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
481 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row; 481 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
482 482
483 lbufd_index = 1; 483 lbufd_index = 1;
484 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); 484 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
485 485
486 he_writel(he_dev, lbufd_index, RLBF1_H); 486 he_writel(he_dev, lbufd_index, RLBF1_H);
487 487
488 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) { 488 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
489 lbufd_index += 2; 489 lbufd_index += 2;
490 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; 490 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
491 491
492 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); 492 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
493 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); 493 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
494 494
495 if (++lbuf_count == lbufs_per_row) { 495 if (++lbuf_count == lbufs_per_row) {
496 lbuf_count = 0; 496 lbuf_count = 0;
497 row_offset += he_dev->bytes_per_row; 497 row_offset += he_dev->bytes_per_row;
498 } 498 }
499 lbm_offset += 4; 499 lbm_offset += 4;
500 } 500 }
501 501
502 he_writel(he_dev, lbufd_index - 2, RLBF1_T); 502 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
503 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C); 503 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
504 } 504 }
505 505
506 static void he_init_tx_lbfp(struct he_dev *he_dev) 506 static void he_init_tx_lbfp(struct he_dev *he_dev)
507 { 507 {
508 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count; 508 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
509 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf; 509 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
510 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD; 510 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
511 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row; 511 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
512 512
513 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs; 513 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
514 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index); 514 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
515 515
516 he_writel(he_dev, lbufd_index, TLBF_H); 516 he_writel(he_dev, lbufd_index, TLBF_H);
517 517
518 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) { 518 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
519 lbufd_index += 1; 519 lbufd_index += 1;
520 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32; 520 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
521 521
522 he_writel_rcm(he_dev, lbuf_addr, lbm_offset); 522 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
523 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1); 523 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
524 524
525 if (++lbuf_count == lbufs_per_row) { 525 if (++lbuf_count == lbufs_per_row) {
526 lbuf_count = 0; 526 lbuf_count = 0;
527 row_offset += he_dev->bytes_per_row; 527 row_offset += he_dev->bytes_per_row;
528 } 528 }
529 lbm_offset += 2; 529 lbm_offset += 2;
530 } 530 }
531 531
532 he_writel(he_dev, lbufd_index - 1, TLBF_T); 532 he_writel(he_dev, lbufd_index - 1, TLBF_T);
533 } 533 }
534 534
535 static int he_init_tpdrq(struct he_dev *he_dev) 535 static int he_init_tpdrq(struct he_dev *he_dev)
536 { 536 {
537 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev, 537 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
538 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys); 538 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
539 if (he_dev->tpdrq_base == NULL) { 539 if (he_dev->tpdrq_base == NULL) {
540 hprintk("failed to alloc tpdrq\n"); 540 hprintk("failed to alloc tpdrq\n");
541 return -ENOMEM; 541 return -ENOMEM;
542 } 542 }
543 memset(he_dev->tpdrq_base, 0, 543 memset(he_dev->tpdrq_base, 0,
544 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq)); 544 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
545 545
546 he_dev->tpdrq_tail = he_dev->tpdrq_base; 546 he_dev->tpdrq_tail = he_dev->tpdrq_base;
547 he_dev->tpdrq_head = he_dev->tpdrq_base; 547 he_dev->tpdrq_head = he_dev->tpdrq_base;
548 548
549 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H); 549 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
550 he_writel(he_dev, 0, TPDRQ_T); 550 he_writel(he_dev, 0, TPDRQ_T);
551 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S); 551 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
552 552
553 return 0; 553 return 0;
554 } 554 }
555 555
/*
 * Initialize the congestion/scheduler (CS) block per section 5.1.7 of
 * the ForeRunnerHE manual: clear the schedule timers, program the first
 * row of rate-grid timer reload values, then load the fixed ABR/ER
 * control tables.  The table constants differ between the 622 Mb/s and
 * 155 Mb/s variants of the card (selected via he_is622()).
 */
static void he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	/* core clock: 66.667 MHz on the 622 card, 50 MHz on the 155 card */
	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}
650 650
651 static int he_init_cs_block_rcm(struct he_dev *he_dev) 651 static int he_init_cs_block_rcm(struct he_dev *he_dev)
652 { 652 {
653 unsigned (*rategrid)[16][16]; 653 unsigned (*rategrid)[16][16];
654 unsigned rate, delta; 654 unsigned rate, delta;
655 int i, j, reg; 655 int i, j, reg;
656 656
657 unsigned rate_atmf, exp, man; 657 unsigned rate_atmf, exp, man;
658 unsigned long long rate_cps; 658 unsigned long long rate_cps;
659 int mult, buf, buf_limit = 4; 659 int mult, buf, buf_limit = 4;
660 660
661 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL); 661 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
662 if (!rategrid) 662 if (!rategrid)
663 return -ENOMEM; 663 return -ENOMEM;
664 664
665 /* initialize rate grid group table */ 665 /* initialize rate grid group table */
666 666
667 for (reg = 0x0; reg < 0xff; ++reg) 667 for (reg = 0x0; reg < 0xff; ++reg)
668 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); 668 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
669 669
670 /* initialize rate controller groups */ 670 /* initialize rate controller groups */
671 671
672 for (reg = 0x100; reg < 0x1ff; ++reg) 672 for (reg = 0x100; reg < 0x1ff; ++reg)
673 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg); 673 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
674 674
675 /* initialize tNrm lookup table */ 675 /* initialize tNrm lookup table */
676 676
677 /* the manual makes reference to a routine in a sample driver 677 /* the manual makes reference to a routine in a sample driver
678 for proper configuration; fortunately, we only need this 678 for proper configuration; fortunately, we only need this
679 in order to support abr connection */ 679 in order to support abr connection */
680 680
681 /* initialize rate to group table */ 681 /* initialize rate to group table */
682 682
683 rate = he_dev->atm_dev->link_rate; 683 rate = he_dev->atm_dev->link_rate;
684 delta = rate / 32; 684 delta = rate / 32;
685 685
686 /* 686 /*
687 * 2.4 transmit internal functions 687 * 2.4 transmit internal functions
688 * 688 *
689 * we construct a copy of the rate grid used by the scheduler 689 * we construct a copy of the rate grid used by the scheduler
690 * in order to construct the rate to group table below 690 * in order to construct the rate to group table below
691 */ 691 */
692 692
693 for (j = 0; j < 16; j++) { 693 for (j = 0; j < 16; j++) {
694 (*rategrid)[0][j] = rate; 694 (*rategrid)[0][j] = rate;
695 rate -= delta; 695 rate -= delta;
696 } 696 }
697 697
698 for (i = 1; i < 16; i++) 698 for (i = 1; i < 16; i++)
699 for (j = 0; j < 16; j++) 699 for (j = 0; j < 16; j++)
700 if (i > 14) 700 if (i > 14)
701 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4; 701 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
702 else 702 else
703 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2; 703 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
704 704
705 /* 705 /*
706 * 2.4 transmit internal function 706 * 2.4 transmit internal function
707 * 707 *
708 * this table maps the upper 5 bits of exponent and mantissa 708 * this table maps the upper 5 bits of exponent and mantissa
709 * of the atm forum representation of the rate into an index 709 * of the atm forum representation of the rate into an index
710 * on rate grid 710 * on rate grid
711 */ 711 */
712 712
713 rate_atmf = 0; 713 rate_atmf = 0;
714 while (rate_atmf < 0x400) { 714 while (rate_atmf < 0x400) {
715 man = (rate_atmf & 0x1f) << 4; 715 man = (rate_atmf & 0x1f) << 4;
716 exp = rate_atmf >> 5; 716 exp = rate_atmf >> 5;
717 717
718 /* 718 /*
719 instead of '/ 512', use '>> 9' to prevent a call 719 instead of '/ 512', use '>> 9' to prevent a call
720 to divdu3 on x86 platforms 720 to divdu3 on x86 platforms
721 */ 721 */
722 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; 722 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
723 723
724 if (rate_cps < 10) 724 if (rate_cps < 10)
725 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ 725 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
726 726
727 for (i = 255; i > 0; i--) 727 for (i = 255; i > 0; i--)
728 if ((*rategrid)[i/16][i%16] >= rate_cps) 728 if ((*rategrid)[i/16][i%16] >= rate_cps)
729 break; /* pick nearest rate instead? */ 729 break; /* pick nearest rate instead? */
730 730
731 /* 731 /*
732 * each table entry is 16 bits: (rate grid index (8 bits) 732 * each table entry is 16 bits: (rate grid index (8 bits)
733 * and a buffer limit (8 bits) 733 * and a buffer limit (8 bits)
734 * there are two table entries in each 32-bit register 734 * there are two table entries in each 32-bit register
735 */ 735 */
736 736
737 #ifdef notdef 737 #ifdef notdef
738 buf = rate_cps * he_dev->tx_numbuffs / 738 buf = rate_cps * he_dev->tx_numbuffs /
739 (he_dev->atm_dev->link_rate * 2); 739 (he_dev->atm_dev->link_rate * 2);
740 #else 740 #else
741 /* this is pretty, but avoids _divdu3 and is mostly correct */ 741 /* this is pretty, but avoids _divdu3 and is mostly correct */
742 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR; 742 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
743 if (rate_cps > (272 * mult)) 743 if (rate_cps > (272 * mult))
744 buf = 4; 744 buf = 4;
745 else if (rate_cps > (204 * mult)) 745 else if (rate_cps > (204 * mult))
746 buf = 3; 746 buf = 3;
747 else if (rate_cps > (136 * mult)) 747 else if (rate_cps > (136 * mult))
748 buf = 2; 748 buf = 2;
749 else if (rate_cps > (68 * mult)) 749 else if (rate_cps > (68 * mult))
750 buf = 1; 750 buf = 1;
751 else 751 else
752 buf = 0; 752 buf = 0;
753 #endif 753 #endif
754 if (buf > buf_limit) 754 if (buf > buf_limit)
755 buf = buf_limit; 755 buf = buf_limit;
756 reg = (reg << 16) | ((i << 8) | buf); 756 reg = (reg << 16) | ((i << 8) | buf);
757 757
758 #define RTGTBL_OFFSET 0x400 758 #define RTGTBL_OFFSET 0x400
759 759
760 if (rate_atmf & 0x1) 760 if (rate_atmf & 0x1)
761 he_writel_rcm(he_dev, reg, 761 he_writel_rcm(he_dev, reg,
762 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1)); 762 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
763 763
764 ++rate_atmf; 764 ++rate_atmf;
765 } 765 }
766 766
767 kfree(rategrid); 767 kfree(rategrid);
768 return 0; 768 return 0;
769 } 769 }
770 770
/*
 * Set up the per-group receive/transmit queues: disable the small
 * buffer pool, allocate the large buffer pool (bitmap, virt table,
 * pci_pool, descriptor ring plus CONFIG_RBPL_SIZE buffers), and the
 * rx/tx buffer ready queues, programming the matching G0_* registers.
 *
 * On any failure, everything allocated so far is unwound via the goto
 * chain at the bottom and -ENOMEM is returned; returns 0 on success.
 */
static int he_init_group(struct he_dev *he_dev, int group)
{
	struct he_buff *heb, *next;
	dma_addr_t mapping;
	int i;

	/* small buffer pool is unused: zero size, threshold 1 */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
		  G0_RBPS_BS + (group * 32));

	/* bitmap table */
	he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
				     * sizeof(unsigned long), GFP_KERNEL);
	if (!he_dev->rbpl_table) {
		hprintk("unable to allocate rbpl bitmap table\n");
		return -ENOMEM;
	}
	bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);

	/* rbpl_virt 64-bit pointers */
	he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
				    * sizeof(struct he_buff *), GFP_KERNEL);
	if (!he_dev->rbpl_virt) {
		hprintk("unable to allocate rbpl virt table\n");
		goto out_free_rbpl_table;
	}

	/* large buffer pool */
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
					    CONFIG_RBPL_BUFSIZE, 64, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		goto out_free_rbpl_virt;
	}

	/* DMA-coherent descriptor ring handed to the adapter */
	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl_base\n");
		goto out_destroy_rbpl_pool;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));

	INIT_LIST_HEAD(&he_dev->rbpl_outstanding);

	/* pre-populate the pool: one he_buff per ring slot, tracked in
	   the bitmap/virt table and on the outstanding list */
	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
		if (!heb)
			goto out_free_rbpl;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);

		set_bit(i, he_dev->rbpl_table);
		he_dev->rbpl_virt[i] = heb;
		he_dev->rbpl_hint = i + 1;
		he_dev->rbpl_base[i].idx = i << RBP_IDX_OFFSET;
		/* adapter DMAs into the data area, past the he_buff header */
		he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
		  G0_RBPL_T + (group * 32));
	/* usable buffer size is programmed in 4-byte units */
	he_writel(he_dev, (CONFIG_RBPL_BUFSIZE - sizeof(struct he_buff))/4,
		  G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
		  RBP_THRESH(CONFIG_RBPL_THRESH) |
		  RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
		  RBP_INT_ENB,
		  G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		goto out_free_rbpl;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		  RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
		  G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
			  G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
			  G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		goto out_free_rbpq_base;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;

	/* unwind in reverse order of acquisition */
out_free_rbpq_base:
	pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
			    sizeof(struct he_rbrq), he_dev->rbrq_base,
			    he_dev->rbrq_phys);
out_free_rbpl:
	list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
		pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);

	pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
			    sizeof(struct he_rbp), he_dev->rbpl_base,
			    he_dev->rbpl_phys);
out_destroy_rbpl_pool:
	pci_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
	kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
	kfree(he_dev->rbpl_table);

	return -ENOMEM;
}
908 908
/*
 * Allocate and program the interrupt event queue: only interrupt group
 * 0 is used (groups 1-3 are disabled), all connection groups are mapped
 * to it, and the shared IRQ handler is registered.
 *
 * Returns 0 on success, -ENOMEM if the queue allocation fails, or
 * -EINVAL if the interrupt line cannot be requested.
 */
static int he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5 tail offset for each interrupt queue is located after the
	   end of the interrupt queue */

	/* +1 entry: the slot past the queue holds the adapter-written
	   tail offset */
	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	/* mark every slot invalid so stale entries are never serviced */
	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
							IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	/* interrupt queues 1-3 are unused: zero their registers */
	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	/* route all connection groups to interrupt queue 0 */
	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	/* NOTE(review): on failure here irq_base is not freed in this
	   function — presumably the caller's error path releases it;
	   confirm against he_start()/he_stop() */
	if (request_irq(he_dev->pci_dev->irq,
			he_irq_handler, IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}
970 970
971 static int he_start(struct atm_dev *dev) 971 static int he_start(struct atm_dev *dev)
972 { 972 {
973 struct he_dev *he_dev; 973 struct he_dev *he_dev;
974 struct pci_dev *pci_dev; 974 struct pci_dev *pci_dev;
975 unsigned long membase; 975 unsigned long membase;
976 976
977 u16 command; 977 u16 command;
978 u32 gen_cntl_0, host_cntl, lb_swap; 978 u32 gen_cntl_0, host_cntl, lb_swap;
979 u8 cache_size, timer; 979 u8 cache_size, timer;
980 980
981 unsigned err; 981 unsigned err;
982 unsigned int status, reg; 982 unsigned int status, reg;
983 int i, group; 983 int i, group;
984 984
985 he_dev = HE_DEV(dev); 985 he_dev = HE_DEV(dev);
986 pci_dev = he_dev->pci_dev; 986 pci_dev = he_dev->pci_dev;
987 987
988 membase = pci_resource_start(pci_dev, 0); 988 membase = pci_resource_start(pci_dev, 0);
989 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq); 989 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
990 990
991 /* 991 /*
992 * pci bus controller initialization 992 * pci bus controller initialization
993 */ 993 */
994 994
995 /* 4.3 pci bus controller-specific initialization */ 995 /* 4.3 pci bus controller-specific initialization */
996 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) { 996 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
997 hprintk("can't read GEN_CNTL_0\n"); 997 hprintk("can't read GEN_CNTL_0\n");
998 return -EINVAL; 998 return -EINVAL;
999 } 999 }
1000 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT); 1000 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1001 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) { 1001 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1002 hprintk("can't write GEN_CNTL_0.\n"); 1002 hprintk("can't write GEN_CNTL_0.\n");
1003 return -EINVAL; 1003 return -EINVAL;
1004 } 1004 }
1005 1005
1006 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) { 1006 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1007 hprintk("can't read PCI_COMMAND.\n"); 1007 hprintk("can't read PCI_COMMAND.\n");
1008 return -EINVAL; 1008 return -EINVAL;
1009 } 1009 }
1010 1010
1011 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE); 1011 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1012 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) { 1012 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1013 hprintk("can't enable memory.\n"); 1013 hprintk("can't enable memory.\n");
1014 return -EINVAL; 1014 return -EINVAL;
1015 } 1015 }
1016 1016
1017 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) { 1017 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1018 hprintk("can't read cache line size?\n"); 1018 hprintk("can't read cache line size?\n");
1019 return -EINVAL; 1019 return -EINVAL;
1020 } 1020 }
1021 1021
1022 if (cache_size < 16) { 1022 if (cache_size < 16) {
1023 cache_size = 16; 1023 cache_size = 16;
1024 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size)) 1024 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1025 hprintk("can't set cache line size to %d\n", cache_size); 1025 hprintk("can't set cache line size to %d\n", cache_size);
1026 } 1026 }
1027 1027
1028 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) { 1028 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1029 hprintk("can't read latency timer?\n"); 1029 hprintk("can't read latency timer?\n");
1030 return -EINVAL; 1030 return -EINVAL;
1031 } 1031 }
1032 1032
1033 /* from table 3.9 1033 /* from table 3.9
1034 * 1034 *
1035 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE 1035 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1036 * 1036 *
1037 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles] 1037 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1038 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles] 1038 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1039 * 1039 *
1040 */ 1040 */
1041 #define LAT_TIMER 209 1041 #define LAT_TIMER 209
1042 if (timer < LAT_TIMER) { 1042 if (timer < LAT_TIMER) {
1043 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER); 1043 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1044 timer = LAT_TIMER; 1044 timer = LAT_TIMER;
1045 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer)) 1045 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1046 hprintk("can't set latency timer to %d\n", timer); 1046 hprintk("can't set latency timer to %d\n", timer);
1047 } 1047 }
1048 1048
1049 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) { 1049 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1050 hprintk("can't set up page mapping\n"); 1050 hprintk("can't set up page mapping\n");
1051 return -EINVAL; 1051 return -EINVAL;
1052 } 1052 }
1053 1053
1054 /* 4.4 card reset */ 1054 /* 4.4 card reset */
1055 he_writel(he_dev, 0x0, RESET_CNTL); 1055 he_writel(he_dev, 0x0, RESET_CNTL);
1056 he_writel(he_dev, 0xff, RESET_CNTL); 1056 he_writel(he_dev, 0xff, RESET_CNTL);
1057 1057
1058 msleep(16); /* 16 ms */ 1058 msleep(16); /* 16 ms */
1059 status = he_readl(he_dev, RESET_CNTL); 1059 status = he_readl(he_dev, RESET_CNTL);
1060 if ((status & BOARD_RST_STATUS) == 0) { 1060 if ((status & BOARD_RST_STATUS) == 0) {
1061 hprintk("reset failed\n"); 1061 hprintk("reset failed\n");
1062 return -EINVAL; 1062 return -EINVAL;
1063 } 1063 }
1064 1064
1065 /* 4.5 set bus width */ 1065 /* 4.5 set bus width */
1066 host_cntl = he_readl(he_dev, HOST_CNTL); 1066 host_cntl = he_readl(he_dev, HOST_CNTL);
1067 if (host_cntl & PCI_BUS_SIZE64) 1067 if (host_cntl & PCI_BUS_SIZE64)
1068 gen_cntl_0 |= ENBL_64; 1068 gen_cntl_0 |= ENBL_64;
1069 else 1069 else
1070 gen_cntl_0 &= ~ENBL_64; 1070 gen_cntl_0 &= ~ENBL_64;
1071 1071
1072 if (disable64 == 1) { 1072 if (disable64 == 1) {
1073 hprintk("disabling 64-bit pci bus transfers\n"); 1073 hprintk("disabling 64-bit pci bus transfers\n");
1074 gen_cntl_0 &= ~ENBL_64; 1074 gen_cntl_0 &= ~ENBL_64;
1075 } 1075 }
1076 1076
1077 if (gen_cntl_0 & ENBL_64) 1077 if (gen_cntl_0 & ENBL_64)
1078 hprintk("64-bit transfers enabled\n"); 1078 hprintk("64-bit transfers enabled\n");
1079 1079
1080 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0); 1080 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1081 1081
1082 /* 4.7 read prom contents */ 1082 /* 4.7 read prom contents */
1083 for (i = 0; i < PROD_ID_LEN; ++i) 1083 for (i = 0; i < PROD_ID_LEN; ++i)
1084 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i); 1084 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1085 1085
1086 he_dev->media = read_prom_byte(he_dev, MEDIA); 1086 he_dev->media = read_prom_byte(he_dev, MEDIA);
1087 1087
1088 for (i = 0; i < 6; ++i) 1088 for (i = 0; i < 6; ++i)
1089 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i); 1089 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1090 1090
1091 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n", 1091 hprintk("%s%s, %pM\n", he_dev->prod_id,
1092 he_dev->prod_id, 1092 he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
1093 he_dev->media & 0x40 ? "SM" : "MM",
1094 dev->esi[0],
1095 dev->esi[1],
1096 dev->esi[2],
1097 dev->esi[3],
1098 dev->esi[4],
1099 dev->esi[5]);
1100 he_dev->atm_dev->link_rate = he_is622(he_dev) ? 1093 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1101 ATM_OC12_PCR : ATM_OC3_PCR; 1094 ATM_OC12_PCR : ATM_OC3_PCR;
1102 1095
1103 /* 4.6 set host endianess */ 1096 /* 4.6 set host endianess */
1104 lb_swap = he_readl(he_dev, LB_SWAP); 1097 lb_swap = he_readl(he_dev, LB_SWAP);
1105 if (he_is622(he_dev)) 1098 if (he_is622(he_dev))
1106 lb_swap &= ~XFER_SIZE; /* 4 cells */ 1099 lb_swap &= ~XFER_SIZE; /* 4 cells */
1107 else 1100 else
1108 lb_swap |= XFER_SIZE; /* 8 cells */ 1101 lb_swap |= XFER_SIZE; /* 8 cells */
1109 #ifdef __BIG_ENDIAN 1102 #ifdef __BIG_ENDIAN
1110 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST; 1103 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1111 #else 1104 #else
1112 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST | 1105 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1113 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP); 1106 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1114 #endif /* __BIG_ENDIAN */ 1107 #endif /* __BIG_ENDIAN */
1115 he_writel(he_dev, lb_swap, LB_SWAP); 1108 he_writel(he_dev, lb_swap, LB_SWAP);
1116 1109
1117 /* 4.8 sdram controller initialization */ 1110 /* 4.8 sdram controller initialization */
1118 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL); 1111 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1119 1112
1120 /* 4.9 initialize rnum value */ 1113 /* 4.9 initialize rnum value */
1121 lb_swap |= SWAP_RNUM_MAX(0xf); 1114 lb_swap |= SWAP_RNUM_MAX(0xf);
1122 he_writel(he_dev, lb_swap, LB_SWAP); 1115 he_writel(he_dev, lb_swap, LB_SWAP);
1123 1116
1124 /* 4.10 initialize the interrupt queues */ 1117 /* 4.10 initialize the interrupt queues */
1125 if ((err = he_init_irq(he_dev)) != 0) 1118 if ((err = he_init_irq(he_dev)) != 0)
1126 return err; 1119 return err;
1127 1120
1128 /* 4.11 enable pci bus controller state machines */ 1121 /* 4.11 enable pci bus controller state machines */
1129 host_cntl |= (OUTFF_ENB | CMDFF_ENB | 1122 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1130 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB); 1123 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1131 he_writel(he_dev, host_cntl, HOST_CNTL); 1124 he_writel(he_dev, host_cntl, HOST_CNTL);
1132 1125
1133 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB; 1126 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1134 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0); 1127 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1135 1128
1136 /* 1129 /*
1137 * atm network controller initialization 1130 * atm network controller initialization
1138 */ 1131 */
1139 1132
1140 /* 5.1.1 generic configuration state */ 1133 /* 5.1.1 generic configuration state */
1141 1134
1142 /* 1135 /*
1143 * local (cell) buffer memory map 1136 * local (cell) buffer memory map
1144 * 1137 *
1145 * HE155 HE622 1138 * HE155 HE622
1146 * 1139 *
1147 * 0 ____________1023 bytes 0 _______________________2047 bytes 1140 * 0 ____________1023 bytes 0 _______________________2047 bytes
1148 * | | | | | 1141 * | | | | |
1149 * | utility | | rx0 | | 1142 * | utility | | rx0 | |
1150 * 5|____________| 255|___________________| u | 1143 * 5|____________| 255|___________________| u |
1151 * 6| | 256| | t | 1144 * 6| | 256| | t |
1152 * | | | | i | 1145 * | | | | i |
1153 * | rx0 | row | tx | l | 1146 * | rx0 | row | tx | l |
1154 * | | | | i | 1147 * | | | | i |
1155 * | | 767|___________________| t | 1148 * | | 767|___________________| t |
1156 * 517|____________| 768| | y | 1149 * 517|____________| 768| | y |
1157 * row 518| | | rx1 | | 1150 * row 518| | | rx1 | |
1158 * | | 1023|___________________|___| 1151 * | | 1023|___________________|___|
1159 * | | 1152 * | |
1160 * | tx | 1153 * | tx |
1161 * | | 1154 * | |
1162 * | | 1155 * | |
1163 * 1535|____________| 1156 * 1535|____________|
1164 * 1536| | 1157 * 1536| |
1165 * | rx1 | 1158 * | rx1 |
1166 * 2047|____________| 1159 * 2047|____________|
1167 * 1160 *
1168 */ 1161 */
1169 1162
1170 /* total 4096 connections */ 1163 /* total 4096 connections */
1171 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS; 1164 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1172 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS; 1165 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1173 1166
1174 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) { 1167 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1175 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS); 1168 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1176 return -ENODEV; 1169 return -ENODEV;
1177 } 1170 }
1178 1171
1179 if (nvpibits != -1) { 1172 if (nvpibits != -1) {
1180 he_dev->vpibits = nvpibits; 1173 he_dev->vpibits = nvpibits;
1181 he_dev->vcibits = HE_MAXCIDBITS - nvpibits; 1174 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1182 } 1175 }
1183 1176
1184 if (nvcibits != -1) { 1177 if (nvcibits != -1) {
1185 he_dev->vcibits = nvcibits; 1178 he_dev->vcibits = nvcibits;
1186 he_dev->vpibits = HE_MAXCIDBITS - nvcibits; 1179 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1187 } 1180 }
1188 1181
1189 1182
1190 if (he_is622(he_dev)) { 1183 if (he_is622(he_dev)) {
1191 he_dev->cells_per_row = 40; 1184 he_dev->cells_per_row = 40;
1192 he_dev->bytes_per_row = 2048; 1185 he_dev->bytes_per_row = 2048;
1193 he_dev->r0_numrows = 256; 1186 he_dev->r0_numrows = 256;
1194 he_dev->tx_numrows = 512; 1187 he_dev->tx_numrows = 512;
1195 he_dev->r1_numrows = 256; 1188 he_dev->r1_numrows = 256;
1196 he_dev->r0_startrow = 0; 1189 he_dev->r0_startrow = 0;
1197 he_dev->tx_startrow = 256; 1190 he_dev->tx_startrow = 256;
1198 he_dev->r1_startrow = 768; 1191 he_dev->r1_startrow = 768;
1199 } else { 1192 } else {
1200 he_dev->cells_per_row = 20; 1193 he_dev->cells_per_row = 20;
1201 he_dev->bytes_per_row = 1024; 1194 he_dev->bytes_per_row = 1024;
1202 he_dev->r0_numrows = 512; 1195 he_dev->r0_numrows = 512;
1203 he_dev->tx_numrows = 1018; 1196 he_dev->tx_numrows = 1018;
1204 he_dev->r1_numrows = 512; 1197 he_dev->r1_numrows = 512;
1205 he_dev->r0_startrow = 6; 1198 he_dev->r0_startrow = 6;
1206 he_dev->tx_startrow = 518; 1199 he_dev->tx_startrow = 518;
1207 he_dev->r1_startrow = 1536; 1200 he_dev->r1_startrow = 1536;
1208 } 1201 }
1209 1202
1210 he_dev->cells_per_lbuf = 4; 1203 he_dev->cells_per_lbuf = 4;
1211 he_dev->buffer_limit = 4; 1204 he_dev->buffer_limit = 4;
1212 he_dev->r0_numbuffs = he_dev->r0_numrows * 1205 he_dev->r0_numbuffs = he_dev->r0_numrows *
1213 he_dev->cells_per_row / he_dev->cells_per_lbuf; 1206 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1214 if (he_dev->r0_numbuffs > 2560) 1207 if (he_dev->r0_numbuffs > 2560)
1215 he_dev->r0_numbuffs = 2560; 1208 he_dev->r0_numbuffs = 2560;
1216 1209
1217 he_dev->r1_numbuffs = he_dev->r1_numrows * 1210 he_dev->r1_numbuffs = he_dev->r1_numrows *
1218 he_dev->cells_per_row / he_dev->cells_per_lbuf; 1211 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1219 if (he_dev->r1_numbuffs > 2560) 1212 if (he_dev->r1_numbuffs > 2560)
1220 he_dev->r1_numbuffs = 2560; 1213 he_dev->r1_numbuffs = 2560;
1221 1214
1222 he_dev->tx_numbuffs = he_dev->tx_numrows * 1215 he_dev->tx_numbuffs = he_dev->tx_numrows *
1223 he_dev->cells_per_row / he_dev->cells_per_lbuf; 1216 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1224 if (he_dev->tx_numbuffs > 5120) 1217 if (he_dev->tx_numbuffs > 5120)
1225 he_dev->tx_numbuffs = 5120; 1218 he_dev->tx_numbuffs = 5120;
1226 1219
1227 /* 5.1.2 configure hardware dependent registers */ 1220 /* 5.1.2 configure hardware dependent registers */
1228 1221
1229 he_writel(he_dev, 1222 he_writel(he_dev,
1230 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) | 1223 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1231 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) | 1224 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1232 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) | 1225 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1233 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)), 1226 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1234 LBARB); 1227 LBARB);
1235 1228
1236 he_writel(he_dev, BANK_ON | 1229 he_writel(he_dev, BANK_ON |
1237 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)), 1230 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1238 SDRAMCON); 1231 SDRAMCON);
1239 1232
1240 he_writel(he_dev, 1233 he_writel(he_dev,
1241 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) | 1234 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1242 RM_RW_WAIT(1), RCMCONFIG); 1235 RM_RW_WAIT(1), RCMCONFIG);
1243 he_writel(he_dev, 1236 he_writel(he_dev,
1244 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) | 1237 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1245 TM_RW_WAIT(1), TCMCONFIG); 1238 TM_RW_WAIT(1), TCMCONFIG);
1246 1239
1247 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG); 1240 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1248 1241
1249 he_writel(he_dev, 1242 he_writel(he_dev,
1250 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) | 1243 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1251 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) | 1244 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1252 RX_VALVP(he_dev->vpibits) | 1245 RX_VALVP(he_dev->vpibits) |
1253 RX_VALVC(he_dev->vcibits), RC_CONFIG); 1246 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1254 1247
1255 he_writel(he_dev, DRF_THRESH(0x20) | 1248 he_writel(he_dev, DRF_THRESH(0x20) |
1256 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) | 1249 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1257 TX_VCI_MASK(he_dev->vcibits) | 1250 TX_VCI_MASK(he_dev->vcibits) |
1258 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG); 1251 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1259 1252
1260 he_writel(he_dev, 0x0, TXAAL5_PROTO); 1253 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1261 1254
1262 he_writel(he_dev, PHY_INT_ENB | 1255 he_writel(he_dev, PHY_INT_ENB |
1263 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)), 1256 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1264 RH_CONFIG); 1257 RH_CONFIG);
1265 1258
1266 /* 5.1.3 initialize connection memory */ 1259 /* 5.1.3 initialize connection memory */
1267 1260
1268 for (i = 0; i < TCM_MEM_SIZE; ++i) 1261 for (i = 0; i < TCM_MEM_SIZE; ++i)
1269 he_writel_tcm(he_dev, 0, i); 1262 he_writel_tcm(he_dev, 0, i);
1270 1263
1271 for (i = 0; i < RCM_MEM_SIZE; ++i) 1264 for (i = 0; i < RCM_MEM_SIZE; ++i)
1272 he_writel_rcm(he_dev, 0, i); 1265 he_writel_rcm(he_dev, 0, i);
1273 1266
1274 /* 1267 /*
1275 * transmit connection memory map 1268 * transmit connection memory map
1276 * 1269 *
1277 * tx memory 1270 * tx memory
1278 * 0x0 ___________________ 1271 * 0x0 ___________________
1279 * | | 1272 * | |
1280 * | | 1273 * | |
1281 * | TSRa | 1274 * | TSRa |
1282 * | | 1275 * | |
1283 * | | 1276 * | |
1284 * 0x8000|___________________| 1277 * 0x8000|___________________|
1285 * | | 1278 * | |
1286 * | TSRb | 1279 * | TSRb |
1287 * 0xc000|___________________| 1280 * 0xc000|___________________|
1288 * | | 1281 * | |
1289 * | TSRc | 1282 * | TSRc |
1290 * 0xe000|___________________| 1283 * 0xe000|___________________|
1291 * | TSRd | 1284 * | TSRd |
1292 * 0xf000|___________________| 1285 * 0xf000|___________________|
1293 * | tmABR | 1286 * | tmABR |
1294 * 0x10000|___________________| 1287 * 0x10000|___________________|
1295 * | | 1288 * | |
1296 * | tmTPD | 1289 * | tmTPD |
1297 * |___________________| 1290 * |___________________|
1298 * | | 1291 * | |
1299 * .... 1292 * ....
1300 * 0x1ffff|___________________| 1293 * 0x1ffff|___________________|
1301 * 1294 *
1302 * 1295 *
1303 */ 1296 */
1304 1297
1305 he_writel(he_dev, CONFIG_TSRB, TSRB_BA); 1298 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1306 he_writel(he_dev, CONFIG_TSRC, TSRC_BA); 1299 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1307 he_writel(he_dev, CONFIG_TSRD, TSRD_BA); 1300 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1308 he_writel(he_dev, CONFIG_TMABR, TMABR_BA); 1301 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1309 he_writel(he_dev, CONFIG_TPDBA, TPD_BA); 1302 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1310 1303
1311 1304
1312 /* 1305 /*
1313 * receive connection memory map 1306 * receive connection memory map
1314 * 1307 *
1315 * 0x0 ___________________ 1308 * 0x0 ___________________
1316 * | | 1309 * | |
1317 * | | 1310 * | |
1318 * | RSRa | 1311 * | RSRa |
1319 * | | 1312 * | |
1320 * | | 1313 * | |
1321 * 0x8000|___________________| 1314 * 0x8000|___________________|
1322 * | | 1315 * | |
1323 * | rx0/1 | 1316 * | rx0/1 |
1324 * | LBM | link lists of local 1317 * | LBM | link lists of local
1325 * | tx | buffer memory 1318 * | tx | buffer memory
1326 * | | 1319 * | |
1327 * 0xd000|___________________| 1320 * 0xd000|___________________|
1328 * | | 1321 * | |
1329 * | rmABR | 1322 * | rmABR |
1330 * 0xe000|___________________| 1323 * 0xe000|___________________|
1331 * | | 1324 * | |
1332 * | RSRb | 1325 * | RSRb |
1333 * |___________________| 1326 * |___________________|
1334 * | | 1327 * | |
1335 * .... 1328 * ....
1336 * 0xffff|___________________| 1329 * 0xffff|___________________|
1337 */ 1330 */
1338 1331
1339 he_writel(he_dev, 0x08000, RCMLBM_BA); 1332 he_writel(he_dev, 0x08000, RCMLBM_BA);
1340 he_writel(he_dev, 0x0e000, RCMRSRB_BA); 1333 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1341 he_writel(he_dev, 0x0d800, RCMABR_BA); 1334 he_writel(he_dev, 0x0d800, RCMABR_BA);
1342 1335
1343 /* 5.1.4 initialize local buffer free pools linked lists */ 1336 /* 5.1.4 initialize local buffer free pools linked lists */
1344 1337
1345 he_init_rx_lbfp0(he_dev); 1338 he_init_rx_lbfp0(he_dev);
1346 he_init_rx_lbfp1(he_dev); 1339 he_init_rx_lbfp1(he_dev);
1347 1340
1348 he_writel(he_dev, 0x0, RLBC_H); 1341 he_writel(he_dev, 0x0, RLBC_H);
1349 he_writel(he_dev, 0x0, RLBC_T); 1342 he_writel(he_dev, 0x0, RLBC_T);
1350 he_writel(he_dev, 0x0, RLBC_H2); 1343 he_writel(he_dev, 0x0, RLBC_H2);
1351 1344
1352 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */ 1345 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1353 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */ 1346 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1354 1347
1355 he_init_tx_lbfp(he_dev); 1348 he_init_tx_lbfp(he_dev);
1356 1349
1357 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA); 1350 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1358 1351
1359 /* 5.1.5 initialize intermediate receive queues */ 1352 /* 5.1.5 initialize intermediate receive queues */
1360 1353
1361 if (he_is622(he_dev)) { 1354 if (he_is622(he_dev)) {
1362 he_writel(he_dev, 0x000f, G0_INMQ_S); 1355 he_writel(he_dev, 0x000f, G0_INMQ_S);
1363 he_writel(he_dev, 0x200f, G0_INMQ_L); 1356 he_writel(he_dev, 0x200f, G0_INMQ_L);
1364 1357
1365 he_writel(he_dev, 0x001f, G1_INMQ_S); 1358 he_writel(he_dev, 0x001f, G1_INMQ_S);
1366 he_writel(he_dev, 0x201f, G1_INMQ_L); 1359 he_writel(he_dev, 0x201f, G1_INMQ_L);
1367 1360
1368 he_writel(he_dev, 0x002f, G2_INMQ_S); 1361 he_writel(he_dev, 0x002f, G2_INMQ_S);
1369 he_writel(he_dev, 0x202f, G2_INMQ_L); 1362 he_writel(he_dev, 0x202f, G2_INMQ_L);
1370 1363
1371 he_writel(he_dev, 0x003f, G3_INMQ_S); 1364 he_writel(he_dev, 0x003f, G3_INMQ_S);
1372 he_writel(he_dev, 0x203f, G3_INMQ_L); 1365 he_writel(he_dev, 0x203f, G3_INMQ_L);
1373 1366
1374 he_writel(he_dev, 0x004f, G4_INMQ_S); 1367 he_writel(he_dev, 0x004f, G4_INMQ_S);
1375 he_writel(he_dev, 0x204f, G4_INMQ_L); 1368 he_writel(he_dev, 0x204f, G4_INMQ_L);
1376 1369
1377 he_writel(he_dev, 0x005f, G5_INMQ_S); 1370 he_writel(he_dev, 0x005f, G5_INMQ_S);
1378 he_writel(he_dev, 0x205f, G5_INMQ_L); 1371 he_writel(he_dev, 0x205f, G5_INMQ_L);
1379 1372
1380 he_writel(he_dev, 0x006f, G6_INMQ_S); 1373 he_writel(he_dev, 0x006f, G6_INMQ_S);
1381 he_writel(he_dev, 0x206f, G6_INMQ_L); 1374 he_writel(he_dev, 0x206f, G6_INMQ_L);
1382 1375
1383 he_writel(he_dev, 0x007f, G7_INMQ_S); 1376 he_writel(he_dev, 0x007f, G7_INMQ_S);
1384 he_writel(he_dev, 0x207f, G7_INMQ_L); 1377 he_writel(he_dev, 0x207f, G7_INMQ_L);
1385 } else { 1378 } else {
1386 he_writel(he_dev, 0x0000, G0_INMQ_S); 1379 he_writel(he_dev, 0x0000, G0_INMQ_S);
1387 he_writel(he_dev, 0x0008, G0_INMQ_L); 1380 he_writel(he_dev, 0x0008, G0_INMQ_L);
1388 1381
1389 he_writel(he_dev, 0x0001, G1_INMQ_S); 1382 he_writel(he_dev, 0x0001, G1_INMQ_S);
1390 he_writel(he_dev, 0x0009, G1_INMQ_L); 1383 he_writel(he_dev, 0x0009, G1_INMQ_L);
1391 1384
1392 he_writel(he_dev, 0x0002, G2_INMQ_S); 1385 he_writel(he_dev, 0x0002, G2_INMQ_S);
1393 he_writel(he_dev, 0x000a, G2_INMQ_L); 1386 he_writel(he_dev, 0x000a, G2_INMQ_L);
1394 1387
1395 he_writel(he_dev, 0x0003, G3_INMQ_S); 1388 he_writel(he_dev, 0x0003, G3_INMQ_S);
1396 he_writel(he_dev, 0x000b, G3_INMQ_L); 1389 he_writel(he_dev, 0x000b, G3_INMQ_L);
1397 1390
1398 he_writel(he_dev, 0x0004, G4_INMQ_S); 1391 he_writel(he_dev, 0x0004, G4_INMQ_S);
1399 he_writel(he_dev, 0x000c, G4_INMQ_L); 1392 he_writel(he_dev, 0x000c, G4_INMQ_L);
1400 1393
1401 he_writel(he_dev, 0x0005, G5_INMQ_S); 1394 he_writel(he_dev, 0x0005, G5_INMQ_S);
1402 he_writel(he_dev, 0x000d, G5_INMQ_L); 1395 he_writel(he_dev, 0x000d, G5_INMQ_L);
1403 1396
1404 he_writel(he_dev, 0x0006, G6_INMQ_S); 1397 he_writel(he_dev, 0x0006, G6_INMQ_S);
1405 he_writel(he_dev, 0x000e, G6_INMQ_L); 1398 he_writel(he_dev, 0x000e, G6_INMQ_L);
1406 1399
1407 he_writel(he_dev, 0x0007, G7_INMQ_S); 1400 he_writel(he_dev, 0x0007, G7_INMQ_S);
1408 he_writel(he_dev, 0x000f, G7_INMQ_L); 1401 he_writel(he_dev, 0x000f, G7_INMQ_L);
1409 } 1402 }
1410 1403
1411 /* 5.1.6 application tunable parameters */ 1404 /* 5.1.6 application tunable parameters */
1412 1405
1413 he_writel(he_dev, 0x0, MCC); 1406 he_writel(he_dev, 0x0, MCC);
1414 he_writel(he_dev, 0x0, OEC); 1407 he_writel(he_dev, 0x0, OEC);
1415 he_writel(he_dev, 0x0, DCC); 1408 he_writel(he_dev, 0x0, DCC);
1416 he_writel(he_dev, 0x0, CEC); 1409 he_writel(he_dev, 0x0, CEC);
1417 1410
1418 /* 5.1.7 cs block initialization */ 1411 /* 5.1.7 cs block initialization */
1419 1412
1420 he_init_cs_block(he_dev); 1413 he_init_cs_block(he_dev);
1421 1414
1422 /* 5.1.8 cs block connection memory initialization */ 1415 /* 5.1.8 cs block connection memory initialization */
1423 1416
1424 if (he_init_cs_block_rcm(he_dev) < 0) 1417 if (he_init_cs_block_rcm(he_dev) < 0)
1425 return -ENOMEM; 1418 return -ENOMEM;
1426 1419
1427 /* 5.1.10 initialize host structures */ 1420 /* 5.1.10 initialize host structures */
1428 1421
1429 he_init_tpdrq(he_dev); 1422 he_init_tpdrq(he_dev);
1430 1423
1431 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev, 1424 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1432 sizeof(struct he_tpd), TPD_ALIGNMENT, 0); 1425 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1433 if (he_dev->tpd_pool == NULL) { 1426 if (he_dev->tpd_pool == NULL) {
1434 hprintk("unable to create tpd pci_pool\n"); 1427 hprintk("unable to create tpd pci_pool\n");
1435 return -ENOMEM; 1428 return -ENOMEM;
1436 } 1429 }
1437 1430
1438 INIT_LIST_HEAD(&he_dev->outstanding_tpds); 1431 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1439 1432
1440 if (he_init_group(he_dev, 0) != 0) 1433 if (he_init_group(he_dev, 0) != 0)
1441 return -ENOMEM; 1434 return -ENOMEM;
1442 1435
1443 for (group = 1; group < HE_NUM_GROUPS; ++group) { 1436 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1444 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32)); 1437 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1445 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32)); 1438 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1446 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32)); 1439 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1447 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), 1440 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1448 G0_RBPS_BS + (group * 32)); 1441 G0_RBPS_BS + (group * 32));
1449 1442
1450 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32)); 1443 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1451 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32)); 1444 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1452 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0), 1445 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1453 G0_RBPL_QI + (group * 32)); 1446 G0_RBPL_QI + (group * 32));
1454 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32)); 1447 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1455 1448
1456 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16)); 1449 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1457 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16)); 1450 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1458 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0), 1451 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1459 G0_RBRQ_Q + (group * 16)); 1452 G0_RBRQ_Q + (group * 16));
1460 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16)); 1453 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1461 1454
1462 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16)); 1455 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1463 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16)); 1456 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1464 he_writel(he_dev, TBRQ_THRESH(0x1), 1457 he_writel(he_dev, TBRQ_THRESH(0x1),
1465 G0_TBRQ_THRESH + (group * 16)); 1458 G0_TBRQ_THRESH + (group * 16));
1466 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16)); 1459 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1467 } 1460 }
1468 1461
1469 /* host status page */ 1462 /* host status page */
1470 1463
1471 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev, 1464 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1472 sizeof(struct he_hsp), &he_dev->hsp_phys); 1465 sizeof(struct he_hsp), &he_dev->hsp_phys);
1473 if (he_dev->hsp == NULL) { 1466 if (he_dev->hsp == NULL) {
1474 hprintk("failed to allocate host status page\n"); 1467 hprintk("failed to allocate host status page\n");
1475 return -ENOMEM; 1468 return -ENOMEM;
1476 } 1469 }
1477 memset(he_dev->hsp, 0, sizeof(struct he_hsp)); 1470 memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1478 he_writel(he_dev, he_dev->hsp_phys, HSP_BA); 1471 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1479 1472
1480 /* initialize framer */ 1473 /* initialize framer */
1481 1474
1482 #ifdef CONFIG_ATM_HE_USE_SUNI 1475 #ifdef CONFIG_ATM_HE_USE_SUNI
1483 if (he_isMM(he_dev)) 1476 if (he_isMM(he_dev))
1484 suni_init(he_dev->atm_dev); 1477 suni_init(he_dev->atm_dev);
1485 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start) 1478 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1486 he_dev->atm_dev->phy->start(he_dev->atm_dev); 1479 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1487 #endif /* CONFIG_ATM_HE_USE_SUNI */ 1480 #endif /* CONFIG_ATM_HE_USE_SUNI */
1488 1481
1489 if (sdh) { 1482 if (sdh) {
1490 /* this really should be in suni.c but for now... */ 1483 /* this really should be in suni.c but for now... */
1491 int val; 1484 int val;
1492 1485
1493 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM); 1486 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1494 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT); 1487 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1495 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM); 1488 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1496 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP); 1489 he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
1497 } 1490 }
1498 1491
1499 /* 5.1.12 enable transmit and receive */ 1492 /* 5.1.12 enable transmit and receive */
1500 1493
1501 reg = he_readl_mbox(he_dev, CS_ERCTL0); 1494 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1502 reg |= TX_ENABLE|ER_ENABLE; 1495 reg |= TX_ENABLE|ER_ENABLE;
1503 he_writel_mbox(he_dev, reg, CS_ERCTL0); 1496 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1504 1497
1505 reg = he_readl(he_dev, RC_CONFIG); 1498 reg = he_readl(he_dev, RC_CONFIG);
1506 reg |= RX_ENABLE; 1499 reg |= RX_ENABLE;
1507 he_writel(he_dev, reg, RC_CONFIG); 1500 he_writel(he_dev, reg, RC_CONFIG);
1508 1501
1509 for (i = 0; i < HE_NUM_CS_STPER; ++i) { 1502 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1510 he_dev->cs_stper[i].inuse = 0; 1503 he_dev->cs_stper[i].inuse = 0;
1511 he_dev->cs_stper[i].pcr = -1; 1504 he_dev->cs_stper[i].pcr = -1;
1512 } 1505 }
1513 he_dev->total_bw = 0; 1506 he_dev->total_bw = 0;
1514 1507
1515 1508
1516 /* atm linux initialization */ 1509 /* atm linux initialization */
1517 1510
1518 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits; 1511 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1519 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits; 1512 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1520 1513
1521 he_dev->irq_peak = 0; 1514 he_dev->irq_peak = 0;
1522 he_dev->rbrq_peak = 0; 1515 he_dev->rbrq_peak = 0;
1523 he_dev->rbpl_peak = 0; 1516 he_dev->rbpl_peak = 0;
1524 he_dev->tbrq_peak = 0; 1517 he_dev->tbrq_peak = 0;
1525 1518
1526 HPRINTK("hell bent for leather!\n"); 1519 HPRINTK("hell bent for leather!\n");
1527 1520
1528 return 0; 1521 return 0;
1529 } 1522 }
1530 1523
1531 static void 1524 static void
1532 he_stop(struct he_dev *he_dev) 1525 he_stop(struct he_dev *he_dev)
1533 { 1526 {
1534 struct he_buff *heb, *next; 1527 struct he_buff *heb, *next;
1535 struct pci_dev *pci_dev; 1528 struct pci_dev *pci_dev;
1536 u32 gen_cntl_0, reg; 1529 u32 gen_cntl_0, reg;
1537 u16 command; 1530 u16 command;
1538 1531
1539 pci_dev = he_dev->pci_dev; 1532 pci_dev = he_dev->pci_dev;
1540 1533
1541 /* disable interrupts */ 1534 /* disable interrupts */
1542 1535
1543 if (he_dev->membase) { 1536 if (he_dev->membase) {
1544 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0); 1537 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1545 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB); 1538 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1546 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0); 1539 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1547 1540
1548 tasklet_disable(&he_dev->tasklet); 1541 tasklet_disable(&he_dev->tasklet);
1549 1542
1550 /* disable recv and transmit */ 1543 /* disable recv and transmit */
1551 1544
1552 reg = he_readl_mbox(he_dev, CS_ERCTL0); 1545 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1553 reg &= ~(TX_ENABLE|ER_ENABLE); 1546 reg &= ~(TX_ENABLE|ER_ENABLE);
1554 he_writel_mbox(he_dev, reg, CS_ERCTL0); 1547 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1555 1548
1556 reg = he_readl(he_dev, RC_CONFIG); 1549 reg = he_readl(he_dev, RC_CONFIG);
1557 reg &= ~(RX_ENABLE); 1550 reg &= ~(RX_ENABLE);
1558 he_writel(he_dev, reg, RC_CONFIG); 1551 he_writel(he_dev, reg, RC_CONFIG);
1559 } 1552 }
1560 1553
1561 #ifdef CONFIG_ATM_HE_USE_SUNI 1554 #ifdef CONFIG_ATM_HE_USE_SUNI
1562 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop) 1555 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1563 he_dev->atm_dev->phy->stop(he_dev->atm_dev); 1556 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1564 #endif /* CONFIG_ATM_HE_USE_SUNI */ 1557 #endif /* CONFIG_ATM_HE_USE_SUNI */
1565 1558
1566 if (he_dev->irq) 1559 if (he_dev->irq)
1567 free_irq(he_dev->irq, he_dev); 1560 free_irq(he_dev->irq, he_dev);
1568 1561
1569 if (he_dev->irq_base) 1562 if (he_dev->irq_base)
1570 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1) 1563 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1571 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys); 1564 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1572 1565
1573 if (he_dev->hsp) 1566 if (he_dev->hsp)
1574 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp), 1567 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1575 he_dev->hsp, he_dev->hsp_phys); 1568 he_dev->hsp, he_dev->hsp_phys);
1576 1569
1577 if (he_dev->rbpl_base) { 1570 if (he_dev->rbpl_base) {
1578 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry) 1571 list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
1579 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping); 1572 pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
1580 1573
1581 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE 1574 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1582 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys); 1575 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1583 } 1576 }
1584 1577
1585 kfree(he_dev->rbpl_virt); 1578 kfree(he_dev->rbpl_virt);
1586 kfree(he_dev->rbpl_table); 1579 kfree(he_dev->rbpl_table);
1587 1580
1588 if (he_dev->rbpl_pool) 1581 if (he_dev->rbpl_pool)
1589 pci_pool_destroy(he_dev->rbpl_pool); 1582 pci_pool_destroy(he_dev->rbpl_pool);
1590 1583
1591 if (he_dev->rbrq_base) 1584 if (he_dev->rbrq_base)
1592 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 1585 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1593 he_dev->rbrq_base, he_dev->rbrq_phys); 1586 he_dev->rbrq_base, he_dev->rbrq_phys);
1594 1587
1595 if (he_dev->tbrq_base) 1588 if (he_dev->tbrq_base)
1596 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 1589 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1597 he_dev->tbrq_base, he_dev->tbrq_phys); 1590 he_dev->tbrq_base, he_dev->tbrq_phys);
1598 1591
1599 if (he_dev->tpdrq_base) 1592 if (he_dev->tpdrq_base)
1600 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 1593 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1601 he_dev->tpdrq_base, he_dev->tpdrq_phys); 1594 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1602 1595
1603 if (he_dev->tpd_pool) 1596 if (he_dev->tpd_pool)
1604 pci_pool_destroy(he_dev->tpd_pool); 1597 pci_pool_destroy(he_dev->tpd_pool);
1605 1598
1606 if (he_dev->pci_dev) { 1599 if (he_dev->pci_dev) {
1607 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command); 1600 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1608 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); 1601 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1609 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command); 1602 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1610 } 1603 }
1611 1604
1612 if (he_dev->membase) 1605 if (he_dev->membase)
1613 iounmap(he_dev->membase); 1606 iounmap(he_dev->membase);
1614 } 1607 }
1615 1608
1616 static struct he_tpd * 1609 static struct he_tpd *
1617 __alloc_tpd(struct he_dev *he_dev) 1610 __alloc_tpd(struct he_dev *he_dev)
1618 { 1611 {
1619 struct he_tpd *tpd; 1612 struct he_tpd *tpd;
1620 dma_addr_t mapping; 1613 dma_addr_t mapping;
1621 1614
1622 tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping); 1615 tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
1623 if (tpd == NULL) 1616 if (tpd == NULL)
1624 return NULL; 1617 return NULL;
1625 1618
1626 tpd->status = TPD_ADDR(mapping); 1619 tpd->status = TPD_ADDR(mapping);
1627 tpd->reserved = 0; 1620 tpd->reserved = 0;
1628 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0; 1621 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1629 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0; 1622 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1630 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0; 1623 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1631 1624
1632 return tpd; 1625 return tpd;
1633 } 1626 }
1634 1627
/* extract the 16-bit pdu length from the aal5 trailer: big-endian,
 * stored at offsets len-6 (high byte) and len-5 (low byte) of the
 * reassembled buffer */
#define AAL5_LEN(buf,len) \
			((((unsigned char *)(buf))[(len)-6] << 8) | \
				(((unsigned char *)(buf))[(len)-5]))
1638 1631
1639 /* 2.10.1.2 receive 1632 /* 2.10.1.2 receive
1640 * 1633 *
1641 * aal5 packets can optionally return the tcp checksum in the lower 1634 * aal5 packets can optionally return the tcp checksum in the lower
1642 * 16 bits of the crc (RSR0_TCP_CKSUM) 1635 * 16 bits of the crc (RSR0_TCP_CKSUM)
1643 */ 1636 */
1644 1637
/* extract the 16-bit tcp checksum the adapter leaves in the last two
 * bytes of the aal5 crc field (big-endian; see RSR0_TCP_CKSUM above).
 * fix: the final index used (len-1) instead of ((len)-1) -- the macro
 * argument is now fully parenthesized, consistent with the (len)-2
 * index and with AAL5_LEN() */
#define TCP_CKSUM(buf,len) \
	((((unsigned char *)(buf))[(len)-2] << 8) | \
	 (((unsigned char *)(buf))[(len)-1]))
1648 1641
/*
 * he_service_rbrq() - drain the group's receive buffer return queue
 *
 * Walks the rbrq from the driver's cached head up to the tail the
 * adapter posted in the host status page, moving each returned host
 * buffer onto the owning vcc's reassembly list.  When an error-free
 * END_PDU entry arrives, the chained buffers are copied into a fresh
 * skb and pushed up the atm stack.  Returns the number of pdus
 * assembled; he_tasklet() uses a non-zero return as the cue to
 * replenish the buffer pool via he_service_rbpl().
 *
 * Runs with he_dev->global_lock held (dropped only across vcc->push).
 */
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	unsigned cid, lastcid = -1;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_buff *heb, *next;
	int i;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

		/* recover the host buffer from the table index encoded in
		 * the entry's dma address (set in he_service_rbpl()) */
		i = RBRQ_ADDR(he_dev->rbrq_head) >> RBP_IDX_OFFSET;
		heb = he_dev->rbpl_virt[i];

		cid = RBRQ_CID(he_dev->rbrq_head);
		/* consecutive entries often share a cid; avoid re-lookup */
		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL || (he_vcc = HE_VCC(vcc)) == NULL) {
			hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
				/* no vcc owns this buffer -- recycle it now */
				clear_bit(i, he_dev->rbpl_table);
				list_del(&heb->entry);
				pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
			}

			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		/* RBRQ_BUFLEN counts 32-bit words; convert to bytes */
		heb->len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		clear_bit(i, he_dev->rbpl_table);
		list_move_tail(&heb->entry, &he_vcc->buffers);
		he_vcc->pdu_len += heb->len;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;	/* force a fresh vcc lookup next entry */
			HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

		/* pdu continues in a later entry; keep accumulating */
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
					? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
					? "LEN_ERR" : "",
					vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		__net_timestamp(skb);

		/* linearize: copy the chained host buffers into the skb */
		list_for_each_entry(heb, &he_vcc->buffers, entry)
			memcpy(skb_put(skb, heb->len), &heb->data, heb->len);

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb_set_tail_pointer(skb, skb->len);
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb_set_tail_pointer(skb, skb->len);
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_COMPLETE;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		/* drop the device lock across the upcall into the atm stack */
		spin_unlock(&he_dev->global_lock);
		vcc->push(vcc, skb);
		spin_lock(&he_dev->global_lock);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		/* the pdu's data has been consumed (or dropped); return all
		 * of its host buffers to the dma pool */
		list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
			pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
		INIT_LIST_HEAD(&he_vcc->buffers);
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(he_dev->rbrq_head + 1));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		/* report the new head so the adapter can reuse the entries */
		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1808 1801
1809 static void 1802 static void
1810 he_service_tbrq(struct he_dev *he_dev, int group) 1803 he_service_tbrq(struct he_dev *he_dev, int group)
1811 { 1804 {
1812 struct he_tbrq *tbrq_tail = (struct he_tbrq *) 1805 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1813 ((unsigned long)he_dev->tbrq_base | 1806 ((unsigned long)he_dev->tbrq_base |
1814 he_dev->hsp->group[group].tbrq_tail); 1807 he_dev->hsp->group[group].tbrq_tail);
1815 struct he_tpd *tpd; 1808 struct he_tpd *tpd;
1816 int slot, updated = 0; 1809 int slot, updated = 0;
1817 struct he_tpd *__tpd; 1810 struct he_tpd *__tpd;
1818 1811
1819 /* 2.1.6 transmit buffer return queue */ 1812 /* 2.1.6 transmit buffer return queue */
1820 1813
1821 while (he_dev->tbrq_head != tbrq_tail) { 1814 while (he_dev->tbrq_head != tbrq_tail) {
1822 ++updated; 1815 ++updated;
1823 1816
1824 HPRINTK("tbrq%d 0x%x%s%s\n", 1817 HPRINTK("tbrq%d 0x%x%s%s\n",
1825 group, 1818 group,
1826 TBRQ_TPD(he_dev->tbrq_head), 1819 TBRQ_TPD(he_dev->tbrq_head),
1827 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "", 1820 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1828 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : ""); 1821 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1829 tpd = NULL; 1822 tpd = NULL;
1830 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) { 1823 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1831 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) { 1824 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1832 tpd = __tpd; 1825 tpd = __tpd;
1833 list_del(&__tpd->entry); 1826 list_del(&__tpd->entry);
1834 break; 1827 break;
1835 } 1828 }
1836 } 1829 }
1837 1830
1838 if (tpd == NULL) { 1831 if (tpd == NULL) {
1839 hprintk("unable to locate tpd for dma buffer %x\n", 1832 hprintk("unable to locate tpd for dma buffer %x\n",
1840 TBRQ_TPD(he_dev->tbrq_head)); 1833 TBRQ_TPD(he_dev->tbrq_head));
1841 goto next_tbrq_entry; 1834 goto next_tbrq_entry;
1842 } 1835 }
1843 1836
1844 if (TBRQ_EOS(he_dev->tbrq_head)) { 1837 if (TBRQ_EOS(he_dev->tbrq_head)) {
1845 HPRINTK("wake_up(tx_waitq) cid 0x%x\n", 1838 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1846 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci)); 1839 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
1847 if (tpd->vcc) 1840 if (tpd->vcc)
1848 wake_up(&HE_VCC(tpd->vcc)->tx_waitq); 1841 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
1849 1842
1850 goto next_tbrq_entry; 1843 goto next_tbrq_entry;
1851 } 1844 }
1852 1845
1853 for (slot = 0; slot < TPD_MAXIOV; ++slot) { 1846 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
1854 if (tpd->iovec[slot].addr) 1847 if (tpd->iovec[slot].addr)
1855 pci_unmap_single(he_dev->pci_dev, 1848 pci_unmap_single(he_dev->pci_dev,
1856 tpd->iovec[slot].addr, 1849 tpd->iovec[slot].addr,
1857 tpd->iovec[slot].len & TPD_LEN_MASK, 1850 tpd->iovec[slot].len & TPD_LEN_MASK,
1858 PCI_DMA_TODEVICE); 1851 PCI_DMA_TODEVICE);
1859 if (tpd->iovec[slot].len & TPD_LST) 1852 if (tpd->iovec[slot].len & TPD_LST)
1860 break; 1853 break;
1861 1854
1862 } 1855 }
1863 1856
1864 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */ 1857 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1865 if (tpd->vcc && tpd->vcc->pop) 1858 if (tpd->vcc && tpd->vcc->pop)
1866 tpd->vcc->pop(tpd->vcc, tpd->skb); 1859 tpd->vcc->pop(tpd->vcc, tpd->skb);
1867 else 1860 else
1868 dev_kfree_skb_any(tpd->skb); 1861 dev_kfree_skb_any(tpd->skb);
1869 } 1862 }
1870 1863
1871 next_tbrq_entry: 1864 next_tbrq_entry:
1872 if (tpd) 1865 if (tpd)
1873 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); 1866 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
1874 he_dev->tbrq_head = (struct he_tbrq *) 1867 he_dev->tbrq_head = (struct he_tbrq *)
1875 ((unsigned long) he_dev->tbrq_base | 1868 ((unsigned long) he_dev->tbrq_base |
1876 TBRQ_MASK(he_dev->tbrq_head + 1)); 1869 TBRQ_MASK(he_dev->tbrq_head + 1));
1877 } 1870 }
1878 1871
1879 if (updated) { 1872 if (updated) {
1880 if (updated > he_dev->tbrq_peak) 1873 if (updated > he_dev->tbrq_peak)
1881 he_dev->tbrq_peak = updated; 1874 he_dev->tbrq_peak = updated;
1882 1875
1883 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head), 1876 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
1884 G0_TBRQ_H + (group * 16)); 1877 G0_TBRQ_H + (group * 16));
1885 } 1878 }
1886 } 1879 }
1887 1880
/*
 * he_service_rbpl() - refill the group's receive buffer pool (large)
 *
 * Hands fresh dma buffers to the adapter until the ring is full
 * (tail+1 would meet the adapter-reported head), no free slot remains
 * in rbpl_table, or the dma pool is exhausted.  Each buffer's table
 * index is encoded into the rbp entry so he_service_rbrq() can map a
 * returned dma address back to its he_buff.
 */
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
	struct he_rbp *new_tail;
	struct he_rbp *rbpl_head;
	struct he_buff *heb;
	dma_addr_t mapping;
	int i;
	int moved = 0;

	rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
					RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

	for (;;) {
		new_tail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
						RBPL_MASK(he_dev->rbpl_tail+1));

		/* table 3.42 -- rbpl_tail should never be set to rbpl_head */
		if (new_tail == rbpl_head)
			break;

		/* find a free table slot: scan from the hint, then wrap
		 * once from the start before giving up */
		i = find_next_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE, he_dev->rbpl_hint);
		if (i > (RBPL_TABLE_SIZE - 1)) {
			i = find_first_zero_bit(he_dev->rbpl_table, RBPL_TABLE_SIZE);
			if (i > (RBPL_TABLE_SIZE - 1))
				break;
		}
		he_dev->rbpl_hint = i + 1;

		heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
		if (!heb)
			break;
		heb->mapping = mapping;
		list_add(&heb->entry, &he_dev->rbpl_outstanding);
		he_dev->rbpl_virt[i] = heb;
		set_bit(i, he_dev->rbpl_table);
		/* the table index rides along in the entry's idx field */
		new_tail->idx = i << RBP_IDX_OFFSET;
		/* point the adapter past the he_buff header, at the data */
		new_tail->phys = mapping + offsetof(struct he_buff, data);

		he_dev->rbpl_tail = new_tail;
		++moved;
	}

	if (moved)
		/* ring the doorbell with the new tail */
		he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
1934 1927
/*
 * he_tasklet() - bottom half; dispatch queued adapter interrupt events
 *
 * Drains the irq event ring from irq_head to the tail snapshot taken
 * by he_irq_handler(), calling the service routine matching each
 * event type.  Consumed entries are then reported back to the adapter
 * via IRQ0_HEAD.  Runs under he_dev->global_lock, released only
 * around the phy interrupt upcall.
 */
static void
he_tasklet(unsigned long data)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev *) data;
	int group, type;
	int updated = 0;

	HPRINTK("tasklet (0x%lx)\n", data);
	spin_lock_irqsave(&he_dev->global_lock, flags);

	while (he_dev->irq_head != he_dev->irq_tail) {
		++updated;

		type = ITYPE_TYPE(he_dev->irq_head->isw);
		group = ITYPE_GROUP(he_dev->irq_head->isw);

		switch (type) {
			case ITYPE_RBRQ_THRESH:
				HPRINTK("rbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_RBRQ_TIMER:
				/* refill the pool only if pdus were consumed */
				if (he_service_rbrq(he_dev, group))
					he_service_rbpl(he_dev, group);
				break;
			case ITYPE_TBRQ_THRESH:
				HPRINTK("tbrq%d threshold\n", group);
				/* fall through */
			case ITYPE_TPD_COMPLETE:
				he_service_tbrq(he_dev, group);
				break;
			case ITYPE_RBPL_THRESH:
				he_service_rbpl(he_dev, group);
				break;
			case ITYPE_RBPS_THRESH:
				/* shouldn't happen unless small buffers enabled */
				break;
			case ITYPE_PHY:
				HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
				/* drop the lock across the phy driver upcall */
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
					he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
				spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
				break;
			case ITYPE_OTHER:
				switch (type|group) {
					case ITYPE_PARITY:
						hprintk("parity error\n");
						break;
					case ITYPE_ABORT:
						hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
						break;
				}
				break;
			case ITYPE_TYPE(ITYPE_INVALID):
				/* see 8.1.1 -- check all queues */

				HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

				he_service_rbrq(he_dev, 0);
				he_service_rbpl(he_dev, 0);
				he_service_tbrq(he_dev, 0);
				break;
			default:
				hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
		}

		/* mark the entry consumed so a stale event is detectable */
		he_dev->irq_head->isw = ITYPE_INVALID;

		he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
	}

	if (updated) {
		if (updated > he_dev->irq_peak)
			he_dev->irq_peak = updated;

		/* report consumed entries back to the adapter */
		he_writel(he_dev,
			IRQ_SIZE(CONFIG_IRQ_SIZE) |
			IRQ_THRESH(CONFIG_IRQ_THRESH) |
			IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
		(void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
}
2021 2014
/*
 * he_irq_handler() - top half; ack the interrupt and kick the tasklet
 *
 * Snapshots the adapter's irq queue tail from the dma'd tailoffset,
 * falling back to reading IRQ0_BASE if the snapshot looks stale
 * (8.1.2 controller errata).  If new events are pending, schedules
 * he_tasklet() and clears/acks the interrupt.  Returns IRQ_HANDLED
 * iff events were found.
 */
static irqreturn_t
he_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;
	struct he_dev *he_dev = (struct he_dev * )dev_id;
	int handled = 0;

	if (he_dev == NULL)
		return IRQ_NONE;

	spin_lock_irqsave(&he_dev->global_lock, flags);

	/* tailoffset is a queue index; << 2 converts to a byte offset */
	he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
						(*he_dev->irq_tailoffset << 2));

	if (he_dev->irq_tail == he_dev->irq_head) {
		HPRINTK("tailoffset not updated?\n");
		/* fall back to reading the tail from the register */
		he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
			((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
		(void) he_readl(he_dev, INT_FIFO);	/* 8.1.2 controller errata */
	}

#ifdef DEBUG
	if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
		hprintk("spurious (or shared) interrupt?\n");
#endif

	if (he_dev->irq_head != he_dev->irq_tail) {
		handled = 1;
		tasklet_schedule(&he_dev->tasklet);
		he_writel(he_dev, INT_CLEAR_A, INT_FIFO);	/* clear interrupt */
		(void) he_readl(he_dev, INT_FIFO);		/* flush posted writes */
	}
	spin_unlock_irqrestore(&he_dev->global_lock, flags);
	return IRQ_RETVAL(handled);

}
2059 2052
/*
 * __enqueue_tpd() - post a transmit packet descriptor for <cid>
 *
 * Appends the tpd to the transmit packet descriptor ready queue and
 * rings the doorbell (TPDRQ_T).  If the queue turns out to be full --
 * confirmed by re-reading the head from the adapter -- the pdu is
 * dropped: dma mappings are torn down, the skb is popped/freed and
 * tx_err is bumped (see the FIXME about a proper backlog queue).
 *
 * NOTE(review): presumably called with he_dev->global_lock held;
 * confirm against the callers (not visible in this chunk).
 */
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
	struct he_tpdrq *new_tail;

	HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
					tpd, cid, he_dev->tpdrq_tail);

	/* new_tail = he_dev->tpdrq_tail; */
	new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
					TPDRQ_MASK(he_dev->tpdrq_tail+1));

	/*
	 * check to see if we are about to set the tail == head
	 * if true, update the head pointer from the adapter
	 * to see if this is really the case (reading the queue
	 * head for every enqueue would be unnecessarily slow)
	 */

	if (new_tail == he_dev->tpdrq_head) {
		he_dev->tpdrq_head = (struct he_tpdrq *)
			(((unsigned long)he_dev->tpdrq_base) |
				TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

		if (new_tail == he_dev->tpdrq_head) {
			int slot;

			hprintk("tpdrq full (cid 0x%x)\n", cid);
			/*
			 * FIXME
			 * push tpd onto a transmit backlog queue
			 * after service_tbrq, service the backlog
			 * for now, we just drop the pdu
			 */
			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
				if (tpd->iovec[slot].addr)
					pci_unmap_single(he_dev->pci_dev,
						tpd->iovec[slot].addr,
						tpd->iovec[slot].len & TPD_LEN_MASK,
								PCI_DMA_TODEVICE);
			}
			if (tpd->skb) {
				if (tpd->vcc->pop)
					tpd->vcc->pop(tpd->vcc, tpd->skb);
				else
					dev_kfree_skb_any(tpd->skb);
				atomic_inc(&tpd->vcc->stats->tx_err);
			}
			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
			return;
		}
	}

	/* 2.1.5 transmit packet descriptor ready queue */
	list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
	he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
	he_dev->tpdrq_tail->cid = cid;
	/* descriptor must be visible in memory before the tail advances */
	wmb();

	he_dev->tpdrq_tail = new_tail;

	he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
	(void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
}
2124 2117
/*
 * he_open - open the VPI/VCI connection given by @vcc on the adapter.
 *
 * Allocates the per-connection software state (struct he_vcc) and, as
 * requested by vcc->qos, programs the transmit side (TSR0..TSR14) and/or
 * the receive side (RSR0/RSR1/RSR4) connection-state registers for the
 * connection id.  Numeric comments ("2.3.3.1" etc.) refer to sections of
 * the ForeRunnerHE documentation.  Returns 0 on success, or
 * -ENOMEM/-EINVAL/-EBUSY; on failure the ATM_VF_ADDR flag set at entry
 * is cleared again.
 */
static int
he_open(struct atm_vcc *vcc)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_vcc *he_vcc;
	int err = 0;
	unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	/* nothing to program for a connection without a concrete address */
	if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
		return 0;

	HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

	set_bit(ATM_VF_ADDR, &vcc->flags);

	cid = he_mkcid(he_dev, vpi, vci);

	/* GFP_ATOMIC: open must not sleep here */
	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
	if (he_vcc == NULL) {
		hprintk("unable to allocate he_vcc during open\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_vcc->buffers);
	he_vcc->pdu_len = 0;
	he_vcc->rc_index = -1;	/* no CBR rate-control register claimed yet */

	init_waitqueue_head(&he_vcc->rx_waitq);
	init_waitqueue_head(&he_vcc->tx_waitq);

	vcc->dev_data = he_vcc;

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		int pcr_goal;

		pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
		if (pcr_goal == 0)
			pcr_goal = he_dev->atm_dev->link_rate;
		if (pcr_goal < 0)	/* means round down, technically */
			pcr_goal = -pcr_goal;

		HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				tsr0_aal = TSR0_AAL5;
				tsr4 = TSR4_AAL5;
				break;
			case ATM_AAL0:
				tsr0_aal = TSR0_AAL0_SDU;
				tsr4 = TSR4_AAL0_SDU;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);
		tsr0 = he_readl_tsr0(he_dev, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		/* the cid must not already carry an open tx connection */
		if (TSR0_CONN_STATE(tsr0) != 0) {
			hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
			err = -EBUSY;
			goto open_failed;
		}

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				/* 2.3.3.1 open connection ubr */

				tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
					TSR0_USE_WMIN | TSR0_UPDATE_GER;
				break;

			case ATM_CBR:
				/* 2.3.3.2 open connection cbr */

				/* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
				if ((he_dev->total_bw + pcr_goal)
					> (he_dev->atm_dev->link_rate * 9 / 10))
				{
					err = -EBUSY;
					goto open_failed;
				}

				spin_lock_irqsave(&he_dev->global_lock, flags);	/* also protects he_dev->cs_stper[] */

				/* find an unused cs_stper register, or one
				   already programmed to the same rate */
				for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
					if (he_dev->cs_stper[reg].inuse == 0 ||
					    he_dev->cs_stper[reg].pcr == pcr_goal)
							break;

				if (reg == HE_NUM_CS_STPER) {
					err = -EBUSY;
					spin_unlock_irqrestore(&he_dev->global_lock, flags);
					goto open_failed;
				}

				he_dev->total_bw += pcr_goal;

				he_vcc->rc_index = reg;
				++he_dev->cs_stper[reg].inuse;
				he_dev->cs_stper[reg].pcr = pcr_goal;

				/* cell clock differs between 622 and 155 cards */
				clock = he_is622(he_dev) ? 66667000 : 50000000;
				period = clock / pcr_goal;

				HPRINTK("rc_index = %d period = %d\n",
								reg, period);

				he_writel_mbox(he_dev, rate_to_atmf(period/2),
							CS_STPER0 + reg);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);

				tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
							TSR0_RC_INDEX(reg);

				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		he_writel_tsr0(he_dev, tsr0, cid);
		he_writel_tsr4(he_dev, tsr4 | 1, cid);
		he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
					TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
		he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

		/* clear the remaining per-connection state registers */
		he_writel_tsr3(he_dev, 0x0, cid);
		he_writel_tsr5(he_dev, 0x0, cid);
		he_writel_tsr6(he_dev, 0x0, cid);
		he_writel_tsr7(he_dev, 0x0, cid);
		he_writel_tsr8(he_dev, 0x0, cid);
		he_writel_tsr10(he_dev, 0x0, cid);
		he_writel_tsr11(he_dev, 0x0, cid);
		he_writel_tsr12(he_dev, 0x0, cid);
		he_writel_tsr13(he_dev, 0x0, cid);
		he_writel_tsr14(he_dev, 0x0, cid);
		(void) he_readl_tsr0(he_dev, cid);	/* flush posted writes */
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		unsigned aal;

		HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
						&HE_VCC(vcc)->rx_waitq);

		switch (vcc->qos.aal) {
			case ATM_AAL5:
				aal = RSR0_AAL5;
				break;
			case ATM_AAL0:
				aal = RSR0_RAWCELL;
				break;
			default:
				err = -EINVAL;
				goto open_failed;
		}

		spin_lock_irqsave(&he_dev->global_lock, flags);

		/* the cid must not already carry an open rx connection */
		rsr0 = he_readl_rsr0(he_dev, cid);
		if (rsr0 & RSR0_OPEN_CONN) {
			spin_unlock_irqrestore(&he_dev->global_lock, flags);

			hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
			err = -EBUSY;
			goto open_failed;
		}

		rsr1 = RSR1_GROUP(0) | RSR1_RBPL_ONLY;
		rsr4 = RSR4_GROUP(0) | RSR4_RBPL_ONLY;
		/* early/partial packet discard only makes sense for UBR */
		rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
				(RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
		if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
			rsr0 |= RSR0_TCP_CKSUM;
#endif

		he_writel_rsr4(he_dev, rsr4, cid);
		he_writel_rsr1(he_dev, rsr1, cid);
		/* 5.1.11 last parameter initialized should be
		   the open/closed indication in rsr0 */
		he_writel_rsr0(he_dev,
			rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */

		spin_unlock_irqrestore(&he_dev->global_lock, flags);
	}

open_failed:

	if (err) {
		kfree(he_vcc);
		clear_bit(ATM_VF_ADDR, &vcc->flags);
	}
	else
		set_bit(ATM_VF_READY, &vcc->flags);

	return err;
}
2337 2330
/*
 * he_close - tear down the connection behind @vcc.
 *
 * Receive side (2.7.2.2): issue the close command and sleep on
 * he_vcc->rx_waitq until the adapter signals completion (30 s timeout).
 * Transmit side (2.1.2 / 2.3.1.1): wait for outstanding tx buffers to
 * drain, flush the connection, queue a final end-of-session TPD and
 * sleep on he_vcc->tx_waitq; a CBR connection also returns its
 * cs_stper slot and its reserved bandwidth.
 */
static void
he_close(struct atm_vcc *vcc)
{
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	struct he_tpd *tpd;
	unsigned cid;
	struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
	int retry = 0, sleep = 1, tx_inuse;

	HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);
	cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		int timeout;

		HPRINTK("close rx cid 0x%x\n", cid);

		/* 2.7.2.2 close receive operation */

		/* wait for previous close (if any) to finish */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
			HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
			udelay(250);
		}

		/* register on the waitqueue before issuing the close so the
		   completion interrupt cannot be missed */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->rx_waitq, &wait);

		he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
		(void) he_readl_rsr0(he_dev, cid);	/* flush posted writes */
		he_writel_mbox(he_dev, cid, RXCON_CLOSE);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->rx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		if (timeout == 0)
			hprintk("close rx timeout cid 0x%x\n", cid);

		HPRINTK("close rx cid 0x%x complete\n", cid);

	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		volatile unsigned tsr4, tsr0;
		int timeout;

		HPRINTK("close tx cid 0x%x\n", cid);

		/* 2.1.2
		 *
		 * ... the host must first stop queueing packets to the TPDRQ
		 * on the connection to be closed, then wait for all outstanding
		 * packets to be transmitted and their buffers returned to the
		 * TBRQ. When the last packet on the connection arrives in the
		 * TBRQ, the host issues the close command to the adapter.
		 */

		/* poll sk_wmem_alloc with exponential backoff (1..250 ms)
		   until the socket's tx buffers have drained or we give up */
		while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
		       (retry < MAX_RETRY)) {
			msleep(sleep);
			if (sleep < 250)
				sleep = sleep * 2;

			++retry;
		}

		if (tx_inuse > 1)
			hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

		/* 2.3.1.1 generic close operations with flush */

		spin_lock_irqsave(&he_dev->global_lock, flags);
		he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
					/* also clears TSR4_SESSION_ENDED */

		switch (vcc->qos.txtp.traffic_class) {
			case ATM_UBR:
				he_writel_tsr1(he_dev,
					TSR1_MCR(rate_to_atmf(200000))
					| TSR1_PCR(0), cid);
				break;
			case ATM_CBR:
				he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
				break;
		}
		(void) he_readl_tsr4(he_dev, cid);	/* flush posted writes */

		/* queue a final end-of-session TPD so the adapter tells us
		   when the last packet has gone out */
		tpd = __alloc_tpd(he_dev);
		if (tpd == NULL) {
			hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}
		tpd->status |= TPD_EOS | TPD_INT;
		tpd->skb = NULL;
		tpd->vcc = vcc;
		wmb();

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&he_vcc->tx_waitq, &wait);
		__enqueue_tpd(he_dev, tpd, cid);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		timeout = schedule_timeout(30*HZ);

		remove_wait_queue(&he_vcc->tx_waitq, &wait);
		set_current_state(TASK_RUNNING);

		spin_lock_irqsave(&he_dev->global_lock, flags);

		if (timeout == 0) {
			hprintk("close tx timeout cid 0x%x\n", cid);
			goto close_tx_incomplete;
		}

		/* busy-wait until the adapter reports the session ended ... */
		while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
			HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
			udelay(250);
		}

		/* ... and the connection state machine has gone idle */
		while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
			HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
			udelay(250);
		}

close_tx_incomplete:

		/* CBR: release the rate-control slot and reserved bandwidth
		   claimed in he_open() */
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			int reg = he_vcc->rc_index;

			HPRINTK("cs_stper reg = %d\n", reg);

			if (he_dev->cs_stper[reg].inuse == 0)
				hprintk("cs_stper[%d].inuse = 0!\n", reg);
			else
				--he_dev->cs_stper[reg].inuse;

			he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
		}
		spin_unlock_irqrestore(&he_dev->global_lock, flags);

		HPRINTK("close tx cid 0x%x complete\n", cid);
	}

	kfree(he_vcc);

	clear_bit(ATM_VF_ADDR, &vcc->flags);
}
2495 2488
/*
 * he_send - queue one sk_buff for transmission on @vcc.
 *
 * Validates the buffer, builds a transmit packet descriptor (TPD) --
 * filling the scatter/gather iovec array when USE_SCATTERGATHER is
 * defined, a single DMA-mapped buffer otherwise -- and enqueues it on
 * the TPDRQ under the device lock.  Returns 0 on success or
 * -EINVAL/-ENOMEM; on any error the skb is released via vcc->pop (or
 * freed) and the tx_err counter is bumped.
 */
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(vcc->dev);
	unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
	struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
	int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

	HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

	/* TPD length fields are 16 bits wide; an AAL0 send must be exactly
	   one cell */
	if ((skb->len > HE_TPD_BUFSIZE) ||
	    ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
		hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}

#ifndef USE_SCATTERGATHER
	if (skb_shinfo(skb)->nr_frags) {
		hprintk("no scatter/gather support\n");
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		return -EINVAL;
	}
#endif
	spin_lock_irqsave(&he_dev->global_lock, flags);

	tpd = __alloc_tpd(he_dev);
	if (tpd == NULL) {
		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb_any(skb);
		atomic_inc(&vcc->stats->tx_err);
		spin_unlock_irqrestore(&he_dev->global_lock, flags);
		return -ENOMEM;
	}

	if (vcc->qos.aal == ATM_AAL5)
		tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
	else {
		/* AAL0 raw cell: extract PTI/CLP from the 4th header byte,
		   then strip the cell header leaving only the payload */
		char *pti_clp = (void *) (skb->data + 3);
		int clp, pti;

		pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
		clp = (*pti_clp & ATM_HDR_CLP);
		tpd->status |= TPD_CELLTYPE(pti);
		if (clp)
			tpd->status |= TPD_CLP;

		skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
	}

#ifdef USE_SCATTERGATHER
	/* first iovec slot covers the linear part of the skb */
	tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
				skb_headlen(skb), PCI_DMA_TODEVICE);
	tpd->iovec[slot].len = skb_headlen(skb);
	++slot;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (slot == TPD_MAXIOV) {	/* queue tpd; start new tpd */
			tpd->vcc = vcc;
			tpd->skb = NULL;	/* not the last fragment
						   so dont ->push() yet */
			wmb();

			__enqueue_tpd(he_dev, tpd, cid);
			tpd = __alloc_tpd(he_dev);
			if (tpd == NULL) {
				if (vcc->pop)
					vcc->pop(vcc, skb);
				else
					dev_kfree_skb_any(skb);
				atomic_inc(&vcc->stats->tx_err);
				spin_unlock_irqrestore(&he_dev->global_lock, flags);
				return -ENOMEM;
			}
			tpd->status |= TPD_USERCELL;
			slot = 0;
		}

		tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
			(void *) page_address(frag->page) + frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		tpd->iovec[slot].len = frag->size;
		++slot;

	}

	/* mark the final iovec entry as last-in-packet */
	tpd->iovec[slot - 1].len |= TPD_LST;
#else
	tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	tpd->length0 = skb->len | TPD_LST;
#endif
	tpd->status |= TPD_INT;		/* interrupt when this tpd completes */

	tpd->vcc = vcc;
	tpd->skb = skb;
	wmb();
	ATM_SKB(skb)->vcc = vcc;

	__enqueue_tpd(he_dev, tpd, cid);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	atomic_inc(&vcc->stats->tx);

	return 0;
}
2618 2611
2619 static int 2612 static int
2620 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg) 2613 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2621 { 2614 {
2622 unsigned long flags; 2615 unsigned long flags;
2623 struct he_dev *he_dev = HE_DEV(atm_dev); 2616 struct he_dev *he_dev = HE_DEV(atm_dev);
2624 struct he_ioctl_reg reg; 2617 struct he_ioctl_reg reg;
2625 int err = 0; 2618 int err = 0;
2626 2619
2627 switch (cmd) { 2620 switch (cmd) {
2628 case HE_GET_REG: 2621 case HE_GET_REG:
2629 if (!capable(CAP_NET_ADMIN)) 2622 if (!capable(CAP_NET_ADMIN))
2630 return -EPERM; 2623 return -EPERM;
2631 2624
2632 if (copy_from_user(&reg, arg, 2625 if (copy_from_user(&reg, arg,
2633 sizeof(struct he_ioctl_reg))) 2626 sizeof(struct he_ioctl_reg)))
2634 return -EFAULT; 2627 return -EFAULT;
2635 2628
2636 spin_lock_irqsave(&he_dev->global_lock, flags); 2629 spin_lock_irqsave(&he_dev->global_lock, flags);
2637 switch (reg.type) { 2630 switch (reg.type) {
2638 case HE_REGTYPE_PCI: 2631 case HE_REGTYPE_PCI:
2639 if (reg.addr >= HE_REGMAP_SIZE) { 2632 if (reg.addr >= HE_REGMAP_SIZE) {
2640 err = -EINVAL; 2633 err = -EINVAL;
2641 break; 2634 break;
2642 } 2635 }
2643 2636
2644 reg.val = he_readl(he_dev, reg.addr); 2637 reg.val = he_readl(he_dev, reg.addr);
2645 break; 2638 break;
2646 case HE_REGTYPE_RCM: 2639 case HE_REGTYPE_RCM:
2647 reg.val = 2640 reg.val =
2648 he_readl_rcm(he_dev, reg.addr); 2641 he_readl_rcm(he_dev, reg.addr);
2649 break; 2642 break;
2650 case HE_REGTYPE_TCM: 2643 case HE_REGTYPE_TCM:
2651 reg.val = 2644 reg.val =
2652 he_readl_tcm(he_dev, reg.addr); 2645 he_readl_tcm(he_dev, reg.addr);
2653 break; 2646 break;
2654 case HE_REGTYPE_MBOX: 2647 case HE_REGTYPE_MBOX:
2655 reg.val = 2648 reg.val =
2656 he_readl_mbox(he_dev, reg.addr); 2649 he_readl_mbox(he_dev, reg.addr);
2657 break; 2650 break;
2658 default: 2651 default:
2659 err = -EINVAL; 2652 err = -EINVAL;
2660 break; 2653 break;
2661 } 2654 }
2662 spin_unlock_irqrestore(&he_dev->global_lock, flags); 2655 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2663 if (err == 0) 2656 if (err == 0)
2664 if (copy_to_user(arg, &reg, 2657 if (copy_to_user(arg, &reg,
2665 sizeof(struct he_ioctl_reg))) 2658 sizeof(struct he_ioctl_reg)))
2666 return -EFAULT; 2659 return -EFAULT;
2667 break; 2660 break;
2668 default: 2661 default:
2669 #ifdef CONFIG_ATM_HE_USE_SUNI 2662 #ifdef CONFIG_ATM_HE_USE_SUNI
2670 if (atm_dev->phy && atm_dev->phy->ioctl) 2663 if (atm_dev->phy && atm_dev->phy->ioctl)
2671 err = atm_dev->phy->ioctl(atm_dev, cmd, arg); 2664 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2672 #else /* CONFIG_ATM_HE_USE_SUNI */ 2665 #else /* CONFIG_ATM_HE_USE_SUNI */
2673 err = -EINVAL; 2666 err = -EINVAL;
2674 #endif /* CONFIG_ATM_HE_USE_SUNI */ 2667 #endif /* CONFIG_ATM_HE_USE_SUNI */
2675 break; 2668 break;
2676 } 2669 }
2677 2670
2678 return err; 2671 return err;
2679 } 2672 }
2680 2673
2681 static void 2674 static void
2682 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr) 2675 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2683 { 2676 {
2684 unsigned long flags; 2677 unsigned long flags;
2685 struct he_dev *he_dev = HE_DEV(atm_dev); 2678 struct he_dev *he_dev = HE_DEV(atm_dev);
2686 2679
2687 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr); 2680 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2688 2681
2689 spin_lock_irqsave(&he_dev->global_lock, flags); 2682 spin_lock_irqsave(&he_dev->global_lock, flags);
2690 he_writel(he_dev, val, FRAMER + (addr*4)); 2683 he_writel(he_dev, val, FRAMER + (addr*4));
2691 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */ 2684 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2692 spin_unlock_irqrestore(&he_dev->global_lock, flags); 2685 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2693 } 2686 }
2694 2687
2695 2688
2696 static unsigned char 2689 static unsigned char
2697 he_phy_get(struct atm_dev *atm_dev, unsigned long addr) 2690 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2698 { 2691 {
2699 unsigned long flags; 2692 unsigned long flags;
2700 struct he_dev *he_dev = HE_DEV(atm_dev); 2693 struct he_dev *he_dev = HE_DEV(atm_dev);
2701 unsigned reg; 2694 unsigned reg;
2702 2695
2703 spin_lock_irqsave(&he_dev->global_lock, flags); 2696 spin_lock_irqsave(&he_dev->global_lock, flags);
2704 reg = he_readl(he_dev, FRAMER + (addr*4)); 2697 reg = he_readl(he_dev, FRAMER + (addr*4));
2705 spin_unlock_irqrestore(&he_dev->global_lock, flags); 2698 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2706 2699
2707 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg); 2700 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2708 return reg; 2701 return reg;
2709 } 2702 }
2710 2703
/*
 * /proc read callback for the device (old single-page atm proc interface).
 *
 * The caller invokes this repeatedly with an increasing *pos; each call
 * emits at most one line into 'page' and returns its length, or 0 when
 * there is nothing more to print.  The "if (!left--)" chain maps *pos to
 * the corresponding output line.
 *
 * NOTE(review): mcc/oec/dcc/cec are function-static accumulators shared by
 * all he devices and are not locked against concurrent readers — presumably
 * acceptable for diagnostic output; confirm if multiple adapters are used.
 */
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
	unsigned long flags;
	struct he_dev *he_dev = HE_DEV(dev);
	int left, i;
#ifdef notdef
	struct he_rbrq *rbrq_tail;
	struct he_tpdrq *tpdrq_head;
	int rbpl_head, rbpl_tail;
#endif
	/* Running totals of the hardware error counters; the registers are
	 * clear-on-read, so the sums accumulate across /proc reads. */
	static long mcc = 0, oec = 0, dcc = 0, cec = 0;


	left = *pos;
	if (!left--)
		return sprintf(page, "ATM he driver\n");

	if (!left--)
		return sprintf(page, "%s%s\n\n",
			he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

	if (!left--)
		return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");

	/* Sample (and thereby clear) the cell error counters under the
	 * global lock; do this on every call so counts keep accumulating
	 * even when the counter line itself is not the one being printed. */
	spin_lock_irqsave(&he_dev->global_lock, flags);
	mcc += he_readl(he_dev, MCC);
	oec += he_readl(he_dev, OEC);
	dcc += he_readl(he_dev, DCC);
	cec += he_readl(he_dev, CEC);
	spin_unlock_irqrestore(&he_dev->global_lock, flags);

	if (!left--)
		return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
			mcc, oec, dcc, cec);

	if (!left--)
		return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
				CONFIG_IRQ_SIZE, he_dev->irq_peak);

	if (!left--)
		return sprintf(page, "tpdrq_size = %d inuse = ?\n",
						CONFIG_TPDRQ_SIZE);

	if (!left--)
		return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
				CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

	if (!left--)
		return sprintf(page, "tbrq_size = %d peak = %d\n",
					CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);


#ifdef notdef
	/* Dead code: would compute the number of in-use receive buffer pool
	 * entries from the hardware head/tail pointers. */
	rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
	rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

	inuse = rbpl_head - rbpl_tail;
	if (inuse < 0)
		inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
	inuse /= sizeof(struct he_rbp);

	if (!left--)
		return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
						CONFIG_RBPL_SIZE, inuse);
#endif

	if (!left--)
		return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");

	/* One output line per CBR rate-controller period slot. */
	for (i = 0; i < HE_NUM_CS_STPER; ++i)
		if (!left--)
			return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
						he_dev->cs_stper[i].pcr,
						he_dev->cs_stper[i].inuse);

	if (!left--)
		return sprintf(page, "total bw (cbr): %d (limit %d)\n",
			he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);

	/* Past the last line: signal end of output. */
	return 0;
}
2793 2786
2794 /* eeprom routines -- see 4.7 */ 2787 /* eeprom routines -- see 4.7 */
2795 2788
/*
 * Read one byte from the serial EEPROM by bit-banging the HOST_CNTL
 * register (see section 4.7 of the adapter documentation).
 *
 * readtab[] holds the pre-encoded HOST_CNTL bit patterns for the EEPROM
 * READ opcode; clocktab[] holds alternating clock-low/clock-high patterns.
 * Each protocol step is followed by udelay(EEPROM_DELAY) to satisfy the
 * device's timing.  The exact ordering of writes and delays IS the
 * protocol — do not reorder.
 *
 * NOTE(review): no locking here — presumably only called during device
 * init before concurrent access is possible; confirm against callers.
 */
static u8 read_prom_byte(struct he_dev *he_dev, int addr)
{
	u32 val = 0, tmp_read = 0;
	int i, j = 0;
	u8 byte_read = 0;

	/* Preserve HOST_CNTL, clearing the EEPROM control bits (8..12). */
	val = readl(he_dev->membase + HOST_CNTL);
	val &= 0xFFFFE0FF;

	/* Turn on write enable */
	val |= 0x800;
	he_writel(he_dev, val, HOST_CNTL);

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		he_writel(he_dev, val | readtab[i], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	/* Clock each address bit out MSB first: data on bit 9, two writes
	 * per bit (clock low then high from clocktab). */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
		he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Restart the clock pattern for the data-in phase. */
	j = 0;

	val &= 0xFFFFF7FF;	/* Turn off write enable */
	he_writel(he_dev, val, HOST_CNTL);

	/* Now, we can read data from the EEPROM by clocking it in */
	for (i = 7; i >= 0; i--) {
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
		tmp_read = he_readl(he_dev, HOST_CNTL);
		/* Extract the data-out bit and merge it into position i. */
		byte_read |= (unsigned char)
			((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
		he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
		udelay(EEPROM_DELAY);
	}

	/* Deselect the EEPROM (raise chip select) to end the transaction. */
	he_writel(he_dev, val | ID_CS, HOST_CNTL);
	udelay(EEPROM_DELAY);

	return byte_read;
}
2844 2837
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
/* Load-time tunables (all read-only once the module is loaded: perm 0). */
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "numbers of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "numbers of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
2860 2853
/* PCI IDs this driver binds to: the FORE Systems HE adapter.  The table is
 * zero-terminated and exported for module autoloading via modalias. */
static struct pci_device_id he_pci_tbl[] = {
	{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);
2867 2860
/* PCI driver glue: he_init_one()/he_remove_one() are defined earlier in
 * this file and handle per-device setup and teardown. */
static struct pci_driver he_driver = {
	.name =		"he",
	.probe =	he_init_one,
	.remove =	he_remove_one,
	.id_table =	he_pci_tbl,
};
2874 2867
2875 static int __init he_init(void) 2868 static int __init he_init(void)
2876 { 2869 {
2877 return pci_register_driver(&he_driver); 2870 return pci_register_driver(&he_driver);
2878 } 2871 }
2879 2872
2880 static void __exit he_cleanup(void) 2873 static void __exit he_cleanup(void)
2881 { 2874 {
2882 pci_unregister_driver(&he_driver); 2875 pci_unregister_driver(&he_driver);
2883 } 2876 }
2884 2877
/* Hook the entry/exit functions into the module loader. */
module_init(he_init);
module_exit(he_cleanup);
2887 2880