Commit c767bf2ed3a3844a9d9341366cc388dab7c7ee05

Authored by Tejun Heo
Committed by Linus Torvalds
1 parent 9bb26bc1ff

atm/nicstar: don't use idr_remove_all()

idr_destroy() can destroy idr by itself and idr_remove_all() is being
deprecated.  Drop its usage.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Chas Williams <chas@cmf.nrl.navy.mil>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 0 additions and 1 deletions Inline Diff

drivers/atm/nicstar.c
1 /* 1 /*
2 * nicstar.c 2 * nicstar.c
3 * 3 *
4 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. 4 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
5 * 5 *
6 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME. 6 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
7 * It was taken from the frle-0.22 device driver. 7 * It was taken from the frle-0.22 device driver.
8 * As the file doesn't have a copyright notice, in the file 8 * As the file doesn't have a copyright notice, in the file
9 * nicstarmac.copyright I put the copyright notice from the 9 * nicstarmac.copyright I put the copyright notice from the
10 * frle-0.22 device driver. 10 * frle-0.22 device driver.
11 * Some code is based on the nicstar driver by M. Welsh. 11 * Some code is based on the nicstar driver by M. Welsh.
12 * 12 *
13 * Author: Rui Prior (rprior@inescn.pt) 13 * Author: Rui Prior (rprior@inescn.pt)
14 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 14 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
15 * 15 *
16 * 16 *
17 * (C) INESC 1999 17 * (C) INESC 1999
18 */ 18 */
19 19
20 /* 20 /*
21 * IMPORTANT INFORMATION 21 * IMPORTANT INFORMATION
22 * 22 *
23 * There are currently three types of spinlocks: 23 * There are currently three types of spinlocks:
24 * 24 *
25 * 1 - Per card interrupt spinlock (to protect structures and such) 25 * 1 - Per card interrupt spinlock (to protect structures and such)
26 * 2 - Per SCQ scq spinlock 26 * 2 - Per SCQ scq spinlock
27 * 3 - Per card resource spinlock (to access registers, etc.) 27 * 3 - Per card resource spinlock (to access registers, etc.)
28 * 28 *
29 * These must NEVER be grabbed in reverse order. 29 * These must NEVER be grabbed in reverse order.
30 * 30 *
31 */ 31 */
32 32
33 /* Header files */ 33 /* Header files */
34 34
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <linux/kernel.h> 36 #include <linux/kernel.h>
37 #include <linux/skbuff.h> 37 #include <linux/skbuff.h>
38 #include <linux/atmdev.h> 38 #include <linux/atmdev.h>
39 #include <linux/atm.h> 39 #include <linux/atm.h>
40 #include <linux/pci.h> 40 #include <linux/pci.h>
41 #include <linux/dma-mapping.h> 41 #include <linux/dma-mapping.h>
42 #include <linux/types.h> 42 #include <linux/types.h>
43 #include <linux/string.h> 43 #include <linux/string.h>
44 #include <linux/delay.h> 44 #include <linux/delay.h>
45 #include <linux/init.h> 45 #include <linux/init.h>
46 #include <linux/sched.h> 46 #include <linux/sched.h>
47 #include <linux/timer.h> 47 #include <linux/timer.h>
48 #include <linux/interrupt.h> 48 #include <linux/interrupt.h>
49 #include <linux/bitops.h> 49 #include <linux/bitops.h>
50 #include <linux/slab.h> 50 #include <linux/slab.h>
51 #include <linux/idr.h> 51 #include <linux/idr.h>
52 #include <asm/io.h> 52 #include <asm/io.h>
53 #include <asm/uaccess.h> 53 #include <asm/uaccess.h>
54 #include <linux/atomic.h> 54 #include <linux/atomic.h>
55 #include "nicstar.h" 55 #include "nicstar.h"
56 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI 56 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
57 #include "suni.h" 57 #include "suni.h"
58 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ 58 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
59 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 59 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
60 #include "idt77105.h" 60 #include "idt77105.h"
61 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ 61 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
62 62
63 /* Additional code */ 63 /* Additional code */
64 64
65 #include "nicstarmac.c" 65 #include "nicstarmac.c"
66 66
67 /* Configurable parameters */ 67 /* Configurable parameters */
68 68
69 #undef PHY_LOOPBACK 69 #undef PHY_LOOPBACK
70 #undef TX_DEBUG 70 #undef TX_DEBUG
71 #undef RX_DEBUG 71 #undef RX_DEBUG
72 #undef GENERAL_DEBUG 72 #undef GENERAL_DEBUG
73 #undef EXTRA_DEBUG 73 #undef EXTRA_DEBUG
74 74
75 #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know 75 #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
76 you're going to use only raw ATM */ 76 you're going to use only raw ATM */
77 77
78 /* Do not touch these */ 78 /* Do not touch these */
79 79
80 #ifdef TX_DEBUG 80 #ifdef TX_DEBUG
81 #define TXPRINTK(args...) printk(args) 81 #define TXPRINTK(args...) printk(args)
82 #else 82 #else
83 #define TXPRINTK(args...) 83 #define TXPRINTK(args...)
84 #endif /* TX_DEBUG */ 84 #endif /* TX_DEBUG */
85 85
86 #ifdef RX_DEBUG 86 #ifdef RX_DEBUG
87 #define RXPRINTK(args...) printk(args) 87 #define RXPRINTK(args...) printk(args)
88 #else 88 #else
89 #define RXPRINTK(args...) 89 #define RXPRINTK(args...)
90 #endif /* RX_DEBUG */ 90 #endif /* RX_DEBUG */
91 91
92 #ifdef GENERAL_DEBUG 92 #ifdef GENERAL_DEBUG
93 #define PRINTK(args...) printk(args) 93 #define PRINTK(args...) printk(args)
94 #else 94 #else
95 #define PRINTK(args...) 95 #define PRINTK(args...)
96 #endif /* GENERAL_DEBUG */ 96 #endif /* GENERAL_DEBUG */
97 97
98 #ifdef EXTRA_DEBUG 98 #ifdef EXTRA_DEBUG
99 #define XPRINTK(args...) printk(args) 99 #define XPRINTK(args...) printk(args)
100 #else 100 #else
101 #define XPRINTK(args...) 101 #define XPRINTK(args...)
102 #endif /* EXTRA_DEBUG */ 102 #endif /* EXTRA_DEBUG */
103 103
104 /* Macros */ 104 /* Macros */
105 105
106 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) 106 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
107 107
108 #define NS_DELAY mdelay(1) 108 #define NS_DELAY mdelay(1)
109 109
110 #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b))) 110 #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b)))
111 111
112 #ifndef ATM_SKB 112 #ifndef ATM_SKB
113 #define ATM_SKB(s) (&(s)->atm) 113 #define ATM_SKB(s) (&(s)->atm)
114 #endif 114 #endif
115 115
116 #define scq_virt_to_bus(scq, p) \ 116 #define scq_virt_to_bus(scq, p) \
117 (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org)) 117 (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
118 118
119 /* Function declarations */ 119 /* Function declarations */
120 120
121 static u32 ns_read_sram(ns_dev * card, u32 sram_address); 121 static u32 ns_read_sram(ns_dev * card, u32 sram_address);
122 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, 122 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
123 int count); 123 int count);
124 static int ns_init_card(int i, struct pci_dev *pcidev); 124 static int ns_init_card(int i, struct pci_dev *pcidev);
125 static void ns_init_card_error(ns_dev * card, int error); 125 static void ns_init_card_error(ns_dev * card, int error);
126 static scq_info *get_scq(ns_dev *card, int size, u32 scd); 126 static scq_info *get_scq(ns_dev *card, int size, u32 scd);
127 static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc); 127 static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
128 static void push_rxbufs(ns_dev *, struct sk_buff *); 128 static void push_rxbufs(ns_dev *, struct sk_buff *);
129 static irqreturn_t ns_irq_handler(int irq, void *dev_id); 129 static irqreturn_t ns_irq_handler(int irq, void *dev_id);
130 static int ns_open(struct atm_vcc *vcc); 130 static int ns_open(struct atm_vcc *vcc);
131 static void ns_close(struct atm_vcc *vcc); 131 static void ns_close(struct atm_vcc *vcc);
132 static void fill_tst(ns_dev * card, int n, vc_map * vc); 132 static void fill_tst(ns_dev * card, int n, vc_map * vc);
133 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); 133 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
134 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, 134 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
135 struct sk_buff *skb); 135 struct sk_buff *skb);
136 static void process_tsq(ns_dev * card); 136 static void process_tsq(ns_dev * card);
137 static void drain_scq(ns_dev * card, scq_info * scq, int pos); 137 static void drain_scq(ns_dev * card, scq_info * scq, int pos);
138 static void process_rsq(ns_dev * card); 138 static void process_rsq(ns_dev * card);
139 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); 139 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
140 #ifdef NS_USE_DESTRUCTORS 140 #ifdef NS_USE_DESTRUCTORS
141 static void ns_sb_destructor(struct sk_buff *sb); 141 static void ns_sb_destructor(struct sk_buff *sb);
142 static void ns_lb_destructor(struct sk_buff *lb); 142 static void ns_lb_destructor(struct sk_buff *lb);
143 static void ns_hb_destructor(struct sk_buff *hb); 143 static void ns_hb_destructor(struct sk_buff *hb);
144 #endif /* NS_USE_DESTRUCTORS */ 144 #endif /* NS_USE_DESTRUCTORS */
145 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); 145 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
146 static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count); 146 static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
147 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb); 147 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
148 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb); 148 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
149 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb); 149 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
150 static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page); 150 static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
151 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg); 151 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
152 #ifdef EXTRA_DEBUG 152 #ifdef EXTRA_DEBUG
153 static void which_list(ns_dev * card, struct sk_buff *skb); 153 static void which_list(ns_dev * card, struct sk_buff *skb);
154 #endif 154 #endif
155 static void ns_poll(unsigned long arg); 155 static void ns_poll(unsigned long arg);
156 static int ns_parse_mac(char *mac, unsigned char *esi); 156 static int ns_parse_mac(char *mac, unsigned char *esi);
157 static void ns_phy_put(struct atm_dev *dev, unsigned char value, 157 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
158 unsigned long addr); 158 unsigned long addr);
159 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); 159 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
160 160
161 /* Global variables */ 161 /* Global variables */
162 162
163 static struct ns_dev *cards[NS_MAX_CARDS]; 163 static struct ns_dev *cards[NS_MAX_CARDS];
164 static unsigned num_cards; 164 static unsigned num_cards;
165 static struct atmdev_ops atm_ops = { 165 static struct atmdev_ops atm_ops = {
166 .open = ns_open, 166 .open = ns_open,
167 .close = ns_close, 167 .close = ns_close,
168 .ioctl = ns_ioctl, 168 .ioctl = ns_ioctl,
169 .send = ns_send, 169 .send = ns_send,
170 .phy_put = ns_phy_put, 170 .phy_put = ns_phy_put,
171 .phy_get = ns_phy_get, 171 .phy_get = ns_phy_get,
172 .proc_read = ns_proc_read, 172 .proc_read = ns_proc_read,
173 .owner = THIS_MODULE, 173 .owner = THIS_MODULE,
174 }; 174 };
175 175
176 static struct timer_list ns_timer; 176 static struct timer_list ns_timer;
177 static char *mac[NS_MAX_CARDS]; 177 static char *mac[NS_MAX_CARDS];
178 module_param_array(mac, charp, NULL, 0); 178 module_param_array(mac, charp, NULL, 0);
179 MODULE_LICENSE("GPL"); 179 MODULE_LICENSE("GPL");
180 180
181 /* Functions */ 181 /* Functions */
182 182
183 static int nicstar_init_one(struct pci_dev *pcidev, 183 static int nicstar_init_one(struct pci_dev *pcidev,
184 const struct pci_device_id *ent) 184 const struct pci_device_id *ent)
185 { 185 {
186 static int index = -1; 186 static int index = -1;
187 unsigned int error; 187 unsigned int error;
188 188
189 index++; 189 index++;
190 cards[index] = NULL; 190 cards[index] = NULL;
191 191
192 error = ns_init_card(index, pcidev); 192 error = ns_init_card(index, pcidev);
193 if (error) { 193 if (error) {
194 cards[index--] = NULL; /* don't increment index */ 194 cards[index--] = NULL; /* don't increment index */
195 goto err_out; 195 goto err_out;
196 } 196 }
197 197
198 return 0; 198 return 0;
199 err_out: 199 err_out:
200 return -ENODEV; 200 return -ENODEV;
201 } 201 }
202 202
203 static void nicstar_remove_one(struct pci_dev *pcidev) 203 static void nicstar_remove_one(struct pci_dev *pcidev)
204 { 204 {
205 int i, j; 205 int i, j;
206 ns_dev *card = pci_get_drvdata(pcidev); 206 ns_dev *card = pci_get_drvdata(pcidev);
207 struct sk_buff *hb; 207 struct sk_buff *hb;
208 struct sk_buff *iovb; 208 struct sk_buff *iovb;
209 struct sk_buff *lb; 209 struct sk_buff *lb;
210 struct sk_buff *sb; 210 struct sk_buff *sb;
211 211
212 i = card->index; 212 i = card->index;
213 213
214 if (cards[i] == NULL) 214 if (cards[i] == NULL)
215 return; 215 return;
216 216
217 if (card->atmdev->phy && card->atmdev->phy->stop) 217 if (card->atmdev->phy && card->atmdev->phy->stop)
218 card->atmdev->phy->stop(card->atmdev); 218 card->atmdev->phy->stop(card->atmdev);
219 219
220 /* Stop everything */ 220 /* Stop everything */
221 writel(0x00000000, card->membase + CFG); 221 writel(0x00000000, card->membase + CFG);
222 222
223 /* De-register device */ 223 /* De-register device */
224 atm_dev_deregister(card->atmdev); 224 atm_dev_deregister(card->atmdev);
225 225
226 /* Disable PCI device */ 226 /* Disable PCI device */
227 pci_disable_device(pcidev); 227 pci_disable_device(pcidev);
228 228
229 /* Free up resources */ 229 /* Free up resources */
230 j = 0; 230 j = 0;
231 PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); 231 PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
232 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { 232 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
233 dev_kfree_skb_any(hb); 233 dev_kfree_skb_any(hb);
234 j++; 234 j++;
235 } 235 }
236 PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); 236 PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
237 j = 0; 237 j = 0;
238 PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, 238 PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
239 card->iovpool.count); 239 card->iovpool.count);
240 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { 240 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
241 dev_kfree_skb_any(iovb); 241 dev_kfree_skb_any(iovb);
242 j++; 242 j++;
243 } 243 }
244 PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); 244 PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
245 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) 245 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
246 dev_kfree_skb_any(lb); 246 dev_kfree_skb_any(lb);
247 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) 247 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
248 dev_kfree_skb_any(sb); 248 dev_kfree_skb_any(sb);
249 free_scq(card, card->scq0, NULL); 249 free_scq(card, card->scq0, NULL);
250 for (j = 0; j < NS_FRSCD_NUM; j++) { 250 for (j = 0; j < NS_FRSCD_NUM; j++) {
251 if (card->scd2vc[j] != NULL) 251 if (card->scd2vc[j] != NULL)
252 free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); 252 free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
253 } 253 }
254 idr_remove_all(&card->idr);
255 idr_destroy(&card->idr); 254 idr_destroy(&card->idr);
256 pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, 255 pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
257 card->rsq.org, card->rsq.dma); 256 card->rsq.org, card->rsq.dma);
258 pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, 257 pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
259 card->tsq.org, card->tsq.dma); 258 card->tsq.org, card->tsq.dma);
260 free_irq(card->pcidev->irq, card); 259 free_irq(card->pcidev->irq, card);
261 iounmap(card->membase); 260 iounmap(card->membase);
262 kfree(card); 261 kfree(card);
263 } 262 }
264 263
265 static struct pci_device_id nicstar_pci_tbl[] = { 264 static struct pci_device_id nicstar_pci_tbl[] = {
266 { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, 265 { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
267 {0,} /* terminate list */ 266 {0,} /* terminate list */
268 }; 267 };
269 268
270 MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); 269 MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
271 270
272 static struct pci_driver nicstar_driver = { 271 static struct pci_driver nicstar_driver = {
273 .name = "nicstar", 272 .name = "nicstar",
274 .id_table = nicstar_pci_tbl, 273 .id_table = nicstar_pci_tbl,
275 .probe = nicstar_init_one, 274 .probe = nicstar_init_one,
276 .remove = nicstar_remove_one, 275 .remove = nicstar_remove_one,
277 }; 276 };
278 277
279 static int __init nicstar_init(void) 278 static int __init nicstar_init(void)
280 { 279 {
281 unsigned error = 0; /* Initialized to remove compile warning */ 280 unsigned error = 0; /* Initialized to remove compile warning */
282 281
283 XPRINTK("nicstar: nicstar_init() called.\n"); 282 XPRINTK("nicstar: nicstar_init() called.\n");
284 283
285 error = pci_register_driver(&nicstar_driver); 284 error = pci_register_driver(&nicstar_driver);
286 285
287 TXPRINTK("nicstar: TX debug enabled.\n"); 286 TXPRINTK("nicstar: TX debug enabled.\n");
288 RXPRINTK("nicstar: RX debug enabled.\n"); 287 RXPRINTK("nicstar: RX debug enabled.\n");
289 PRINTK("nicstar: General debug enabled.\n"); 288 PRINTK("nicstar: General debug enabled.\n");
290 #ifdef PHY_LOOPBACK 289 #ifdef PHY_LOOPBACK
291 printk("nicstar: using PHY loopback.\n"); 290 printk("nicstar: using PHY loopback.\n");
292 #endif /* PHY_LOOPBACK */ 291 #endif /* PHY_LOOPBACK */
293 XPRINTK("nicstar: nicstar_init() returned.\n"); 292 XPRINTK("nicstar: nicstar_init() returned.\n");
294 293
295 if (!error) { 294 if (!error) {
296 init_timer(&ns_timer); 295 init_timer(&ns_timer);
297 ns_timer.expires = jiffies + NS_POLL_PERIOD; 296 ns_timer.expires = jiffies + NS_POLL_PERIOD;
298 ns_timer.data = 0UL; 297 ns_timer.data = 0UL;
299 ns_timer.function = ns_poll; 298 ns_timer.function = ns_poll;
300 add_timer(&ns_timer); 299 add_timer(&ns_timer);
301 } 300 }
302 301
303 return error; 302 return error;
304 } 303 }
305 304
306 static void __exit nicstar_cleanup(void) 305 static void __exit nicstar_cleanup(void)
307 { 306 {
308 XPRINTK("nicstar: nicstar_cleanup() called.\n"); 307 XPRINTK("nicstar: nicstar_cleanup() called.\n");
309 308
310 del_timer(&ns_timer); 309 del_timer(&ns_timer);
311 310
312 pci_unregister_driver(&nicstar_driver); 311 pci_unregister_driver(&nicstar_driver);
313 312
314 XPRINTK("nicstar: nicstar_cleanup() returned.\n"); 313 XPRINTK("nicstar: nicstar_cleanup() returned.\n");
315 } 314 }
316 315
317 static u32 ns_read_sram(ns_dev * card, u32 sram_address) 316 static u32 ns_read_sram(ns_dev * card, u32 sram_address)
318 { 317 {
319 unsigned long flags; 318 unsigned long flags;
320 u32 data; 319 u32 data;
321 sram_address <<= 2; 320 sram_address <<= 2;
322 sram_address &= 0x0007FFFC; /* address must be dword aligned */ 321 sram_address &= 0x0007FFFC; /* address must be dword aligned */
323 sram_address |= 0x50000000; /* SRAM read command */ 322 sram_address |= 0x50000000; /* SRAM read command */
324 spin_lock_irqsave(&card->res_lock, flags); 323 spin_lock_irqsave(&card->res_lock, flags);
325 while (CMD_BUSY(card)) ; 324 while (CMD_BUSY(card)) ;
326 writel(sram_address, card->membase + CMD); 325 writel(sram_address, card->membase + CMD);
327 while (CMD_BUSY(card)) ; 326 while (CMD_BUSY(card)) ;
328 data = readl(card->membase + DR0); 327 data = readl(card->membase + DR0);
329 spin_unlock_irqrestore(&card->res_lock, flags); 328 spin_unlock_irqrestore(&card->res_lock, flags);
330 return data; 329 return data;
331 } 330 }
332 331
333 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, 332 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
334 int count) 333 int count)
335 { 334 {
336 unsigned long flags; 335 unsigned long flags;
337 int i, c; 336 int i, c;
338 count--; /* count range now is 0..3 instead of 1..4 */ 337 count--; /* count range now is 0..3 instead of 1..4 */
339 c = count; 338 c = count;
340 c <<= 2; /* to use increments of 4 */ 339 c <<= 2; /* to use increments of 4 */
341 spin_lock_irqsave(&card->res_lock, flags); 340 spin_lock_irqsave(&card->res_lock, flags);
342 while (CMD_BUSY(card)) ; 341 while (CMD_BUSY(card)) ;
343 for (i = 0; i <= c; i += 4) 342 for (i = 0; i <= c; i += 4)
344 writel(*(value++), card->membase + i); 343 writel(*(value++), card->membase + i);
345 /* Note: DR# registers are the first 4 dwords in nicstar's memspace, 344 /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
346 so card->membase + DR0 == card->membase */ 345 so card->membase + DR0 == card->membase */
347 sram_address <<= 2; 346 sram_address <<= 2;
348 sram_address &= 0x0007FFFC; 347 sram_address &= 0x0007FFFC;
349 sram_address |= (0x40000000 | count); 348 sram_address |= (0x40000000 | count);
350 writel(sram_address, card->membase + CMD); 349 writel(sram_address, card->membase + CMD);
351 spin_unlock_irqrestore(&card->res_lock, flags); 350 spin_unlock_irqrestore(&card->res_lock, flags);
352 } 351 }
353 352
354 static int ns_init_card(int i, struct pci_dev *pcidev) 353 static int ns_init_card(int i, struct pci_dev *pcidev)
355 { 354 {
356 int j; 355 int j;
357 struct ns_dev *card = NULL; 356 struct ns_dev *card = NULL;
358 unsigned char pci_latency; 357 unsigned char pci_latency;
359 unsigned error; 358 unsigned error;
360 u32 data; 359 u32 data;
361 u32 u32d[4]; 360 u32 u32d[4];
362 u32 ns_cfg_rctsize; 361 u32 ns_cfg_rctsize;
363 int bcount; 362 int bcount;
364 unsigned long membase; 363 unsigned long membase;
365 364
366 error = 0; 365 error = 0;
367 366
368 if (pci_enable_device(pcidev)) { 367 if (pci_enable_device(pcidev)) {
369 printk("nicstar%d: can't enable PCI device\n", i); 368 printk("nicstar%d: can't enable PCI device\n", i);
370 error = 2; 369 error = 2;
371 ns_init_card_error(card, error); 370 ns_init_card_error(card, error);
372 return error; 371 return error;
373 } 372 }
374 if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) || 373 if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
375 (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) { 374 (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
376 printk(KERN_WARNING 375 printk(KERN_WARNING
377 "nicstar%d: No suitable DMA available.\n", i); 376 "nicstar%d: No suitable DMA available.\n", i);
378 error = 2; 377 error = 2;
379 ns_init_card_error(card, error); 378 ns_init_card_error(card, error);
380 return error; 379 return error;
381 } 380 }
382 381
383 if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) { 382 if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
384 printk 383 printk
385 ("nicstar%d: can't allocate memory for device structure.\n", 384 ("nicstar%d: can't allocate memory for device structure.\n",
386 i); 385 i);
387 error = 2; 386 error = 2;
388 ns_init_card_error(card, error); 387 ns_init_card_error(card, error);
389 return error; 388 return error;
390 } 389 }
391 cards[i] = card; 390 cards[i] = card;
392 spin_lock_init(&card->int_lock); 391 spin_lock_init(&card->int_lock);
393 spin_lock_init(&card->res_lock); 392 spin_lock_init(&card->res_lock);
394 393
395 pci_set_drvdata(pcidev, card); 394 pci_set_drvdata(pcidev, card);
396 395
397 card->index = i; 396 card->index = i;
398 card->atmdev = NULL; 397 card->atmdev = NULL;
399 card->pcidev = pcidev; 398 card->pcidev = pcidev;
400 membase = pci_resource_start(pcidev, 1); 399 membase = pci_resource_start(pcidev, 1);
401 card->membase = ioremap(membase, NS_IOREMAP_SIZE); 400 card->membase = ioremap(membase, NS_IOREMAP_SIZE);
402 if (!card->membase) { 401 if (!card->membase) {
403 printk("nicstar%d: can't ioremap() membase.\n", i); 402 printk("nicstar%d: can't ioremap() membase.\n", i);
404 error = 3; 403 error = 3;
405 ns_init_card_error(card, error); 404 ns_init_card_error(card, error);
406 return error; 405 return error;
407 } 406 }
408 PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); 407 PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);
409 408
410 pci_set_master(pcidev); 409 pci_set_master(pcidev);
411 410
412 if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { 411 if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
413 printk("nicstar%d: can't read PCI latency timer.\n", i); 412 printk("nicstar%d: can't read PCI latency timer.\n", i);
414 error = 6; 413 error = 6;
415 ns_init_card_error(card, error); 414 ns_init_card_error(card, error);
416 return error; 415 return error;
417 } 416 }
418 #ifdef NS_PCI_LATENCY 417 #ifdef NS_PCI_LATENCY
419 if (pci_latency < NS_PCI_LATENCY) { 418 if (pci_latency < NS_PCI_LATENCY) {
420 PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, 419 PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
421 NS_PCI_LATENCY); 420 NS_PCI_LATENCY);
422 for (j = 1; j < 4; j++) { 421 for (j = 1; j < 4; j++) {
423 if (pci_write_config_byte 422 if (pci_write_config_byte
424 (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) 423 (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
425 break; 424 break;
426 } 425 }
427 if (j == 4) { 426 if (j == 4) {
428 printk 427 printk
429 ("nicstar%d: can't set PCI latency timer to %d.\n", 428 ("nicstar%d: can't set PCI latency timer to %d.\n",
430 i, NS_PCI_LATENCY); 429 i, NS_PCI_LATENCY);
431 error = 7; 430 error = 7;
432 ns_init_card_error(card, error); 431 ns_init_card_error(card, error);
433 return error; 432 return error;
434 } 433 }
435 } 434 }
436 #endif /* NS_PCI_LATENCY */ 435 #endif /* NS_PCI_LATENCY */
437 436
438 /* Clear timer overflow */ 437 /* Clear timer overflow */
439 data = readl(card->membase + STAT); 438 data = readl(card->membase + STAT);
440 if (data & NS_STAT_TMROF) 439 if (data & NS_STAT_TMROF)
441 writel(NS_STAT_TMROF, card->membase + STAT); 440 writel(NS_STAT_TMROF, card->membase + STAT);
442 441
443 /* Software reset */ 442 /* Software reset */
444 writel(NS_CFG_SWRST, card->membase + CFG); 443 writel(NS_CFG_SWRST, card->membase + CFG);
445 NS_DELAY; 444 NS_DELAY;
446 writel(0x00000000, card->membase + CFG); 445 writel(0x00000000, card->membase + CFG);
447 446
448 /* PHY reset */ 447 /* PHY reset */
449 writel(0x00000008, card->membase + GP); 448 writel(0x00000008, card->membase + GP);
450 NS_DELAY; 449 NS_DELAY;
451 writel(0x00000001, card->membase + GP); 450 writel(0x00000001, card->membase + GP);
452 NS_DELAY; 451 NS_DELAY;
453 while (CMD_BUSY(card)) ; 452 while (CMD_BUSY(card)) ;
454 writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ 453 writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
455 NS_DELAY; 454 NS_DELAY;
456 455
457 /* Detect PHY type */ 456 /* Detect PHY type */
458 while (CMD_BUSY(card)) ; 457 while (CMD_BUSY(card)) ;
459 writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); 458 writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
460 while (CMD_BUSY(card)) ; 459 while (CMD_BUSY(card)) ;
461 data = readl(card->membase + DR0); 460 data = readl(card->membase + DR0);
462 switch (data) { 461 switch (data) {
463 case 0x00000009: 462 case 0x00000009:
464 printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); 463 printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
465 card->max_pcr = ATM_25_PCR; 464 card->max_pcr = ATM_25_PCR;
466 while (CMD_BUSY(card)) ; 465 while (CMD_BUSY(card)) ;
467 writel(0x00000008, card->membase + DR0); 466 writel(0x00000008, card->membase + DR0);
468 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); 467 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
469 /* Clear an eventual pending interrupt */ 468 /* Clear an eventual pending interrupt */
470 writel(NS_STAT_SFBQF, card->membase + STAT); 469 writel(NS_STAT_SFBQF, card->membase + STAT);
471 #ifdef PHY_LOOPBACK 470 #ifdef PHY_LOOPBACK
472 while (CMD_BUSY(card)) ; 471 while (CMD_BUSY(card)) ;
473 writel(0x00000022, card->membase + DR0); 472 writel(0x00000022, card->membase + DR0);
474 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); 473 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
475 #endif /* PHY_LOOPBACK */ 474 #endif /* PHY_LOOPBACK */
476 break; 475 break;
477 case 0x00000030: 476 case 0x00000030:
478 case 0x00000031: 477 case 0x00000031:
479 printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); 478 printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
480 card->max_pcr = ATM_OC3_PCR; 479 card->max_pcr = ATM_OC3_PCR;
481 #ifdef PHY_LOOPBACK 480 #ifdef PHY_LOOPBACK
482 while (CMD_BUSY(card)) ; 481 while (CMD_BUSY(card)) ;
483 writel(0x00000002, card->membase + DR0); 482 writel(0x00000002, card->membase + DR0);
484 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); 483 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
485 #endif /* PHY_LOOPBACK */ 484 #endif /* PHY_LOOPBACK */
486 break; 485 break;
487 default: 486 default:
488 printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); 487 printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
489 error = 8; 488 error = 8;
490 ns_init_card_error(card, error); 489 ns_init_card_error(card, error);
491 return error; 490 return error;
492 } 491 }
493 writel(0x00000000, card->membase + GP); 492 writel(0x00000000, card->membase + GP);
494 493
495 /* Determine SRAM size */ 494 /* Determine SRAM size */
496 data = 0x76543210; 495 data = 0x76543210;
497 ns_write_sram(card, 0x1C003, &data, 1); 496 ns_write_sram(card, 0x1C003, &data, 1);
498 data = 0x89ABCDEF; 497 data = 0x89ABCDEF;
499 ns_write_sram(card, 0x14003, &data, 1); 498 ns_write_sram(card, 0x14003, &data, 1);
500 if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && 499 if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
501 ns_read_sram(card, 0x1C003) == 0x76543210) 500 ns_read_sram(card, 0x1C003) == 0x76543210)
502 card->sram_size = 128; 501 card->sram_size = 128;
503 else 502 else
504 card->sram_size = 32; 503 card->sram_size = 32;
505 PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); 504 PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
506 505
507 card->rct_size = NS_MAX_RCTSIZE; 506 card->rct_size = NS_MAX_RCTSIZE;
508 507
509 #if (NS_MAX_RCTSIZE == 4096) 508 #if (NS_MAX_RCTSIZE == 4096)
510 if (card->sram_size == 128) 509 if (card->sram_size == 128)
511 printk 510 printk
512 ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", 511 ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
513 i); 512 i);
514 #elif (NS_MAX_RCTSIZE == 16384) 513 #elif (NS_MAX_RCTSIZE == 16384)
515 if (card->sram_size == 32) { 514 if (card->sram_size == 32) {
516 printk 515 printk
517 ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", 516 ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
518 i); 517 i);
519 card->rct_size = 4096; 518 card->rct_size = 4096;
520 } 519 }
521 #else 520 #else
522 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c 521 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
523 #endif 522 #endif
524 523
525 card->vpibits = NS_VPIBITS; 524 card->vpibits = NS_VPIBITS;
526 if (card->rct_size == 4096) 525 if (card->rct_size == 4096)
527 card->vcibits = 12 - NS_VPIBITS; 526 card->vcibits = 12 - NS_VPIBITS;
528 else /* card->rct_size == 16384 */ 527 else /* card->rct_size == 16384 */
529 card->vcibits = 14 - NS_VPIBITS; 528 card->vcibits = 14 - NS_VPIBITS;
530 529
531 /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ 530 /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
532 if (mac[i] == NULL) 531 if (mac[i] == NULL)
533 nicstar_init_eprom(card->membase); 532 nicstar_init_eprom(card->membase);
534 533
535 /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ 534 /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
536 writel(0x00000000, card->membase + VPM); 535 writel(0x00000000, card->membase + VPM);
537 536
538 /* Initialize TSQ */ 537 /* Initialize TSQ */
539 card->tsq.org = pci_alloc_consistent(card->pcidev, 538 card->tsq.org = pci_alloc_consistent(card->pcidev,
540 NS_TSQSIZE + NS_TSQ_ALIGNMENT, 539 NS_TSQSIZE + NS_TSQ_ALIGNMENT,
541 &card->tsq.dma); 540 &card->tsq.dma);
542 if (card->tsq.org == NULL) { 541 if (card->tsq.org == NULL) {
543 printk("nicstar%d: can't allocate TSQ.\n", i); 542 printk("nicstar%d: can't allocate TSQ.\n", i);
544 error = 10; 543 error = 10;
545 ns_init_card_error(card, error); 544 ns_init_card_error(card, error);
546 return error; 545 return error;
547 } 546 }
548 card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); 547 card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
549 card->tsq.next = card->tsq.base; 548 card->tsq.next = card->tsq.base;
550 card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); 549 card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
551 for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) 550 for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
552 ns_tsi_init(card->tsq.base + j); 551 ns_tsi_init(card->tsq.base + j);
553 writel(0x00000000, card->membase + TSQH); 552 writel(0x00000000, card->membase + TSQH);
554 writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); 553 writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
555 PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); 554 PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
556 555
557 /* Initialize RSQ */ 556 /* Initialize RSQ */
558 card->rsq.org = pci_alloc_consistent(card->pcidev, 557 card->rsq.org = pci_alloc_consistent(card->pcidev,
559 NS_RSQSIZE + NS_RSQ_ALIGNMENT, 558 NS_RSQSIZE + NS_RSQ_ALIGNMENT,
560 &card->rsq.dma); 559 &card->rsq.dma);
561 if (card->rsq.org == NULL) { 560 if (card->rsq.org == NULL) {
562 printk("nicstar%d: can't allocate RSQ.\n", i); 561 printk("nicstar%d: can't allocate RSQ.\n", i);
563 error = 11; 562 error = 11;
564 ns_init_card_error(card, error); 563 ns_init_card_error(card, error);
565 return error; 564 return error;
566 } 565 }
567 card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); 566 card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
568 card->rsq.next = card->rsq.base; 567 card->rsq.next = card->rsq.base;
569 card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); 568 card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
570 for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) 569 for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
571 ns_rsqe_init(card->rsq.base + j); 570 ns_rsqe_init(card->rsq.base + j);
572 writel(0x00000000, card->membase + RSQH); 571 writel(0x00000000, card->membase + RSQH);
573 writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); 572 writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
574 PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); 573 PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);
575 574
576 /* Initialize SCQ0, the only VBR SCQ used */ 575 /* Initialize SCQ0, the only VBR SCQ used */
577 card->scq1 = NULL; 576 card->scq1 = NULL;
578 card->scq2 = NULL; 577 card->scq2 = NULL;
579 card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); 578 card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
580 if (card->scq0 == NULL) { 579 if (card->scq0 == NULL) {
581 printk("nicstar%d: can't get SCQ0.\n", i); 580 printk("nicstar%d: can't get SCQ0.\n", i);
582 error = 12; 581 error = 12;
583 ns_init_card_error(card, error); 582 ns_init_card_error(card, error);
584 return error; 583 return error;
585 } 584 }
586 u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); 585 u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
587 u32d[1] = (u32) 0x00000000; 586 u32d[1] = (u32) 0x00000000;
588 u32d[2] = (u32) 0xffffffff; 587 u32d[2] = (u32) 0xffffffff;
589 u32d[3] = (u32) 0x00000000; 588 u32d[3] = (u32) 0x00000000;
590 ns_write_sram(card, NS_VRSCD0, u32d, 4); 589 ns_write_sram(card, NS_VRSCD0, u32d, 4);
591 ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ 590 ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */
592 ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ 591 ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */
593 card->scq0->scd = NS_VRSCD0; 592 card->scq0->scd = NS_VRSCD0;
594 PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); 593 PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);
595 594
596 /* Initialize TSTs */ 595 /* Initialize TSTs */
597 card->tst_addr = NS_TST0; 596 card->tst_addr = NS_TST0;
598 card->tst_free_entries = NS_TST_NUM_ENTRIES; 597 card->tst_free_entries = NS_TST_NUM_ENTRIES;
599 data = NS_TST_OPCODE_VARIABLE; 598 data = NS_TST_OPCODE_VARIABLE;
600 for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 599 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
601 ns_write_sram(card, NS_TST0 + j, &data, 1); 600 ns_write_sram(card, NS_TST0 + j, &data, 1);
602 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); 601 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
603 ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); 602 ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
604 for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 603 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
605 ns_write_sram(card, NS_TST1 + j, &data, 1); 604 ns_write_sram(card, NS_TST1 + j, &data, 1);
606 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); 605 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
607 ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); 606 ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
608 for (j = 0; j < NS_TST_NUM_ENTRIES; j++) 607 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
609 card->tste2vc[j] = NULL; 608 card->tste2vc[j] = NULL;
610 writel(NS_TST0 << 2, card->membase + TSTB); 609 writel(NS_TST0 << 2, card->membase + TSTB);
611 610
612 /* Initialize RCT. AAL type is set on opening the VC. */ 611 /* Initialize RCT. AAL type is set on opening the VC. */
613 #ifdef RCQ_SUPPORT 612 #ifdef RCQ_SUPPORT
614 u32d[0] = NS_RCTE_RAWCELLINTEN; 613 u32d[0] = NS_RCTE_RAWCELLINTEN;
615 #else 614 #else
616 u32d[0] = 0x00000000; 615 u32d[0] = 0x00000000;
617 #endif /* RCQ_SUPPORT */ 616 #endif /* RCQ_SUPPORT */
618 u32d[1] = 0x00000000; 617 u32d[1] = 0x00000000;
619 u32d[2] = 0x00000000; 618 u32d[2] = 0x00000000;
620 u32d[3] = 0xFFFFFFFF; 619 u32d[3] = 0xFFFFFFFF;
621 for (j = 0; j < card->rct_size; j++) 620 for (j = 0; j < card->rct_size; j++)
622 ns_write_sram(card, j * 4, u32d, 4); 621 ns_write_sram(card, j * 4, u32d, 4);
623 622
624 memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); 623 memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
625 624
626 for (j = 0; j < NS_FRSCD_NUM; j++) 625 for (j = 0; j < NS_FRSCD_NUM; j++)
627 card->scd2vc[j] = NULL; 626 card->scd2vc[j] = NULL;
628 627
629 /* Initialize buffer levels */ 628 /* Initialize buffer levels */
630 card->sbnr.min = MIN_SB; 629 card->sbnr.min = MIN_SB;
631 card->sbnr.init = NUM_SB; 630 card->sbnr.init = NUM_SB;
632 card->sbnr.max = MAX_SB; 631 card->sbnr.max = MAX_SB;
633 card->lbnr.min = MIN_LB; 632 card->lbnr.min = MIN_LB;
634 card->lbnr.init = NUM_LB; 633 card->lbnr.init = NUM_LB;
635 card->lbnr.max = MAX_LB; 634 card->lbnr.max = MAX_LB;
636 card->iovnr.min = MIN_IOVB; 635 card->iovnr.min = MIN_IOVB;
637 card->iovnr.init = NUM_IOVB; 636 card->iovnr.init = NUM_IOVB;
638 card->iovnr.max = MAX_IOVB; 637 card->iovnr.max = MAX_IOVB;
639 card->hbnr.min = MIN_HB; 638 card->hbnr.min = MIN_HB;
640 card->hbnr.init = NUM_HB; 639 card->hbnr.init = NUM_HB;
641 card->hbnr.max = MAX_HB; 640 card->hbnr.max = MAX_HB;
642 641
643 card->sm_handle = 0x00000000; 642 card->sm_handle = 0x00000000;
644 card->sm_addr = 0x00000000; 643 card->sm_addr = 0x00000000;
645 card->lg_handle = 0x00000000; 644 card->lg_handle = 0x00000000;
646 card->lg_addr = 0x00000000; 645 card->lg_addr = 0x00000000;
647 646
648 card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ 647 card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
649 648
650 idr_init(&card->idr); 649 idr_init(&card->idr);
651 650
652 /* Pre-allocate some huge buffers */ 651 /* Pre-allocate some huge buffers */
653 skb_queue_head_init(&card->hbpool.queue); 652 skb_queue_head_init(&card->hbpool.queue);
654 card->hbpool.count = 0; 653 card->hbpool.count = 0;
655 for (j = 0; j < NUM_HB; j++) { 654 for (j = 0; j < NUM_HB; j++) {
656 struct sk_buff *hb; 655 struct sk_buff *hb;
657 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); 656 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
658 if (hb == NULL) { 657 if (hb == NULL) {
659 printk 658 printk
660 ("nicstar%d: can't allocate %dth of %d huge buffers.\n", 659 ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
661 i, j, NUM_HB); 660 i, j, NUM_HB);
662 error = 13; 661 error = 13;
663 ns_init_card_error(card, error); 662 ns_init_card_error(card, error);
664 return error; 663 return error;
665 } 664 }
666 NS_PRV_BUFTYPE(hb) = BUF_NONE; 665 NS_PRV_BUFTYPE(hb) = BUF_NONE;
667 skb_queue_tail(&card->hbpool.queue, hb); 666 skb_queue_tail(&card->hbpool.queue, hb);
668 card->hbpool.count++; 667 card->hbpool.count++;
669 } 668 }
670 669
671 /* Allocate large buffers */ 670 /* Allocate large buffers */
672 skb_queue_head_init(&card->lbpool.queue); 671 skb_queue_head_init(&card->lbpool.queue);
673 card->lbpool.count = 0; /* Not used */ 672 card->lbpool.count = 0; /* Not used */
674 for (j = 0; j < NUM_LB; j++) { 673 for (j = 0; j < NUM_LB; j++) {
675 struct sk_buff *lb; 674 struct sk_buff *lb;
676 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); 675 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
677 if (lb == NULL) { 676 if (lb == NULL) {
678 printk 677 printk
679 ("nicstar%d: can't allocate %dth of %d large buffers.\n", 678 ("nicstar%d: can't allocate %dth of %d large buffers.\n",
680 i, j, NUM_LB); 679 i, j, NUM_LB);
681 error = 14; 680 error = 14;
682 ns_init_card_error(card, error); 681 ns_init_card_error(card, error);
683 return error; 682 return error;
684 } 683 }
685 NS_PRV_BUFTYPE(lb) = BUF_LG; 684 NS_PRV_BUFTYPE(lb) = BUF_LG;
686 skb_queue_tail(&card->lbpool.queue, lb); 685 skb_queue_tail(&card->lbpool.queue, lb);
687 skb_reserve(lb, NS_SMBUFSIZE); 686 skb_reserve(lb, NS_SMBUFSIZE);
688 push_rxbufs(card, lb); 687 push_rxbufs(card, lb);
689 /* Due to the implementation of push_rxbufs() this is 1, not 0 */ 688 /* Due to the implementation of push_rxbufs() this is 1, not 0 */
690 if (j == 1) { 689 if (j == 1) {
691 card->rcbuf = lb; 690 card->rcbuf = lb;
692 card->rawcell = (struct ns_rcqe *) lb->data; 691 card->rawcell = (struct ns_rcqe *) lb->data;
693 card->rawch = NS_PRV_DMA(lb); 692 card->rawch = NS_PRV_DMA(lb);
694 } 693 }
695 } 694 }
696 /* Test for strange behaviour which leads to crashes */ 695 /* Test for strange behaviour which leads to crashes */
697 if ((bcount = 696 if ((bcount =
698 ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { 697 ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
699 printk 698 printk
700 ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", 699 ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
701 i, j, bcount); 700 i, j, bcount);
702 error = 14; 701 error = 14;
703 ns_init_card_error(card, error); 702 ns_init_card_error(card, error);
704 return error; 703 return error;
705 } 704 }
706 705
707 /* Allocate small buffers */ 706 /* Allocate small buffers */
708 skb_queue_head_init(&card->sbpool.queue); 707 skb_queue_head_init(&card->sbpool.queue);
709 card->sbpool.count = 0; /* Not used */ 708 card->sbpool.count = 0; /* Not used */
710 for (j = 0; j < NUM_SB; j++) { 709 for (j = 0; j < NUM_SB; j++) {
711 struct sk_buff *sb; 710 struct sk_buff *sb;
712 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); 711 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
713 if (sb == NULL) { 712 if (sb == NULL) {
714 printk 713 printk
715 ("nicstar%d: can't allocate %dth of %d small buffers.\n", 714 ("nicstar%d: can't allocate %dth of %d small buffers.\n",
716 i, j, NUM_SB); 715 i, j, NUM_SB);
717 error = 15; 716 error = 15;
718 ns_init_card_error(card, error); 717 ns_init_card_error(card, error);
719 return error; 718 return error;
720 } 719 }
721 NS_PRV_BUFTYPE(sb) = BUF_SM; 720 NS_PRV_BUFTYPE(sb) = BUF_SM;
722 skb_queue_tail(&card->sbpool.queue, sb); 721 skb_queue_tail(&card->sbpool.queue, sb);
723 skb_reserve(sb, NS_AAL0_HEADER); 722 skb_reserve(sb, NS_AAL0_HEADER);
724 push_rxbufs(card, sb); 723 push_rxbufs(card, sb);
725 } 724 }
726 /* Test for strange behaviour which leads to crashes */ 725 /* Test for strange behaviour which leads to crashes */
727 if ((bcount = 726 if ((bcount =
728 ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { 727 ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
729 printk 728 printk
730 ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", 729 ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
731 i, j, bcount); 730 i, j, bcount);
732 error = 15; 731 error = 15;
733 ns_init_card_error(card, error); 732 ns_init_card_error(card, error);
734 return error; 733 return error;
735 } 734 }
736 735
737 /* Allocate iovec buffers */ 736 /* Allocate iovec buffers */
738 skb_queue_head_init(&card->iovpool.queue); 737 skb_queue_head_init(&card->iovpool.queue);
739 card->iovpool.count = 0; 738 card->iovpool.count = 0;
740 for (j = 0; j < NUM_IOVB; j++) { 739 for (j = 0; j < NUM_IOVB; j++) {
741 struct sk_buff *iovb; 740 struct sk_buff *iovb;
742 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); 741 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
743 if (iovb == NULL) { 742 if (iovb == NULL) {
744 printk 743 printk
745 ("nicstar%d: can't allocate %dth of %d iovec buffers.\n", 744 ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
746 i, j, NUM_IOVB); 745 i, j, NUM_IOVB);
747 error = 16; 746 error = 16;
748 ns_init_card_error(card, error); 747 ns_init_card_error(card, error);
749 return error; 748 return error;
750 } 749 }
751 NS_PRV_BUFTYPE(iovb) = BUF_NONE; 750 NS_PRV_BUFTYPE(iovb) = BUF_NONE;
752 skb_queue_tail(&card->iovpool.queue, iovb); 751 skb_queue_tail(&card->iovpool.queue, iovb);
753 card->iovpool.count++; 752 card->iovpool.count++;
754 } 753 }
755 754
756 /* Configure NICStAR */ 755 /* Configure NICStAR */
757 if (card->rct_size == 4096) 756 if (card->rct_size == 4096)
758 ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; 757 ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
759 else /* (card->rct_size == 16384) */ 758 else /* (card->rct_size == 16384) */
760 ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; 759 ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
761 760
762 card->efbie = 1; 761 card->efbie = 1;
763 762
764 card->intcnt = 0; 763 card->intcnt = 0;
765 if (request_irq 764 if (request_irq
766 (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { 765 (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
767 printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); 766 printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
768 error = 9; 767 error = 9;
769 ns_init_card_error(card, error); 768 ns_init_card_error(card, error);
770 return error; 769 return error;
771 } 770 }
772 771
773 /* Register device */ 772 /* Register device */
774 card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, 773 card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
775 -1, NULL); 774 -1, NULL);
776 if (card->atmdev == NULL) { 775 if (card->atmdev == NULL) {
777 printk("nicstar%d: can't register device.\n", i); 776 printk("nicstar%d: can't register device.\n", i);
778 error = 17; 777 error = 17;
779 ns_init_card_error(card, error); 778 ns_init_card_error(card, error);
780 return error; 779 return error;
781 } 780 }
782 781
783 if (ns_parse_mac(mac[i], card->atmdev->esi)) { 782 if (ns_parse_mac(mac[i], card->atmdev->esi)) {
784 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, 783 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
785 card->atmdev->esi, 6); 784 card->atmdev->esi, 6);
786 if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 785 if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
787 0) { 786 0) {
788 nicstar_read_eprom(card->membase, 787 nicstar_read_eprom(card->membase,
789 NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, 788 NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
790 card->atmdev->esi, 6); 789 card->atmdev->esi, 6);
791 } 790 }
792 } 791 }
793 792
794 printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); 793 printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
795 794
796 card->atmdev->dev_data = card; 795 card->atmdev->dev_data = card;
797 card->atmdev->ci_range.vpi_bits = card->vpibits; 796 card->atmdev->ci_range.vpi_bits = card->vpibits;
798 card->atmdev->ci_range.vci_bits = card->vcibits; 797 card->atmdev->ci_range.vci_bits = card->vcibits;
799 card->atmdev->link_rate = card->max_pcr; 798 card->atmdev->link_rate = card->max_pcr;
800 card->atmdev->phy = NULL; 799 card->atmdev->phy = NULL;
801 800
802 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI 801 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
803 if (card->max_pcr == ATM_OC3_PCR) 802 if (card->max_pcr == ATM_OC3_PCR)
804 suni_init(card->atmdev); 803 suni_init(card->atmdev);
805 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ 804 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
806 805
807 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 806 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
808 if (card->max_pcr == ATM_25_PCR) 807 if (card->max_pcr == ATM_25_PCR)
809 idt77105_init(card->atmdev); 808 idt77105_init(card->atmdev);
810 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ 809 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
811 810
812 if (card->atmdev->phy && card->atmdev->phy->start) 811 if (card->atmdev->phy && card->atmdev->phy->start)
813 card->atmdev->phy->start(card->atmdev); 812 card->atmdev->phy->start(card->atmdev);
814 813
815 writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ 814 writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */
816 NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ 815 NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */
817 NS_CFG_PHYIE, card->membase + CFG); 816 NS_CFG_PHYIE, card->membase + CFG);
818 817
819 num_cards++; 818 num_cards++;
820 819
821 return error; 820 return error;
822 } 821 }
823 822
824 static void ns_init_card_error(ns_dev *card, int error) 823 static void ns_init_card_error(ns_dev *card, int error)
825 { 824 {
826 if (error >= 17) { 825 if (error >= 17) {
827 writel(0x00000000, card->membase + CFG); 826 writel(0x00000000, card->membase + CFG);
828 } 827 }
829 if (error >= 16) { 828 if (error >= 16) {
830 struct sk_buff *iovb; 829 struct sk_buff *iovb;
831 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) 830 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
832 dev_kfree_skb_any(iovb); 831 dev_kfree_skb_any(iovb);
833 } 832 }
834 if (error >= 15) { 833 if (error >= 15) {
835 struct sk_buff *sb; 834 struct sk_buff *sb;
836 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) 835 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
837 dev_kfree_skb_any(sb); 836 dev_kfree_skb_any(sb);
838 free_scq(card, card->scq0, NULL); 837 free_scq(card, card->scq0, NULL);
839 } 838 }
840 if (error >= 14) { 839 if (error >= 14) {
841 struct sk_buff *lb; 840 struct sk_buff *lb;
842 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) 841 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
843 dev_kfree_skb_any(lb); 842 dev_kfree_skb_any(lb);
844 } 843 }
845 if (error >= 13) { 844 if (error >= 13) {
846 struct sk_buff *hb; 845 struct sk_buff *hb;
847 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) 846 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
848 dev_kfree_skb_any(hb); 847 dev_kfree_skb_any(hb);
849 } 848 }
850 if (error >= 12) { 849 if (error >= 12) {
851 kfree(card->rsq.org); 850 kfree(card->rsq.org);
852 } 851 }
853 if (error >= 11) { 852 if (error >= 11) {
854 kfree(card->tsq.org); 853 kfree(card->tsq.org);
855 } 854 }
856 if (error >= 10) { 855 if (error >= 10) {
857 free_irq(card->pcidev->irq, card); 856 free_irq(card->pcidev->irq, card);
858 } 857 }
859 if (error >= 4) { 858 if (error >= 4) {
860 iounmap(card->membase); 859 iounmap(card->membase);
861 } 860 }
862 if (error >= 3) { 861 if (error >= 3) {
863 pci_disable_device(card->pcidev); 862 pci_disable_device(card->pcidev);
864 kfree(card); 863 kfree(card);
865 } 864 }
866 } 865 }
867 866
868 static scq_info *get_scq(ns_dev *card, int size, u32 scd) 867 static scq_info *get_scq(ns_dev *card, int size, u32 scd)
869 { 868 {
870 scq_info *scq; 869 scq_info *scq;
871 int i; 870 int i;
872 871
873 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) 872 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
874 return NULL; 873 return NULL;
875 874
876 scq = kmalloc(sizeof(scq_info), GFP_KERNEL); 875 scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
877 if (!scq) 876 if (!scq)
878 return NULL; 877 return NULL;
879 scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma); 878 scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
880 if (!scq->org) { 879 if (!scq->org) {
881 kfree(scq); 880 kfree(scq);
882 return NULL; 881 return NULL;
883 } 882 }
884 scq->skb = kmalloc(sizeof(struct sk_buff *) * 883 scq->skb = kmalloc(sizeof(struct sk_buff *) *
885 (size / NS_SCQE_SIZE), GFP_KERNEL); 884 (size / NS_SCQE_SIZE), GFP_KERNEL);
886 if (!scq->skb) { 885 if (!scq->skb) {
887 kfree(scq->org); 886 kfree(scq->org);
888 kfree(scq); 887 kfree(scq);
889 return NULL; 888 return NULL;
890 } 889 }
891 scq->num_entries = size / NS_SCQE_SIZE; 890 scq->num_entries = size / NS_SCQE_SIZE;
892 scq->base = PTR_ALIGN(scq->org, size); 891 scq->base = PTR_ALIGN(scq->org, size);
893 scq->next = scq->base; 892 scq->next = scq->base;
894 scq->last = scq->base + (scq->num_entries - 1); 893 scq->last = scq->base + (scq->num_entries - 1);
895 scq->tail = scq->last; 894 scq->tail = scq->last;
896 scq->scd = scd; 895 scq->scd = scd;
897 scq->num_entries = size / NS_SCQE_SIZE; 896 scq->num_entries = size / NS_SCQE_SIZE;
898 scq->tbd_count = 0; 897 scq->tbd_count = 0;
899 init_waitqueue_head(&scq->scqfull_waitq); 898 init_waitqueue_head(&scq->scqfull_waitq);
900 scq->full = 0; 899 scq->full = 0;
901 spin_lock_init(&scq->lock); 900 spin_lock_init(&scq->lock);
902 901
903 for (i = 0; i < scq->num_entries; i++) 902 for (i = 0; i < scq->num_entries; i++)
904 scq->skb[i] = NULL; 903 scq->skb[i] = NULL;
905 904
906 return scq; 905 return scq;
907 } 906 }
908 907
909 /* For variable rate SCQ vcc must be NULL */ 908 /* For variable rate SCQ vcc must be NULL */
910 static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc) 909 static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
911 { 910 {
912 int i; 911 int i;
913 912
914 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) 913 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
915 for (i = 0; i < scq->num_entries; i++) { 914 for (i = 0; i < scq->num_entries; i++) {
916 if (scq->skb[i] != NULL) { 915 if (scq->skb[i] != NULL) {
917 vcc = ATM_SKB(scq->skb[i])->vcc; 916 vcc = ATM_SKB(scq->skb[i])->vcc;
918 if (vcc->pop != NULL) 917 if (vcc->pop != NULL)
919 vcc->pop(vcc, scq->skb[i]); 918 vcc->pop(vcc, scq->skb[i]);
920 else 919 else
921 dev_kfree_skb_any(scq->skb[i]); 920 dev_kfree_skb_any(scq->skb[i]);
922 } 921 }
923 } else { /* vcc must be != NULL */ 922 } else { /* vcc must be != NULL */
924 923
925 if (vcc == NULL) { 924 if (vcc == NULL) {
926 printk 925 printk
927 ("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); 926 ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
928 for (i = 0; i < scq->num_entries; i++) 927 for (i = 0; i < scq->num_entries; i++)
929 dev_kfree_skb_any(scq->skb[i]); 928 dev_kfree_skb_any(scq->skb[i]);
930 } else 929 } else
931 for (i = 0; i < scq->num_entries; i++) { 930 for (i = 0; i < scq->num_entries; i++) {
932 if (scq->skb[i] != NULL) { 931 if (scq->skb[i] != NULL) {
933 if (vcc->pop != NULL) 932 if (vcc->pop != NULL)
934 vcc->pop(vcc, scq->skb[i]); 933 vcc->pop(vcc, scq->skb[i]);
935 else 934 else
936 dev_kfree_skb_any(scq->skb[i]); 935 dev_kfree_skb_any(scq->skb[i]);
937 } 936 }
938 } 937 }
939 } 938 }
940 kfree(scq->skb); 939 kfree(scq->skb);
941 pci_free_consistent(card->pcidev, 940 pci_free_consistent(card->pcidev,
942 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ? 941 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
943 VBR_SCQSIZE : CBR_SCQSIZE), 942 VBR_SCQSIZE : CBR_SCQSIZE),
944 scq->org, scq->dma); 943 scq->org, scq->dma);
945 kfree(scq); 944 kfree(scq);
946 } 945 }
947 946
948 /* The handles passed must be pointers to the sk_buff containing the small 947 /* The handles passed must be pointers to the sk_buff containing the small
949 or large buffer(s) cast to u32. */ 948 or large buffer(s) cast to u32. */
950 static void push_rxbufs(ns_dev * card, struct sk_buff *skb) 949 static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
951 { 950 {
952 struct sk_buff *handle1, *handle2; 951 struct sk_buff *handle1, *handle2;
953 u32 id1 = 0, id2 = 0; 952 u32 id1 = 0, id2 = 0;
954 u32 addr1, addr2; 953 u32 addr1, addr2;
955 u32 stat; 954 u32 stat;
956 unsigned long flags; 955 unsigned long flags;
957 int err; 956 int err;
958 957
959 /* *BARF* */ 958 /* *BARF* */
960 handle2 = NULL; 959 handle2 = NULL;
961 addr2 = 0; 960 addr2 = 0;
962 handle1 = skb; 961 handle1 = skb;
963 addr1 = pci_map_single(card->pcidev, 962 addr1 = pci_map_single(card->pcidev,
964 skb->data, 963 skb->data,
965 (NS_PRV_BUFTYPE(skb) == BUF_SM 964 (NS_PRV_BUFTYPE(skb) == BUF_SM
966 ? NS_SMSKBSIZE : NS_LGSKBSIZE), 965 ? NS_SMSKBSIZE : NS_LGSKBSIZE),
967 PCI_DMA_TODEVICE); 966 PCI_DMA_TODEVICE);
968 NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */ 967 NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
969 968
970 #ifdef GENERAL_DEBUG 969 #ifdef GENERAL_DEBUG
971 if (!addr1) 970 if (!addr1)
972 printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", 971 printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
973 card->index); 972 card->index);
974 #endif /* GENERAL_DEBUG */ 973 #endif /* GENERAL_DEBUG */
975 974
976 stat = readl(card->membase + STAT); 975 stat = readl(card->membase + STAT);
977 card->sbfqc = ns_stat_sfbqc_get(stat); 976 card->sbfqc = ns_stat_sfbqc_get(stat);
978 card->lbfqc = ns_stat_lfbqc_get(stat); 977 card->lbfqc = ns_stat_lfbqc_get(stat);
979 if (NS_PRV_BUFTYPE(skb) == BUF_SM) { 978 if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
980 if (!addr2) { 979 if (!addr2) {
981 if (card->sm_addr) { 980 if (card->sm_addr) {
982 addr2 = card->sm_addr; 981 addr2 = card->sm_addr;
983 handle2 = card->sm_handle; 982 handle2 = card->sm_handle;
984 card->sm_addr = 0x00000000; 983 card->sm_addr = 0x00000000;
985 card->sm_handle = 0x00000000; 984 card->sm_handle = 0x00000000;
986 } else { /* (!sm_addr) */ 985 } else { /* (!sm_addr) */
987 986
988 card->sm_addr = addr1; 987 card->sm_addr = addr1;
989 card->sm_handle = handle1; 988 card->sm_handle = handle1;
990 } 989 }
991 } 990 }
992 } else { /* buf_type == BUF_LG */ 991 } else { /* buf_type == BUF_LG */
993 992
994 if (!addr2) { 993 if (!addr2) {
995 if (card->lg_addr) { 994 if (card->lg_addr) {
996 addr2 = card->lg_addr; 995 addr2 = card->lg_addr;
997 handle2 = card->lg_handle; 996 handle2 = card->lg_handle;
998 card->lg_addr = 0x00000000; 997 card->lg_addr = 0x00000000;
999 card->lg_handle = 0x00000000; 998 card->lg_handle = 0x00000000;
1000 } else { /* (!lg_addr) */ 999 } else { /* (!lg_addr) */
1001 1000
1002 card->lg_addr = addr1; 1001 card->lg_addr = addr1;
1003 card->lg_handle = handle1; 1002 card->lg_handle = handle1;
1004 } 1003 }
1005 } 1004 }
1006 } 1005 }
1007 1006
1008 if (addr2) { 1007 if (addr2) {
1009 if (NS_PRV_BUFTYPE(skb) == BUF_SM) { 1008 if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
1010 if (card->sbfqc >= card->sbnr.max) { 1009 if (card->sbfqc >= card->sbnr.max) {
1011 skb_unlink(handle1, &card->sbpool.queue); 1010 skb_unlink(handle1, &card->sbpool.queue);
1012 dev_kfree_skb_any(handle1); 1011 dev_kfree_skb_any(handle1);
1013 skb_unlink(handle2, &card->sbpool.queue); 1012 skb_unlink(handle2, &card->sbpool.queue);
1014 dev_kfree_skb_any(handle2); 1013 dev_kfree_skb_any(handle2);
1015 return; 1014 return;
1016 } else 1015 } else
1017 card->sbfqc += 2; 1016 card->sbfqc += 2;
1018 } else { /* (buf_type == BUF_LG) */ 1017 } else { /* (buf_type == BUF_LG) */
1019 1018
1020 if (card->lbfqc >= card->lbnr.max) { 1019 if (card->lbfqc >= card->lbnr.max) {
1021 skb_unlink(handle1, &card->lbpool.queue); 1020 skb_unlink(handle1, &card->lbpool.queue);
1022 dev_kfree_skb_any(handle1); 1021 dev_kfree_skb_any(handle1);
1023 skb_unlink(handle2, &card->lbpool.queue); 1022 skb_unlink(handle2, &card->lbpool.queue);
1024 dev_kfree_skb_any(handle2); 1023 dev_kfree_skb_any(handle2);
1025 return; 1024 return;
1026 } else 1025 } else
1027 card->lbfqc += 2; 1026 card->lbfqc += 2;
1028 } 1027 }
1029 1028
1030 do { 1029 do {
1031 if (!idr_pre_get(&card->idr, GFP_ATOMIC)) { 1030 if (!idr_pre_get(&card->idr, GFP_ATOMIC)) {
1032 printk(KERN_ERR 1031 printk(KERN_ERR
1033 "nicstar%d: no free memory for idr\n", 1032 "nicstar%d: no free memory for idr\n",
1034 card->index); 1033 card->index);
1035 goto out; 1034 goto out;
1036 } 1035 }
1037 1036
1038 if (!id1) 1037 if (!id1)
1039 err = idr_get_new_above(&card->idr, handle1, 0, &id1); 1038 err = idr_get_new_above(&card->idr, handle1, 0, &id1);
1040 1039
1041 if (!id2 && err == 0) 1040 if (!id2 && err == 0)
1042 err = idr_get_new_above(&card->idr, handle2, 0, &id2); 1041 err = idr_get_new_above(&card->idr, handle2, 0, &id2);
1043 1042
1044 } while (err == -EAGAIN); 1043 } while (err == -EAGAIN);
1045 1044
1046 if (err) 1045 if (err)
1047 goto out; 1046 goto out;
1048 1047
1049 spin_lock_irqsave(&card->res_lock, flags); 1048 spin_lock_irqsave(&card->res_lock, flags);
1050 while (CMD_BUSY(card)) ; 1049 while (CMD_BUSY(card)) ;
1051 writel(addr2, card->membase + DR3); 1050 writel(addr2, card->membase + DR3);
1052 writel(id2, card->membase + DR2); 1051 writel(id2, card->membase + DR2);
1053 writel(addr1, card->membase + DR1); 1052 writel(addr1, card->membase + DR1);
1054 writel(id1, card->membase + DR0); 1053 writel(id1, card->membase + DR0);
1055 writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb), 1054 writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
1056 card->membase + CMD); 1055 card->membase + CMD);
1057 spin_unlock_irqrestore(&card->res_lock, flags); 1056 spin_unlock_irqrestore(&card->res_lock, flags);
1058 1057
1059 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", 1058 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
1060 card->index, 1059 card->index,
1061 (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"), 1060 (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
1062 addr1, addr2); 1061 addr1, addr2);
1063 } 1062 }
1064 1063
1065 if (!card->efbie && card->sbfqc >= card->sbnr.min && 1064 if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1066 card->lbfqc >= card->lbnr.min) { 1065 card->lbfqc >= card->lbnr.min) {
1067 card->efbie = 1; 1066 card->efbie = 1;
1068 writel((readl(card->membase + CFG) | NS_CFG_EFBIE), 1067 writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
1069 card->membase + CFG); 1068 card->membase + CFG);
1070 } 1069 }
1071 1070
1072 out: 1071 out:
1073 return; 1072 return;
1074 } 1073 }
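The idr_pre_get()/idr_get_new_above() retry loop above is the older two-step idr allocation pattern and belongs to the same legacy API family as the idr_remove_all() call this commit drops elsewhere in the file. Purely as a sketch of the direction the API is heading (not part of this change; ns_register_handle() is a hypothetical helper assuming the same card->idr and skb handles used by push_rxbufs()), the same registration could be written with the newer idr_alloc() helper roughly like this:

/* Illustrative only -- not part of this commit. */
static int ns_register_handle(ns_dev *card, struct sk_buff *handle, u32 *id)
{
	int ret;

	ret = idr_alloc(&card->idr, handle, 0, 0, GFP_ATOMIC);
	if (ret < 0)
		return ret;	/* -ENOMEM or -ENOSPC */
	*id = ret;		/* value to program into DR0/DR2 */
	return 0;
}

idr_alloc() stores the pointer and hands back the new id (or a negative errno) in a single call, which is why the -EAGAIN retry loop is no longer needed.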
1075 1074
1076 static irqreturn_t ns_irq_handler(int irq, void *dev_id) 1075 static irqreturn_t ns_irq_handler(int irq, void *dev_id)
1077 { 1076 {
1078 u32 stat_r; 1077 u32 stat_r;
1079 ns_dev *card; 1078 ns_dev *card;
1080 struct atm_dev *dev; 1079 struct atm_dev *dev;
1081 unsigned long flags; 1080 unsigned long flags;
1082 1081
1083 card = (ns_dev *) dev_id; 1082 card = (ns_dev *) dev_id;
1084 dev = card->atmdev; 1083 dev = card->atmdev;
1085 card->intcnt++; 1084 card->intcnt++;
1086 1085
1087 PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); 1086 PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
1088 1087
1089 spin_lock_irqsave(&card->int_lock, flags); 1088 spin_lock_irqsave(&card->int_lock, flags);
1090 1089
1091 stat_r = readl(card->membase + STAT); 1090 stat_r = readl(card->membase + STAT);
1092 1091
1093 /* Transmit Status Indicator has been written to T. S. Queue */ 1092 /* Transmit Status Indicator has been written to T. S. Queue */
1094 if (stat_r & NS_STAT_TSIF) { 1093 if (stat_r & NS_STAT_TSIF) {
1095 TXPRINTK("nicstar%d: TSI interrupt\n", card->index); 1094 TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1096 process_tsq(card); 1095 process_tsq(card);
1097 writel(NS_STAT_TSIF, card->membase + STAT); 1096 writel(NS_STAT_TSIF, card->membase + STAT);
1098 } 1097 }
1099 1098
1100 /* Incomplete CS-PDU has been transmitted */ 1099 /* Incomplete CS-PDU has been transmitted */
1101 if (stat_r & NS_STAT_TXICP) { 1100 if (stat_r & NS_STAT_TXICP) {
1102 writel(NS_STAT_TXICP, card->membase + STAT); 1101 writel(NS_STAT_TXICP, card->membase + STAT);
1103 TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", 1102 TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1104 card->index); 1103 card->index);
1105 } 1104 }
1106 1105
1107 /* Transmit Status Queue 7/8 full */ 1106 /* Transmit Status Queue 7/8 full */
1108 if (stat_r & NS_STAT_TSQF) { 1107 if (stat_r & NS_STAT_TSQF) {
1109 writel(NS_STAT_TSQF, card->membase + STAT); 1108 writel(NS_STAT_TSQF, card->membase + STAT);
1110 PRINTK("nicstar%d: TSQ full.\n", card->index); 1109 PRINTK("nicstar%d: TSQ full.\n", card->index);
1111 process_tsq(card); 1110 process_tsq(card);
1112 } 1111 }
1113 1112
1114 /* Timer overflow */ 1113 /* Timer overflow */
1115 if (stat_r & NS_STAT_TMROF) { 1114 if (stat_r & NS_STAT_TMROF) {
1116 writel(NS_STAT_TMROF, card->membase + STAT); 1115 writel(NS_STAT_TMROF, card->membase + STAT);
1117 PRINTK("nicstar%d: Timer overflow.\n", card->index); 1116 PRINTK("nicstar%d: Timer overflow.\n", card->index);
1118 } 1117 }
1119 1118
1120 /* PHY device interrupt signal active */ 1119 /* PHY device interrupt signal active */
1121 if (stat_r & NS_STAT_PHYI) { 1120 if (stat_r & NS_STAT_PHYI) {
1122 writel(NS_STAT_PHYI, card->membase + STAT); 1121 writel(NS_STAT_PHYI, card->membase + STAT);
1123 PRINTK("nicstar%d: PHY interrupt.\n", card->index); 1122 PRINTK("nicstar%d: PHY interrupt.\n", card->index);
1124 if (dev->phy && dev->phy->interrupt) { 1123 if (dev->phy && dev->phy->interrupt) {
1125 dev->phy->interrupt(dev); 1124 dev->phy->interrupt(dev);
1126 } 1125 }
1127 } 1126 }
1128 1127
1129 /* Small Buffer Queue is full */ 1128 /* Small Buffer Queue is full */
1130 if (stat_r & NS_STAT_SFBQF) { 1129 if (stat_r & NS_STAT_SFBQF) {
1131 writel(NS_STAT_SFBQF, card->membase + STAT); 1130 writel(NS_STAT_SFBQF, card->membase + STAT);
1132 printk("nicstar%d: Small free buffer queue is full.\n", 1131 printk("nicstar%d: Small free buffer queue is full.\n",
1133 card->index); 1132 card->index);
1134 } 1133 }
1135 1134
1136 /* Large Buffer Queue is full */ 1135 /* Large Buffer Queue is full */
1137 if (stat_r & NS_STAT_LFBQF) { 1136 if (stat_r & NS_STAT_LFBQF) {
1138 writel(NS_STAT_LFBQF, card->membase + STAT); 1137 writel(NS_STAT_LFBQF, card->membase + STAT);
1139 printk("nicstar%d: Large free buffer queue is full.\n", 1138 printk("nicstar%d: Large free buffer queue is full.\n",
1140 card->index); 1139 card->index);
1141 } 1140 }
1142 1141
1143 /* Receive Status Queue is full */ 1142 /* Receive Status Queue is full */
1144 if (stat_r & NS_STAT_RSQF) { 1143 if (stat_r & NS_STAT_RSQF) {
1145 writel(NS_STAT_RSQF, card->membase + STAT); 1144 writel(NS_STAT_RSQF, card->membase + STAT);
1146 printk("nicstar%d: RSQ full.\n", card->index); 1145 printk("nicstar%d: RSQ full.\n", card->index);
1147 process_rsq(card); 1146 process_rsq(card);
1148 } 1147 }
1149 1148
1150 /* Complete CS-PDU received */ 1149 /* Complete CS-PDU received */
1151 if (stat_r & NS_STAT_EOPDU) { 1150 if (stat_r & NS_STAT_EOPDU) {
1152 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); 1151 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1153 process_rsq(card); 1152 process_rsq(card);
1154 writel(NS_STAT_EOPDU, card->membase + STAT); 1153 writel(NS_STAT_EOPDU, card->membase + STAT);
1155 } 1154 }
1156 1155
1157 /* Raw cell received */ 1156 /* Raw cell received */
1158 if (stat_r & NS_STAT_RAWCF) { 1157 if (stat_r & NS_STAT_RAWCF) {
1159 writel(NS_STAT_RAWCF, card->membase + STAT); 1158 writel(NS_STAT_RAWCF, card->membase + STAT);
1160 #ifndef RCQ_SUPPORT 1159 #ifndef RCQ_SUPPORT
1161 printk("nicstar%d: Raw cell received and no support yet...\n", 1160 printk("nicstar%d: Raw cell received and no support yet...\n",
1162 card->index); 1161 card->index);
1163 #endif /* RCQ_SUPPORT */ 1162 #endif /* RCQ_SUPPORT */
1164 /* NOTE: the following procedure may keep a raw cell pending until the 1163 /* NOTE: the following procedure may keep a raw cell pending until the
1165 next interrupt. As this preliminary support is only meant to 1164 next interrupt. As this preliminary support is only meant to
1166 avoid buffer leakage, this is not an issue. */ 1165 avoid buffer leakage, this is not an issue. */
1167 while (readl(card->membase + RAWCT) != card->rawch) { 1166 while (readl(card->membase + RAWCT) != card->rawch) {
1168 1167
1169 if (ns_rcqe_islast(card->rawcell)) { 1168 if (ns_rcqe_islast(card->rawcell)) {
1170 struct sk_buff *oldbuf; 1169 struct sk_buff *oldbuf;
1171 1170
1172 oldbuf = card->rcbuf; 1171 oldbuf = card->rcbuf;
1173 card->rcbuf = idr_find(&card->idr, 1172 card->rcbuf = idr_find(&card->idr,
1174 ns_rcqe_nextbufhandle(card->rawcell)); 1173 ns_rcqe_nextbufhandle(card->rawcell));
1175 card->rawch = NS_PRV_DMA(card->rcbuf); 1174 card->rawch = NS_PRV_DMA(card->rcbuf);
1176 card->rawcell = (struct ns_rcqe *) 1175 card->rawcell = (struct ns_rcqe *)
1177 card->rcbuf->data; 1176 card->rcbuf->data;
1178 recycle_rx_buf(card, oldbuf); 1177 recycle_rx_buf(card, oldbuf);
1179 } else { 1178 } else {
1180 card->rawch += NS_RCQE_SIZE; 1179 card->rawch += NS_RCQE_SIZE;
1181 card->rawcell++; 1180 card->rawcell++;
1182 } 1181 }
1183 } 1182 }
1184 } 1183 }
1185 1184
1186 /* Small buffer queue is empty */ 1185 /* Small buffer queue is empty */
1187 if (stat_r & NS_STAT_SFBQE) { 1186 if (stat_r & NS_STAT_SFBQE) {
1188 int i; 1187 int i;
1189 struct sk_buff *sb; 1188 struct sk_buff *sb;
1190 1189
1191 writel(NS_STAT_SFBQE, card->membase + STAT); 1190 writel(NS_STAT_SFBQE, card->membase + STAT);
1192 printk("nicstar%d: Small free buffer queue empty.\n", 1191 printk("nicstar%d: Small free buffer queue empty.\n",
1193 card->index); 1192 card->index);
1194 for (i = 0; i < card->sbnr.min; i++) { 1193 for (i = 0; i < card->sbnr.min; i++) {
1195 sb = dev_alloc_skb(NS_SMSKBSIZE); 1194 sb = dev_alloc_skb(NS_SMSKBSIZE);
1196 if (sb == NULL) { 1195 if (sb == NULL) {
1197 writel(readl(card->membase + CFG) & 1196 writel(readl(card->membase + CFG) &
1198 ~NS_CFG_EFBIE, card->membase + CFG); 1197 ~NS_CFG_EFBIE, card->membase + CFG);
1199 card->efbie = 0; 1198 card->efbie = 0;
1200 break; 1199 break;
1201 } 1200 }
1202 NS_PRV_BUFTYPE(sb) = BUF_SM; 1201 NS_PRV_BUFTYPE(sb) = BUF_SM;
1203 skb_queue_tail(&card->sbpool.queue, sb); 1202 skb_queue_tail(&card->sbpool.queue, sb);
1204 skb_reserve(sb, NS_AAL0_HEADER); 1203 skb_reserve(sb, NS_AAL0_HEADER);
1205 push_rxbufs(card, sb); 1204 push_rxbufs(card, sb);
1206 } 1205 }
1207 card->sbfqc = i; 1206 card->sbfqc = i;
1208 process_rsq(card); 1207 process_rsq(card);
1209 } 1208 }
1210 1209
1211 /* Large buffer queue empty */ 1210 /* Large buffer queue empty */
1212 if (stat_r & NS_STAT_LFBQE) { 1211 if (stat_r & NS_STAT_LFBQE) {
1213 int i; 1212 int i;
1214 struct sk_buff *lb; 1213 struct sk_buff *lb;
1215 1214
1216 writel(NS_STAT_LFBQE, card->membase + STAT); 1215 writel(NS_STAT_LFBQE, card->membase + STAT);
1217 printk("nicstar%d: Large free buffer queue empty.\n", 1216 printk("nicstar%d: Large free buffer queue empty.\n",
1218 card->index); 1217 card->index);
1219 for (i = 0; i < card->lbnr.min; i++) { 1218 for (i = 0; i < card->lbnr.min; i++) {
1220 lb = dev_alloc_skb(NS_LGSKBSIZE); 1219 lb = dev_alloc_skb(NS_LGSKBSIZE);
1221 if (lb == NULL) { 1220 if (lb == NULL) {
1222 writel(readl(card->membase + CFG) & 1221 writel(readl(card->membase + CFG) &
1223 ~NS_CFG_EFBIE, card->membase + CFG); 1222 ~NS_CFG_EFBIE, card->membase + CFG);
1224 card->efbie = 0; 1223 card->efbie = 0;
1225 break; 1224 break;
1226 } 1225 }
1227 NS_PRV_BUFTYPE(lb) = BUF_LG; 1226 NS_PRV_BUFTYPE(lb) = BUF_LG;
1228 skb_queue_tail(&card->lbpool.queue, lb); 1227 skb_queue_tail(&card->lbpool.queue, lb);
1229 skb_reserve(lb, NS_SMBUFSIZE); 1228 skb_reserve(lb, NS_SMBUFSIZE);
1230 push_rxbufs(card, lb); 1229 push_rxbufs(card, lb);
1231 } 1230 }
1232 card->lbfqc = i; 1231 card->lbfqc = i;
1233 process_rsq(card); 1232 process_rsq(card);
1234 } 1233 }
1235 1234
1236 /* Receive Status Queue is 7/8 full */ 1235 /* Receive Status Queue is 7/8 full */
1237 if (stat_r & NS_STAT_RSQAF) { 1236 if (stat_r & NS_STAT_RSQAF) {
1238 writel(NS_STAT_RSQAF, card->membase + STAT); 1237 writel(NS_STAT_RSQAF, card->membase + STAT);
1239 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); 1238 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1240 process_rsq(card); 1239 process_rsq(card);
1241 } 1240 }
1242 1241
1243 spin_unlock_irqrestore(&card->int_lock, flags); 1242 spin_unlock_irqrestore(&card->int_lock, flags);
1244 PRINTK("nicstar%d: end of interrupt service\n", card->index); 1243 PRINTK("nicstar%d: end of interrupt service\n", card->index);
1245 return IRQ_HANDLED; 1244 return IRQ_HANDLED;
1246 } 1245 }
1247 1246
1248 static int ns_open(struct atm_vcc *vcc) 1247 static int ns_open(struct atm_vcc *vcc)
1249 { 1248 {
1250 ns_dev *card; 1249 ns_dev *card;
1251 vc_map *vc; 1250 vc_map *vc;
1252 unsigned long tmpl, modl; 1251 unsigned long tmpl, modl;
1253 int tcr, tcra; /* target cell rate, and absolute value */ 1252 int tcr, tcra; /* target cell rate, and absolute value */
1254 int n = 0; /* Number of entries in the TST. Initialized to remove 1253 int n = 0; /* Number of entries in the TST. Initialized to remove
1255 the compiler warning. */ 1254 the compiler warning. */
1256 u32 u32d[4]; 1255 u32 u32d[4];
1257 int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler 1256 int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler
1258 warning. How I wish compilers were clever enough to 1257 warning. How I wish compilers were clever enough to
1259 tell which variables can truly be used 1258 tell which variables can truly be used
1260 uninitialized... */ 1259 uninitialized... */
1261 int inuse; /* tx or rx vc already in use by another vcc */ 1260 int inuse; /* tx or rx vc already in use by another vcc */
1262 short vpi = vcc->vpi; 1261 short vpi = vcc->vpi;
1263 int vci = vcc->vci; 1262 int vci = vcc->vci;
1264 1263
1265 card = (ns_dev *) vcc->dev->dev_data; 1264 card = (ns_dev *) vcc->dev->dev_data;
1266 PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi, 1265 PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
1267 vci); 1266 vci);
1268 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { 1267 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1269 PRINTK("nicstar%d: unsupported AAL.\n", card->index); 1268 PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1270 return -EINVAL; 1269 return -EINVAL;
1271 } 1270 }
1272 1271
1273 vc = &(card->vcmap[vpi << card->vcibits | vci]); 1272 vc = &(card->vcmap[vpi << card->vcibits | vci]);
1274 vcc->dev_data = vc; 1273 vcc->dev_data = vc;
1275 1274
1276 inuse = 0; 1275 inuse = 0;
1277 if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) 1276 if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1278 inuse = 1; 1277 inuse = 1;
1279 if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) 1278 if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1280 inuse += 2; 1279 inuse += 2;
1281 if (inuse) { 1280 if (inuse) {
1282 printk("nicstar%d: %s vci already in use.\n", card->index, 1281 printk("nicstar%d: %s vci already in use.\n", card->index,
1283 inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); 1282 inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1284 return -EINVAL; 1283 return -EINVAL;
1285 } 1284 }
1286 1285
1287 set_bit(ATM_VF_ADDR, &vcc->flags); 1286 set_bit(ATM_VF_ADDR, &vcc->flags);
1288 1287
1289 /* NOTE: You are not allowed to modify an open connection's QOS. To change 1288 /* NOTE: You are not allowed to modify an open connection's QOS. To change
1290 that, remove the ATM_VF_PARTIAL flag checking. There may be other changes 1289 that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1291 needed to do that. */ 1290 needed to do that. */
1292 if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) { 1291 if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
1293 scq_info *scq; 1292 scq_info *scq;
1294 1293
1295 set_bit(ATM_VF_PARTIAL, &vcc->flags); 1294 set_bit(ATM_VF_PARTIAL, &vcc->flags);
1296 if (vcc->qos.txtp.traffic_class == ATM_CBR) { 1295 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1297 /* Check requested cell rate and availability of SCD */ 1296 /* Check requested cell rate and availability of SCD */
1298 if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 1297 if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
1299 && vcc->qos.txtp.min_pcr == 0) { 1298 && vcc->qos.txtp.min_pcr == 0) {
1300 PRINTK 1299 PRINTK
1301 ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", 1300 ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1302 card->index); 1301 card->index);
1303 clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1302 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1304 clear_bit(ATM_VF_ADDR, &vcc->flags); 1303 clear_bit(ATM_VF_ADDR, &vcc->flags);
1305 return -EINVAL; 1304 return -EINVAL;
1306 } 1305 }
1307 1306
1308 tcr = atm_pcr_goal(&(vcc->qos.txtp)); 1307 tcr = atm_pcr_goal(&(vcc->qos.txtp));
1309 tcra = tcr >= 0 ? tcr : -tcr; 1308 tcra = tcr >= 0 ? tcr : -tcr;
1310 1309
1311 PRINTK("nicstar%d: target cell rate = %d.\n", 1310 PRINTK("nicstar%d: target cell rate = %d.\n",
1312 card->index, vcc->qos.txtp.max_pcr); 1311 card->index, vcc->qos.txtp.max_pcr);
1313 1312
1314 tmpl = 1313 tmpl =
1315 (unsigned long)tcra *(unsigned long) 1314 (unsigned long)tcra *(unsigned long)
1316 NS_TST_NUM_ENTRIES; 1315 NS_TST_NUM_ENTRIES;
1317 modl = tmpl % card->max_pcr; 1316 modl = tmpl % card->max_pcr;
1318 1317
1319 n = (int)(tmpl / card->max_pcr); 1318 n = (int)(tmpl / card->max_pcr);
1320 if (tcr > 0) { 1319 if (tcr > 0) {
1321 if (modl > 0) 1320 if (modl > 0)
1322 n++; 1321 n++;
1323 } else if (tcr == 0) { 1322 } else if (tcr == 0) {
1324 if ((n = 1323 if ((n =
1325 (card->tst_free_entries - 1324 (card->tst_free_entries -
1326 NS_TST_RESERVED)) <= 0) { 1325 NS_TST_RESERVED)) <= 0) {
1327 PRINTK 1326 PRINTK
1328 ("nicstar%d: no CBR bandwidth free.\n", 1327 ("nicstar%d: no CBR bandwidth free.\n",
1329 card->index); 1328 card->index);
1330 clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1329 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1331 clear_bit(ATM_VF_ADDR, &vcc->flags); 1330 clear_bit(ATM_VF_ADDR, &vcc->flags);
1332 return -EINVAL; 1331 return -EINVAL;
1333 } 1332 }
1334 } 1333 }
1335 1334
1336 if (n == 0) { 1335 if (n == 0) {
1337 printk 1336 printk
1338 ("nicstar%d: selected bandwidth < granularity.\n", 1337 ("nicstar%d: selected bandwidth < granularity.\n",
1339 card->index); 1338 card->index);
1340 clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1339 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1341 clear_bit(ATM_VF_ADDR, &vcc->flags); 1340 clear_bit(ATM_VF_ADDR, &vcc->flags);
1342 return -EINVAL; 1341 return -EINVAL;
1343 } 1342 }
1344 1343
1345 if (n > (card->tst_free_entries - NS_TST_RESERVED)) { 1344 if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
1346 PRINTK 1345 PRINTK
1347 ("nicstar%d: not enough free CBR bandwidth.\n", 1346 ("nicstar%d: not enough free CBR bandwidth.\n",
1348 card->index); 1347 card->index);
1349 clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1348 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1350 clear_bit(ATM_VF_ADDR, &vcc->flags); 1349 clear_bit(ATM_VF_ADDR, &vcc->flags);
1351 return -EINVAL; 1350 return -EINVAL;
1352 } else 1351 } else
1353 card->tst_free_entries -= n; 1352 card->tst_free_entries -= n;
1354 1353
1355 XPRINTK("nicstar%d: writing %d tst entries.\n", 1354 XPRINTK("nicstar%d: writing %d tst entries.\n",
1356 card->index, n); 1355 card->index, n);
1357 for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) { 1356 for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
1358 if (card->scd2vc[frscdi] == NULL) { 1357 if (card->scd2vc[frscdi] == NULL) {
1359 card->scd2vc[frscdi] = vc; 1358 card->scd2vc[frscdi] = vc;
1360 break; 1359 break;
1361 } 1360 }
1362 } 1361 }
1363 if (frscdi == NS_FRSCD_NUM) { 1362 if (frscdi == NS_FRSCD_NUM) {
1364 PRINTK 1363 PRINTK
1365 ("nicstar%d: no SCD available for CBR channel.\n", 1364 ("nicstar%d: no SCD available for CBR channel.\n",
1366 card->index); 1365 card->index);
1367 card->tst_free_entries += n; 1366 card->tst_free_entries += n;
1368 clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1367 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1369 clear_bit(ATM_VF_ADDR, &vcc->flags); 1368 clear_bit(ATM_VF_ADDR, &vcc->flags);
1370 return -EBUSY; 1369 return -EBUSY;
1371 } 1370 }
1372 1371
1373 vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; 1372 vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1374 1373
1375 scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd); 1374 scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
1376 if (scq == NULL) { 1375 if (scq == NULL) {
1377 PRINTK("nicstar%d: can't get fixed rate SCQ.\n", 1376 PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
1378 card->index); 1377 card->index);
1379 card->scd2vc[frscdi] = NULL; 1378 card->scd2vc[frscdi] = NULL;
1380 card->tst_free_entries += n; 1379 card->tst_free_entries += n;
1381 clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1380 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1382 clear_bit(ATM_VF_ADDR, &vcc->flags); 1381 clear_bit(ATM_VF_ADDR, &vcc->flags);
1383 return -ENOMEM; 1382 return -ENOMEM;
1384 } 1383 }
1385 vc->scq = scq; 1384 vc->scq = scq;
1386 u32d[0] = scq_virt_to_bus(scq, scq->base); 1385 u32d[0] = scq_virt_to_bus(scq, scq->base);
1387 u32d[1] = (u32) 0x00000000; 1386 u32d[1] = (u32) 0x00000000;
1388 u32d[2] = (u32) 0xffffffff; 1387 u32d[2] = (u32) 0xffffffff;
1389 u32d[3] = (u32) 0x00000000; 1388 u32d[3] = (u32) 0x00000000;
1390 ns_write_sram(card, vc->cbr_scd, u32d, 4); 1389 ns_write_sram(card, vc->cbr_scd, u32d, 4);
1391 1390
1392 fill_tst(card, n, vc); 1391 fill_tst(card, n, vc);
1393 } else if (vcc->qos.txtp.traffic_class == ATM_UBR) { 1392 } else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
1394 vc->cbr_scd = 0x00000000; 1393 vc->cbr_scd = 0x00000000;
1395 vc->scq = card->scq0; 1394 vc->scq = card->scq0;
1396 } 1395 }
1397 1396
1398 if (vcc->qos.txtp.traffic_class != ATM_NONE) { 1397 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1399 vc->tx = 1; 1398 vc->tx = 1;
1400 vc->tx_vcc = vcc; 1399 vc->tx_vcc = vcc;
1401 vc->tbd_count = 0; 1400 vc->tbd_count = 0;
1402 } 1401 }
1403 if (vcc->qos.rxtp.traffic_class != ATM_NONE) { 1402 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1404 u32 status; 1403 u32 status;
1405 1404
1406 vc->rx = 1; 1405 vc->rx = 1;
1407 vc->rx_vcc = vcc; 1406 vc->rx_vcc = vcc;
1408 vc->rx_iov = NULL; 1407 vc->rx_iov = NULL;
1409 1408
1410 /* Open the connection in hardware */ 1409 /* Open the connection in hardware */
1411 if (vcc->qos.aal == ATM_AAL5) 1410 if (vcc->qos.aal == ATM_AAL5)
1412 status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; 1411 status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1413 else /* vcc->qos.aal == ATM_AAL0 */ 1412 else /* vcc->qos.aal == ATM_AAL0 */
1414 status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; 1413 status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1415 #ifdef RCQ_SUPPORT 1414 #ifdef RCQ_SUPPORT
1416 status |= NS_RCTE_RAWCELLINTEN; 1415 status |= NS_RCTE_RAWCELLINTEN;
1417 #endif /* RCQ_SUPPORT */ 1416 #endif /* RCQ_SUPPORT */
1418 ns_write_sram(card, 1417 ns_write_sram(card,
1419 NS_RCT + 1418 NS_RCT +
1420 (vpi << card->vcibits | vci) * 1419 (vpi << card->vcibits | vci) *
1421 NS_RCT_ENTRY_SIZE, &status, 1); 1420 NS_RCT_ENTRY_SIZE, &status, 1);
1422 } 1421 }
1423 1422
1424 } 1423 }
1425 1424
1426 set_bit(ATM_VF_READY, &vcc->flags); 1425 set_bit(ATM_VF_READY, &vcc->flags);
1427 return 0; 1426 return 0;
1428 } 1427 }
1429 1428
1430 static void ns_close(struct atm_vcc *vcc) 1429 static void ns_close(struct atm_vcc *vcc)
1431 { 1430 {
1432 vc_map *vc; 1431 vc_map *vc;
1433 ns_dev *card; 1432 ns_dev *card;
1434 u32 data; 1433 u32 data;
1435 int i; 1434 int i;
1436 1435
1437 vc = vcc->dev_data; 1436 vc = vcc->dev_data;
1438 card = vcc->dev->dev_data; 1437 card = vcc->dev->dev_data;
1439 PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, 1438 PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1440 (int)vcc->vpi, vcc->vci); 1439 (int)vcc->vpi, vcc->vci);
1441 1440
1442 clear_bit(ATM_VF_READY, &vcc->flags); 1441 clear_bit(ATM_VF_READY, &vcc->flags);
1443 1442
1444 if (vcc->qos.rxtp.traffic_class != ATM_NONE) { 1443 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1445 u32 addr; 1444 u32 addr;
1446 unsigned long flags; 1445 unsigned long flags;
1447 1446
1448 addr = 1447 addr =
1449 NS_RCT + 1448 NS_RCT +
1450 (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; 1449 (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1451 spin_lock_irqsave(&card->res_lock, flags); 1450 spin_lock_irqsave(&card->res_lock, flags);
1452 while (CMD_BUSY(card)) ; 1451 while (CMD_BUSY(card)) ;
1453 writel(NS_CMD_CLOSE_CONNECTION | addr << 2, 1452 writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
1454 card->membase + CMD); 1453 card->membase + CMD);
1455 spin_unlock_irqrestore(&card->res_lock, flags); 1454 spin_unlock_irqrestore(&card->res_lock, flags);
1456 1455
1457 vc->rx = 0; 1456 vc->rx = 0;
1458 if (vc->rx_iov != NULL) { 1457 if (vc->rx_iov != NULL) {
1459 struct sk_buff *iovb; 1458 struct sk_buff *iovb;
1460 u32 stat; 1459 u32 stat;
1461 1460
1462 stat = readl(card->membase + STAT); 1461 stat = readl(card->membase + STAT);
1463 card->sbfqc = ns_stat_sfbqc_get(stat); 1462 card->sbfqc = ns_stat_sfbqc_get(stat);
1464 card->lbfqc = ns_stat_lfbqc_get(stat); 1463 card->lbfqc = ns_stat_lfbqc_get(stat);
1465 1464
1466 PRINTK 1465 PRINTK
1467 ("nicstar%d: closing a VC with pending rx buffers.\n", 1466 ("nicstar%d: closing a VC with pending rx buffers.\n",
1468 card->index); 1467 card->index);
1469 iovb = vc->rx_iov; 1468 iovb = vc->rx_iov;
1470 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, 1469 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
1471 NS_PRV_IOVCNT(iovb)); 1470 NS_PRV_IOVCNT(iovb));
1472 NS_PRV_IOVCNT(iovb) = 0; 1471 NS_PRV_IOVCNT(iovb) = 0;
1473 spin_lock_irqsave(&card->int_lock, flags); 1472 spin_lock_irqsave(&card->int_lock, flags);
1474 recycle_iov_buf(card, iovb); 1473 recycle_iov_buf(card, iovb);
1475 spin_unlock_irqrestore(&card->int_lock, flags); 1474 spin_unlock_irqrestore(&card->int_lock, flags);
1476 vc->rx_iov = NULL; 1475 vc->rx_iov = NULL;
1477 } 1476 }
1478 } 1477 }
1479 1478
1480 if (vcc->qos.txtp.traffic_class != ATM_NONE) { 1479 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1481 vc->tx = 0; 1480 vc->tx = 0;
1482 } 1481 }
1483 1482
1484 if (vcc->qos.txtp.traffic_class == ATM_CBR) { 1483 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1485 unsigned long flags; 1484 unsigned long flags;
1486 ns_scqe *scqep; 1485 ns_scqe *scqep;
1487 scq_info *scq; 1486 scq_info *scq;
1488 1487
1489 scq = vc->scq; 1488 scq = vc->scq;
1490 1489
1491 for (;;) { 1490 for (;;) {
1492 spin_lock_irqsave(&scq->lock, flags); 1491 spin_lock_irqsave(&scq->lock, flags);
1493 scqep = scq->next; 1492 scqep = scq->next;
1494 if (scqep == scq->base) 1493 if (scqep == scq->base)
1495 scqep = scq->last; 1494 scqep = scq->last;
1496 else 1495 else
1497 scqep--; 1496 scqep--;
1498 if (scqep == scq->tail) { 1497 if (scqep == scq->tail) {
1499 spin_unlock_irqrestore(&scq->lock, flags); 1498 spin_unlock_irqrestore(&scq->lock, flags);
1500 break; 1499 break;
1501 } 1500 }
1502 /* If the last entry is not a TSR, place one in the SCQ in order to 1501 /* If the last entry is not a TSR, place one in the SCQ in order to
1503 be able to completely drain it and then close. */ 1502 be able to completely drain it and then close. */
1504 if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { 1503 if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
1505 ns_scqe tsr; 1504 ns_scqe tsr;
1506 u32 scdi, scqi; 1505 u32 scdi, scqi;
1507 u32 data; 1506 u32 data;
1508 int index; 1507 int index;
1509 1508
1510 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); 1509 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1511 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; 1510 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1512 scqi = scq->next - scq->base; 1511 scqi = scq->next - scq->base;
1513 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); 1512 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1514 tsr.word_3 = 0x00000000; 1513 tsr.word_3 = 0x00000000;
1515 tsr.word_4 = 0x00000000; 1514 tsr.word_4 = 0x00000000;
1516 *scq->next = tsr; 1515 *scq->next = tsr;
1517 index = (int)scqi; 1516 index = (int)scqi;
1518 scq->skb[index] = NULL; 1517 scq->skb[index] = NULL;
1519 if (scq->next == scq->last) 1518 if (scq->next == scq->last)
1520 scq->next = scq->base; 1519 scq->next = scq->base;
1521 else 1520 else
1522 scq->next++; 1521 scq->next++;
1523 data = scq_virt_to_bus(scq, scq->next); 1522 data = scq_virt_to_bus(scq, scq->next);
1524 ns_write_sram(card, scq->scd, &data, 1); 1523 ns_write_sram(card, scq->scd, &data, 1);
1525 } 1524 }
1526 spin_unlock_irqrestore(&scq->lock, flags); 1525 spin_unlock_irqrestore(&scq->lock, flags);
1527 schedule(); 1526 schedule();
1528 } 1527 }
1529 1528
1530 /* Free all TST entries */ 1529 /* Free all TST entries */
1531 data = NS_TST_OPCODE_VARIABLE; 1530 data = NS_TST_OPCODE_VARIABLE;
1532 for (i = 0; i < NS_TST_NUM_ENTRIES; i++) { 1531 for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
1533 if (card->tste2vc[i] == vc) { 1532 if (card->tste2vc[i] == vc) {
1534 ns_write_sram(card, card->tst_addr + i, &data, 1533 ns_write_sram(card, card->tst_addr + i, &data,
1535 1); 1534 1);
1536 card->tste2vc[i] = NULL; 1535 card->tste2vc[i] = NULL;
1537 card->tst_free_entries++; 1536 card->tst_free_entries++;
1538 } 1537 }
1539 } 1538 }
1540 1539
1541 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; 1540 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1542 free_scq(card, vc->scq, vcc); 1541 free_scq(card, vc->scq, vcc);
1543 } 1542 }
1544 1543
1545 /* remove all references to vcc before deleting it */ 1544 /* remove all references to vcc before deleting it */
1546 if (vcc->qos.txtp.traffic_class != ATM_NONE) { 1545 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1547 unsigned long flags; 1546 unsigned long flags;
1548 scq_info *scq = card->scq0; 1547 scq_info *scq = card->scq0;
1549 1548
1550 spin_lock_irqsave(&scq->lock, flags); 1549 spin_lock_irqsave(&scq->lock, flags);
1551 1550
1552 for (i = 0; i < scq->num_entries; i++) { 1551 for (i = 0; i < scq->num_entries; i++) {
1553 if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { 1552 if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
1554 ATM_SKB(scq->skb[i])->vcc = NULL; 1553 ATM_SKB(scq->skb[i])->vcc = NULL;
1555 atm_return(vcc, scq->skb[i]->truesize); 1554 atm_return(vcc, scq->skb[i]->truesize);
1556 PRINTK 1555 PRINTK
1557 ("nicstar: deleted pending vcc mapping\n"); 1556 ("nicstar: deleted pending vcc mapping\n");
1558 } 1557 }
1559 } 1558 }
1560 1559
1561 spin_unlock_irqrestore(&scq->lock, flags); 1560 spin_unlock_irqrestore(&scq->lock, flags);
1562 } 1561 }
1563 1562
1564 vcc->dev_data = NULL; 1563 vcc->dev_data = NULL;
1565 clear_bit(ATM_VF_PARTIAL, &vcc->flags); 1564 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1566 clear_bit(ATM_VF_ADDR, &vcc->flags); 1565 clear_bit(ATM_VF_ADDR, &vcc->flags);
1567 1566
1568 #ifdef RX_DEBUG 1567 #ifdef RX_DEBUG
1569 { 1568 {
1570 u32 stat, cfg; 1569 u32 stat, cfg;
1571 stat = readl(card->membase + STAT); 1570 stat = readl(card->membase + STAT);
1572 cfg = readl(card->membase + CFG); 1571 cfg = readl(card->membase + CFG);
1573 printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); 1572 printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
1574 printk 1573 printk
1575 ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n", 1574 ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n",
1576 card->tsq.base, card->tsq.next, 1575 card->tsq.base, card->tsq.next,
1577 card->tsq.last, readl(card->membase + TSQT)); 1576 card->tsq.last, readl(card->membase + TSQT));
1578 printk 1577 printk
1579 ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n", 1578 ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n",
1580 card->rsq.base, card->rsq.next, 1579 card->rsq.base, card->rsq.next,
1581 card->rsq.last, readl(card->membase + RSQT)); 1580 card->rsq.last, readl(card->membase + RSQT));
1582 printk("Empty free buffer queue interrupt %s \n", 1581 printk("Empty free buffer queue interrupt %s \n",
1583 card->efbie ? "enabled" : "disabled"); 1582 card->efbie ? "enabled" : "disabled");
1584 printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", 1583 printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
1585 ns_stat_sfbqc_get(stat), card->sbpool.count, 1584 ns_stat_sfbqc_get(stat), card->sbpool.count,
1586 ns_stat_lfbqc_get(stat), card->lbpool.count); 1585 ns_stat_lfbqc_get(stat), card->lbpool.count);
1587 printk("hbpool.count = %d iovpool.count = %d \n", 1586 printk("hbpool.count = %d iovpool.count = %d \n",
1588 card->hbpool.count, card->iovpool.count); 1587 card->hbpool.count, card->iovpool.count);
1589 } 1588 }
1590 #endif /* RX_DEBUG */ 1589 #endif /* RX_DEBUG */
1591 } 1590 }
1592 1591
1593 static void fill_tst(ns_dev * card, int n, vc_map * vc) 1592 static void fill_tst(ns_dev * card, int n, vc_map * vc)
1594 { 1593 {
1595 u32 new_tst; 1594 u32 new_tst;
1596 unsigned long cl; 1595 unsigned long cl;
1597 int e, r; 1596 int e, r;
1598 u32 data; 1597 u32 data;
1599 1598
1600 /* It would be very complicated to keep the two TSTs synchronized while 1599 /* It would be very complicated to keep the two TSTs synchronized while
1601 assuring that writes are only made to the inactive TST. So, for now I 1600 assuring that writes are only made to the inactive TST. So, for now I
1602 will use only one TST. If problems occur, I will change this again */ 1601 will use only one TST. If problems occur, I will change this again */
1603 1602
1604 new_tst = card->tst_addr; 1603 new_tst = card->tst_addr;
1605 1604
1606 /* Fill procedure */ 1605 /* Fill procedure */
1607 1606
1608 for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { 1607 for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
1609 if (card->tste2vc[e] == NULL) 1608 if (card->tste2vc[e] == NULL)
1610 break; 1609 break;
1611 } 1610 }
1612 if (e == NS_TST_NUM_ENTRIES) { 1611 if (e == NS_TST_NUM_ENTRIES) {
1613 printk("nicstar%d: No free TST entries found. \n", card->index); 1612 printk("nicstar%d: No free TST entries found. \n", card->index);
1614 return; 1613 return;
1615 } 1614 }
1616 1615
1617 r = n; 1616 r = n;
1618 cl = NS_TST_NUM_ENTRIES; 1617 cl = NS_TST_NUM_ENTRIES;
1619 data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); 1618 data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1620 1619
1621 while (r > 0) { 1620 while (r > 0) {
1622 if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { 1621 if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
1623 card->tste2vc[e] = vc; 1622 card->tste2vc[e] = vc;
1624 ns_write_sram(card, new_tst + e, &data, 1); 1623 ns_write_sram(card, new_tst + e, &data, 1);
1625 cl -= NS_TST_NUM_ENTRIES; 1624 cl -= NS_TST_NUM_ENTRIES;
1626 r--; 1625 r--;
1627 } 1626 }
1628 1627
1629 if (++e == NS_TST_NUM_ENTRIES) { 1628 if (++e == NS_TST_NUM_ENTRIES) {
1630 e = 0; 1629 e = 0;
1631 } 1630 }
1632 cl += n; 1631 cl += n;
1633 } 1632 }
1634 1633
1635 /* End of fill procedure */ 1634 /* End of fill procedure */
1636 1635
1637 data = ns_tste_make(NS_TST_OPCODE_END, new_tst); 1636 data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1638 ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); 1637 ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1639 ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); 1638 ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1640 card->tst_addr = new_tst; 1639 card->tst_addr = new_tst;
1641 } 1640 }
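The claim loop in fill_tst() spreads the n requested CBR entries as evenly as it can across the schedule table, error-diffusion style: cl gains n for every slot visited, and an entry is written each time cl wraps past NS_TST_NUM_ENTRIES. A tiny stand-alone trace (hypothetical 8-entry table with n = 3 and every slot free; not driver code) shows the resulting pattern, slots 0, 3 and 6:

#include <stdio.h>

int main(void)
{
	const int T = 8, n = 3;		/* stand-ins for NS_TST_NUM_ENTRIES and n */
	int e = 0, r = n;
	long cl = T;

	while (r > 0) {
		if (cl >= T) {		/* table assumed empty, so every slot is free */
			printf("claim TST entry %d\n", e);
			cl -= T;
			r--;
		}
		if (++e == T)
			e = 0;
		cl += n;
	}
	return 0;
}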
1642 1641
1643 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) 1642 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1644 { 1643 {
1645 ns_dev *card; 1644 ns_dev *card;
1646 vc_map *vc; 1645 vc_map *vc;
1647 scq_info *scq; 1646 scq_info *scq;
1648 unsigned long buflen; 1647 unsigned long buflen;
1649 ns_scqe scqe; 1648 ns_scqe scqe;
1650 u32 flags; /* TBD flags, not CPU flags */ 1649 u32 flags; /* TBD flags, not CPU flags */
1651 1650
1652 card = vcc->dev->dev_data; 1651 card = vcc->dev->dev_data;
1653 TXPRINTK("nicstar%d: ns_send() called.\n", card->index); 1652 TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1654 if ((vc = (vc_map *) vcc->dev_data) == NULL) { 1653 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
1655 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", 1654 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
1656 card->index); 1655 card->index);
1657 atomic_inc(&vcc->stats->tx_err); 1656 atomic_inc(&vcc->stats->tx_err);
1658 dev_kfree_skb_any(skb); 1657 dev_kfree_skb_any(skb);
1659 return -EINVAL; 1658 return -EINVAL;
1660 } 1659 }
1661 1660
1662 if (!vc->tx) { 1661 if (!vc->tx) {
1663 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", 1662 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
1664 card->index); 1663 card->index);
1665 atomic_inc(&vcc->stats->tx_err); 1664 atomic_inc(&vcc->stats->tx_err);
1666 dev_kfree_skb_any(skb); 1665 dev_kfree_skb_any(skb);
1667 return -EINVAL; 1666 return -EINVAL;
1668 } 1667 }
1669 1668
1670 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { 1669 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1671 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", 1670 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
1672 card->index); 1671 card->index);
1673 atomic_inc(&vcc->stats->tx_err); 1672 atomic_inc(&vcc->stats->tx_err);
1674 dev_kfree_skb_any(skb); 1673 dev_kfree_skb_any(skb);
1675 return -EINVAL; 1674 return -EINVAL;
1676 } 1675 }
1677 1676
1678 if (skb_shinfo(skb)->nr_frags != 0) { 1677 if (skb_shinfo(skb)->nr_frags != 0) {
1679 printk("nicstar%d: No scatter-gather yet.\n", card->index); 1678 printk("nicstar%d: No scatter-gather yet.\n", card->index);
1680 atomic_inc(&vcc->stats->tx_err); 1679 atomic_inc(&vcc->stats->tx_err);
1681 dev_kfree_skb_any(skb); 1680 dev_kfree_skb_any(skb);
1682 return -EINVAL; 1681 return -EINVAL;
1683 } 1682 }
1684 1683
1685 ATM_SKB(skb)->vcc = vcc; 1684 ATM_SKB(skb)->vcc = vcc;
1686 1685
1687 NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data, 1686 NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
1688 skb->len, PCI_DMA_TODEVICE); 1687 skb->len, PCI_DMA_TODEVICE);
1689 1688
1690 if (vcc->qos.aal == ATM_AAL5) { 1689 if (vcc->qos.aal == ATM_AAL5) {
1691 buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ 1690 buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
1692 flags = NS_TBD_AAL5; 1691 flags = NS_TBD_AAL5;
1693 scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb)); 1692 scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
1694 scqe.word_3 = cpu_to_le32(skb->len); 1693 scqe.word_3 = cpu_to_le32(skb->len);
1695 scqe.word_4 = 1694 scqe.word_4 =
1696 ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, 1695 ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
1697 ATM_SKB(skb)-> 1696 ATM_SKB(skb)->
1698 atm_options & ATM_ATMOPT_CLP ? 1 : 0); 1697 atm_options & ATM_ATMOPT_CLP ? 1 : 0);
1699 flags |= NS_TBD_EOPDU; 1698 flags |= NS_TBD_EOPDU;
1700 } else { /* (vcc->qos.aal == ATM_AAL0) */ 1699 } else { /* (vcc->qos.aal == ATM_AAL0) */
1701 1700
1702 buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ 1701 buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */
1703 flags = NS_TBD_AAL0; 1702 flags = NS_TBD_AAL0;
1704 scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER); 1703 scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
1705 scqe.word_3 = cpu_to_le32(0x00000000); 1704 scqe.word_3 = cpu_to_le32(0x00000000);
1706 if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ 1705 if (*skb->data & 0x02) /* Payload type 1 - end of pdu */
1707 flags |= NS_TBD_EOPDU; 1706 flags |= NS_TBD_EOPDU;
1708 scqe.word_4 = 1707 scqe.word_4 =
1709 cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); 1708 cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
1710 /* Force the VPI/VCI to be the same as in VCC struct */ 1709 /* Force the VPI/VCI to be the same as in VCC struct */
1711 scqe.word_4 |= 1710 scqe.word_4 |=
1712 cpu_to_le32((((u32) vcc-> 1711 cpu_to_le32((((u32) vcc->
1713 vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc-> 1712 vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc->
1714 vci) << 1713 vci) <<
1715 NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK); 1714 NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK);
1716 } 1715 }
1717 1716
1718 if (vcc->qos.txtp.traffic_class == ATM_CBR) { 1717 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1719 scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); 1718 scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1720 scq = ((vc_map *) vcc->dev_data)->scq; 1719 scq = ((vc_map *) vcc->dev_data)->scq;
1721 } else { 1720 } else {
1722 scqe.word_1 = 1721 scqe.word_1 =
1723 ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); 1722 ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1724 scq = card->scq0; 1723 scq = card->scq0;
1725 } 1724 }
1726 1725
1727 if (push_scqe(card, vc, scq, &scqe, skb) != 0) { 1726 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
1728 atomic_inc(&vcc->stats->tx_err); 1727 atomic_inc(&vcc->stats->tx_err);
1729 dev_kfree_skb_any(skb); 1728 dev_kfree_skb_any(skb);
1730 return -EIO; 1729 return -EIO;
1731 } 1730 }
1732 atomic_inc(&vcc->stats->tx); 1731 atomic_inc(&vcc->stats->tx);
1733 1732
1734 return 0; 1733 return 0;
1735 } 1734 }
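For AAL5, the buflen computation in ns_send() pads the PDU to a whole number of 48-byte cell payloads after accounting for the 8-byte AAL5 trailer. A quick stand-alone check of that arithmetic (illustrative; len stands in for skb->len):

#include <stdio.h>

static unsigned long aal5_buflen(unsigned long len)
{
	return (len + 47 + 8) / 48 * 48;	/* same formula as ns_send() */
}

int main(void)
{
	printf("%lu\n", aal5_buflen(40));	/* 48:  40 + 8 fits in one cell  */
	printf("%lu\n", aal5_buflen(41));	/* 96:  the trailer forces a 2nd */
	printf("%lu\n", aal5_buflen(100));	/* 144: three cell payloads      */
	return 0;
}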
1736 1735
1737 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, 1736 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
1738 struct sk_buff *skb) 1737 struct sk_buff *skb)
1739 { 1738 {
1740 unsigned long flags; 1739 unsigned long flags;
1741 ns_scqe tsr; 1740 ns_scqe tsr;
1742 u32 scdi, scqi; 1741 u32 scdi, scqi;
1743 int scq_is_vbr; 1742 int scq_is_vbr;
1744 u32 data; 1743 u32 data;
1745 int index; 1744 int index;
1746 1745
1747 spin_lock_irqsave(&scq->lock, flags); 1746 spin_lock_irqsave(&scq->lock, flags);
1748 while (scq->tail == scq->next) { 1747 while (scq->tail == scq->next) {
1749 if (in_interrupt()) { 1748 if (in_interrupt()) {
1750 spin_unlock_irqrestore(&scq->lock, flags); 1749 spin_unlock_irqrestore(&scq->lock, flags);
1751 printk("nicstar%d: Error pushing TBD.\n", card->index); 1750 printk("nicstar%d: Error pushing TBD.\n", card->index);
1752 return 1; 1751 return 1;
1753 } 1752 }
1754 1753
1755 scq->full = 1; 1754 scq->full = 1;
1756 spin_unlock_irqrestore(&scq->lock, flags); 1755 spin_unlock_irqrestore(&scq->lock, flags);
1757 interruptible_sleep_on_timeout(&scq->scqfull_waitq, 1756 interruptible_sleep_on_timeout(&scq->scqfull_waitq,
1758 SCQFULL_TIMEOUT); 1757 SCQFULL_TIMEOUT);
1759 spin_lock_irqsave(&scq->lock, flags); 1758 spin_lock_irqsave(&scq->lock, flags);
1760 1759
1761 if (scq->full) { 1760 if (scq->full) {
1762 spin_unlock_irqrestore(&scq->lock, flags); 1761 spin_unlock_irqrestore(&scq->lock, flags);
1763 printk("nicstar%d: Timeout pushing TBD.\n", 1762 printk("nicstar%d: Timeout pushing TBD.\n",
1764 card->index); 1763 card->index);
1765 return 1; 1764 return 1;
1766 } 1765 }
1767 } 1766 }
1768 *scq->next = *tbd; 1767 *scq->next = *tbd;
1769 index = (int)(scq->next - scq->base); 1768 index = (int)(scq->next - scq->base);
1770 scq->skb[index] = skb; 1769 scq->skb[index] = skb;
1771 XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n", 1770 XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
1772 card->index, skb, index); 1771 card->index, skb, index);
1773 XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", 1772 XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1774 card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), 1773 card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
1775 le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), 1774 le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
1776 scq->next); 1775 scq->next);
1777 if (scq->next == scq->last) 1776 if (scq->next == scq->last)
1778 scq->next = scq->base; 1777 scq->next = scq->base;
1779 else 1778 else
1780 scq->next++; 1779 scq->next++;
1781 1780
1782 vc->tbd_count++; 1781 vc->tbd_count++;
1783 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { 1782 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
1784 scq->tbd_count++; 1783 scq->tbd_count++;
1785 scq_is_vbr = 1; 1784 scq_is_vbr = 1;
1786 } else 1785 } else
1787 scq_is_vbr = 0; 1786 scq_is_vbr = 0;
1788 1787
1789 if (vc->tbd_count >= MAX_TBD_PER_VC 1788 if (vc->tbd_count >= MAX_TBD_PER_VC
1790 || scq->tbd_count >= MAX_TBD_PER_SCQ) { 1789 || scq->tbd_count >= MAX_TBD_PER_SCQ) {
1791 int has_run = 0; 1790 int has_run = 0;
1792 1791
1793 while (scq->tail == scq->next) { 1792 while (scq->tail == scq->next) {
1794 if (in_interrupt()) { 1793 if (in_interrupt()) {
1795 data = scq_virt_to_bus(scq, scq->next); 1794 data = scq_virt_to_bus(scq, scq->next);
1796 ns_write_sram(card, scq->scd, &data, 1); 1795 ns_write_sram(card, scq->scd, &data, 1);
1797 spin_unlock_irqrestore(&scq->lock, flags); 1796 spin_unlock_irqrestore(&scq->lock, flags);
1798 printk("nicstar%d: Error pushing TSR.\n", 1797 printk("nicstar%d: Error pushing TSR.\n",
1799 card->index); 1798 card->index);
1800 return 0; 1799 return 0;
1801 } 1800 }
1802 1801
1803 scq->full = 1; 1802 scq->full = 1;
1804 if (has_run++) 1803 if (has_run++)
1805 break; 1804 break;
1806 spin_unlock_irqrestore(&scq->lock, flags); 1805 spin_unlock_irqrestore(&scq->lock, flags);
1807 interruptible_sleep_on_timeout(&scq->scqfull_waitq, 1806 interruptible_sleep_on_timeout(&scq->scqfull_waitq,
1808 SCQFULL_TIMEOUT); 1807 SCQFULL_TIMEOUT);
1809 spin_lock_irqsave(&scq->lock, flags); 1808 spin_lock_irqsave(&scq->lock, flags);
1810 } 1809 }
1811 1810
1812 if (!scq->full) { 1811 if (!scq->full) {
1813 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); 1812 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1814 if (scq_is_vbr) 1813 if (scq_is_vbr)
1815 scdi = NS_TSR_SCDISVBR; 1814 scdi = NS_TSR_SCDISVBR;
1816 else 1815 else
1817 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; 1816 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1818 scqi = scq->next - scq->base; 1817 scqi = scq->next - scq->base;
1819 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); 1818 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1820 tsr.word_3 = 0x00000000; 1819 tsr.word_3 = 0x00000000;
1821 tsr.word_4 = 0x00000000; 1820 tsr.word_4 = 0x00000000;
1822 1821
1823 *scq->next = tsr; 1822 *scq->next = tsr;
1824 index = (int)scqi; 1823 index = (int)scqi;
1825 scq->skb[index] = NULL; 1824 scq->skb[index] = NULL;
1826 XPRINTK 1825 XPRINTK
1827 ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", 1826 ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1828 card->index, le32_to_cpu(tsr.word_1), 1827 card->index, le32_to_cpu(tsr.word_1),
1829 le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), 1828 le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
1830 le32_to_cpu(tsr.word_4), scq->next); 1829 le32_to_cpu(tsr.word_4), scq->next);
1831 if (scq->next == scq->last) 1830 if (scq->next == scq->last)
1832 scq->next = scq->base; 1831 scq->next = scq->base;
1833 else 1832 else
1834 scq->next++; 1833 scq->next++;
1835 vc->tbd_count = 0; 1834 vc->tbd_count = 0;
1836 scq->tbd_count = 0; 1835 scq->tbd_count = 0;
1837 } else 1836 } else
1838 PRINTK("nicstar%d: Timeout pushing TSR.\n", 1837 PRINTK("nicstar%d: Timeout pushing TSR.\n",
1839 card->index); 1838 card->index);
1840 } 1839 }
1841 data = scq_virt_to_bus(scq, scq->next); 1840 data = scq_virt_to_bus(scq, scq->next);
1842 ns_write_sram(card, scq->scd, &data, 1); 1841 ns_write_sram(card, scq->scd, &data, 1);
1843 1842
1844 spin_unlock_irqrestore(&scq->lock, flags); 1843 spin_unlock_irqrestore(&scq->lock, flags);
1845 1844
1846 return 0; 1845 return 0;
1847 } 1846 }
1848 1847
1849 static void process_tsq(ns_dev * card) 1848 static void process_tsq(ns_dev * card)
1850 { 1849 {
1851 u32 scdi; 1850 u32 scdi;
1852 scq_info *scq; 1851 scq_info *scq;
1853 ns_tsi *previous = NULL, *one_ahead, *two_ahead; 1852 ns_tsi *previous = NULL, *one_ahead, *two_ahead;
1854 int serviced_entries; /* flag indicating at least one entry was serviced */ 1853 int serviced_entries; /* flag indicating at least one entry was serviced */
1855 1854
1856 serviced_entries = 0; 1855 serviced_entries = 0;
1857 1856
1858 if (card->tsq.next == card->tsq.last) 1857 if (card->tsq.next == card->tsq.last)
1859 one_ahead = card->tsq.base; 1858 one_ahead = card->tsq.base;
1860 else 1859 else
1861 one_ahead = card->tsq.next + 1; 1860 one_ahead = card->tsq.next + 1;
1862 1861
1863 if (one_ahead == card->tsq.last) 1862 if (one_ahead == card->tsq.last)
1864 two_ahead = card->tsq.base; 1863 two_ahead = card->tsq.base;
1865 else 1864 else
1866 two_ahead = one_ahead + 1; 1865 two_ahead = one_ahead + 1;
1867 1866
1868 while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || 1867 while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
1869 !ns_tsi_isempty(two_ahead)) 1868 !ns_tsi_isempty(two_ahead))
1870 /* At most two empty, as stated in the 77201 errata */ 1869 /* At most two empty, as stated in the 77201 errata */
1871 { 1870 {
1872 serviced_entries = 1; 1871 serviced_entries = 1;
1873 1872
1874 /* Skip the one or two possible empty entries */ 1873 /* Skip the one or two possible empty entries */
1875 while (ns_tsi_isempty(card->tsq.next)) { 1874 while (ns_tsi_isempty(card->tsq.next)) {
1876 if (card->tsq.next == card->tsq.last) 1875 if (card->tsq.next == card->tsq.last)
1877 card->tsq.next = card->tsq.base; 1876 card->tsq.next = card->tsq.base;
1878 else 1877 else
1879 card->tsq.next++; 1878 card->tsq.next++;
1880 } 1879 }
1881 1880
1882 if (!ns_tsi_tmrof(card->tsq.next)) { 1881 if (!ns_tsi_tmrof(card->tsq.next)) {
1883 scdi = ns_tsi_getscdindex(card->tsq.next); 1882 scdi = ns_tsi_getscdindex(card->tsq.next);
1884 if (scdi == NS_TSI_SCDISVBR) 1883 if (scdi == NS_TSI_SCDISVBR)
1885 scq = card->scq0; 1884 scq = card->scq0;
1886 else { 1885 else {
1887 if (card->scd2vc[scdi] == NULL) { 1886 if (card->scd2vc[scdi] == NULL) {
1888 printk 1887 printk
1889 ("nicstar%d: could not find VC from SCD index.\n", 1888 ("nicstar%d: could not find VC from SCD index.\n",
1890 card->index); 1889 card->index);
1891 ns_tsi_init(card->tsq.next); 1890 ns_tsi_init(card->tsq.next);
1892 return; 1891 return;
1893 } 1892 }
1894 scq = card->scd2vc[scdi]->scq; 1893 scq = card->scd2vc[scdi]->scq;
1895 } 1894 }
1896 drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); 1895 drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
1897 scq->full = 0; 1896 scq->full = 0;
1898 wake_up_interruptible(&(scq->scqfull_waitq)); 1897 wake_up_interruptible(&(scq->scqfull_waitq));
1899 } 1898 }
1900 1899
1901 ns_tsi_init(card->tsq.next); 1900 ns_tsi_init(card->tsq.next);
1902 previous = card->tsq.next; 1901 previous = card->tsq.next;
1903 if (card->tsq.next == card->tsq.last) 1902 if (card->tsq.next == card->tsq.last)
1904 card->tsq.next = card->tsq.base; 1903 card->tsq.next = card->tsq.base;
1905 else 1904 else
1906 card->tsq.next++; 1905 card->tsq.next++;
1907 1906
1908 if (card->tsq.next == card->tsq.last) 1907 if (card->tsq.next == card->tsq.last)
1909 one_ahead = card->tsq.base; 1908 one_ahead = card->tsq.base;
1910 else 1909 else
1911 one_ahead = card->tsq.next + 1; 1910 one_ahead = card->tsq.next + 1;
1912 1911
1913 if (one_ahead == card->tsq.last) 1912 if (one_ahead == card->tsq.last)
1914 two_ahead = card->tsq.base; 1913 two_ahead = card->tsq.base;
1915 else 1914 else
1916 two_ahead = one_ahead + 1; 1915 two_ahead = one_ahead + 1;
1917 } 1916 }
1918 1917
1919 if (serviced_entries) 1918 if (serviced_entries)
1920 writel(PTR_DIFF(previous, card->tsq.base), 1919 writel(PTR_DIFF(previous, card->tsq.base),
1921 card->membase + TSQH); 1920 card->membase + TSQH);
1922 } 1921 }
1923 1922
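/* Reclaim transmitted buffers: walk the SCQ from just past the current tail up
 * to @pos, unmapping each skb and returning it through the VCC's pop handler
 * (or freeing it), then advance the tail to @pos. */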
static void drain_scq(ns_dev * card, scq_info * scq, int pos)
{
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	int i;
	unsigned long flags;

	XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
		card->index, scq, pos);
	if (pos >= scq->num_entries) {
		printk("nicstar%d: Bad index on drain_scq().\n", card->index);
		return;
	}

	spin_lock_irqsave(&scq->lock, flags);
	i = (int)(scq->tail - scq->base);
	if (++i == scq->num_entries)
		i = 0;
	while (i != pos) {
		skb = scq->skb[i];
		XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
			card->index, skb, i);
		if (skb != NULL) {
			pci_unmap_single(card->pcidev,
					 NS_PRV_DMA(skb),
					 skb->len,
					 PCI_DMA_TODEVICE);
			vcc = ATM_SKB(skb)->vcc;
			if (vcc && vcc->pop != NULL) {
				vcc->pop(vcc, skb);
			} else {
				dev_kfree_skb_irq(skb);
			}
			scq->skb[i] = NULL;
		}
		if (++i == scq->num_entries)
			i = 0;
	}
	scq->tail = scq->base + pos;
	spin_unlock_irqrestore(&scq->lock, flags);
}

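/* Drain the receive status queue: hand every valid entry to dequeue_rx(),
 * re-initialize it, and report the last processed position to the card
 * through the RSQH register. */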
static void process_rsq(ns_dev * card)
{
	ns_rsqe *previous;

	if (!ns_rsqe_valid(card->rsq.next))
		return;
	do {
		dequeue_rx(card, card->rsq.next);
		ns_rsqe_init(card->rsq.next);
		previous = card->rsq.next;
		if (card->rsq.next == card->rsq.last)
			card->rsq.next = card->rsq.base;
		else
			card->rsq.next++;
	} while (ns_rsqe_valid(card->rsq.next));
	writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
}

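/* Handle a single receive status queue entry: recover the receive skb from
 * its idr handle, then either push raw AAL0 cells to the VCC right away or
 * gather the buffer into the per-VC iovec list and reassemble the AAL5 PDU
 * once the end-of-PDU entry arrives. */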
static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
{
	u32 vpi, vci;
	vc_map *vc;
	struct sk_buff *iovb;
	struct iovec *iov;
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	unsigned short aal5_len;
	int len;
	u32 stat;
	u32 id;

	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	id = le32_to_cpu(rsqe->buffer_handle);
	skb = idr_find(&card->idr, id);
	if (!skb) {
		RXPRINTK(KERN_ERR
			 "nicstar%d: idr_find() failed!\n", card->index);
		return;
	}
	idr_remove(&card->idr, id);
	pci_dma_sync_single_for_cpu(card->pcidev,
				    NS_PRV_DMA(skb),
				    (NS_PRV_BUFTYPE(skb) == BUF_SM
				     ? NS_SMSKBSIZE : NS_LGSKBSIZE),
				    PCI_DMA_FROMDEVICE);
	pci_unmap_single(card->pcidev,
			 NS_PRV_DMA(skb),
			 (NS_PRV_BUFTYPE(skb) == BUF_SM
			  ? NS_SMSKBSIZE : NS_LGSKBSIZE),
			 PCI_DMA_FROMDEVICE);
	vpi = ns_rsqe_vpi(rsqe);
	vci = ns_rsqe_vci(rsqe);
	if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
		printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
		       card->index, vpi, vci);
		recycle_rx_buf(card, skb);
		return;
	}

	vc = &(card->vcmap[vpi << card->vcibits | vci]);
	if (!vc->rx) {
		RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
			 card->index, vpi, vci);
		recycle_rx_buf(card, skb);
		return;
	}

	vcc = vc->rx_vcc;

	if (vcc->qos.aal == ATM_AAL0) {
		struct sk_buff *sb;
		unsigned char *cell;
		int i;

		cell = skb->data;
		for (i = ns_rsqe_cellcount(rsqe); i; i--) {
			if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
				printk
				    ("nicstar%d: Can't allocate buffers for aal0.\n",
				     card->index);
				atomic_add(i, &vcc->stats->rx_drop);
				break;
			}
			if (!atm_charge(vcc, sb->truesize)) {
				RXPRINTK
				    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
				     card->index);
				atomic_add(i - 1, &vcc->stats->rx_drop);	/* already increased by 1 */
				dev_kfree_skb_any(sb);
				break;
			}
			/* Rebuild the header */
			*((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
			    (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
			if (i == 1 && ns_rsqe_eopdu(rsqe))
				*((u32 *) sb->data) |= 0x00000002;
			skb_put(sb, NS_AAL0_HEADER);
			memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
			skb_put(sb, ATM_CELL_PAYLOAD);
			ATM_SKB(sb)->vcc = vcc;
			__net_timestamp(sb);
			vcc->push(vcc, sb);
			atomic_inc(&vcc->stats->rx);
			cell += ATM_CELL_PAYLOAD;
		}

		recycle_rx_buf(card, skb);
		return;
	}

	/* To reach this point, the AAL layer can only be AAL5 */

	if ((iovb = vc->rx_iov) == NULL) {
		iovb = skb_dequeue(&(card->iovpool.queue));
		if (iovb == NULL) {	/* No buffers in the queue */
			iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
			if (iovb == NULL) {
				printk("nicstar%d: Out of iovec buffers.\n",
				       card->index);
				atomic_inc(&vcc->stats->rx_drop);
				recycle_rx_buf(card, skb);
				return;
			}
			NS_PRV_BUFTYPE(iovb) = BUF_NONE;
		} else if (--card->iovpool.count < card->iovnr.min) {
			struct sk_buff *new_iovb;
			if ((new_iovb =
			     alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
				skb_queue_tail(&card->iovpool.queue, new_iovb);
				card->iovpool.count++;
			}
		}
		vc->rx_iov = iovb;
		NS_PRV_IOVCNT(iovb) = 0;
		iovb->len = 0;
		iovb->data = iovb->head;
		skb_reset_tail_pointer(iovb);
		/* IMPORTANT: a pointer to the sk_buff containing the small or large
		   buffer is stored as iovec base, NOT a pointer to the
		   small or large buffer itself. */
	} else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
		printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
		atomic_inc(&vcc->stats->rx_err);
		recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
				      NS_MAX_IOVECS);
		NS_PRV_IOVCNT(iovb) = 0;
		iovb->len = 0;
		iovb->data = iovb->head;
		skb_reset_tail_pointer(iovb);
	}
	iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
	iov->iov_base = (void *)skb;
	iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
	iovb->len += iov->iov_len;

#ifdef EXTRA_DEBUG
	if (NS_PRV_IOVCNT(iovb) == 1) {
		if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
			printk
			    ("nicstar%d: Expected a small buffer, and this is not one.\n",
			     card->index);
			which_list(card, skb);
			atomic_inc(&vcc->stats->rx_err);
			recycle_rx_buf(card, skb);
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}
	} else {		/* NS_PRV_IOVCNT(iovb) >= 2 */

		if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
			printk
			    ("nicstar%d: Expected a large buffer, and this is not one.\n",
			     card->index);
			which_list(card, skb);
			atomic_inc(&vcc->stats->rx_err);
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}
	}
#endif /* EXTRA_DEBUG */

	if (ns_rsqe_eopdu(rsqe)) {
		/* This works correctly regardless of the endianness of the host */
		unsigned char *L1L2 = (unsigned char *)
		    (skb->data + iov->iov_len - 6);
		aal5_len = L1L2[0] << 8 | L1L2[1];
		len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
		if (ns_rsqe_crcerr(rsqe) ||
		    len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
			printk("nicstar%d: AAL5 CRC error", card->index);
			if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
				printk(" - PDU size mismatch.\n");
			else
				printk(".\n");
			atomic_inc(&vcc->stats->rx_err);
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			vc->rx_iov = NULL;
			recycle_iov_buf(card, iovb);
			return;
		}

		/* By this point we (hopefully) have a complete SDU without errors. */

		if (NS_PRV_IOVCNT(iovb) == 1) {	/* Just a small buffer */
			/* skb points to a small buffer */
			if (!atm_charge(vcc, skb->truesize)) {
				push_rxbufs(card, skb);
				atomic_inc(&vcc->stats->rx_drop);
			} else {
				skb_put(skb, len);
				dequeue_sm_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
				skb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
				ATM_SKB(skb)->vcc = vcc;
				__net_timestamp(skb);
				vcc->push(vcc, skb);
				atomic_inc(&vcc->stats->rx);
			}
		} else if (NS_PRV_IOVCNT(iovb) == 2) {	/* One small plus one large buffer */
			struct sk_buff *sb;

			sb = (struct sk_buff *)(iov - 1)->iov_base;
			/* skb points to a large buffer */

			if (len <= NS_SMBUFSIZE) {
				if (!atm_charge(vcc, sb->truesize)) {
					push_rxbufs(card, sb);
					atomic_inc(&vcc->stats->rx_drop);
				} else {
					skb_put(sb, len);
					dequeue_sm_buf(card, sb);
#ifdef NS_USE_DESTRUCTORS
					sb->destructor = ns_sb_destructor;
#endif /* NS_USE_DESTRUCTORS */
					ATM_SKB(sb)->vcc = vcc;
					__net_timestamp(sb);
					vcc->push(vcc, sb);
					atomic_inc(&vcc->stats->rx);
				}

				push_rxbufs(card, skb);

			} else {	/* len > NS_SMBUFSIZE, the usual case */

				if (!atm_charge(vcc, skb->truesize)) {
					push_rxbufs(card, skb);
					atomic_inc(&vcc->stats->rx_drop);
				} else {
					dequeue_lg_buf(card, skb);
#ifdef NS_USE_DESTRUCTORS
					skb->destructor = ns_lb_destructor;
#endif /* NS_USE_DESTRUCTORS */
					skb_push(skb, NS_SMBUFSIZE);
					skb_copy_from_linear_data(sb, skb->data,
								  NS_SMBUFSIZE);
					skb_put(skb, len - NS_SMBUFSIZE);
					ATM_SKB(skb)->vcc = vcc;
					__net_timestamp(skb);
					vcc->push(vcc, skb);
					atomic_inc(&vcc->stats->rx);
				}

				push_rxbufs(card, sb);

			}

		} else {	/* Must push a huge buffer */

			struct sk_buff *hb, *sb, *lb;
			int remaining, tocopy;
			int j;

			hb = skb_dequeue(&(card->hbpool.queue));
			if (hb == NULL) {	/* No buffers in the queue */

				hb = dev_alloc_skb(NS_HBUFSIZE);
				if (hb == NULL) {
					printk
					    ("nicstar%d: Out of huge buffers.\n",
					     card->index);
					atomic_inc(&vcc->stats->rx_drop);
					recycle_iovec_rx_bufs(card,
							      (struct iovec *)
							      iovb->data,
							      NS_PRV_IOVCNT(iovb));
					vc->rx_iov = NULL;
					recycle_iov_buf(card, iovb);
					return;
				} else if (card->hbpool.count < card->hbnr.min) {
					struct sk_buff *new_hb;
					if ((new_hb =
					     dev_alloc_skb(NS_HBUFSIZE)) !=
					    NULL) {
						skb_queue_tail(&card->hbpool.
							       queue, new_hb);
						card->hbpool.count++;
					}
				}
				NS_PRV_BUFTYPE(hb) = BUF_NONE;
			} else if (--card->hbpool.count < card->hbnr.min) {
				struct sk_buff *new_hb;
				if ((new_hb =
				     dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
					NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
					skb_queue_tail(&card->hbpool.queue,
						       new_hb);
					card->hbpool.count++;
				}
				if (card->hbpool.count < card->hbnr.min) {
					if ((new_hb =
					     dev_alloc_skb(NS_HBUFSIZE)) !=
					    NULL) {
						NS_PRV_BUFTYPE(new_hb) =
						    BUF_NONE;
						skb_queue_tail(&card->hbpool.
							       queue, new_hb);
						card->hbpool.count++;
					}
				}
			}

			iov = (struct iovec *)iovb->data;

			if (!atm_charge(vcc, hb->truesize)) {
				recycle_iovec_rx_bufs(card, iov,
						      NS_PRV_IOVCNT(iovb));
				if (card->hbpool.count < card->hbnr.max) {
					skb_queue_tail(&card->hbpool.queue, hb);
					card->hbpool.count++;
				} else
					dev_kfree_skb_any(hb);
				atomic_inc(&vcc->stats->rx_drop);
			} else {
				/* Copy the small buffer to the huge buffer */
				sb = (struct sk_buff *)iov->iov_base;
				skb_copy_from_linear_data(sb, hb->data,
							  iov->iov_len);
				skb_put(hb, iov->iov_len);
				remaining = len - iov->iov_len;
				iov++;
				/* Free the small buffer */
				push_rxbufs(card, sb);

				/* Copy all large buffers to the huge buffer and free them */
				for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
					lb = (struct sk_buff *)iov->iov_base;
					tocopy =
					    min_t(int, remaining, iov->iov_len);
					skb_copy_from_linear_data(lb,
								  skb_tail_pointer
								  (hb), tocopy);
					skb_put(hb, tocopy);
					iov++;
					remaining -= tocopy;
					push_rxbufs(card, lb);
				}
#ifdef EXTRA_DEBUG
				if (remaining != 0 || hb->len != len)
					printk
					    ("nicstar%d: Huge buffer len mismatch.\n",
					     card->index);
#endif /* EXTRA_DEBUG */
				ATM_SKB(hb)->vcc = vcc;
#ifdef NS_USE_DESTRUCTORS
				hb->destructor = ns_hb_destructor;
#endif /* NS_USE_DESTRUCTORS */
				__net_timestamp(hb);
				vcc->push(vcc, hb);
				atomic_inc(&vcc->stats->rx);
			}
		}

		vc->rx_iov = NULL;
		recycle_iov_buf(card, iovb);
	}

}

#ifdef NS_USE_DESTRUCTORS

static void ns_sb_destructor(struct sk_buff *sb)
{
	ns_dev *card;
	u32 stat;

	card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	do {
		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
		if (sb == NULL)
			break;
		NS_PRV_BUFTYPE(sb) = BUF_SM;
		skb_queue_tail(&card->sbpool.queue, sb);
		skb_reserve(sb, NS_AAL0_HEADER);
		push_rxbufs(card, sb);
	} while (card->sbfqc < card->sbnr.min);
}

static void ns_lb_destructor(struct sk_buff *lb)
{
	ns_dev *card;
	u32 stat;

	card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);

	do {
		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
		if (lb == NULL)
			break;
		NS_PRV_BUFTYPE(lb) = BUF_LG;
		skb_queue_tail(&card->lbpool.queue, lb);
		skb_reserve(lb, NS_SMBUFSIZE);
		push_rxbufs(card, lb);
	} while (card->lbfqc < card->lbnr.min);
}

static void ns_hb_destructor(struct sk_buff *hb)
{
	ns_dev *card;

	card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;

	while (card->hbpool.count < card->hbnr.init) {
		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
		if (hb == NULL)
			break;
		NS_PRV_BUFTYPE(hb) = BUF_NONE;
		skb_queue_tail(&card->hbpool.queue, hb);
		card->hbpool.count++;
	}
}

#endif /* NS_USE_DESTRUCTORS */

static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
{
	if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
		printk("nicstar%d: What kind of rx buffer is this?\n",
		       card->index);
		dev_kfree_skb_any(skb);
	} else
		push_rxbufs(card, skb);
}

static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count)
{
	while (count-- > 0)
		recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
}

static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
{
	if (card->iovpool.count < card->iovnr.max) {
		skb_queue_tail(&card->iovpool.queue, iovb);
		card->iovpool.count++;
	} else
		dev_kfree_skb_any(iovb);
}

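/* Take a small buffer out of the free pool; if the card's free small-buffer
 * count has dropped below the configured level, allocate and post a
 * replacement. dequeue_lg_buf() below does the same for large buffers. */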
static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
{
	skb_unlink(sb, &card->sbpool.queue);
#ifdef NS_USE_DESTRUCTORS
	if (card->sbfqc < card->sbnr.min)
#else
	if (card->sbfqc < card->sbnr.init) {
		struct sk_buff *new_sb;
		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
			skb_queue_tail(&card->sbpool.queue, new_sb);
			skb_reserve(new_sb, NS_AAL0_HEADER);
			push_rxbufs(card, new_sb);
		}
	}
	if (card->sbfqc < card->sbnr.init)
#endif /* NS_USE_DESTRUCTORS */
	{
		struct sk_buff *new_sb;
		if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_sb) = BUF_SM;
			skb_queue_tail(&card->sbpool.queue, new_sb);
			skb_reserve(new_sb, NS_AAL0_HEADER);
			push_rxbufs(card, new_sb);
		}
	}
}

static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
{
	skb_unlink(lb, &card->lbpool.queue);
#ifdef NS_USE_DESTRUCTORS
	if (card->lbfqc < card->lbnr.min)
#else
	if (card->lbfqc < card->lbnr.init) {
		struct sk_buff *new_lb;
		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
			skb_queue_tail(&card->lbpool.queue, new_lb);
			skb_reserve(new_lb, NS_SMBUFSIZE);
			push_rxbufs(card, new_lb);
		}
	}
	if (card->lbfqc < card->lbnr.init)
#endif /* NS_USE_DESTRUCTORS */
	{
		struct sk_buff *new_lb;
		if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
			NS_PRV_BUFTYPE(new_lb) = BUF_LG;
			skb_queue_tail(&card->lbpool.queue, new_lb);
			skb_reserve(new_lb, NS_SMBUFSIZE);
			push_rxbufs(card, new_lb);
		}
	}
}

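/* /proc read handler: one output line per call, reporting the buffer pool
 * levels and the interrupt counter (which is cleared after being read). */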
static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
{
	u32 stat;
	ns_dev *card;
	int left;

	left = (int)*pos;
	card = (ns_dev *) dev->dev_data;
	stat = readl(card->membase + STAT);
	if (!left--)
		return sprintf(page, "Pool count min init max \n");
	if (!left--)
		return sprintf(page, "Small %5d %5d %5d %5d \n",
			       ns_stat_sfbqc_get(stat), card->sbnr.min,
			       card->sbnr.init, card->sbnr.max);
	if (!left--)
		return sprintf(page, "Large %5d %5d %5d %5d \n",
			       ns_stat_lfbqc_get(stat), card->lbnr.min,
			       card->lbnr.init, card->lbnr.max);
	if (!left--)
		return sprintf(page, "Huge %5d %5d %5d %5d \n",
			       card->hbpool.count, card->hbnr.min,
			       card->hbnr.init, card->hbnr.max);
	if (!left--)
		return sprintf(page, "Iovec %5d %5d %5d %5d \n",
			       card->iovpool.count, card->iovnr.min,
			       card->iovnr.init, card->iovnr.max);
	if (!left--) {
		int retval;
		retval =
		    sprintf(page, "Interrupt counter: %u \n", card->intcnt);
		card->intcnt = 0;
		return retval;
	}
#if 0
	/* Dump 25.6 Mbps PHY registers */
	/* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. I left it
	   here just in case it's needed for debugging. */
	if (card->max_pcr == ATM_25_PCR && !left--) {
		u32 phy_regs[4];
		u32 i;

		for (i = 0; i < 4; i++) {
			while (CMD_BUSY(card)) ;
			writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
			       card->membase + CMD);
			while (CMD_BUSY(card)) ;
			phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
		}

		return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
			       phy_regs[0], phy_regs[1], phy_regs[2],
			       phy_regs[3]);
	}
#endif /* 0 - Dump 25.6 Mbps PHY registers */
#if 0
	/* Dump TST */
	if (left-- < NS_TST_NUM_ENTRIES) {
		if (card->tste2vc[left + 1] == NULL)
			return sprintf(page, "%5d - VBR/UBR \n", left + 1);
		else
			return sprintf(page, "%5d - %d %d \n", left + 1,
				       card->tste2vc[left + 1]->tx_vcc->vpi,
				       card->tste2vc[left + 1]->tx_vcc->vci);
	}
#endif /* 0 */
	return 0;
}

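/* Driver ioctls: NS_GETPSTAT reports a pool's current level and limits,
 * NS_SETBUFLEV updates the limits and NS_ADJBUFLEV refills or trims a pool to
 * its init level (both require CAP_NET_ADMIN); anything else is forwarded to
 * the PHY driver's ioctl handler. */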
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
{
	ns_dev *card;
	pool_levels pl;
	long btype;
	unsigned long flags;

	card = dev->dev_data;
	switch (cmd) {
	case NS_GETPSTAT:
		if (get_user
		    (pl.buftype, &((pool_levels __user *) arg)->buftype))
			return -EFAULT;
		switch (pl.buftype) {
		case NS_BUFTYPE_SMALL:
			pl.count =
			    ns_stat_sfbqc_get(readl(card->membase + STAT));
			pl.level.min = card->sbnr.min;
			pl.level.init = card->sbnr.init;
			pl.level.max = card->sbnr.max;
			break;

		case NS_BUFTYPE_LARGE:
			pl.count =
			    ns_stat_lfbqc_get(readl(card->membase + STAT));
			pl.level.min = card->lbnr.min;
			pl.level.init = card->lbnr.init;
			pl.level.max = card->lbnr.max;
			break;

		case NS_BUFTYPE_HUGE:
			pl.count = card->hbpool.count;
			pl.level.min = card->hbnr.min;
			pl.level.init = card->hbnr.init;
			pl.level.max = card->hbnr.max;
			break;

		case NS_BUFTYPE_IOVEC:
			pl.count = card->iovpool.count;
			pl.level.min = card->iovnr.min;
			pl.level.init = card->iovnr.init;
			pl.level.max = card->iovnr.max;
			break;

		default:
			return -ENOIOCTLCMD;

		}
		if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
			return (sizeof(pl));
		else
			return -EFAULT;

	case NS_SETBUFLEV:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
			return -EFAULT;
		if (pl.level.min >= pl.level.init
		    || pl.level.init >= pl.level.max)
			return -EINVAL;
		if (pl.level.min == 0)
			return -EINVAL;
		switch (pl.buftype) {
		case NS_BUFTYPE_SMALL:
			if (pl.level.max > TOP_SB)
				return -EINVAL;
			card->sbnr.min = pl.level.min;
			card->sbnr.init = pl.level.init;
			card->sbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_LARGE:
			if (pl.level.max > TOP_LB)
				return -EINVAL;
			card->lbnr.min = pl.level.min;
			card->lbnr.init = pl.level.init;
			card->lbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_HUGE:
			if (pl.level.max > TOP_HB)
				return -EINVAL;
			card->hbnr.min = pl.level.min;
			card->hbnr.init = pl.level.init;
			card->hbnr.max = pl.level.max;
			break;

		case NS_BUFTYPE_IOVEC:
			if (pl.level.max > TOP_IOVB)
				return -EINVAL;
			card->iovnr.min = pl.level.min;
			card->iovnr.init = pl.level.init;
			card->iovnr.max = pl.level.max;
			break;

		default:
			return -EINVAL;

		}
		return 0;

	case NS_ADJBUFLEV:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		btype = (long)arg;	/* a long is the same size as a pointer or bigger */
		switch (btype) {
		case NS_BUFTYPE_SMALL:
			while (card->sbfqc < card->sbnr.init) {
				struct sk_buff *sb;

				sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
				if (sb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(sb) = BUF_SM;
				skb_queue_tail(&card->sbpool.queue, sb);
				skb_reserve(sb, NS_AAL0_HEADER);
				push_rxbufs(card, sb);
			}
			break;

		case NS_BUFTYPE_LARGE:
			while (card->lbfqc < card->lbnr.init) {
				struct sk_buff *lb;

				lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
				if (lb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(lb) = BUF_LG;
				skb_queue_tail(&card->lbpool.queue, lb);
				skb_reserve(lb, NS_SMBUFSIZE);
				push_rxbufs(card, lb);
			}
			break;

		case NS_BUFTYPE_HUGE:
			while (card->hbpool.count > card->hbnr.init) {
				struct sk_buff *hb;

				spin_lock_irqsave(&card->int_lock, flags);
				hb = skb_dequeue(&card->hbpool.queue);
				card->hbpool.count--;
				spin_unlock_irqrestore(&card->int_lock, flags);
				if (hb == NULL)
					printk
					    ("nicstar%d: huge buffer count inconsistent.\n",
					     card->index);
				else
					dev_kfree_skb_any(hb);

			}
			while (card->hbpool.count < card->hbnr.init) {
				struct sk_buff *hb;

				hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
				if (hb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(hb) = BUF_NONE;
				spin_lock_irqsave(&card->int_lock, flags);
				skb_queue_tail(&card->hbpool.queue, hb);
				card->hbpool.count++;
				spin_unlock_irqrestore(&card->int_lock, flags);
			}
			break;

		case NS_BUFTYPE_IOVEC:
			while (card->iovpool.count > card->iovnr.init) {
				struct sk_buff *iovb;

				spin_lock_irqsave(&card->int_lock, flags);
				iovb = skb_dequeue(&card->iovpool.queue);
				card->iovpool.count--;
				spin_unlock_irqrestore(&card->int_lock, flags);
				if (iovb == NULL)
					printk
					    ("nicstar%d: iovec buffer count inconsistent.\n",
					     card->index);
				else
					dev_kfree_skb_any(iovb);

			}
			while (card->iovpool.count < card->iovnr.init) {
				struct sk_buff *iovb;

				iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
				if (iovb == NULL)
					return -ENOMEM;
				NS_PRV_BUFTYPE(iovb) = BUF_NONE;
				spin_lock_irqsave(&card->int_lock, flags);
				skb_queue_tail(&card->iovpool.queue, iovb);
				card->iovpool.count++;
				spin_unlock_irqrestore(&card->int_lock, flags);
			}
			break;

		default:
			return -EINVAL;

		}
		return 0;

	default:
		if (dev->phy && dev->phy->ioctl) {
			return dev->phy->ioctl(dev, cmd, arg);
		} else {
			printk("nicstar%d: %s == NULL \n", card->index,
			       dev->phy ? "dev->phy->ioctl" : "dev->phy");
			return -ENOIOCTLCMD;
		}
	}
}

#ifdef EXTRA_DEBUG
static void which_list(ns_dev * card, struct sk_buff *skb)
{
	printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
}
#endif /* EXTRA_DEBUG */

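/* Periodic timer routine: for every card, process the TSQ and RSQ and
 * acknowledge the corresponding status bits, then re-arm the timer. */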
static void ns_poll(unsigned long arg)
{
	int i;
	ns_dev *card;
	unsigned long flags;
	u32 stat_r, stat_w;

	PRINTK("nicstar: Entering ns_poll().\n");
	for (i = 0; i < num_cards; i++) {
		card = cards[i];
		if (spin_is_locked(&card->int_lock)) {
			/* Probably it isn't worth spinning */
			continue;
		}
		spin_lock_irqsave(&card->int_lock, flags);

		stat_w = 0;
		stat_r = readl(card->membase + STAT);
		if (stat_r & NS_STAT_TSIF)
			stat_w |= NS_STAT_TSIF;
		if (stat_r & NS_STAT_EOPDU)
			stat_w |= NS_STAT_EOPDU;

		process_tsq(card);
		process_rsq(card);

		writel(stat_w, card->membase + STAT);
		spin_unlock_irqrestore(&card->int_lock, flags);
	}
	mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
	PRINTK("nicstar: Leaving ns_poll().\n");
}

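/* Parse an ESI/MAC address given as "xx:xx:xx:xx:xx:xx" into six bytes;
 * returns 0 on success and -1 on any malformed input. */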
static int ns_parse_mac(char *mac, unsigned char *esi)
{
	int i, j;
	short byte1, byte0;

	if (mac == NULL || esi == NULL)
		return -1;
	j = 0;
	for (i = 0; i < 6; i++) {
		if ((byte1 = hex_to_bin(mac[j++])) < 0)
			return -1;
		if ((byte0 = hex_to_bin(mac[j++])) < 0)
			return -1;
		esi[i] = (unsigned char)(byte1 * 16 + byte0);
		if (i < 5) {
			if (mac[j++] != ':')
				return -1;
		}
	}
	return 0;
}

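/* PHY register access goes through the SAR's utility bus; both accessors are
 * serialized by the per-card resource lock. */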
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
		       unsigned long addr)
{
	ns_dev *card;
	unsigned long flags;

	card = dev->dev_data;
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card)) ;
	writel((u32) value, card->membase + DR0);
	writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
	       card->membase + CMD);
	spin_unlock_irqrestore(&card->res_lock, flags);
}

static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
{
	ns_dev *card;
	unsigned long flags;
	u32 data;

	card = dev->dev_data;
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card)) ;
	writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
	       card->membase + CMD);
	while (CMD_BUSY(card)) ;
	data = readl(card->membase + DR0) & 0x000000FF;
	spin_unlock_irqrestore(&card->res_lock, flags);
	return (unsigned char)data;
}

module_init(nicstar_init);
module_exit(nicstar_cleanup);