Commit 4a73a43741489a652588460e72be959e60bcb9ec

Authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  vlan: dont drop packets from unknown vlans in promiscuous mode
  Phonet: Correct header retrieval after pskb_may_pull
  um: Proper Fix for f25c80a4: remove duplicate structure field initialization
  ip_gre: Fix dependencies wrt. ipv6.
  net-2.6: SYN retransmits: Add new parameter to retransmits_timed_out()
  iwl3945: queue the right work if the scan needs to be aborted
  mac80211: fix use-after-free

Showing 8 changed files
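
A note on the arch/um/drivers/net_kern.c change below: the driver drops its private uml_net_set_mac() wrapper, points .ndo_set_mac_address at the generic eth_mac_addr helper, and copies the MAC by hand in eth_configure(). The in-diff comment "don't use eth_mac_addr, it will not work here" exists because the generic helper interprets its argument as a struct sockaddr (reading the address out of sa_data) and validates it, while eth_configure() only holds a bare 6-byte MAC buffer, so a plain memcpy() into dev->dev_addr is the right move at setup time. For .ndo_set_mac_address, where the networking core does pass a struct sockaddr, the generic helper is exactly what is wanted, which is why the wrapper can go. Roughly, the helper of this era looks like the sketch below (paraphrased from net/ethernet/eth.c for context; it is not part of this commit):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/*
 * Approximate sketch of the generic helper, circa 2.6.36.  It treats its
 * second argument as a struct sockaddr and refuses to change the address of
 * a running interface -- neither of which matches the raw MAC buffer that
 * eth_configure() has when it sets the initial address.
 */
int eth_mac_addr(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
        return 0;
}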

arch/um/drivers/net_kern.c
1 /* 1 /*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and 3 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
4 * James Leu (jleu@mindspring.net). 4 * James Leu (jleu@mindspring.net).
5 * Copyright (C) 2001 by various other people who didn't put their name here. 5 * Copyright (C) 2001 by various other people who didn't put their name here.
6 * Licensed under the GPL. 6 * Licensed under the GPL.
7 */ 7 */
8 8
9 #include <linux/bootmem.h> 9 #include <linux/bootmem.h>
10 #include <linux/etherdevice.h> 10 #include <linux/etherdevice.h>
11 #include <linux/ethtool.h> 11 #include <linux/ethtool.h>
12 #include <linux/inetdevice.h> 12 #include <linux/inetdevice.h>
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/list.h> 14 #include <linux/list.h>
15 #include <linux/netdevice.h> 15 #include <linux/netdevice.h>
16 #include <linux/platform_device.h> 16 #include <linux/platform_device.h>
17 #include <linux/rtnetlink.h> 17 #include <linux/rtnetlink.h>
18 #include <linux/skbuff.h> 18 #include <linux/skbuff.h>
19 #include <linux/slab.h> 19 #include <linux/slab.h>
20 #include <linux/spinlock.h> 20 #include <linux/spinlock.h>
21 #include "init.h" 21 #include "init.h"
22 #include "irq_kern.h" 22 #include "irq_kern.h"
23 #include "irq_user.h" 23 #include "irq_user.h"
24 #include "mconsole_kern.h" 24 #include "mconsole_kern.h"
25 #include "net_kern.h" 25 #include "net_kern.h"
26 #include "net_user.h" 26 #include "net_user.h"
27 27
28 #define DRIVER_NAME "uml-netdev" 28 #define DRIVER_NAME "uml-netdev"
29 29
30 static DEFINE_SPINLOCK(opened_lock); 30 static DEFINE_SPINLOCK(opened_lock);
31 static LIST_HEAD(opened); 31 static LIST_HEAD(opened);
32 32
33 /* 33 /*
34 * The drop_skb is used when we can't allocate an skb. The 34 * The drop_skb is used when we can't allocate an skb. The
35 * packet is read into drop_skb in order to get the data off the 35 * packet is read into drop_skb in order to get the data off the
36 * connection to the host. 36 * connection to the host.
37 * It is reallocated whenever a maximum packet size is seen which is 37 * It is reallocated whenever a maximum packet size is seen which is
38 * larger than any seen before. update_drop_skb is called from 38 * larger than any seen before. update_drop_skb is called from
39 * eth_configure when a new interface is added. 39 * eth_configure when a new interface is added.
40 */ 40 */
41 static DEFINE_SPINLOCK(drop_lock); 41 static DEFINE_SPINLOCK(drop_lock);
42 static struct sk_buff *drop_skb; 42 static struct sk_buff *drop_skb;
43 static int drop_max; 43 static int drop_max;
44 44
45 static int update_drop_skb(int max) 45 static int update_drop_skb(int max)
46 { 46 {
47 struct sk_buff *new; 47 struct sk_buff *new;
48 unsigned long flags; 48 unsigned long flags;
49 int err = 0; 49 int err = 0;
50 50
51 spin_lock_irqsave(&drop_lock, flags); 51 spin_lock_irqsave(&drop_lock, flags);
52 52
53 if (max <= drop_max) 53 if (max <= drop_max)
54 goto out; 54 goto out;
55 55
56 err = -ENOMEM; 56 err = -ENOMEM;
57 new = dev_alloc_skb(max); 57 new = dev_alloc_skb(max);
58 if (new == NULL) 58 if (new == NULL)
59 goto out; 59 goto out;
60 60
61 skb_put(new, max); 61 skb_put(new, max);
62 62
63 kfree_skb(drop_skb); 63 kfree_skb(drop_skb);
64 drop_skb = new; 64 drop_skb = new;
65 drop_max = max; 65 drop_max = max;
66 err = 0; 66 err = 0;
67 out: 67 out:
68 spin_unlock_irqrestore(&drop_lock, flags); 68 spin_unlock_irqrestore(&drop_lock, flags);
69 69
70 return err; 70 return err;
71 } 71 }
72 72
73 static int uml_net_rx(struct net_device *dev) 73 static int uml_net_rx(struct net_device *dev)
74 { 74 {
75 struct uml_net_private *lp = netdev_priv(dev); 75 struct uml_net_private *lp = netdev_priv(dev);
76 int pkt_len; 76 int pkt_len;
77 struct sk_buff *skb; 77 struct sk_buff *skb;
78 78
79 /* If we can't allocate memory, try again next round. */ 79 /* If we can't allocate memory, try again next round. */
80 skb = dev_alloc_skb(lp->max_packet); 80 skb = dev_alloc_skb(lp->max_packet);
81 if (skb == NULL) { 81 if (skb == NULL) {
82 drop_skb->dev = dev; 82 drop_skb->dev = dev;
83 /* Read a packet into drop_skb and don't do anything with it. */ 83 /* Read a packet into drop_skb and don't do anything with it. */
84 (*lp->read)(lp->fd, drop_skb, lp); 84 (*lp->read)(lp->fd, drop_skb, lp);
85 dev->stats.rx_dropped++; 85 dev->stats.rx_dropped++;
86 return 0; 86 return 0;
87 } 87 }
88 88
89 skb->dev = dev; 89 skb->dev = dev;
90 skb_put(skb, lp->max_packet); 90 skb_put(skb, lp->max_packet);
91 skb_reset_mac_header(skb); 91 skb_reset_mac_header(skb);
92 pkt_len = (*lp->read)(lp->fd, skb, lp); 92 pkt_len = (*lp->read)(lp->fd, skb, lp);
93 93
94 if (pkt_len > 0) { 94 if (pkt_len > 0) {
95 skb_trim(skb, pkt_len); 95 skb_trim(skb, pkt_len);
96 skb->protocol = (*lp->protocol)(skb); 96 skb->protocol = (*lp->protocol)(skb);
97 97
98 dev->stats.rx_bytes += skb->len; 98 dev->stats.rx_bytes += skb->len;
99 dev->stats.rx_packets++; 99 dev->stats.rx_packets++;
100 netif_rx(skb); 100 netif_rx(skb);
101 return pkt_len; 101 return pkt_len;
102 } 102 }
103 103
104 kfree_skb(skb); 104 kfree_skb(skb);
105 return pkt_len; 105 return pkt_len;
106 } 106 }
107 107
108 static void uml_dev_close(struct work_struct *work) 108 static void uml_dev_close(struct work_struct *work)
109 { 109 {
110 struct uml_net_private *lp = 110 struct uml_net_private *lp =
111 container_of(work, struct uml_net_private, work); 111 container_of(work, struct uml_net_private, work);
112 dev_close(lp->dev); 112 dev_close(lp->dev);
113 } 113 }
114 114
115 static irqreturn_t uml_net_interrupt(int irq, void *dev_id) 115 static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
116 { 116 {
117 struct net_device *dev = dev_id; 117 struct net_device *dev = dev_id;
118 struct uml_net_private *lp = netdev_priv(dev); 118 struct uml_net_private *lp = netdev_priv(dev);
119 int err; 119 int err;
120 120
121 if (!netif_running(dev)) 121 if (!netif_running(dev))
122 return IRQ_NONE; 122 return IRQ_NONE;
123 123
124 spin_lock(&lp->lock); 124 spin_lock(&lp->lock);
125 while ((err = uml_net_rx(dev)) > 0) ; 125 while ((err = uml_net_rx(dev)) > 0) ;
126 if (err < 0) { 126 if (err < 0) {
127 printk(KERN_ERR 127 printk(KERN_ERR
128 "Device '%s' read returned %d, shutting it down\n", 128 "Device '%s' read returned %d, shutting it down\n",
129 dev->name, err); 129 dev->name, err);
130 /* dev_close can't be called in interrupt context, and takes 130 /* dev_close can't be called in interrupt context, and takes
131 * again lp->lock. 131 * again lp->lock.
132 * And dev_close() can be safely called multiple times on the 132 * And dev_close() can be safely called multiple times on the
133 * same device, since it tests for (dev->flags & IFF_UP). So 133 * same device, since it tests for (dev->flags & IFF_UP). So
134 * there's no harm in delaying the device shutdown. 134 * there's no harm in delaying the device shutdown.
135 * Furthermore, the workqueue will not re-enqueue an already 135 * Furthermore, the workqueue will not re-enqueue an already
136 * enqueued work item. */ 136 * enqueued work item. */
137 schedule_work(&lp->work); 137 schedule_work(&lp->work);
138 goto out; 138 goto out;
139 } 139 }
140 reactivate_fd(lp->fd, UM_ETH_IRQ); 140 reactivate_fd(lp->fd, UM_ETH_IRQ);
141 141
142 out: 142 out:
143 spin_unlock(&lp->lock); 143 spin_unlock(&lp->lock);
144 return IRQ_HANDLED; 144 return IRQ_HANDLED;
145 } 145 }
146 146
147 static int uml_net_open(struct net_device *dev) 147 static int uml_net_open(struct net_device *dev)
148 { 148 {
149 struct uml_net_private *lp = netdev_priv(dev); 149 struct uml_net_private *lp = netdev_priv(dev);
150 int err; 150 int err;
151 151
152 if (lp->fd >= 0) { 152 if (lp->fd >= 0) {
153 err = -ENXIO; 153 err = -ENXIO;
154 goto out; 154 goto out;
155 } 155 }
156 156
157 lp->fd = (*lp->open)(&lp->user); 157 lp->fd = (*lp->open)(&lp->user);
158 if (lp->fd < 0) { 158 if (lp->fd < 0) {
159 err = lp->fd; 159 err = lp->fd;
160 goto out; 160 goto out;
161 } 161 }
162 162
163 err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt, 163 err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
164 IRQF_DISABLED | IRQF_SHARED, dev->name, dev); 164 IRQF_DISABLED | IRQF_SHARED, dev->name, dev);
165 if (err != 0) { 165 if (err != 0) {
166 printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err); 166 printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
167 err = -ENETUNREACH; 167 err = -ENETUNREACH;
168 goto out_close; 168 goto out_close;
169 } 169 }
170 170
171 lp->tl.data = (unsigned long) &lp->user; 171 lp->tl.data = (unsigned long) &lp->user;
172 netif_start_queue(dev); 172 netif_start_queue(dev);
173 173
174 /* clear buffer - it can happen that the host side of the interface 174 /* clear buffer - it can happen that the host side of the interface
175 * is full when we get here. In this case, new data is never queued, 175 * is full when we get here. In this case, new data is never queued,
176 * SIGIOs never arrive, and the net never works. 176 * SIGIOs never arrive, and the net never works.
177 */ 177 */
178 while ((err = uml_net_rx(dev)) > 0) ; 178 while ((err = uml_net_rx(dev)) > 0) ;
179 179
180 spin_lock(&opened_lock); 180 spin_lock(&opened_lock);
181 list_add(&lp->list, &opened); 181 list_add(&lp->list, &opened);
182 spin_unlock(&opened_lock); 182 spin_unlock(&opened_lock);
183 183
184 return 0; 184 return 0;
185 out_close: 185 out_close:
186 if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user); 186 if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
187 lp->fd = -1; 187 lp->fd = -1;
188 out: 188 out:
189 return err; 189 return err;
190 } 190 }
191 191
192 static int uml_net_close(struct net_device *dev) 192 static int uml_net_close(struct net_device *dev)
193 { 193 {
194 struct uml_net_private *lp = netdev_priv(dev); 194 struct uml_net_private *lp = netdev_priv(dev);
195 195
196 netif_stop_queue(dev); 196 netif_stop_queue(dev);
197 197
198 free_irq(dev->irq, dev); 198 free_irq(dev->irq, dev);
199 if (lp->close != NULL) 199 if (lp->close != NULL)
200 (*lp->close)(lp->fd, &lp->user); 200 (*lp->close)(lp->fd, &lp->user);
201 lp->fd = -1; 201 lp->fd = -1;
202 202
203 spin_lock(&opened_lock); 203 spin_lock(&opened_lock);
204 list_del(&lp->list); 204 list_del(&lp->list);
205 spin_unlock(&opened_lock); 205 spin_unlock(&opened_lock);
206 206
207 return 0; 207 return 0;
208 } 208 }
209 209
210 static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) 210 static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
211 { 211 {
212 struct uml_net_private *lp = netdev_priv(dev); 212 struct uml_net_private *lp = netdev_priv(dev);
213 unsigned long flags; 213 unsigned long flags;
214 int len; 214 int len;
215 215
216 netif_stop_queue(dev); 216 netif_stop_queue(dev);
217 217
218 spin_lock_irqsave(&lp->lock, flags); 218 spin_lock_irqsave(&lp->lock, flags);
219 219
220 len = (*lp->write)(lp->fd, skb, lp); 220 len = (*lp->write)(lp->fd, skb, lp);
221 221
222 if (len == skb->len) { 222 if (len == skb->len) {
223 dev->stats.tx_packets++; 223 dev->stats.tx_packets++;
224 dev->stats.tx_bytes += skb->len; 224 dev->stats.tx_bytes += skb->len;
225 dev->trans_start = jiffies; 225 dev->trans_start = jiffies;
226 netif_start_queue(dev); 226 netif_start_queue(dev);
227 227
228 /* this is normally done in the interrupt when tx finishes */ 228 /* this is normally done in the interrupt when tx finishes */
229 netif_wake_queue(dev); 229 netif_wake_queue(dev);
230 } 230 }
231 else if (len == 0) { 231 else if (len == 0) {
232 netif_start_queue(dev); 232 netif_start_queue(dev);
233 dev->stats.tx_dropped++; 233 dev->stats.tx_dropped++;
234 } 234 }
235 else { 235 else {
236 netif_start_queue(dev); 236 netif_start_queue(dev);
237 printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len); 237 printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
238 } 238 }
239 239
240 spin_unlock_irqrestore(&lp->lock, flags); 240 spin_unlock_irqrestore(&lp->lock, flags);
241 241
242 dev_kfree_skb(skb); 242 dev_kfree_skb(skb);
243 243
244 return NETDEV_TX_OK; 244 return NETDEV_TX_OK;
245 } 245 }
246 246
247 static void uml_net_set_multicast_list(struct net_device *dev) 247 static void uml_net_set_multicast_list(struct net_device *dev)
248 { 248 {
249 return; 249 return;
250 } 250 }
251 251
252 static void uml_net_tx_timeout(struct net_device *dev) 252 static void uml_net_tx_timeout(struct net_device *dev)
253 { 253 {
254 dev->trans_start = jiffies; 254 dev->trans_start = jiffies;
255 netif_wake_queue(dev); 255 netif_wake_queue(dev);
256 } 256 }
257 257
258 static int uml_net_set_mac(struct net_device *dev, void *addr)
259 {
260 struct uml_net_private *lp = netdev_priv(dev);
261 struct sockaddr *hwaddr = addr;
262
263 spin_lock_irq(&lp->lock);
264 eth_mac_addr(dev, hwaddr->sa_data);
265 spin_unlock_irq(&lp->lock);
266
267 return 0;
268 }
269
270 static int uml_net_change_mtu(struct net_device *dev, int new_mtu) 258 static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
271 { 259 {
272 dev->mtu = new_mtu; 260 dev->mtu = new_mtu;
273 261
274 return 0; 262 return 0;
275 } 263 }
276 264
277 static void uml_net_get_drvinfo(struct net_device *dev, 265 static void uml_net_get_drvinfo(struct net_device *dev,
278 struct ethtool_drvinfo *info) 266 struct ethtool_drvinfo *info)
279 { 267 {
280 strcpy(info->driver, DRIVER_NAME); 268 strcpy(info->driver, DRIVER_NAME);
281 strcpy(info->version, "42"); 269 strcpy(info->version, "42");
282 } 270 }
283 271
284 static const struct ethtool_ops uml_net_ethtool_ops = { 272 static const struct ethtool_ops uml_net_ethtool_ops = {
285 .get_drvinfo = uml_net_get_drvinfo, 273 .get_drvinfo = uml_net_get_drvinfo,
286 .get_link = ethtool_op_get_link, 274 .get_link = ethtool_op_get_link,
287 }; 275 };
288 276
289 static void uml_net_user_timer_expire(unsigned long _conn) 277 static void uml_net_user_timer_expire(unsigned long _conn)
290 { 278 {
291 #ifdef undef 279 #ifdef undef
292 struct connection *conn = (struct connection *)_conn; 280 struct connection *conn = (struct connection *)_conn;
293 281
294 dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn); 282 dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
295 do_connect(conn); 283 do_connect(conn);
296 #endif 284 #endif
297 } 285 }
298 286
299 static void setup_etheraddr(char *str, unsigned char *addr, char *name) 287 static void setup_etheraddr(char *str, unsigned char *addr, char *name)
300 { 288 {
301 char *end; 289 char *end;
302 int i; 290 int i;
303 291
304 if (str == NULL) 292 if (str == NULL)
305 goto random; 293 goto random;
306 294
307 for (i = 0; i < 6; i++) { 295 for (i = 0; i < 6; i++) {
308 addr[i] = simple_strtoul(str, &end, 16); 296 addr[i] = simple_strtoul(str, &end, 16);
309 if ((end == str) || 297 if ((end == str) ||
310 ((*end != ':') && (*end != ',') && (*end != '\0'))) { 298 ((*end != ':') && (*end != ',') && (*end != '\0'))) {
311 printk(KERN_ERR 299 printk(KERN_ERR
312 "setup_etheraddr: failed to parse '%s' " 300 "setup_etheraddr: failed to parse '%s' "
313 "as an ethernet address\n", str); 301 "as an ethernet address\n", str);
314 goto random; 302 goto random;
315 } 303 }
316 str = end + 1; 304 str = end + 1;
317 } 305 }
318 if (is_multicast_ether_addr(addr)) { 306 if (is_multicast_ether_addr(addr)) {
319 printk(KERN_ERR 307 printk(KERN_ERR
320 "Attempt to assign a multicast ethernet address to a " 308 "Attempt to assign a multicast ethernet address to a "
321 "device disallowed\n"); 309 "device disallowed\n");
322 goto random; 310 goto random;
323 } 311 }
324 if (!is_valid_ether_addr(addr)) { 312 if (!is_valid_ether_addr(addr)) {
325 printk(KERN_ERR 313 printk(KERN_ERR
326 "Attempt to assign an invalid ethernet address to a " 314 "Attempt to assign an invalid ethernet address to a "
327 "device disallowed\n"); 315 "device disallowed\n");
328 goto random; 316 goto random;
329 } 317 }
330 if (!is_local_ether_addr(addr)) { 318 if (!is_local_ether_addr(addr)) {
331 printk(KERN_WARNING 319 printk(KERN_WARNING
332 "Warning: Assigning a globally valid ethernet " 320 "Warning: Assigning a globally valid ethernet "
333 "address to a device\n"); 321 "address to a device\n");
334 printk(KERN_WARNING "You should set the 2nd rightmost bit in " 322 printk(KERN_WARNING "You should set the 2nd rightmost bit in "
335 "the first byte of the MAC,\n"); 323 "the first byte of the MAC,\n");
336 printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n", 324 printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
337 addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], 325 addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
338 addr[5]); 326 addr[5]);
339 } 327 }
340 return; 328 return;
341 329
342 random: 330 random:
343 printk(KERN_INFO 331 printk(KERN_INFO
344 "Choosing a random ethernet address for device %s\n", name); 332 "Choosing a random ethernet address for device %s\n", name);
345 random_ether_addr(addr); 333 random_ether_addr(addr);
346 } 334 }
347 335
348 static DEFINE_SPINLOCK(devices_lock); 336 static DEFINE_SPINLOCK(devices_lock);
349 static LIST_HEAD(devices); 337 static LIST_HEAD(devices);
350 338
351 static struct platform_driver uml_net_driver = { 339 static struct platform_driver uml_net_driver = {
352 .driver = { 340 .driver = {
353 .name = DRIVER_NAME, 341 .name = DRIVER_NAME,
354 }, 342 },
355 }; 343 };
356 344
357 static void net_device_release(struct device *dev) 345 static void net_device_release(struct device *dev)
358 { 346 {
359 struct uml_net *device = dev_get_drvdata(dev); 347 struct uml_net *device = dev_get_drvdata(dev);
360 struct net_device *netdev = device->dev; 348 struct net_device *netdev = device->dev;
361 struct uml_net_private *lp = netdev_priv(netdev); 349 struct uml_net_private *lp = netdev_priv(netdev);
362 350
363 if (lp->remove != NULL) 351 if (lp->remove != NULL)
364 (*lp->remove)(&lp->user); 352 (*lp->remove)(&lp->user);
365 list_del(&device->list); 353 list_del(&device->list);
366 kfree(device); 354 kfree(device);
367 free_netdev(netdev); 355 free_netdev(netdev);
368 } 356 }
369 357
370 static const struct net_device_ops uml_netdev_ops = { 358 static const struct net_device_ops uml_netdev_ops = {
371 .ndo_open = uml_net_open, 359 .ndo_open = uml_net_open,
372 .ndo_stop = uml_net_close, 360 .ndo_stop = uml_net_close,
373 .ndo_start_xmit = uml_net_start_xmit, 361 .ndo_start_xmit = uml_net_start_xmit,
374 .ndo_set_multicast_list = uml_net_set_multicast_list, 362 .ndo_set_multicast_list = uml_net_set_multicast_list,
375 .ndo_tx_timeout = uml_net_tx_timeout, 363 .ndo_tx_timeout = uml_net_tx_timeout,
376 .ndo_set_mac_address = uml_net_set_mac, 364 .ndo_set_mac_address = eth_mac_addr,
377 .ndo_change_mtu = uml_net_change_mtu, 365 .ndo_change_mtu = uml_net_change_mtu,
378 .ndo_validate_addr = eth_validate_addr, 366 .ndo_validate_addr = eth_validate_addr,
379 }; 367 };
380 368
381 /* 369 /*
382 * Ensures that platform_driver_register is called only once by 370 * Ensures that platform_driver_register is called only once by
383 * eth_configure. Will be set in an initcall. 371 * eth_configure. Will be set in an initcall.
384 */ 372 */
385 static int driver_registered; 373 static int driver_registered;
386 374
387 static void eth_configure(int n, void *init, char *mac, 375 static void eth_configure(int n, void *init, char *mac,
388 struct transport *transport) 376 struct transport *transport)
389 { 377 {
390 struct uml_net *device; 378 struct uml_net *device;
391 struct net_device *dev; 379 struct net_device *dev;
392 struct uml_net_private *lp; 380 struct uml_net_private *lp;
393 int err, size; 381 int err, size;
394 382
395 size = transport->private_size + sizeof(struct uml_net_private); 383 size = transport->private_size + sizeof(struct uml_net_private);
396 384
397 device = kzalloc(sizeof(*device), GFP_KERNEL); 385 device = kzalloc(sizeof(*device), GFP_KERNEL);
398 if (device == NULL) { 386 if (device == NULL) {
399 printk(KERN_ERR "eth_configure failed to allocate struct " 387 printk(KERN_ERR "eth_configure failed to allocate struct "
400 "uml_net\n"); 388 "uml_net\n");
401 return; 389 return;
402 } 390 }
403 391
404 dev = alloc_etherdev(size); 392 dev = alloc_etherdev(size);
405 if (dev == NULL) { 393 if (dev == NULL) {
406 printk(KERN_ERR "eth_configure: failed to allocate struct " 394 printk(KERN_ERR "eth_configure: failed to allocate struct "
407 "net_device for eth%d\n", n); 395 "net_device for eth%d\n", n);
408 goto out_free_device; 396 goto out_free_device;
409 } 397 }
410 398
411 INIT_LIST_HEAD(&device->list); 399 INIT_LIST_HEAD(&device->list);
412 device->index = n; 400 device->index = n;
413 401
414 /* If this name ends up conflicting with an existing registered 402 /* If this name ends up conflicting with an existing registered
415 * netdevice, that is OK, register_netdev{,ice}() will notice this 403 * netdevice, that is OK, register_netdev{,ice}() will notice this
416 * and fail. 404 * and fail.
417 */ 405 */
418 snprintf(dev->name, sizeof(dev->name), "eth%d", n); 406 snprintf(dev->name, sizeof(dev->name), "eth%d", n);
419 407
420 setup_etheraddr(mac, device->mac, dev->name); 408 setup_etheraddr(mac, device->mac, dev->name);
421 409
422 printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac); 410 printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac);
423 411
424 lp = netdev_priv(dev); 412 lp = netdev_priv(dev);
425 /* This points to the transport private data. It's still clear, but we 413 /* This points to the transport private data. It's still clear, but we
426 * must memset it to 0 *now*. Let's help the drivers. */ 414 * must memset it to 0 *now*. Let's help the drivers. */
427 memset(lp, 0, size); 415 memset(lp, 0, size);
428 INIT_WORK(&lp->work, uml_dev_close); 416 INIT_WORK(&lp->work, uml_dev_close);
429 417
430 /* sysfs register */ 418 /* sysfs register */
431 if (!driver_registered) { 419 if (!driver_registered) {
432 platform_driver_register(&uml_net_driver); 420 platform_driver_register(&uml_net_driver);
433 driver_registered = 1; 421 driver_registered = 1;
434 } 422 }
435 device->pdev.id = n; 423 device->pdev.id = n;
436 device->pdev.name = DRIVER_NAME; 424 device->pdev.name = DRIVER_NAME;
437 device->pdev.dev.release = net_device_release; 425 device->pdev.dev.release = net_device_release;
438 dev_set_drvdata(&device->pdev.dev, device); 426 dev_set_drvdata(&device->pdev.dev, device);
439 if (platform_device_register(&device->pdev)) 427 if (platform_device_register(&device->pdev))
440 goto out_free_netdev; 428 goto out_free_netdev;
441 SET_NETDEV_DEV(dev,&device->pdev.dev); 429 SET_NETDEV_DEV(dev,&device->pdev.dev);
442 430
443 device->dev = dev; 431 device->dev = dev;
444 432
445 /* 433 /*
446 * These just fill in a data structure, so there's no failure 434 * These just fill in a data structure, so there's no failure
447 * to be worried about. 435 * to be worried about.
448 */ 436 */
449 (*transport->kern->init)(dev, init); 437 (*transport->kern->init)(dev, init);
450 438
451 *lp = ((struct uml_net_private) 439 *lp = ((struct uml_net_private)
452 { .list = LIST_HEAD_INIT(lp->list), 440 { .list = LIST_HEAD_INIT(lp->list),
453 .dev = dev, 441 .dev = dev,
454 .fd = -1, 442 .fd = -1,
455 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0}, 443 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
456 .max_packet = transport->user->max_packet, 444 .max_packet = transport->user->max_packet,
457 .protocol = transport->kern->protocol, 445 .protocol = transport->kern->protocol,
458 .open = transport->user->open, 446 .open = transport->user->open,
459 .close = transport->user->close, 447 .close = transport->user->close,
460 .remove = transport->user->remove, 448 .remove = transport->user->remove,
461 .read = transport->kern->read, 449 .read = transport->kern->read,
462 .write = transport->kern->write, 450 .write = transport->kern->write,
463 .add_address = transport->user->add_address, 451 .add_address = transport->user->add_address,
464 .delete_address = transport->user->delete_address }); 452 .delete_address = transport->user->delete_address });
465 453
466 init_timer(&lp->tl); 454 init_timer(&lp->tl);
467 spin_lock_init(&lp->lock); 455 spin_lock_init(&lp->lock);
468 lp->tl.function = uml_net_user_timer_expire; 456 lp->tl.function = uml_net_user_timer_expire;
469 memcpy(lp->mac, device->mac, sizeof(lp->mac)); 457 memcpy(lp->mac, device->mac, sizeof(lp->mac));
470 458
471 if ((transport->user->init != NULL) && 459 if ((transport->user->init != NULL) &&
472 ((*transport->user->init)(&lp->user, dev) != 0)) 460 ((*transport->user->init)(&lp->user, dev) != 0))
473 goto out_unregister; 461 goto out_unregister;
474 462
475 eth_mac_addr(dev, device->mac); 463 /* don't use eth_mac_addr, it will not work here */
464 memcpy(dev->dev_addr, device->mac, ETH_ALEN);
476 dev->mtu = transport->user->mtu; 465 dev->mtu = transport->user->mtu;
477 dev->netdev_ops = &uml_netdev_ops; 466 dev->netdev_ops = &uml_netdev_ops;
478 dev->ethtool_ops = &uml_net_ethtool_ops; 467 dev->ethtool_ops = &uml_net_ethtool_ops;
479 dev->watchdog_timeo = (HZ >> 1); 468 dev->watchdog_timeo = (HZ >> 1);
480 dev->irq = UM_ETH_IRQ; 469 dev->irq = UM_ETH_IRQ;
481 470
482 err = update_drop_skb(lp->max_packet); 471 err = update_drop_skb(lp->max_packet);
483 if (err) 472 if (err)
484 goto out_undo_user_init; 473 goto out_undo_user_init;
485 474
486 rtnl_lock(); 475 rtnl_lock();
487 err = register_netdevice(dev); 476 err = register_netdevice(dev);
488 rtnl_unlock(); 477 rtnl_unlock();
489 if (err) 478 if (err)
490 goto out_undo_user_init; 479 goto out_undo_user_init;
491 480
492 spin_lock(&devices_lock); 481 spin_lock(&devices_lock);
493 list_add(&device->list, &devices); 482 list_add(&device->list, &devices);
494 spin_unlock(&devices_lock); 483 spin_unlock(&devices_lock);
495 484
496 return; 485 return;
497 486
498 out_undo_user_init: 487 out_undo_user_init:
499 if (transport->user->remove != NULL) 488 if (transport->user->remove != NULL)
500 (*transport->user->remove)(&lp->user); 489 (*transport->user->remove)(&lp->user);
501 out_unregister: 490 out_unregister:
502 platform_device_unregister(&device->pdev); 491 platform_device_unregister(&device->pdev);
503 return; /* platform_device_unregister frees dev and device */ 492 return; /* platform_device_unregister frees dev and device */
504 out_free_netdev: 493 out_free_netdev:
505 free_netdev(dev); 494 free_netdev(dev);
506 out_free_device: 495 out_free_device:
507 kfree(device); 496 kfree(device);
508 } 497 }
509 498
510 static struct uml_net *find_device(int n) 499 static struct uml_net *find_device(int n)
511 { 500 {
512 struct uml_net *device; 501 struct uml_net *device;
513 struct list_head *ele; 502 struct list_head *ele;
514 503
515 spin_lock(&devices_lock); 504 spin_lock(&devices_lock);
516 list_for_each(ele, &devices) { 505 list_for_each(ele, &devices) {
517 device = list_entry(ele, struct uml_net, list); 506 device = list_entry(ele, struct uml_net, list);
518 if (device->index == n) 507 if (device->index == n)
519 goto out; 508 goto out;
520 } 509 }
521 device = NULL; 510 device = NULL;
522 out: 511 out:
523 spin_unlock(&devices_lock); 512 spin_unlock(&devices_lock);
524 return device; 513 return device;
525 } 514 }
526 515
527 static int eth_parse(char *str, int *index_out, char **str_out, 516 static int eth_parse(char *str, int *index_out, char **str_out,
528 char **error_out) 517 char **error_out)
529 { 518 {
530 char *end; 519 char *end;
531 int n, err = -EINVAL; 520 int n, err = -EINVAL;
532 521
533 n = simple_strtoul(str, &end, 0); 522 n = simple_strtoul(str, &end, 0);
534 if (end == str) { 523 if (end == str) {
535 *error_out = "Bad device number"; 524 *error_out = "Bad device number";
536 return err; 525 return err;
537 } 526 }
538 527
539 str = end; 528 str = end;
540 if (*str != '=') { 529 if (*str != '=') {
541 *error_out = "Expected '=' after device number"; 530 *error_out = "Expected '=' after device number";
542 return err; 531 return err;
543 } 532 }
544 533
545 str++; 534 str++;
546 if (find_device(n)) { 535 if (find_device(n)) {
547 *error_out = "Device already configured"; 536 *error_out = "Device already configured";
548 return err; 537 return err;
549 } 538 }
550 539
551 *index_out = n; 540 *index_out = n;
552 *str_out = str; 541 *str_out = str;
553 return 0; 542 return 0;
554 } 543 }
555 544
556 struct eth_init { 545 struct eth_init {
557 struct list_head list; 546 struct list_head list;
558 char *init; 547 char *init;
559 int index; 548 int index;
560 }; 549 };
561 550
562 static DEFINE_SPINLOCK(transports_lock); 551 static DEFINE_SPINLOCK(transports_lock);
563 static LIST_HEAD(transports); 552 static LIST_HEAD(transports);
564 553
565 /* Filled in during early boot */ 554 /* Filled in during early boot */
566 static LIST_HEAD(eth_cmd_line); 555 static LIST_HEAD(eth_cmd_line);
567 556
568 static int check_transport(struct transport *transport, char *eth, int n, 557 static int check_transport(struct transport *transport, char *eth, int n,
569 void **init_out, char **mac_out) 558 void **init_out, char **mac_out)
570 { 559 {
571 int len; 560 int len;
572 561
573 len = strlen(transport->name); 562 len = strlen(transport->name);
574 if (strncmp(eth, transport->name, len)) 563 if (strncmp(eth, transport->name, len))
575 return 0; 564 return 0;
576 565
577 eth += len; 566 eth += len;
578 if (*eth == ',') 567 if (*eth == ',')
579 eth++; 568 eth++;
580 else if (*eth != '\0') 569 else if (*eth != '\0')
581 return 0; 570 return 0;
582 571
583 *init_out = kmalloc(transport->setup_size, GFP_KERNEL); 572 *init_out = kmalloc(transport->setup_size, GFP_KERNEL);
584 if (*init_out == NULL) 573 if (*init_out == NULL)
585 return 1; 574 return 1;
586 575
587 if (!transport->setup(eth, mac_out, *init_out)) { 576 if (!transport->setup(eth, mac_out, *init_out)) {
588 kfree(*init_out); 577 kfree(*init_out);
589 *init_out = NULL; 578 *init_out = NULL;
590 } 579 }
591 return 1; 580 return 1;
592 } 581 }
593 582
594 void register_transport(struct transport *new) 583 void register_transport(struct transport *new)
595 { 584 {
596 struct list_head *ele, *next; 585 struct list_head *ele, *next;
597 struct eth_init *eth; 586 struct eth_init *eth;
598 void *init; 587 void *init;
599 char *mac = NULL; 588 char *mac = NULL;
600 int match; 589 int match;
601 590
602 spin_lock(&transports_lock); 591 spin_lock(&transports_lock);
603 BUG_ON(!list_empty(&new->list)); 592 BUG_ON(!list_empty(&new->list));
604 list_add(&new->list, &transports); 593 list_add(&new->list, &transports);
605 spin_unlock(&transports_lock); 594 spin_unlock(&transports_lock);
606 595
607 list_for_each_safe(ele, next, &eth_cmd_line) { 596 list_for_each_safe(ele, next, &eth_cmd_line) {
608 eth = list_entry(ele, struct eth_init, list); 597 eth = list_entry(ele, struct eth_init, list);
609 match = check_transport(new, eth->init, eth->index, &init, 598 match = check_transport(new, eth->init, eth->index, &init,
610 &mac); 599 &mac);
611 if (!match) 600 if (!match)
612 continue; 601 continue;
613 else if (init != NULL) { 602 else if (init != NULL) {
614 eth_configure(eth->index, init, mac, new); 603 eth_configure(eth->index, init, mac, new);
615 kfree(init); 604 kfree(init);
616 } 605 }
617 list_del(&eth->list); 606 list_del(&eth->list);
618 } 607 }
619 } 608 }
620 609
621 static int eth_setup_common(char *str, int index) 610 static int eth_setup_common(char *str, int index)
622 { 611 {
623 struct list_head *ele; 612 struct list_head *ele;
624 struct transport *transport; 613 struct transport *transport;
625 void *init; 614 void *init;
626 char *mac = NULL; 615 char *mac = NULL;
627 int found = 0; 616 int found = 0;
628 617
629 spin_lock(&transports_lock); 618 spin_lock(&transports_lock);
630 list_for_each(ele, &transports) { 619 list_for_each(ele, &transports) {
631 transport = list_entry(ele, struct transport, list); 620 transport = list_entry(ele, struct transport, list);
632 if (!check_transport(transport, str, index, &init, &mac)) 621 if (!check_transport(transport, str, index, &init, &mac))
633 continue; 622 continue;
634 if (init != NULL) { 623 if (init != NULL) {
635 eth_configure(index, init, mac, transport); 624 eth_configure(index, init, mac, transport);
636 kfree(init); 625 kfree(init);
637 } 626 }
638 found = 1; 627 found = 1;
639 break; 628 break;
640 } 629 }
641 630
642 spin_unlock(&transports_lock); 631 spin_unlock(&transports_lock);
643 return found; 632 return found;
644 } 633 }
645 634
646 static int __init eth_setup(char *str) 635 static int __init eth_setup(char *str)
647 { 636 {
648 struct eth_init *new; 637 struct eth_init *new;
649 char *error; 638 char *error;
650 int n, err; 639 int n, err;
651 640
652 err = eth_parse(str, &n, &str, &error); 641 err = eth_parse(str, &n, &str, &error);
653 if (err) { 642 if (err) {
654 printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n", 643 printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
655 str, error); 644 str, error);
656 return 1; 645 return 1;
657 } 646 }
658 647
659 new = alloc_bootmem(sizeof(*new)); 648 new = alloc_bootmem(sizeof(*new));
660 if (new == NULL) { 649 if (new == NULL) {
661 printk(KERN_ERR "eth_init : alloc_bootmem failed\n"); 650 printk(KERN_ERR "eth_init : alloc_bootmem failed\n");
662 return 1; 651 return 1;
663 } 652 }
664 653
665 INIT_LIST_HEAD(&new->list); 654 INIT_LIST_HEAD(&new->list);
666 new->index = n; 655 new->index = n;
667 new->init = str; 656 new->init = str;
668 657
669 list_add_tail(&new->list, &eth_cmd_line); 658 list_add_tail(&new->list, &eth_cmd_line);
670 return 1; 659 return 1;
671 } 660 }
672 661
673 __setup("eth", eth_setup); 662 __setup("eth", eth_setup);
674 __uml_help(eth_setup, 663 __uml_help(eth_setup,
675 "eth[0-9]+=<transport>,<options>\n" 664 "eth[0-9]+=<transport>,<options>\n"
676 " Configure a network device.\n\n" 665 " Configure a network device.\n\n"
677 ); 666 );
678 667
679 static int net_config(char *str, char **error_out) 668 static int net_config(char *str, char **error_out)
680 { 669 {
681 int n, err; 670 int n, err;
682 671
683 err = eth_parse(str, &n, &str, error_out); 672 err = eth_parse(str, &n, &str, error_out);
684 if (err) 673 if (err)
685 return err; 674 return err;
686 675
687 /* This string is broken up and the pieces used by the underlying 676 /* This string is broken up and the pieces used by the underlying
688 * driver. So, it is freed only if eth_setup_common fails. 677 * driver. So, it is freed only if eth_setup_common fails.
689 */ 678 */
690 str = kstrdup(str, GFP_KERNEL); 679 str = kstrdup(str, GFP_KERNEL);
691 if (str == NULL) { 680 if (str == NULL) {
692 *error_out = "net_config failed to strdup string"; 681 *error_out = "net_config failed to strdup string";
693 return -ENOMEM; 682 return -ENOMEM;
694 } 683 }
695 err = !eth_setup_common(str, n); 684 err = !eth_setup_common(str, n);
696 if (err) 685 if (err)
697 kfree(str); 686 kfree(str);
698 return err; 687 return err;
699 } 688 }
700 689
701 static int net_id(char **str, int *start_out, int *end_out) 690 static int net_id(char **str, int *start_out, int *end_out)
702 { 691 {
703 char *end; 692 char *end;
704 int n; 693 int n;
705 694
706 n = simple_strtoul(*str, &end, 0); 695 n = simple_strtoul(*str, &end, 0);
707 if ((*end != '\0') || (end == *str)) 696 if ((*end != '\0') || (end == *str))
708 return -1; 697 return -1;
709 698
710 *start_out = n; 699 *start_out = n;
711 *end_out = n; 700 *end_out = n;
712 *str = end; 701 *str = end;
713 return n; 702 return n;
714 } 703 }
715 704
716 static int net_remove(int n, char **error_out) 705 static int net_remove(int n, char **error_out)
717 { 706 {
718 struct uml_net *device; 707 struct uml_net *device;
719 struct net_device *dev; 708 struct net_device *dev;
720 struct uml_net_private *lp; 709 struct uml_net_private *lp;
721 710
722 device = find_device(n); 711 device = find_device(n);
723 if (device == NULL) 712 if (device == NULL)
724 return -ENODEV; 713 return -ENODEV;
725 714
726 dev = device->dev; 715 dev = device->dev;
727 lp = netdev_priv(dev); 716 lp = netdev_priv(dev);
728 if (lp->fd > 0) 717 if (lp->fd > 0)
729 return -EBUSY; 718 return -EBUSY;
730 unregister_netdev(dev); 719 unregister_netdev(dev);
731 platform_device_unregister(&device->pdev); 720 platform_device_unregister(&device->pdev);
732 721
733 return 0; 722 return 0;
734 } 723 }
735 724
736 static struct mc_device net_mc = { 725 static struct mc_device net_mc = {
737 .list = LIST_HEAD_INIT(net_mc.list), 726 .list = LIST_HEAD_INIT(net_mc.list),
738 .name = "eth", 727 .name = "eth",
739 .config = net_config, 728 .config = net_config,
740 .get_config = NULL, 729 .get_config = NULL,
741 .id = net_id, 730 .id = net_id,
742 .remove = net_remove, 731 .remove = net_remove,
743 }; 732 };
744 733
745 #ifdef CONFIG_INET 734 #ifdef CONFIG_INET
746 static int uml_inetaddr_event(struct notifier_block *this, unsigned long event, 735 static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
747 void *ptr) 736 void *ptr)
748 { 737 {
749 struct in_ifaddr *ifa = ptr; 738 struct in_ifaddr *ifa = ptr;
750 struct net_device *dev = ifa->ifa_dev->dev; 739 struct net_device *dev = ifa->ifa_dev->dev;
751 struct uml_net_private *lp; 740 struct uml_net_private *lp;
752 void (*proc)(unsigned char *, unsigned char *, void *); 741 void (*proc)(unsigned char *, unsigned char *, void *);
753 unsigned char addr_buf[4], netmask_buf[4]; 742 unsigned char addr_buf[4], netmask_buf[4];
754 743
755 if (dev->netdev_ops->ndo_open != uml_net_open) 744 if (dev->netdev_ops->ndo_open != uml_net_open)
756 return NOTIFY_DONE; 745 return NOTIFY_DONE;
757 746
758 lp = netdev_priv(dev); 747 lp = netdev_priv(dev);
759 748
760 proc = NULL; 749 proc = NULL;
761 switch (event) { 750 switch (event) {
762 case NETDEV_UP: 751 case NETDEV_UP:
763 proc = lp->add_address; 752 proc = lp->add_address;
764 break; 753 break;
765 case NETDEV_DOWN: 754 case NETDEV_DOWN:
766 proc = lp->delete_address; 755 proc = lp->delete_address;
767 break; 756 break;
768 } 757 }
769 if (proc != NULL) { 758 if (proc != NULL) {
770 memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf)); 759 memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
771 memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf)); 760 memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
772 (*proc)(addr_buf, netmask_buf, &lp->user); 761 (*proc)(addr_buf, netmask_buf, &lp->user);
773 } 762 }
774 return NOTIFY_DONE; 763 return NOTIFY_DONE;
775 } 764 }
776 765
777 /* uml_net_init shouldn't be called twice on two CPUs at the same time */ 766 /* uml_net_init shouldn't be called twice on two CPUs at the same time */
778 static struct notifier_block uml_inetaddr_notifier = { 767 static struct notifier_block uml_inetaddr_notifier = {
779 .notifier_call = uml_inetaddr_event, 768 .notifier_call = uml_inetaddr_event,
780 }; 769 };
781 770
782 static void inet_register(void) 771 static void inet_register(void)
783 { 772 {
784 struct list_head *ele; 773 struct list_head *ele;
785 struct uml_net_private *lp; 774 struct uml_net_private *lp;
786 struct in_device *ip; 775 struct in_device *ip;
787 struct in_ifaddr *in; 776 struct in_ifaddr *in;
788 777
789 register_inetaddr_notifier(&uml_inetaddr_notifier); 778 register_inetaddr_notifier(&uml_inetaddr_notifier);
790 779
791 /* Devices may have been opened already, so the uml_inetaddr_notifier 780 /* Devices may have been opened already, so the uml_inetaddr_notifier
792 * didn't get a chance to run for them. This fakes it so that 781 * didn't get a chance to run for them. This fakes it so that
793 * addresses which have already been set up get handled properly. 782 * addresses which have already been set up get handled properly.
794 */ 783 */
795 spin_lock(&opened_lock); 784 spin_lock(&opened_lock);
796 list_for_each(ele, &opened) { 785 list_for_each(ele, &opened) {
797 lp = list_entry(ele, struct uml_net_private, list); 786 lp = list_entry(ele, struct uml_net_private, list);
798 ip = lp->dev->ip_ptr; 787 ip = lp->dev->ip_ptr;
799 if (ip == NULL) 788 if (ip == NULL)
800 continue; 789 continue;
801 in = ip->ifa_list; 790 in = ip->ifa_list;
802 while (in != NULL) { 791 while (in != NULL) {
803 uml_inetaddr_event(NULL, NETDEV_UP, in); 792 uml_inetaddr_event(NULL, NETDEV_UP, in);
804 in = in->ifa_next; 793 in = in->ifa_next;
805 } 794 }
806 } 795 }
807 spin_unlock(&opened_lock); 796 spin_unlock(&opened_lock);
808 } 797 }
809 #else 798 #else
810 static inline void inet_register(void) 799 static inline void inet_register(void)
811 { 800 {
812 } 801 }
813 #endif 802 #endif
814 803
815 static int uml_net_init(void) 804 static int uml_net_init(void)
816 { 805 {
817 mconsole_register_dev(&net_mc); 806 mconsole_register_dev(&net_mc);
818 inet_register(); 807 inet_register();
819 return 0; 808 return 0;
820 } 809 }
821 810
822 __initcall(uml_net_init); 811 __initcall(uml_net_init);
823 812
824 static void close_devices(void) 813 static void close_devices(void)
825 { 814 {
826 struct list_head *ele; 815 struct list_head *ele;
827 struct uml_net_private *lp; 816 struct uml_net_private *lp;
828 817
829 spin_lock(&opened_lock); 818 spin_lock(&opened_lock);
830 list_for_each(ele, &opened) { 819 list_for_each(ele, &opened) {
831 lp = list_entry(ele, struct uml_net_private, list); 820 lp = list_entry(ele, struct uml_net_private, list);
832 free_irq(lp->dev->irq, lp->dev); 821 free_irq(lp->dev->irq, lp->dev);
833 if ((lp->close != NULL) && (lp->fd >= 0)) 822 if ((lp->close != NULL) && (lp->fd >= 0))
834 (*lp->close)(lp->fd, &lp->user); 823 (*lp->close)(lp->fd, &lp->user);
835 if (lp->remove != NULL) 824 if (lp->remove != NULL)
836 (*lp->remove)(&lp->user); 825 (*lp->remove)(&lp->user);
837 } 826 }
838 spin_unlock(&opened_lock); 827 spin_unlock(&opened_lock);
839 } 828 }
840 829
841 __uml_exitcall(close_devices); 830 __uml_exitcall(close_devices);
842 831
843 void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *, 832 void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
844 void *), 833 void *),
845 void *arg) 834 void *arg)
846 { 835 {
847 struct net_device *dev = d; 836 struct net_device *dev = d;
848 struct in_device *ip = dev->ip_ptr; 837 struct in_device *ip = dev->ip_ptr;
849 struct in_ifaddr *in; 838 struct in_ifaddr *in;
850 unsigned char address[4], netmask[4]; 839 unsigned char address[4], netmask[4];
851 840
852 if (ip == NULL) return; 841 if (ip == NULL) return;
853 in = ip->ifa_list; 842 in = ip->ifa_list;
854 while (in != NULL) { 843 while (in != NULL) {
855 memcpy(address, &in->ifa_address, sizeof(address)); 844 memcpy(address, &in->ifa_address, sizeof(address));
856 memcpy(netmask, &in->ifa_mask, sizeof(netmask)); 845 memcpy(netmask, &in->ifa_mask, sizeof(netmask));
857 (*cb)(address, netmask, arg); 846 (*cb)(address, netmask, arg);
858 in = in->ifa_next; 847 in = in->ifa_next;
859 } 848 }
860 } 849 }
861 850
862 int dev_netmask(void *d, void *m) 851 int dev_netmask(void *d, void *m)
863 { 852 {
864 struct net_device *dev = d; 853 struct net_device *dev = d;
865 struct in_device *ip = dev->ip_ptr; 854 struct in_device *ip = dev->ip_ptr;
866 struct in_ifaddr *in; 855 struct in_ifaddr *in;
867 __be32 *mask_out = m; 856 __be32 *mask_out = m;
868 857
869 if (ip == NULL) 858 if (ip == NULL)
870 return 1; 859 return 1;
871 860
872 in = ip->ifa_list; 861 in = ip->ifa_list;
873 if (in == NULL) 862 if (in == NULL)
874 return 1; 863 return 1;
875 864
876 *mask_out = in->ifa_mask; 865 *mask_out = in->ifa_mask;
877 return 0; 866 return 0;
878 } 867 }
879 868
880 void *get_output_buffer(int *len_out) 869 void *get_output_buffer(int *len_out)
881 { 870 {
882 void *ret; 871 void *ret;
883 872
884 ret = (void *) __get_free_pages(GFP_KERNEL, 0); 873 ret = (void *) __get_free_pages(GFP_KERNEL, 0);
885 if (ret) *len_out = PAGE_SIZE; 874 if (ret) *len_out = PAGE_SIZE;
886 else *len_out = 0; 875 else *len_out = 0;
887 return ret; 876 return ret;
888 } 877 }
889 878
890 void free_output_buffer(void *buffer) 879 void free_output_buffer(void *buffer)
891 { 880 {
892 free_pages((unsigned long) buffer, 0); 881 free_pages((unsigned long) buffer, 0);
893 } 882 }
894 883
895 int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out, 884 int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
896 char **gate_addr) 885 char **gate_addr)
897 { 886 {
898 char *remain; 887 char *remain;
899 888
900 remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL); 889 remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
901 if (remain != NULL) { 890 if (remain != NULL) {
902 printk(KERN_ERR "tap_setup_common - Extra garbage on " 891 printk(KERN_ERR "tap_setup_common - Extra garbage on "
903 "specification : '%s'\n", remain); 892 "specification : '%s'\n", remain);
904 return 1; 893 return 1;
905 } 894 }
906 895
907 return 0; 896 return 0;
908 } 897 }
909 898
910 unsigned short eth_protocol(struct sk_buff *skb) 899 unsigned short eth_protocol(struct sk_buff *skb)
911 { 900 {
912 return eth_type_trans(skb, skb->dev); 901 return eth_type_trans(skb, skb->dev);
913 } 902 }
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
1 /****************************************************************************** 1 /******************************************************************************
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, but 11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of 12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details. 14 * General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 #include <linux/etherdevice.h> 29 #include <linux/etherdevice.h>
30 #include <linux/kernel.h> 30 #include <linux/kernel.h>
31 #include <linux/module.h> 31 #include <linux/module.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/sched.h> 33 #include <linux/sched.h>
34 34
35 #include "iwl-dev.h" 35 #include "iwl-dev.h"
36 #include "iwl-core.h" 36 #include "iwl-core.h"
37 #include "iwl-io.h" 37 #include "iwl-io.h"
38 #include "iwl-helpers.h" 38 #include "iwl-helpers.h"
39 #include "iwl-agn-hw.h" 39 #include "iwl-agn-hw.h"
40 #include "iwl-agn.h" 40 #include "iwl-agn.h"
41 #include "iwl-sta.h" 41 #include "iwl-sta.h"
42 42
43 static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) 43 static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
44 { 44 {
45 return le32_to_cpup((__le32 *)&tx_resp->status + 45 return le32_to_cpup((__le32 *)&tx_resp->status +
46 tx_resp->frame_count) & MAX_SN; 46 tx_resp->frame_count) & MAX_SN;
47 } 47 }
48 48
49 static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, 49 static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
50 struct iwl_ht_agg *agg, 50 struct iwl_ht_agg *agg,
51 struct iwl5000_tx_resp *tx_resp, 51 struct iwl5000_tx_resp *tx_resp,
52 int txq_id, u16 start_idx) 52 int txq_id, u16 start_idx)
53 { 53 {
54 u16 status; 54 u16 status;
55 struct agg_tx_status *frame_status = &tx_resp->status; 55 struct agg_tx_status *frame_status = &tx_resp->status;
56 struct ieee80211_tx_info *info = NULL; 56 struct ieee80211_tx_info *info = NULL;
57 struct ieee80211_hdr *hdr = NULL; 57 struct ieee80211_hdr *hdr = NULL;
58 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); 58 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
59 int i, sh, idx; 59 int i, sh, idx;
60 u16 seq; 60 u16 seq;
61 61
62 if (agg->wait_for_ba) 62 if (agg->wait_for_ba)
63 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); 63 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
64 64
65 agg->frame_count = tx_resp->frame_count; 65 agg->frame_count = tx_resp->frame_count;
66 agg->start_idx = start_idx; 66 agg->start_idx = start_idx;
67 agg->rate_n_flags = rate_n_flags; 67 agg->rate_n_flags = rate_n_flags;
68 agg->bitmap = 0; 68 agg->bitmap = 0;
69 69
70 /* # frames attempted by Tx command */ 70 /* # frames attempted by Tx command */
71 if (agg->frame_count == 1) { 71 if (agg->frame_count == 1) {
72 /* Only one frame was attempted; no block-ack will arrive */ 72 /* Only one frame was attempted; no block-ack will arrive */
73 status = le16_to_cpu(frame_status[0].status); 73 status = le16_to_cpu(frame_status[0].status);
74 idx = start_idx; 74 idx = start_idx;
75 75
76 /* FIXME: code repetition */ 76 /* FIXME: code repetition */
77 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 77 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
78 agg->frame_count, agg->start_idx, idx); 78 agg->frame_count, agg->start_idx, idx);
79 79
80 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); 80 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
81 info->status.rates[0].count = tx_resp->failure_frame + 1; 81 info->status.rates[0].count = tx_resp->failure_frame + 1;
82 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 82 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
83 info->flags |= iwl_tx_status_to_mac80211(status); 83 info->flags |= iwl_tx_status_to_mac80211(status);
84 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); 84 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
85 85
86 /* FIXME: code repetition end */ 86 /* FIXME: code repetition end */
87 87
88 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", 88 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
89 status & 0xff, tx_resp->failure_frame); 89 status & 0xff, tx_resp->failure_frame);
90 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); 90 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
91 91
92 agg->wait_for_ba = 0; 92 agg->wait_for_ba = 0;
93 } else { 93 } else {
94 /* Two or more frames were attempted; expect block-ack */ 94 /* Two or more frames were attempted; expect block-ack */
95 u64 bitmap = 0; 95 u64 bitmap = 0;
96 96
97 /* 97 /*
98 * Start is the lowest frame sent. It may not be the first 98 * Start is the lowest frame sent. It may not be the first
99 * frame in the batch; we figure this out dynamically during 99 * frame in the batch; we figure this out dynamically during
100 * the following loop. 100 * the following loop.
101 */ 101 */
102 int start = agg->start_idx; 102 int start = agg->start_idx;
103 103
104 /* Construct bit-map of pending frames within Tx window */ 104 /* Construct bit-map of pending frames within Tx window */
105 for (i = 0; i < agg->frame_count; i++) { 105 for (i = 0; i < agg->frame_count; i++) {
106 u16 sc; 106 u16 sc;
107 status = le16_to_cpu(frame_status[i].status); 107 status = le16_to_cpu(frame_status[i].status);
108 seq = le16_to_cpu(frame_status[i].sequence); 108 seq = le16_to_cpu(frame_status[i].sequence);
109 idx = SEQ_TO_INDEX(seq); 109 idx = SEQ_TO_INDEX(seq);
110 txq_id = SEQ_TO_QUEUE(seq); 110 txq_id = SEQ_TO_QUEUE(seq);
111 111
112 if (status & (AGG_TX_STATE_FEW_BYTES_MSK | 112 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
113 AGG_TX_STATE_ABORT_MSK)) 113 AGG_TX_STATE_ABORT_MSK))
114 continue; 114 continue;
115 115
116 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", 116 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
117 agg->frame_count, txq_id, idx); 117 agg->frame_count, txq_id, idx);
118 118
119 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 119 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
120 if (!hdr) { 120 if (!hdr) {
121 IWL_ERR(priv, 121 IWL_ERR(priv,
122 "BUG_ON idx doesn't point to valid skb" 122 "BUG_ON idx doesn't point to valid skb"
123 " idx=%d, txq_id=%d\n", idx, txq_id); 123 " idx=%d, txq_id=%d\n", idx, txq_id);
124 return -1; 124 return -1;
125 } 125 }
126 126
127 sc = le16_to_cpu(hdr->seq_ctrl); 127 sc = le16_to_cpu(hdr->seq_ctrl);
128 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 128 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
129 IWL_ERR(priv, 129 IWL_ERR(priv,
130 "BUG_ON idx doesn't match seq control" 130 "BUG_ON idx doesn't match seq control"
131 " idx=%d, seq_idx=%d, seq=%d\n", 131 " idx=%d, seq_idx=%d, seq=%d\n",
132 idx, SEQ_TO_SN(sc), 132 idx, SEQ_TO_SN(sc),
133 hdr->seq_ctrl); 133 hdr->seq_ctrl);
134 return -1; 134 return -1;
135 } 135 }
136 136
137 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", 137 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
138 i, idx, SEQ_TO_SN(sc)); 138 i, idx, SEQ_TO_SN(sc));
139 139
140 /* 140 /*
141 * sh -> how many frames ahead of the starting frame is 141 * sh -> how many frames ahead of the starting frame is
142 * the current one? 142 * the current one?
143 * 143 *
144 * Note that all frames sent in the batch must be in a 144 * Note that all frames sent in the batch must be in a
145 * 64-frame window, so this number should be in [0,63]. 145 * 64-frame window, so this number should be in [0,63].
146 * If outside of this window, then we've found a new 146 * If outside of this window, then we've found a new
147 * "first" frame in the batch and need to change start. 147 * "first" frame in the batch and need to change start.
148 */ 148 */
149 sh = idx - start; 149 sh = idx - start;
150 150
151 /* 151 /*
152 * If >= 64, out of window. start must be at the front 152 * If >= 64, out of window. start must be at the front
153 * of the circular buffer, idx must be near the end of 153 * of the circular buffer, idx must be near the end of
154 * the buffer, and idx is the new "first" frame. Shift 154 * the buffer, and idx is the new "first" frame. Shift
155 * the indices around. 155 * the indices around.
156 */ 156 */
157 if (sh >= 64) { 157 if (sh >= 64) {
158 /* Shift bitmap by start - idx, wrapped */ 158 /* Shift bitmap by start - idx, wrapped */
159 sh = 0x100 - idx + start; 159 sh = 0x100 - idx + start;
160 bitmap = bitmap << sh; 160 bitmap = bitmap << sh;
161 /* Now idx is the new start so sh = 0 */ 161 /* Now idx is the new start so sh = 0 */
162 sh = 0; 162 sh = 0;
163 start = idx; 163 start = idx;
164 /* 164 /*
165 * If <= -64 then wraps the 256-pkt circular buffer 165 * If <= -64 then wraps the 256-pkt circular buffer
166 * (e.g., start = 255 and idx = 0, sh should be 1) 166 * (e.g., start = 255 and idx = 0, sh should be 1)
167 */ 167 */
168 } else if (sh <= -64) { 168 } else if (sh <= -64) {
169 sh = 0x100 - start + idx; 169 sh = 0x100 - start + idx;
170 /* 170 /*
171 * If < 0 but > -64, out of window. idx is before start 171 * If < 0 but > -64, out of window. idx is before start
172 * but not wrapped. Shift the indices around. 172 * but not wrapped. Shift the indices around.
173 */ 173 */
174 } else if (sh < 0) { 174 } else if (sh < 0) {
175 /* Shift by how far start is ahead of idx */ 175 /* Shift by how far start is ahead of idx */
176 sh = start - idx; 176 sh = start - idx;
177 bitmap = bitmap << sh; 177 bitmap = bitmap << sh;
178 /* Now idx is the new start so sh = 0 */ 178 /* Now idx is the new start so sh = 0 */
179 start = idx; 179 start = idx;
180 sh = 0; 180 sh = 0;
181 } 181 }
182 /* Sequence number start + sh was sent in this batch */ 182 /* Sequence number start + sh was sent in this batch */
183 bitmap |= 1ULL << sh; 183 bitmap |= 1ULL << sh;
184 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", 184 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
185 start, (unsigned long long)bitmap); 185 start, (unsigned long long)bitmap);
186 } 186 }
187 187
188 /* 188 /*
189 * Store the bitmap and possibly the new start, if we wrapped 189 * Store the bitmap and possibly the new start, if we wrapped
190 * the buffer above 190 * the buffer above
191 */ 191 */
192 agg->bitmap = bitmap; 192 agg->bitmap = bitmap;
193 agg->start_idx = start; 193 agg->start_idx = start;
194 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", 194 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
195 agg->frame_count, agg->start_idx, 195 agg->frame_count, agg->start_idx,
196 (unsigned long long)agg->bitmap); 196 (unsigned long long)agg->bitmap);
197 197
198 if (bitmap) 198 if (bitmap)
199 agg->wait_for_ba = 1; 199 agg->wait_for_ba = 1;
200 } 200 }
201 return 0; 201 return 0;
202 } 202 }
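The start/idx/bitmap bookkeeping in the loop above is easier to see with concrete numbers. Below is a minimal standalone sketch (plain userspace C, not driver code; the inputs are invented) of the same arithmetic: indices live in a 256-entry ring, one batch must fit in a 64-frame window, and the bitmap records each frame's offset from the current "start" frame, including the wrap case from the comment where start = 255 and idx = 0 gives sh = 1.

#include <stdio.h>
#include <stdint.h>

static void track_frame(int *start, uint64_t *bitmap, int idx)
{
	int sh = idx - *start;

	if (sh >= 64) {
		/* idx is near the end of the ring, start near the front:
		 * idx becomes the new first frame of the batch */
		sh = 0x100 - idx + *start;
		*bitmap <<= sh;
		*start = idx;
		sh = 0;
	} else if (sh <= -64) {
		/* idx wrapped the 256-entry ring past start
		 * (e.g. start = 255, idx = 0 -> sh = 1) */
		sh = 0x100 - *start + idx;
	} else if (sh < 0) {
		/* idx is before start but not wrapped: rebase on idx */
		sh = *start - idx;
		*bitmap <<= sh;
		*start = idx;
		sh = 0;
	}
	*bitmap |= 1ULL << sh;	/* frame start + sh was sent in this batch */
}

int main(void)
{
	int start = 255;
	uint64_t bitmap = 0;

	track_frame(&start, &bitmap, 255);	/* sh = 0 */
	track_frame(&start, &bitmap, 0);	/* wrap: sh = 1 */
	printf("start=%d bitmap=0x%llx\n",	/* prints start=255 bitmap=0x3 */
	       start, (unsigned long long)bitmap);
	return 0;
}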
203 203
204 void iwl_check_abort_status(struct iwl_priv *priv, 204 void iwl_check_abort_status(struct iwl_priv *priv,
205 u8 frame_count, u32 status) 205 u8 frame_count, u32 status)
206 { 206 {
207 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { 207 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
208 IWL_ERR(priv, "Tx flush command to flush out all frames\n"); 208 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
209 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 209 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
210 queue_work(priv->workqueue, &priv->tx_flush); 210 queue_work(priv->workqueue, &priv->tx_flush);
211 } 211 }
212 } 212 }
213 213
214 static void iwlagn_rx_reply_tx(struct iwl_priv *priv, 214 static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
215 struct iwl_rx_mem_buffer *rxb) 215 struct iwl_rx_mem_buffer *rxb)
216 { 216 {
217 struct iwl_rx_packet *pkt = rxb_addr(rxb); 217 struct iwl_rx_packet *pkt = rxb_addr(rxb);
218 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 218 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
219 int txq_id = SEQ_TO_QUEUE(sequence); 219 int txq_id = SEQ_TO_QUEUE(sequence);
220 int index = SEQ_TO_INDEX(sequence); 220 int index = SEQ_TO_INDEX(sequence);
221 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 221 struct iwl_tx_queue *txq = &priv->txq[txq_id];
222 struct ieee80211_tx_info *info; 222 struct ieee80211_tx_info *info;
223 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 223 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
224 u32 status = le16_to_cpu(tx_resp->status.status); 224 u32 status = le16_to_cpu(tx_resp->status.status);
225 int tid; 225 int tid;
226 int sta_id; 226 int sta_id;
227 int freed; 227 int freed;
228 unsigned long flags; 228 unsigned long flags;
229 229
230 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 230 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
231 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " 231 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
232 "is out of range [0-%d] %d %d\n", txq_id, 232 "is out of range [0-%d] %d %d\n", txq_id,
233 index, txq->q.n_bd, txq->q.write_ptr, 233 index, txq->q.n_bd, txq->q.write_ptr,
234 txq->q.read_ptr); 234 txq->q.read_ptr);
235 return; 235 return;
236 } 236 }
237 237
238 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 238 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
239 memset(&info->status, 0, sizeof(info->status)); 239 memset(&info->status, 0, sizeof(info->status));
240 240
241 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; 241 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
242 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; 242 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
243 243
244 spin_lock_irqsave(&priv->sta_lock, flags); 244 spin_lock_irqsave(&priv->sta_lock, flags);
245 if (txq->sched_retry) { 245 if (txq->sched_retry) {
246 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp); 246 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
247 struct iwl_ht_agg *agg; 247 struct iwl_ht_agg *agg;
248 248
249 agg = &priv->stations[sta_id].tid[tid].agg; 249 agg = &priv->stations[sta_id].tid[tid].agg;
250 250
251 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); 251 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
252 252
253 /* check if BAR is needed */ 253 /* check if BAR is needed */
254 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) 254 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
255 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 255 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
256 256
257 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 257 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
258 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 258 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
259 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim " 259 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
260 "scd_ssn=%d idx=%d txq=%d swq=%d\n", 260 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
261 scd_ssn , index, txq_id, txq->swq_id); 261 scd_ssn , index, txq_id, txq->swq_id);
262 262
263 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 263 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
264 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 264 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
265 265
266 if (priv->mac80211_registered && 266 if (priv->mac80211_registered &&
267 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 267 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
268 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 268 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
269 if (agg->state == IWL_AGG_OFF) 269 if (agg->state == IWL_AGG_OFF)
270 iwl_wake_queue(priv, txq_id); 270 iwl_wake_queue(priv, txq_id);
271 else 271 else
272 iwl_wake_queue(priv, txq->swq_id); 272 iwl_wake_queue(priv, txq->swq_id);
273 } 273 }
274 } 274 }
275 } else { 275 } else {
276 BUG_ON(txq_id != txq->swq_id); 276 BUG_ON(txq_id != txq->swq_id);
277 277
278 info->status.rates[0].count = tx_resp->failure_frame + 1; 278 info->status.rates[0].count = tx_resp->failure_frame + 1;
279 info->flags |= iwl_tx_status_to_mac80211(status); 279 info->flags |= iwl_tx_status_to_mac80211(status);
280 iwlagn_hwrate_to_tx_control(priv, 280 iwlagn_hwrate_to_tx_control(priv,
281 le32_to_cpu(tx_resp->rate_n_flags), 281 le32_to_cpu(tx_resp->rate_n_flags),
282 info); 282 info);
283 283
284 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags " 284 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
285 "0x%x retries %d\n", 285 "0x%x retries %d\n",
286 txq_id, 286 txq_id,
287 iwl_get_tx_fail_reason(status), status, 287 iwl_get_tx_fail_reason(status), status,
288 le32_to_cpu(tx_resp->rate_n_flags), 288 le32_to_cpu(tx_resp->rate_n_flags),
289 tx_resp->failure_frame); 289 tx_resp->failure_frame);
290 290
291 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 291 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
292 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 292 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
293 293
294 if (priv->mac80211_registered && 294 if (priv->mac80211_registered &&
295 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 295 (iwl_queue_space(&txq->q) > txq->q.low_mark))
296 iwl_wake_queue(priv, txq_id); 296 iwl_wake_queue(priv, txq_id);
297 } 297 }
298 298
299 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 299 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
300 300
301 iwl_check_abort_status(priv, tx_resp->frame_count, status); 301 iwl_check_abort_status(priv, tx_resp->frame_count, status);
302 spin_unlock_irqrestore(&priv->sta_lock, flags); 302 spin_unlock_irqrestore(&priv->sta_lock, flags);
303 } 303 }
304 304
305 void iwlagn_rx_handler_setup(struct iwl_priv *priv) 305 void iwlagn_rx_handler_setup(struct iwl_priv *priv)
306 { 306 {
307 /* init calibration handlers */ 307 /* init calibration handlers */
308 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = 308 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
309 iwlagn_rx_calib_result; 309 iwlagn_rx_calib_result;
310 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = 310 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
311 iwlagn_rx_calib_complete; 311 iwlagn_rx_calib_complete;
312 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 312 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
313 } 313 }
314 314
315 void iwlagn_setup_deferred_work(struct iwl_priv *priv) 315 void iwlagn_setup_deferred_work(struct iwl_priv *priv)
316 { 316 {
317 /* in agn, the tx power calibration is done in uCode */ 317 /* in agn, the tx power calibration is done in uCode */
318 priv->disable_tx_power_cal = 1; 318 priv->disable_tx_power_cal = 1;
319 } 319 }
320 320
321 int iwlagn_hw_valid_rtc_data_addr(u32 addr) 321 int iwlagn_hw_valid_rtc_data_addr(u32 addr)
322 { 322 {
323 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && 323 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
324 (addr < IWLAGN_RTC_DATA_UPPER_BOUND); 324 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
325 } 325 }
326 326
327 int iwlagn_send_tx_power(struct iwl_priv *priv) 327 int iwlagn_send_tx_power(struct iwl_priv *priv)
328 { 328 {
329 struct iwl5000_tx_power_dbm_cmd tx_power_cmd; 329 struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
330 u8 tx_ant_cfg_cmd; 330 u8 tx_ant_cfg_cmd;
331 331
332 /* the limit is in dBm; the firmware expects half-dBm, so multiply by 2 */ 332 /* the limit is in dBm; the firmware expects half-dBm, so multiply by 2 */
333 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 333 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
334 334
335 if (priv->tx_power_lmt_in_half_dbm && 335 if (priv->tx_power_lmt_in_half_dbm &&
336 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) { 336 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
337 /* 337 /*
338 * Newer devices use an enhanced/extended tx power table in 338 * Newer devices use an enhanced/extended tx power table in
339 * EEPROM whose values are stored in half-dBm units. The 339 * EEPROM whose values are stored in half-dBm units. The
340 * driver must convert these to dBm before reporting to 340 * driver must convert these to dBm before reporting to
341 * mac80211, which can lose 1/2 dBm of resolution. The driver 341 * mac80211, which can lose 1/2 dBm of resolution. The driver
342 * rounds up before reporting, but that could push the tx 342 * rounds up before reporting, but that could push the tx
343 * power 1/2 dBm over the regulatory limit. Check here: if 343 * power 1/2 dBm over the regulatory limit. Check here: if
344 * "tx_power_user_lmt" is higher than the EEPROM value (in 344 * "tx_power_user_lmt" is higher than the EEPROM value (in
345 * half-dBm format), lower the tx power to the EEPROM-based 345 * half-dBm format), lower the tx power to the EEPROM-based
346 * limit. 346 * limit.
347 */ 347 */
348 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; 348 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
349 } 349 }
350 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; 350 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
351 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; 351 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
352 352
353 if (IWL_UCODE_API(priv->ucode_ver) == 1) 353 if (IWL_UCODE_API(priv->ucode_ver) == 1)
354 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; 354 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
355 else 355 else
356 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; 356 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
357 357
358 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, 358 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
359 sizeof(tx_power_cmd), &tx_power_cmd, 359 sizeof(tx_power_cmd), &tx_power_cmd,
360 NULL); 360 NULL);
361 } 361 }
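To make the half-dBm clamp above concrete, here is a small standalone sketch (plain userspace C, not driver code; the numbers are invented): a user limit of 16 dBm becomes 32 half-dBm, and an assumed EEPROM enhanced limit of 31 half-dBm lowers it to 31, i.e. 15.5 dBm, so the round-up performed when reporting to mac80211 cannot exceed the regulatory value.

#include <stdio.h>

int main(void)
{
	int tx_power_user_lmt = 16;		/* dBm; invented user setting */
	int eeprom_lmt_half_dbm = 31;		/* half-dBm; invented EEPROM limit */
	int global_lmt = 2 * tx_power_user_lmt;	/* 32 half-dBm */

	/* same check as in the function above: clamp to the EEPROM
	 * half-dBm limit when it is lower than the user's request */
	if (eeprom_lmt_half_dbm && eeprom_lmt_half_dbm < global_lmt)
		global_lmt = eeprom_lmt_half_dbm;

	printf("global_lmt = %d half-dBm (%d.%d dBm)\n",	/* 31 half-dBm (15.5 dBm) */
	       global_lmt, global_lmt / 2, (global_lmt % 2) * 5);
	return 0;
}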
362 362
363 void iwlagn_temperature(struct iwl_priv *priv) 363 void iwlagn_temperature(struct iwl_priv *priv)
364 { 364 {
365 /* store temperature from statistics (in Celsius) */ 365 /* store temperature from statistics (in Celsius) */
366 priv->temperature = 366 priv->temperature =
367 le32_to_cpu(priv->_agn.statistics.general.common.temperature); 367 le32_to_cpu(priv->_agn.statistics.general.common.temperature);
368 iwl_tt_handler(priv); 368 iwl_tt_handler(priv);
369 } 369 }
370 370
371 u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv) 371 u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
372 { 372 {
373 struct iwl_eeprom_calib_hdr { 373 struct iwl_eeprom_calib_hdr {
374 u8 version; 374 u8 version;
375 u8 pa_type; 375 u8 pa_type;
376 u16 voltage; 376 u16 voltage;
377 } *hdr; 377 } *hdr;
378 378
379 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, 379 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
380 EEPROM_CALIB_ALL); 380 EEPROM_CALIB_ALL);
381 return hdr->version; 381 return hdr->version;
382 382
383 } 383 }
384 384
385 /* 385 /*
386 * EEPROM 386 * EEPROM
387 */ 387 */
388 static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) 388 static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
389 { 389 {
390 u16 offset = 0; 390 u16 offset = 0;
391 391
392 if ((address & INDIRECT_ADDRESS) == 0) 392 if ((address & INDIRECT_ADDRESS) == 0)
393 return address; 393 return address;
394 394
395 switch (address & INDIRECT_TYPE_MSK) { 395 switch (address & INDIRECT_TYPE_MSK) {
396 case INDIRECT_HOST: 396 case INDIRECT_HOST:
397 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST); 397 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
398 break; 398 break;
399 case INDIRECT_GENERAL: 399 case INDIRECT_GENERAL:
400 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL); 400 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
401 break; 401 break;
402 case INDIRECT_REGULATORY: 402 case INDIRECT_REGULATORY:
403 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); 403 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
404 break; 404 break;
405 case INDIRECT_CALIBRATION: 405 case INDIRECT_CALIBRATION:
406 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); 406 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
407 break; 407 break;
408 case INDIRECT_PROCESS_ADJST: 408 case INDIRECT_PROCESS_ADJST:
409 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST); 409 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
410 break; 410 break;
411 case INDIRECT_OTHERS: 411 case INDIRECT_OTHERS:
412 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS); 412 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
413 break; 413 break;
414 default: 414 default:
415 IWL_ERR(priv, "illegal indirect type: 0x%X\n", 415 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
416 address & INDIRECT_TYPE_MSK); 416 address & INDIRECT_TYPE_MSK);
417 break; 417 break;
418 } 418 }
419 419
420 /* translate the offset from words to bytes */ 420 /* translate the offset from words to bytes */
421 return (address & ADDRESS_MSK) + (offset << 1); 421 return (address & ADDRESS_MSK) + (offset << 1);
422 } 422 }
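As a worked example of the translation above (the link value is invented): the EEPROM link words hold offsets counted in 16-bit words, so if EEPROM_LINK_CALIBRATION held 0x60, the byte offset would be 0x60 << 1 = 0xC0, and the function would return (address & ADDRESS_MSK) + 0xC0 for an INDIRECT_CALIBRATION address.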
423 423
424 const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, 424 const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
425 size_t offset) 425 size_t offset)
426 { 426 {
427 u32 address = eeprom_indirect_address(priv, offset); 427 u32 address = eeprom_indirect_address(priv, offset);
428 BUG_ON(address >= priv->cfg->eeprom_size); 428 BUG_ON(address >= priv->cfg->eeprom_size);
429 return &priv->eeprom[address]; 429 return &priv->eeprom[address];
430 } 430 }
431 431
432 struct iwl_mod_params iwlagn_mod_params = { 432 struct iwl_mod_params iwlagn_mod_params = {
433 .amsdu_size_8K = 1, 433 .amsdu_size_8K = 1,
434 .restart_fw = 1, 434 .restart_fw = 1,
435 /* the rest are 0 by default */ 435 /* the rest are 0 by default */
436 }; 436 };
437 437
438 void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 438 void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
439 { 439 {
440 unsigned long flags; 440 unsigned long flags;
441 int i; 441 int i;
442 spin_lock_irqsave(&rxq->lock, flags); 442 spin_lock_irqsave(&rxq->lock, flags);
443 INIT_LIST_HEAD(&rxq->rx_free); 443 INIT_LIST_HEAD(&rxq->rx_free);
444 INIT_LIST_HEAD(&rxq->rx_used); 444 INIT_LIST_HEAD(&rxq->rx_used);
445 /* Fill the rx_used queue with _all_ of the Rx buffers */ 445 /* Fill the rx_used queue with _all_ of the Rx buffers */
446 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 446 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
447 /* In the reset function, these buffers may have been allocated 447 /* In the reset function, these buffers may have been allocated
448 * to an SKB, so we need to unmap and free potential storage */ 448 * to an SKB, so we need to unmap and free potential storage */
449 if (rxq->pool[i].page != NULL) { 449 if (rxq->pool[i].page != NULL) {
450 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 450 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
451 PAGE_SIZE << priv->hw_params.rx_page_order, 451 PAGE_SIZE << priv->hw_params.rx_page_order,
452 PCI_DMA_FROMDEVICE); 452 PCI_DMA_FROMDEVICE);
453 __iwl_free_pages(priv, rxq->pool[i].page); 453 __iwl_free_pages(priv, rxq->pool[i].page);
454 rxq->pool[i].page = NULL; 454 rxq->pool[i].page = NULL;
455 } 455 }
456 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 456 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
457 } 457 }
458 458
459 for (i = 0; i < RX_QUEUE_SIZE; i++) 459 for (i = 0; i < RX_QUEUE_SIZE; i++)
460 rxq->queue[i] = NULL; 460 rxq->queue[i] = NULL;
461 461
462 /* Set us so that we have processed and used all buffers, but have 462 /* Set us so that we have processed and used all buffers, but have
463 * not restocked the Rx queue with fresh buffers */ 463 * not restocked the Rx queue with fresh buffers */
464 rxq->read = rxq->write = 0; 464 rxq->read = rxq->write = 0;
465 rxq->write_actual = 0; 465 rxq->write_actual = 0;
466 rxq->free_count = 0; 466 rxq->free_count = 0;
467 spin_unlock_irqrestore(&rxq->lock, flags); 467 spin_unlock_irqrestore(&rxq->lock, flags);
468 } 468 }
469 469
470 int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 470 int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
471 { 471 {
472 u32 rb_size; 472 u32 rb_size;
473 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 473 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
474 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ 474 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
475 475
476 if (!priv->cfg->use_isr_legacy) 476 if (!priv->cfg->use_isr_legacy)
477 rb_timeout = RX_RB_TIMEOUT; 477 rb_timeout = RX_RB_TIMEOUT;
478 478
479 if (priv->cfg->mod_params->amsdu_size_8K) 479 if (priv->cfg->mod_params->amsdu_size_8K)
480 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 480 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
481 else 481 else
482 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 482 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
483 483
484 /* Stop Rx DMA */ 484 /* Stop Rx DMA */
485 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 485 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
486 486
487 /* Reset driver's Rx queue write index */ 487 /* Reset driver's Rx queue write index */
488 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 488 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
489 489
490 /* Tell device where to find RBD circular buffer in DRAM */ 490 /* Tell device where to find RBD circular buffer in DRAM */
491 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 491 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
492 (u32)(rxq->bd_dma >> 8)); 492 (u32)(rxq->bd_dma >> 8));
493 493
494 /* Tell device where in DRAM to update its Rx status */ 494 /* Tell device where in DRAM to update its Rx status */
495 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, 495 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
496 rxq->rb_stts_dma >> 4); 496 rxq->rb_stts_dma >> 4);
497 497
498 /* Enable Rx DMA 498 /* Enable Rx DMA
499 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in 499 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
500 * the credit mechanism of the 5000 HW RX FIFO 500 * the credit mechanism of the 5000 HW RX FIFO
501 * Direct rx interrupts to the host 501 * Direct rx interrupts to the host
502 * Rx buffer size 4 or 8k 502 * Rx buffer size 4 or 8k
503 * RB timeout 0x10 503 * RB timeout 0x10
504 * 256 RBDs 504 * 256 RBDs
505 */ 505 */
506 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 506 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
507 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 507 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
508 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 508 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
509 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 509 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
510 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | 510 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
511 rb_size| 511 rb_size|
512 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| 512 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
513 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 513 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
514 514
515 /* Set interrupt coalescing timer to default (2048 usecs) */ 515 /* Set interrupt coalescing timer to default (2048 usecs) */
516 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 516 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
517 517
518 return 0; 518 return 0;
519 } 519 }
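With the defaults above, the channel-0 config write composes one register value from several fields: the enable bit, the ignore-RXF-empty workaround bit, the interrupt-to-host destination, single-frame mode, the 4K or 8K buffer-size code, rb_timeout shifted into the IRQ_RBTH field, and rfdnlog shifted into the RBDCB_SIZE field (rfdnlog is the log2 of the ring size, presumably 8 here, matching the comment's 256 RBDs).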
520 520
521 int iwlagn_hw_nic_init(struct iwl_priv *priv) 521 int iwlagn_hw_nic_init(struct iwl_priv *priv)
522 { 522 {
523 unsigned long flags; 523 unsigned long flags;
524 struct iwl_rx_queue *rxq = &priv->rxq; 524 struct iwl_rx_queue *rxq = &priv->rxq;
525 int ret; 525 int ret;
526 526
527 /* nic_init */ 527 /* nic_init */
528 spin_lock_irqsave(&priv->lock, flags); 528 spin_lock_irqsave(&priv->lock, flags);
529 priv->cfg->ops->lib->apm_ops.init(priv); 529 priv->cfg->ops->lib->apm_ops.init(priv);
530 530
531 /* Set interrupt coalescing calibration timer to default (512 usecs) */ 531 /* Set interrupt coalescing calibration timer to default (512 usecs) */
532 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); 532 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
533 533
534 spin_unlock_irqrestore(&priv->lock, flags); 534 spin_unlock_irqrestore(&priv->lock, flags);
535 535
536 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); 536 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
537 537
538 priv->cfg->ops->lib->apm_ops.config(priv); 538 priv->cfg->ops->lib->apm_ops.config(priv);
539 539
540 /* Allocate the RX queue, or reset if it is already allocated */ 540 /* Allocate the RX queue, or reset if it is already allocated */
541 if (!rxq->bd) { 541 if (!rxq->bd) {
542 ret = iwl_rx_queue_alloc(priv); 542 ret = iwl_rx_queue_alloc(priv);
543 if (ret) { 543 if (ret) {
544 IWL_ERR(priv, "Unable to initialize Rx queue\n"); 544 IWL_ERR(priv, "Unable to initialize Rx queue\n");
545 return -ENOMEM; 545 return -ENOMEM;
546 } 546 }
547 } else 547 } else
548 iwlagn_rx_queue_reset(priv, rxq); 548 iwlagn_rx_queue_reset(priv, rxq);
549 549
550 iwlagn_rx_replenish(priv); 550 iwlagn_rx_replenish(priv);
551 551
552 iwlagn_rx_init(priv, rxq); 552 iwlagn_rx_init(priv, rxq);
553 553
554 spin_lock_irqsave(&priv->lock, flags); 554 spin_lock_irqsave(&priv->lock, flags);
555 555
556 rxq->need_update = 1; 556 rxq->need_update = 1;
557 iwl_rx_queue_update_write_ptr(priv, rxq); 557 iwl_rx_queue_update_write_ptr(priv, rxq);
558 558
559 spin_unlock_irqrestore(&priv->lock, flags); 559 spin_unlock_irqrestore(&priv->lock, flags);
560 560
561 /* Allocate or reset and init all Tx and Command queues */ 561 /* Allocate or reset and init all Tx and Command queues */
562 if (!priv->txq) { 562 if (!priv->txq) {
563 ret = iwlagn_txq_ctx_alloc(priv); 563 ret = iwlagn_txq_ctx_alloc(priv);
564 if (ret) 564 if (ret)
565 return ret; 565 return ret;
566 } else 566 } else
567 iwlagn_txq_ctx_reset(priv); 567 iwlagn_txq_ctx_reset(priv);
568 568
569 set_bit(STATUS_INIT, &priv->status); 569 set_bit(STATUS_INIT, &priv->status);
570 570
571 return 0; 571 return 0;
572 } 572 }
573 573
574 /** 574 /**
575 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 575 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
576 */ 576 */
577 static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv, 577 static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
578 dma_addr_t dma_addr) 578 dma_addr_t dma_addr)
579 { 579 {
580 return cpu_to_le32((u32)(dma_addr >> 8)); 580 return cpu_to_le32((u32)(dma_addr >> 8));
581 } 581 }
582 582
583 /** 583 /**
584 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool 584 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
585 * 585 *
586 * If there are slots in the RX queue that need to be restocked, 586 * If there are slots in the RX queue that need to be restocked,
587 * and we have free pre-allocated buffers, fill the ranks as much 587 * and we have free pre-allocated buffers, fill the ranks as much
588 * as we can, pulling from rx_free. 588 * as we can, pulling from rx_free.
589 * 589 *
590 * This moves the 'write' index forward to catch up with 'processed', and 590 * This moves the 'write' index forward to catch up with 'processed', and
591 * also updates the memory address in the firmware to reference the new 591 * also updates the memory address in the firmware to reference the new
592 * target buffer. 592 * target buffer.
593 */ 593 */
594 void iwlagn_rx_queue_restock(struct iwl_priv *priv) 594 void iwlagn_rx_queue_restock(struct iwl_priv *priv)
595 { 595 {
596 struct iwl_rx_queue *rxq = &priv->rxq; 596 struct iwl_rx_queue *rxq = &priv->rxq;
597 struct list_head *element; 597 struct list_head *element;
598 struct iwl_rx_mem_buffer *rxb; 598 struct iwl_rx_mem_buffer *rxb;
599 unsigned long flags; 599 unsigned long flags;
600 600
601 spin_lock_irqsave(&rxq->lock, flags); 601 spin_lock_irqsave(&rxq->lock, flags);
602 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 602 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
603 /* The overwritten rxb must be a used one */ 603 /* The overwritten rxb must be a used one */
604 rxb = rxq->queue[rxq->write]; 604 rxb = rxq->queue[rxq->write];
605 BUG_ON(rxb && rxb->page); 605 BUG_ON(rxb && rxb->page);
606 606
607 /* Get next free Rx buffer, remove from free list */ 607 /* Get next free Rx buffer, remove from free list */
608 element = rxq->rx_free.next; 608 element = rxq->rx_free.next;
609 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 609 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
610 list_del(element); 610 list_del(element);
611 611
612 /* Point to Rx buffer via next RBD in circular buffer */ 612 /* Point to Rx buffer via next RBD in circular buffer */
613 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv, 613 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
614 rxb->page_dma); 614 rxb->page_dma);
615 rxq->queue[rxq->write] = rxb; 615 rxq->queue[rxq->write] = rxb;
616 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 616 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
617 rxq->free_count--; 617 rxq->free_count--;
618 } 618 }
619 spin_unlock_irqrestore(&rxq->lock, flags); 619 spin_unlock_irqrestore(&rxq->lock, flags);
620 /* If the pre-allocated buffer pool is dropping low, schedule to 620 /* If the pre-allocated buffer pool is dropping low, schedule to
621 * refill it */ 621 * refill it */
622 if (rxq->free_count <= RX_LOW_WATERMARK) 622 if (rxq->free_count <= RX_LOW_WATERMARK)
623 queue_work(priv->workqueue, &priv->rx_replenish); 623 queue_work(priv->workqueue, &priv->rx_replenish);
624 624
625 625
626 /* If we've added more space for the firmware to place data, tell it. 626 /* If we've added more space for the firmware to place data, tell it.
627 * Increment device's write pointer in multiples of 8. */ 627 * Increment device's write pointer in multiples of 8. */
628 if (rxq->write_actual != (rxq->write & ~0x7)) { 628 if (rxq->write_actual != (rxq->write & ~0x7)) {
629 spin_lock_irqsave(&rxq->lock, flags); 629 spin_lock_irqsave(&rxq->lock, flags);
630 rxq->need_update = 1; 630 rxq->need_update = 1;
631 spin_unlock_irqrestore(&rxq->lock, flags); 631 spin_unlock_irqrestore(&rxq->lock, flags);
632 iwl_rx_queue_update_write_ptr(priv, rxq); 632 iwl_rx_queue_update_write_ptr(priv, rxq);
633 } 633 }
634 } 634 }
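A concrete reading of the write-pointer handling above (the value is invented): the device is only told about filled RBDs in multiples of 8, which is what write & ~0x7 implements. With write = 13, write & ~0x7 = 8, so write_actual advances to 8 and the remaining five buffers are announced only once write reaches 16.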
635 635
636 /** 636 /**
637 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free 637 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
638 * 638 *
639 * When moving to rx_free an SKB is allocated for the slot. 639 * When moving to rx_free an SKB is allocated for the slot.
640 * 640 *
641 * Also restock the Rx queue via iwl_rx_queue_restock. 641 * Also restock the Rx queue via iwl_rx_queue_restock.
642 * This is called as a scheduled work item (except during initialization). 642 * This is called as a scheduled work item (except during initialization).
643 */ 643 */
644 void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority) 644 void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
645 { 645 {
646 struct iwl_rx_queue *rxq = &priv->rxq; 646 struct iwl_rx_queue *rxq = &priv->rxq;
647 struct list_head *element; 647 struct list_head *element;
648 struct iwl_rx_mem_buffer *rxb; 648 struct iwl_rx_mem_buffer *rxb;
649 struct page *page; 649 struct page *page;
650 unsigned long flags; 650 unsigned long flags;
651 gfp_t gfp_mask = priority; 651 gfp_t gfp_mask = priority;
652 652
653 while (1) { 653 while (1) {
654 spin_lock_irqsave(&rxq->lock, flags); 654 spin_lock_irqsave(&rxq->lock, flags);
655 if (list_empty(&rxq->rx_used)) { 655 if (list_empty(&rxq->rx_used)) {
656 spin_unlock_irqrestore(&rxq->lock, flags); 656 spin_unlock_irqrestore(&rxq->lock, flags);
657 return; 657 return;
658 } 658 }
659 spin_unlock_irqrestore(&rxq->lock, flags); 659 spin_unlock_irqrestore(&rxq->lock, flags);
660 660
661 if (rxq->free_count > RX_LOW_WATERMARK) 661 if (rxq->free_count > RX_LOW_WATERMARK)
662 gfp_mask |= __GFP_NOWARN; 662 gfp_mask |= __GFP_NOWARN;
663 663
664 if (priv->hw_params.rx_page_order > 0) 664 if (priv->hw_params.rx_page_order > 0)
665 gfp_mask |= __GFP_COMP; 665 gfp_mask |= __GFP_COMP;
666 666
667 /* Alloc a new receive buffer */ 667 /* Alloc a new receive buffer */
668 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); 668 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
669 if (!page) { 669 if (!page) {
670 if (net_ratelimit()) 670 if (net_ratelimit())
671 IWL_DEBUG_INFO(priv, "alloc_pages failed, " 671 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
672 "order: %d\n", 672 "order: %d\n",
673 priv->hw_params.rx_page_order); 673 priv->hw_params.rx_page_order);
674 674
675 if ((rxq->free_count <= RX_LOW_WATERMARK) && 675 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
676 net_ratelimit()) 676 net_ratelimit())
677 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n", 677 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
678 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", 678 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
679 rxq->free_count); 679 rxq->free_count);
680 /* We don't reschedule replenish work here -- we will 680 /* We don't reschedule replenish work here -- we will
681 * call the restock method and if it still needs 681 * call the restock method and if it still needs
682 * more buffers it will schedule replenish */ 682 * more buffers it will schedule replenish */
683 return; 683 return;
684 } 684 }
685 685
686 spin_lock_irqsave(&rxq->lock, flags); 686 spin_lock_irqsave(&rxq->lock, flags);
687 687
688 if (list_empty(&rxq->rx_used)) { 688 if (list_empty(&rxq->rx_used)) {
689 spin_unlock_irqrestore(&rxq->lock, flags); 689 spin_unlock_irqrestore(&rxq->lock, flags);
690 __free_pages(page, priv->hw_params.rx_page_order); 690 __free_pages(page, priv->hw_params.rx_page_order);
691 return; 691 return;
692 } 692 }
693 element = rxq->rx_used.next; 693 element = rxq->rx_used.next;
694 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 694 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
695 list_del(element); 695 list_del(element);
696 696
697 spin_unlock_irqrestore(&rxq->lock, flags); 697 spin_unlock_irqrestore(&rxq->lock, flags);
698 698
699 BUG_ON(rxb->page); 699 BUG_ON(rxb->page);
700 rxb->page = page; 700 rxb->page = page;
701 /* Get physical address of the RB */ 701 /* Get physical address of the RB */
702 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, 702 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
703 PAGE_SIZE << priv->hw_params.rx_page_order, 703 PAGE_SIZE << priv->hw_params.rx_page_order,
704 PCI_DMA_FROMDEVICE); 704 PCI_DMA_FROMDEVICE);
705 /* dma address must be no more than 36 bits */ 705 /* dma address must be no more than 36 bits */
706 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 706 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
707 /* and also 256 byte aligned! */ 707 /* and also 256 byte aligned! */
708 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); 708 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
709 709
710 spin_lock_irqsave(&rxq->lock, flags); 710 spin_lock_irqsave(&rxq->lock, flags);
711 711
712 list_add_tail(&rxb->list, &rxq->rx_free); 712 list_add_tail(&rxb->list, &rxq->rx_free);
713 rxq->free_count++; 713 rxq->free_count++;
714 priv->alloc_rxb_page++; 714 priv->alloc_rxb_page++;
715 715
716 spin_unlock_irqrestore(&rxq->lock, flags); 716 spin_unlock_irqrestore(&rxq->lock, flags);
717 } 717 }
718 } 718 }
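The two BUG_ON checks above encode the hardware's constraints on the receive-buffer DMA address: it must fit in 36 bits (no bits set outside DMA_BIT_MASK(36)) and its low 8 bits must be zero, i.e. 256-byte aligned. For example (addresses invented), 0x123456700 satisfies both, while 0x123456710 would trip the alignment check because its low byte is 0x10.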
719 719
720 void iwlagn_rx_replenish(struct iwl_priv *priv) 720 void iwlagn_rx_replenish(struct iwl_priv *priv)
721 { 721 {
722 unsigned long flags; 722 unsigned long flags;
723 723
724 iwlagn_rx_allocate(priv, GFP_KERNEL); 724 iwlagn_rx_allocate(priv, GFP_KERNEL);
725 725
726 spin_lock_irqsave(&priv->lock, flags); 726 spin_lock_irqsave(&priv->lock, flags);
727 iwlagn_rx_queue_restock(priv); 727 iwlagn_rx_queue_restock(priv);
728 spin_unlock_irqrestore(&priv->lock, flags); 728 spin_unlock_irqrestore(&priv->lock, flags);
729 } 729 }
730 730
731 void iwlagn_rx_replenish_now(struct iwl_priv *priv) 731 void iwlagn_rx_replenish_now(struct iwl_priv *priv)
732 { 732 {
733 iwlagn_rx_allocate(priv, GFP_ATOMIC); 733 iwlagn_rx_allocate(priv, GFP_ATOMIC);
734 734
735 iwlagn_rx_queue_restock(priv); 735 iwlagn_rx_queue_restock(priv);
736 } 736 }
737 737
738 /* Assumes that the skb field of the buffers in 'pool' is kept accurate. 738 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
739 * If an SKB has been detached, the pool entry needs to have its SKB set to NULL. 739 * If an SKB has been detached, the pool entry needs to have its SKB set to NULL.
740 * This free routine walks the list of pool entries and, if the SKB is 740 * This free routine walks the list of pool entries and, if the SKB is
741 * non-NULL, unmaps and frees it. 741 * non-NULL, unmaps and frees it.
742 */ 742 */
743 void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 743 void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
744 { 744 {
745 int i; 745 int i;
746 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 746 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
747 if (rxq->pool[i].page != NULL) { 747 if (rxq->pool[i].page != NULL) {
748 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 748 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
749 PAGE_SIZE << priv->hw_params.rx_page_order, 749 PAGE_SIZE << priv->hw_params.rx_page_order,
750 PCI_DMA_FROMDEVICE); 750 PCI_DMA_FROMDEVICE);
751 __iwl_free_pages(priv, rxq->pool[i].page); 751 __iwl_free_pages(priv, rxq->pool[i].page);
752 rxq->pool[i].page = NULL; 752 rxq->pool[i].page = NULL;
753 } 753 }
754 } 754 }
755 755
756 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 756 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
757 rxq->bd_dma); 757 rxq->bd_dma);
758 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), 758 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
759 rxq->rb_stts, rxq->rb_stts_dma); 759 rxq->rb_stts, rxq->rb_stts_dma);
760 rxq->bd = NULL; 760 rxq->bd = NULL;
761 rxq->rb_stts = NULL; 761 rxq->rb_stts = NULL;
762 } 762 }
763 763
764 int iwlagn_rxq_stop(struct iwl_priv *priv) 764 int iwlagn_rxq_stop(struct iwl_priv *priv)
765 { 765 {
766 766
767 /* stop Rx DMA */ 767 /* stop Rx DMA */
768 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 768 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
769 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, 769 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
770 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 770 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
771 771
772 return 0; 772 return 0;
773 } 773 }
774 774
775 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) 775 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
776 { 776 {
777 int idx = 0; 777 int idx = 0;
778 int band_offset = 0; 778 int band_offset = 0;
779 779
780 /* HT rate format: mac80211 wants an MCS number, which is just LSB */ 780 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
781 if (rate_n_flags & RATE_MCS_HT_MSK) { 781 if (rate_n_flags & RATE_MCS_HT_MSK) {
782 idx = (rate_n_flags & 0xff); 782 idx = (rate_n_flags & 0xff);
783 return idx; 783 return idx;
784 /* Legacy rate format, search for match in table */ 784 /* Legacy rate format, search for match in table */
785 } else { 785 } else {
786 if (band == IEEE80211_BAND_5GHZ) 786 if (band == IEEE80211_BAND_5GHZ)
787 band_offset = IWL_FIRST_OFDM_RATE; 787 band_offset = IWL_FIRST_OFDM_RATE;
788 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) 788 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
789 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) 789 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
790 return idx - band_offset; 790 return idx - band_offset;
791 } 791 }
792 792
793 return -1; 793 return -1;
794 } 794 }
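Reading the two branches above with concrete cases: for an HT rate, rate_n_flags & 0xff is already the MCS number mac80211 expects, so it is returned as-is; for a legacy rate the PLCP value is looked up in iwl_rates[], and on the 5 GHz band the result is returned relative to IWL_FIRST_OFDM_RATE, so the first OFDM rate maps to index 0. If no PLCP entry matches, -1 is returned.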
795 795
796 /* Calc max signal level (dBm) among 3 possible receivers */ 796 /* Calc max signal level (dBm) among 3 possible receivers */
797 static inline int iwlagn_calc_rssi(struct iwl_priv *priv, 797 static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
798 struct iwl_rx_phy_res *rx_resp) 798 struct iwl_rx_phy_res *rx_resp)
799 { 799 {
800 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); 800 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
801 } 801 }
802 802
803 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) 803 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
804 { 804 {
805 u32 decrypt_out = 0; 805 u32 decrypt_out = 0;
806 806
807 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == 807 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
808 RX_RES_STATUS_STATION_FOUND) 808 RX_RES_STATUS_STATION_FOUND)
809 decrypt_out |= (RX_RES_STATUS_STATION_FOUND | 809 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
810 RX_RES_STATUS_NO_STATION_INFO_MISMATCH); 810 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
811 811
812 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); 812 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
813 813
814 /* packet was not encrypted */ 814 /* packet was not encrypted */
815 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == 815 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
816 RX_RES_STATUS_SEC_TYPE_NONE) 816 RX_RES_STATUS_SEC_TYPE_NONE)
817 return decrypt_out; 817 return decrypt_out;
818 818
819 /* packet was encrypted with unknown alg */ 819 /* packet was encrypted with unknown alg */
820 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == 820 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
821 RX_RES_STATUS_SEC_TYPE_ERR) 821 RX_RES_STATUS_SEC_TYPE_ERR)
822 return decrypt_out; 822 return decrypt_out;
823 823
824 /* decryption was not done in HW */ 824 /* decryption was not done in HW */
825 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != 825 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
826 RX_MPDU_RES_STATUS_DEC_DONE_MSK) 826 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
827 return decrypt_out; 827 return decrypt_out;
828 828
829 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { 829 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
830 830
831 case RX_RES_STATUS_SEC_TYPE_CCMP: 831 case RX_RES_STATUS_SEC_TYPE_CCMP:
832 /* alg is CCM: check MIC only */ 832 /* alg is CCM: check MIC only */
833 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) 833 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
834 /* Bad MIC */ 834 /* Bad MIC */
835 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; 835 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
836 else 836 else
837 decrypt_out |= RX_RES_STATUS_DECRYPT_OK; 837 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
838 838
839 break; 839 break;
840 840
841 case RX_RES_STATUS_SEC_TYPE_TKIP: 841 case RX_RES_STATUS_SEC_TYPE_TKIP:
842 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { 842 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
843 /* Bad TTAK */ 843 /* Bad TTAK */
844 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; 844 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
845 break; 845 break;
846 } 846 }
847 /* fall through if TTAK OK */ 847 /* fall through if TTAK OK */
848 default: 848 default:
849 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) 849 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
850 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; 850 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
851 else 851 else
852 decrypt_out |= RX_RES_STATUS_DECRYPT_OK; 852 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
853 break; 853 break;
854 } 854 }
855 855
856 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", 856 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
857 decrypt_in, decrypt_out); 857 decrypt_in, decrypt_out);
858 858
859 return decrypt_out; 859 return decrypt_out;
860 } 860 }
861 861
862 static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, 862 static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
863 struct ieee80211_hdr *hdr, 863 struct ieee80211_hdr *hdr,
864 u16 len, 864 u16 len,
865 u32 ampdu_status, 865 u32 ampdu_status,
866 struct iwl_rx_mem_buffer *rxb, 866 struct iwl_rx_mem_buffer *rxb,
867 struct ieee80211_rx_status *stats) 867 struct ieee80211_rx_status *stats)
868 { 868 {
869 struct sk_buff *skb; 869 struct sk_buff *skb;
870 __le16 fc = hdr->frame_control; 870 __le16 fc = hdr->frame_control;
871 871
872 /* We only process data packets if the interface is open */ 872 /* We only process data packets if the interface is open */
873 if (unlikely(!priv->is_open)) { 873 if (unlikely(!priv->is_open)) {
874 IWL_DEBUG_DROP_LIMIT(priv, 874 IWL_DEBUG_DROP_LIMIT(priv,
875 "Dropping packet while interface is not open.\n"); 875 "Dropping packet while interface is not open.\n");
876 return; 876 return;
877 } 877 }
878 878
879 /* In case of HW accelerated crypto and bad decryption, drop */ 879 /* In case of HW accelerated crypto and bad decryption, drop */
880 if (!priv->cfg->mod_params->sw_crypto && 880 if (!priv->cfg->mod_params->sw_crypto &&
881 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 881 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
882 return; 882 return;
883 883
884 skb = dev_alloc_skb(128); 884 skb = dev_alloc_skb(128);
885 if (!skb) { 885 if (!skb) {
886 IWL_ERR(priv, "dev_alloc_skb failed\n"); 886 IWL_ERR(priv, "dev_alloc_skb failed\n");
887 return; 887 return;
888 } 888 }
889 889
890 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); 890 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
891 891
892 iwl_update_stats(priv, false, fc, len); 892 iwl_update_stats(priv, false, fc, len);
893 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 893 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
894 894
895 ieee80211_rx(priv->hw, skb); 895 ieee80211_rx(priv->hw, skb);
896 priv->alloc_rxb_page--; 896 priv->alloc_rxb_page--;
897 rxb->page = NULL; 897 rxb->page = NULL;
898 } 898 }
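Note how the function above avoids copying the frame: the 128-byte skb only provides the skb head, and skb_add_rx_frag() attaches the receive page itself as a paged fragment starting at the header's offset within that page. Ownership of the page passes to the skb handed to ieee80211_rx(), which is why rxb->page is cleared and the page counter decremented afterwards.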
899 899
900 /* Called for REPLY_RX (legacy ABG frames), or 900 /* Called for REPLY_RX (legacy ABG frames), or
901 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ 901 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
902 void iwlagn_rx_reply_rx(struct iwl_priv *priv, 902 void iwlagn_rx_reply_rx(struct iwl_priv *priv,
903 struct iwl_rx_mem_buffer *rxb) 903 struct iwl_rx_mem_buffer *rxb)
904 { 904 {
905 struct ieee80211_hdr *header; 905 struct ieee80211_hdr *header;
906 struct ieee80211_rx_status rx_status; 906 struct ieee80211_rx_status rx_status;
907 struct iwl_rx_packet *pkt = rxb_addr(rxb); 907 struct iwl_rx_packet *pkt = rxb_addr(rxb);
908 struct iwl_rx_phy_res *phy_res; 908 struct iwl_rx_phy_res *phy_res;
909 __le32 rx_pkt_status; 909 __le32 rx_pkt_status;
910 struct iwl_rx_mpdu_res_start *amsdu; 910 struct iwl_rx_mpdu_res_start *amsdu;
911 u32 len; 911 u32 len;
912 u32 ampdu_status; 912 u32 ampdu_status;
913 u32 rate_n_flags; 913 u32 rate_n_flags;
914 914
915 /** 915 /**
916 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. 916 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
917 * REPLY_RX: physical layer info is in this buffer 917 * REPLY_RX: physical layer info is in this buffer
918 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate 918 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
919 * command and cached in priv->last_phy_res 919 * command and cached in priv->last_phy_res
920 * 920 *
921 * Here we set up local variables depending on which command is 921 * Here we set up local variables depending on which command is
922 * received. 922 * received.
923 */ 923 */
924 if (pkt->hdr.cmd == REPLY_RX) { 924 if (pkt->hdr.cmd == REPLY_RX) {
925 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; 925 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
926 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) 926 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
927 + phy_res->cfg_phy_cnt); 927 + phy_res->cfg_phy_cnt);
928 928
929 len = le16_to_cpu(phy_res->byte_count); 929 len = le16_to_cpu(phy_res->byte_count);
930 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + 930 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
931 phy_res->cfg_phy_cnt + len); 931 phy_res->cfg_phy_cnt + len);
932 ampdu_status = le32_to_cpu(rx_pkt_status); 932 ampdu_status = le32_to_cpu(rx_pkt_status);
933 } else { 933 } else {
934 if (!priv->_agn.last_phy_res_valid) { 934 if (!priv->_agn.last_phy_res_valid) {
935 IWL_ERR(priv, "MPDU frame without cached PHY data\n"); 935 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
936 return; 936 return;
937 } 937 }
938 phy_res = &priv->_agn.last_phy_res; 938 phy_res = &priv->_agn.last_phy_res;
939 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; 939 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
940 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); 940 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
941 len = le16_to_cpu(amsdu->byte_count); 941 len = le16_to_cpu(amsdu->byte_count);
942 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); 942 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
943 ampdu_status = iwlagn_translate_rx_status(priv, 943 ampdu_status = iwlagn_translate_rx_status(priv,
944 le32_to_cpu(rx_pkt_status)); 944 le32_to_cpu(rx_pkt_status));
945 } 945 }
946 946
947 if ((unlikely(phy_res->cfg_phy_cnt > 20))) { 947 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
948 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", 948 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
949 phy_res->cfg_phy_cnt); 949 phy_res->cfg_phy_cnt);
950 return; 950 return;
951 } 951 }
952 952
953 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || 953 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
954 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { 954 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
955 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", 955 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
956 le32_to_cpu(rx_pkt_status)); 956 le32_to_cpu(rx_pkt_status));
957 return; 957 return;
958 } 958 }
959 959
960 /* This will be used in several places later */ 960 /* This will be used in several places later */
961 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); 961 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
962 962
963 /* rx_status carries information about the packet to mac80211 */ 963 /* rx_status carries information about the packet to mac80211 */
964 rx_status.mactime = le64_to_cpu(phy_res->timestamp); 964 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
965 rx_status.freq = 965 rx_status.freq =
966 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); 966 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
967 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 967 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
968 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 968 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
969 rx_status.rate_idx = 969 rx_status.rate_idx =
970 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); 970 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
971 rx_status.flag = 0; 971 rx_status.flag = 0;
972 972
973 /* TSF isn't reliable. To allow a smooth user experience, this 973 /* TSF isn't reliable. To allow a smooth user experience, this
974 * workaround doesn't propagate it to mac80211 */ 974 * workaround doesn't propagate it to mac80211 */
975 /*rx_status.flag |= RX_FLAG_TSFT;*/ 975 /*rx_status.flag |= RX_FLAG_TSFT;*/
976 976
977 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); 977 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
978 978
979 /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 979 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
980 rx_status.signal = iwlagn_calc_rssi(priv, phy_res); 980 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
981 981
982 iwl_dbg_log_rx_data_frame(priv, len, header); 982 iwl_dbg_log_rx_data_frame(priv, len, header);
983 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", 983 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
984 rx_status.signal, (unsigned long long)rx_status.mactime); 984 rx_status.signal, (unsigned long long)rx_status.mactime);
985 985
986 /* 986 /*
987 * "antenna number" 987 * "antenna number"
988 * 988 *
989 * It seems that the antenna field in the phy flags value 989 * It seems that the antenna field in the phy flags value
990 * is actually a bit field. This is undefined by radiotap, 990 * is actually a bit field. This is undefined by radiotap,
991 * it wants an actual antenna number but I always get "7" 991 * it wants an actual antenna number but I always get "7"
992 * for most legacy frames I receive indicating that the 992 * for most legacy frames I receive indicating that the
993 * same frame was received on all three RX chains. 993 * same frame was received on all three RX chains.
994 * 994 *
995 * I think this field should be removed in favor of a 995 * I think this field should be removed in favor of a
996 * new 802.11n radiotap field "RX chains" that is defined 996 * new 802.11n radiotap field "RX chains" that is defined
997 * as a bitmask. 997 * as a bitmask.
998 */ 998 */
999 rx_status.antenna = 999 rx_status.antenna =
1000 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) 1000 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1001 >> RX_RES_PHY_FLAGS_ANTENNA_POS; 1001 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1002 1002
1003 /* set the preamble flag if appropriate */ 1003 /* set the preamble flag if appropriate */
1004 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) 1004 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1005 rx_status.flag |= RX_FLAG_SHORTPRE; 1005 rx_status.flag |= RX_FLAG_SHORTPRE;
1006 1006
1007 /* Set up the HT phy flags */ 1007 /* Set up the HT phy flags */
1008 if (rate_n_flags & RATE_MCS_HT_MSK) 1008 if (rate_n_flags & RATE_MCS_HT_MSK)
1009 rx_status.flag |= RX_FLAG_HT; 1009 rx_status.flag |= RX_FLAG_HT;
1010 if (rate_n_flags & RATE_MCS_HT40_MSK) 1010 if (rate_n_flags & RATE_MCS_HT40_MSK)
1011 rx_status.flag |= RX_FLAG_40MHZ; 1011 rx_status.flag |= RX_FLAG_40MHZ;
1012 if (rate_n_flags & RATE_MCS_SGI_MSK) 1012 if (rate_n_flags & RATE_MCS_SGI_MSK)
1013 rx_status.flag |= RX_FLAG_SHORT_GI; 1013 rx_status.flag |= RX_FLAG_SHORT_GI;
1014 1014
1015 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, 1015 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1016 rxb, &rx_status); 1016 rxb, &rx_status);
1017 } 1017 }
1018 1018
1019 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). 1019 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1020 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ 1020 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1021 void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, 1021 void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
1022 struct iwl_rx_mem_buffer *rxb) 1022 struct iwl_rx_mem_buffer *rxb)
1023 { 1023 {
1024 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1024 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1025 priv->_agn.last_phy_res_valid = true; 1025 priv->_agn.last_phy_res_valid = true;
1026 memcpy(&priv->_agn.last_phy_res, pkt->u.raw, 1026 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
1027 sizeof(struct iwl_rx_phy_res)); 1027 sizeof(struct iwl_rx_phy_res));
1028 } 1028 }
1029 1029
1030 static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, 1030 static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
1031 struct ieee80211_vif *vif, 1031 struct ieee80211_vif *vif,
1032 enum ieee80211_band band, 1032 enum ieee80211_band band,
1033 struct iwl_scan_channel *scan_ch) 1033 struct iwl_scan_channel *scan_ch)
1034 { 1034 {
1035 const struct ieee80211_supported_band *sband; 1035 const struct ieee80211_supported_band *sband;
1036 u16 passive_dwell = 0; 1036 u16 passive_dwell = 0;
1037 u16 active_dwell = 0; 1037 u16 active_dwell = 0;
1038 int added = 0; 1038 int added = 0;
1039 u16 channel = 0; 1039 u16 channel = 0;
1040 1040
1041 sband = iwl_get_hw_mode(priv, band); 1041 sband = iwl_get_hw_mode(priv, band);
1042 if (!sband) { 1042 if (!sband) {
1043 IWL_ERR(priv, "invalid band\n"); 1043 IWL_ERR(priv, "invalid band\n");
1044 return added; 1044 return added;
1045 } 1045 }
1046 1046
1047 active_dwell = iwl_get_active_dwell_time(priv, band, 0); 1047 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
1048 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1048 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1049 1049
1050 if (passive_dwell <= active_dwell) 1050 if (passive_dwell <= active_dwell)
1051 passive_dwell = active_dwell + 1; 1051 passive_dwell = active_dwell + 1;
1052 1052
1053 channel = iwl_get_single_channel_number(priv, band); 1053 channel = iwl_get_single_channel_number(priv, band);
1054 if (channel) { 1054 if (channel) {
1055 scan_ch->channel = cpu_to_le16(channel); 1055 scan_ch->channel = cpu_to_le16(channel);
1056 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; 1056 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1057 scan_ch->active_dwell = cpu_to_le16(active_dwell); 1057 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1058 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 1058 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1059 /* Set txpower levels to defaults */ 1059 /* Set txpower levels to defaults */
1060 scan_ch->dsp_atten = 110; 1060 scan_ch->dsp_atten = 110;
1061 if (band == IEEE80211_BAND_5GHZ) 1061 if (band == IEEE80211_BAND_5GHZ)
1062 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; 1062 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1063 else 1063 else
1064 scan_ch->tx_gain = ((1 << 5) | (5 << 3)); 1064 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1065 added++; 1065 added++;
1066 } else 1066 } else
1067 IWL_ERR(priv, "no valid channel found\n"); 1067 IWL_ERR(priv, "no valid channel found\n");
1068 return added; 1068 return added;
1069 } 1069 }
1070 1070
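Note: the default scan tx_gain values above are packed byte constants; the meaning of the individual fields is hardware/firmware-defined, so this sketch only shows what the expressions evaluate to:

    #include <stdio.h>

    /* Print the default scan tx_gain bytes used above; the field layout inside
     * the byte is hardware-defined, this only shows the resulting values. */
    int main(void)
    {
            unsigned char gain_5ghz  = ((1 << 5) | (3 << 3)) | 3;  /* 0x3b (59) */
            unsigned char gain_24ghz = (1 << 5) | (5 << 3);        /* 0x48 (72) */

            printf("5 GHz scan tx_gain   = 0x%02x\n", gain_5ghz);
            printf("2.4 GHz scan tx_gain = 0x%02x\n", gain_24ghz);
            return 0;
    }
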
1071 static int iwl_get_channels_for_scan(struct iwl_priv *priv, 1071 static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1072 struct ieee80211_vif *vif, 1072 struct ieee80211_vif *vif,
1073 enum ieee80211_band band, 1073 enum ieee80211_band band,
1074 u8 is_active, u8 n_probes, 1074 u8 is_active, u8 n_probes,
1075 struct iwl_scan_channel *scan_ch) 1075 struct iwl_scan_channel *scan_ch)
1076 { 1076 {
1077 struct ieee80211_channel *chan; 1077 struct ieee80211_channel *chan;
1078 const struct ieee80211_supported_band *sband; 1078 const struct ieee80211_supported_band *sband;
1079 const struct iwl_channel_info *ch_info; 1079 const struct iwl_channel_info *ch_info;
1080 u16 passive_dwell = 0; 1080 u16 passive_dwell = 0;
1081 u16 active_dwell = 0; 1081 u16 active_dwell = 0;
1082 int added, i; 1082 int added, i;
1083 u16 channel; 1083 u16 channel;
1084 1084
1085 sband = iwl_get_hw_mode(priv, band); 1085 sband = iwl_get_hw_mode(priv, band);
1086 if (!sband) 1086 if (!sband)
1087 return 0; 1087 return 0;
1088 1088
1089 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); 1089 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
1090 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1090 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1091 1091
1092 if (passive_dwell <= active_dwell) 1092 if (passive_dwell <= active_dwell)
1093 passive_dwell = active_dwell + 1; 1093 passive_dwell = active_dwell + 1;
1094 1094
1095 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { 1095 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1096 chan = priv->scan_request->channels[i]; 1096 chan = priv->scan_request->channels[i];
1097 1097
1098 if (chan->band != band) 1098 if (chan->band != band)
1099 continue; 1099 continue;
1100 1100
1101 channel = ieee80211_frequency_to_channel(chan->center_freq); 1101 channel = ieee80211_frequency_to_channel(chan->center_freq);
1102 scan_ch->channel = cpu_to_le16(channel); 1102 scan_ch->channel = cpu_to_le16(channel);
1103 1103
1104 ch_info = iwl_get_channel_info(priv, band, channel); 1104 ch_info = iwl_get_channel_info(priv, band, channel);
1105 if (!is_channel_valid(ch_info)) { 1105 if (!is_channel_valid(ch_info)) {
1106 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", 1106 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
1107 channel); 1107 channel);
1108 continue; 1108 continue;
1109 } 1109 }
1110 1110
1111 if (!is_active || is_channel_passive(ch_info) || 1111 if (!is_active || is_channel_passive(ch_info) ||
1112 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) 1112 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
1113 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; 1113 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1114 else 1114 else
1115 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; 1115 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1116 1116
1117 if (n_probes) 1117 if (n_probes)
1118 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); 1118 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
1119 1119
1120 scan_ch->active_dwell = cpu_to_le16(active_dwell); 1120 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1121 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 1121 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1122 1122
1123 /* Set txpower levels to defaults */ 1123 /* Set txpower levels to defaults */
1124 scan_ch->dsp_atten = 110; 1124 scan_ch->dsp_atten = 110;
1125 1125
1126 /* NOTE: if we were doing 6Mb OFDM for scans we'd use 1126 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1127 * power level: 1127 * power level:
1128 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; 1128 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1129 */ 1129 */
1130 if (band == IEEE80211_BAND_5GHZ) 1130 if (band == IEEE80211_BAND_5GHZ)
1131 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; 1131 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1132 else 1132 else
1133 scan_ch->tx_gain = ((1 << 5) | (5 << 3)); 1133 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1134 1134
1135 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", 1135 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
1136 channel, le32_to_cpu(scan_ch->type), 1136 channel, le32_to_cpu(scan_ch->type),
1137 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? 1137 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1138 "ACTIVE" : "PASSIVE", 1138 "ACTIVE" : "PASSIVE",
1139 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? 1139 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1140 active_dwell : passive_dwell); 1140 active_dwell : passive_dwell);
1141 1141
1142 scan_ch++; 1142 scan_ch++;
1143 added++; 1143 added++;
1144 } 1144 }
1145 1145
1146 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); 1146 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1147 return added; 1147 return added;
1148 } 1148 }
1149 1149
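Note: the loop above decides, per channel, whether the scan probes actively or listens passively. A channel is forced passive when the scan itself is not active, when the driver's regulatory info marks it passive, or when mac80211 flags it IEEE80211_CHAN_PASSIVE_SCAN. A reduced predicate capturing the same rule (the struct below is a hypothetical view of those inputs, not a driver type):

    #include <stdbool.h>

    /* Hypothetical reduced view of the inputs used in the loop above. */
    struct chan_view {
            bool reg_passive;       /* is_channel_passive(ch_info)               */
            bool mac80211_passive;  /* chan->flags & IEEE80211_CHAN_PASSIVE_SCAN */
    };

    /* Active probing is allowed only when the scan is active and nothing
     * marks the channel passive; otherwise the channel is scanned passively. */
    static bool scan_channel_is_active(bool is_active_scan, const struct chan_view *c)
    {
            return is_active_scan && !c->reg_passive && !c->mac80211_passive;
    }
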
1150 void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) 1150 void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1151 { 1151 {
1152 struct iwl_host_cmd cmd = { 1152 struct iwl_host_cmd cmd = {
1153 .id = REPLY_SCAN_CMD, 1153 .id = REPLY_SCAN_CMD,
1154 .len = sizeof(struct iwl_scan_cmd), 1154 .len = sizeof(struct iwl_scan_cmd),
1155 .flags = CMD_SIZE_HUGE, 1155 .flags = CMD_SIZE_HUGE,
1156 }; 1156 };
1157 struct iwl_scan_cmd *scan; 1157 struct iwl_scan_cmd *scan;
1158 struct ieee80211_conf *conf = NULL; 1158 struct ieee80211_conf *conf = NULL;
1159 u32 rate_flags = 0; 1159 u32 rate_flags = 0;
1160 u16 cmd_len; 1160 u16 cmd_len;
1161 u16 rx_chain = 0; 1161 u16 rx_chain = 0;
1162 enum ieee80211_band band; 1162 enum ieee80211_band band;
1163 u8 n_probes = 0; 1163 u8 n_probes = 0;
1164 u8 rx_ant = priv->hw_params.valid_rx_ant; 1164 u8 rx_ant = priv->hw_params.valid_rx_ant;
1165 u8 rate; 1165 u8 rate;
1166 bool is_active = false; 1166 bool is_active = false;
1167 int chan_mod; 1167 int chan_mod;
1168 u8 active_chains; 1168 u8 active_chains;
1169 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; 1169 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
1170 1170
1171 conf = ieee80211_get_hw_conf(priv->hw); 1171 conf = ieee80211_get_hw_conf(priv->hw);
1172 1172
1173 cancel_delayed_work(&priv->scan_check); 1173 cancel_delayed_work(&priv->scan_check);
1174 1174
1175 if (!iwl_is_ready(priv)) { 1175 if (!iwl_is_ready(priv)) {
1176 IWL_WARN(priv, "request scan called when driver not ready.\n"); 1176 IWL_WARN(priv, "request scan called when driver not ready.\n");
1177 goto done; 1177 goto done;
1178 } 1178 }
1179 1179
1180 /* Make sure the scan wasn't canceled before this queued work 1180 /* Make sure the scan wasn't canceled before this queued work
1181 * was given the chance to run... */ 1181 * was given the chance to run... */
1182 if (!test_bit(STATUS_SCANNING, &priv->status)) 1182 if (!test_bit(STATUS_SCANNING, &priv->status))
1183 goto done; 1183 goto done;
1184 1184
1185 /* This should never be called or scheduled if there is currently 1185 /* This should never be called or scheduled if there is currently
1186 * a scan active in the hardware. */ 1186 * a scan active in the hardware. */
1187 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 1187 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1188 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. " 1188 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
1189 "Ignoring second request.\n"); 1189 "Ignoring second request.\n");
1190 goto done; 1190 goto done;
1191 } 1191 }
1192 1192
1193 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 1193 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1194 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n"); 1194 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
1195 goto done; 1195 goto done;
1196 } 1196 }
1197 1197
1198 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 1198 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1199 IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n"); 1199 IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
1200 goto done; 1200 goto done;
1201 } 1201 }
1202 1202
1203 if (iwl_is_rfkill(priv)) { 1203 if (iwl_is_rfkill(priv)) {
1204 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n"); 1204 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
1205 goto done; 1205 goto done;
1206 } 1206 }
1207 1207
1208 if (!test_bit(STATUS_READY, &priv->status)) { 1208 if (!test_bit(STATUS_READY, &priv->status)) {
1209 IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n"); 1209 IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
1210 goto done; 1210 goto done;
1211 } 1211 }
1212 1212
1213 if (!priv->scan_cmd) { 1213 if (!priv->scan_cmd) {
1214 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + 1214 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
1215 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 1215 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
1216 if (!priv->scan_cmd) { 1216 if (!priv->scan_cmd) {
1217 IWL_DEBUG_SCAN(priv, 1217 IWL_DEBUG_SCAN(priv,
1218 "fail to allocate memory for scan\n"); 1218 "fail to allocate memory for scan\n");
1219 goto done; 1219 goto done;
1220 } 1220 }
1221 } 1221 }
1222 scan = priv->scan_cmd; 1222 scan = priv->scan_cmd;
1223 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); 1223 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
1224 1224
1225 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 1225 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
1226 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 1226 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
1227 1227
1228 if (iwl_is_associated(priv)) { 1228 if (iwl_is_associated(priv)) {
1229 u16 interval = 0; 1229 u16 interval = 0;
1230 u32 extra; 1230 u32 extra;
1231 u32 suspend_time = 100; 1231 u32 suspend_time = 100;
1232 u32 scan_suspend_time = 100; 1232 u32 scan_suspend_time = 100;
1233 unsigned long flags; 1233 unsigned long flags;
1234 1234
1235 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 1235 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
1236 spin_lock_irqsave(&priv->lock, flags); 1236 spin_lock_irqsave(&priv->lock, flags);
1237 if (priv->is_internal_short_scan) 1237 if (priv->is_internal_short_scan)
1238 interval = 0; 1238 interval = 0;
1239 else 1239 else
1240 interval = vif->bss_conf.beacon_int; 1240 interval = vif->bss_conf.beacon_int;
1241 spin_unlock_irqrestore(&priv->lock, flags); 1241 spin_unlock_irqrestore(&priv->lock, flags);
1242 1242
1243 scan->suspend_time = 0; 1243 scan->suspend_time = 0;
1244 scan->max_out_time = cpu_to_le32(200 * 1024); 1244 scan->max_out_time = cpu_to_le32(200 * 1024);
1245 if (!interval) 1245 if (!interval)
1246 interval = suspend_time; 1246 interval = suspend_time;
1247 1247
1248 extra = (suspend_time / interval) << 22; 1248 extra = (suspend_time / interval) << 22;
1249 scan_suspend_time = (extra | 1249 scan_suspend_time = (extra |
1250 ((suspend_time % interval) * 1024)); 1250 ((suspend_time % interval) * 1024));
1251 scan->suspend_time = cpu_to_le32(scan_suspend_time); 1251 scan->suspend_time = cpu_to_le32(scan_suspend_time);
1252 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", 1252 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
1253 scan_suspend_time, interval); 1253 scan_suspend_time, interval);
1254 } 1254 }
1255 1255
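Note: when associated, the suspend time above is packed as a quotient/remainder pair against the beacon interval: the number of whole beacon intervals goes into the top bits (shifted by 22) and the remainder is scaled by 1024 (presumably microsecond/TU units). A small worked example of the same packing:

    #include <stdio.h>
    #include <stdint.h>

    /* Pack suspend_time the same way the associated-scan block above does. */
    static uint32_t pack_suspend_time(uint32_t suspend_time, uint32_t interval)
    {
            uint32_t extra = (suspend_time / interval) << 22;   /* whole beacon intervals */
            return extra | ((suspend_time % interval) * 1024);  /* remainder, scaled by 1024 */
    }

    int main(void)
    {
            /* beacon interval 100 -> one full interval, no remainder -> 0x00400000 */
            printf("0x%08x\n", pack_suspend_time(100, 100));
            /* beacon interval 300 -> no full interval, remainder 100 -> 0x00019000 */
            printf("0x%08x\n", pack_suspend_time(100, 300));
            return 0;
    }
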
1256 if (priv->is_internal_short_scan) { 1256 if (priv->is_internal_short_scan) {
1257 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); 1257 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
1258 } else if (priv->scan_request->n_ssids) { 1258 } else if (priv->scan_request->n_ssids) {
1259 int i, p = 0; 1259 int i, p = 0;
1260 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); 1260 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
1261 for (i = 0; i < priv->scan_request->n_ssids; i++) { 1261 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1262 /* always does wildcard anyway */ 1262 /* always does wildcard anyway */
1263 if (!priv->scan_request->ssids[i].ssid_len) 1263 if (!priv->scan_request->ssids[i].ssid_len)
1264 continue; 1264 continue;
1265 scan->direct_scan[p].id = WLAN_EID_SSID; 1265 scan->direct_scan[p].id = WLAN_EID_SSID;
1266 scan->direct_scan[p].len = 1266 scan->direct_scan[p].len =
1267 priv->scan_request->ssids[i].ssid_len; 1267 priv->scan_request->ssids[i].ssid_len;
1268 memcpy(scan->direct_scan[p].ssid, 1268 memcpy(scan->direct_scan[p].ssid,
1269 priv->scan_request->ssids[i].ssid, 1269 priv->scan_request->ssids[i].ssid,
1270 priv->scan_request->ssids[i].ssid_len); 1270 priv->scan_request->ssids[i].ssid_len);
1271 n_probes++; 1271 n_probes++;
1272 p++; 1272 p++;
1273 } 1273 }
1274 is_active = true; 1274 is_active = true;
1275 } else 1275 } else
1276 IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); 1276 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
1277 1277
1278 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 1278 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
1279 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; 1279 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
1280 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 1280 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1281 1281
1282 switch (priv->scan_band) { 1282 switch (priv->scan_band) {
1283 case IEEE80211_BAND_2GHZ: 1283 case IEEE80211_BAND_2GHZ:
1284 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 1284 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
1285 chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK) 1285 chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
1286 >> RXON_FLG_CHANNEL_MODE_POS; 1286 >> RXON_FLG_CHANNEL_MODE_POS;
1287 if (chan_mod == CHANNEL_MODE_PURE_40) { 1287 if (chan_mod == CHANNEL_MODE_PURE_40) {
1288 rate = IWL_RATE_6M_PLCP; 1288 rate = IWL_RATE_6M_PLCP;
1289 } else { 1289 } else {
1290 rate = IWL_RATE_1M_PLCP; 1290 rate = IWL_RATE_1M_PLCP;
1291 rate_flags = RATE_MCS_CCK_MSK; 1291 rate_flags = RATE_MCS_CCK_MSK;
1292 } 1292 }
1293 scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED; 1293 scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
1294 break; 1294 break;
1295 case IEEE80211_BAND_5GHZ: 1295 case IEEE80211_BAND_5GHZ:
1296 rate = IWL_RATE_6M_PLCP; 1296 rate = IWL_RATE_6M_PLCP;
1297 /* 1297 /*
1298 * If active scanning is requested but a certain channel is 1298 * If active scanning is requested but a certain channel is
1299 * marked passive, we can do active scanning if we detect 1299 * marked passive, we can do active scanning if we detect
1300 * transmissions. 1300 * transmissions.
1301 * 1301 *
1302 * There is an issue with some firmware versions that triggers 1302 * There is an issue with some firmware versions that triggers
1303 * a sysassert on a "good CRC threshold" of zero (== disabled), 1303 * a sysassert on a "good CRC threshold" of zero (== disabled),
1304 * on a radar channel even though this means that we should NOT 1304 * on a radar channel even though this means that we should NOT
1305 * send probes. 1305 * send probes.
1306 * 1306 *
1307 * The "good CRC threshold" is the number of frames that we 1307 * The "good CRC threshold" is the number of frames that we
1308 * need to receive during our dwell time on a channel before 1308 * need to receive during our dwell time on a channel before
1309 * sending out probes -- setting this to a huge value will 1309 * sending out probes -- setting this to a huge value will
1310 * mean we never reach it, but at the same time work around 1310 * mean we never reach it, but at the same time work around
1311 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER 1311 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1312 * here instead of IWL_GOOD_CRC_TH_DISABLED. 1312 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1313 */ 1313 */
1314 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : 1314 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1315 IWL_GOOD_CRC_TH_NEVER; 1315 IWL_GOOD_CRC_TH_NEVER;
1316 break; 1316 break;
1317 default: 1317 default:
1318 IWL_WARN(priv, "Invalid scan band count\n"); 1318 IWL_WARN(priv, "Invalid scan band count\n");
1319 goto done; 1319 goto done;
1320 } 1320 }
1321 1321
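Note: the 5 GHz branch above picks the "good CRC threshold" around a firmware quirk: an active scan uses the normal default, while a passive scan uses a huge "never reached" value instead of 0 ("disabled"), because some firmware versions sysassert on a disabled threshold on radar channels. A reduced sketch of that decision; the values below are placeholders, not the driver's IWL_GOOD_CRC_TH_* constants:

    #include <stdint.h>

    /* Placeholder values; the real constants live in the iwlwifi headers. */
    #define GOOD_CRC_TH_DISABLED 0x0000  /* would trigger the firmware quirk  */
    #define GOOD_CRC_TH_DEFAULT  0x0001  /* probe after the first good frame  */
    #define GOOD_CRC_TH_NEVER    0xffff  /* threshold that is never reached   */

    /* Passive 5 GHz scans avoid the "disabled" value by using an unreachable
     * threshold: same effect (no probes sent) without the sysassert. */
    static uint16_t pick_good_crc_th(int is_active)
    {
            return is_active ? GOOD_CRC_TH_DEFAULT : GOOD_CRC_TH_NEVER;
    }
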
1322 band = priv->scan_band; 1322 band = priv->scan_band;
1323 1323
1324 if (priv->cfg->scan_rx_antennas[band]) 1324 if (priv->cfg->scan_rx_antennas[band])
1325 rx_ant = priv->cfg->scan_rx_antennas[band]; 1325 rx_ant = priv->cfg->scan_rx_antennas[band];
1326 1326
1327 if (priv->cfg->scan_tx_antennas[band]) 1327 if (priv->cfg->scan_tx_antennas[band])
1328 scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; 1328 scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
1329 1329
1330 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band], 1330 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
1331 scan_tx_antennas); 1331 scan_tx_antennas);
1332 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); 1332 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
1333 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); 1333 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
1334 1334
1335 /* In power save mode use one chain, otherwise use all chains */ 1335 /* In power save mode use one chain, otherwise use all chains */
1336 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 1336 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
1337 /* rx_ant has been set to all valid chains previously */ 1337 /* rx_ant has been set to all valid chains previously */
1338 active_chains = rx_ant & 1338 active_chains = rx_ant &
1339 ((u8)(priv->chain_noise_data.active_chains)); 1339 ((u8)(priv->chain_noise_data.active_chains));
1340 if (!active_chains) 1340 if (!active_chains)
1341 active_chains = rx_ant; 1341 active_chains = rx_ant;
1342 1342
1343 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", 1343 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
1344 priv->chain_noise_data.active_chains); 1344 priv->chain_noise_data.active_chains);
1345 1345
1346 rx_ant = first_antenna(active_chains); 1346 rx_ant = first_antenna(active_chains);
1347 } 1347 }
1348 /* MIMO is not used here, but value is required */ 1348 /* MIMO is not used here, but value is required */
1349 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 1349 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
1350 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 1350 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1351 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 1351 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
1352 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 1352 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1353 scan->rx_chain = cpu_to_le16(rx_chain); 1353 scan->rx_chain = cpu_to_le16(rx_chain);
1354 if (!priv->is_internal_short_scan) { 1354 if (!priv->is_internal_short_scan) {
1355 cmd_len = iwl_fill_probe_req(priv, 1355 cmd_len = iwl_fill_probe_req(priv,
1356 (struct ieee80211_mgmt *)scan->data, 1356 (struct ieee80211_mgmt *)scan->data,
1357 vif->addr, 1357 vif->addr,
1358 priv->scan_request->ie, 1358 priv->scan_request->ie,
1359 priv->scan_request->ie_len, 1359 priv->scan_request->ie_len,
1360 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1360 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1361 } else { 1361 } else {
1362 /* use bcast addr, will not be transmitted but must be valid */ 1362 /* use bcast addr, will not be transmitted but must be valid */
1363 cmd_len = iwl_fill_probe_req(priv, 1363 cmd_len = iwl_fill_probe_req(priv,
1364 (struct ieee80211_mgmt *)scan->data, 1364 (struct ieee80211_mgmt *)scan->data,
1365 iwl_bcast_addr, NULL, 0, 1365 iwl_bcast_addr, NULL, 0,
1366 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1366 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1367 1367
1368 } 1368 }
1369 scan->tx_cmd.len = cpu_to_le16(cmd_len); 1369 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1370 1370
1371 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | 1371 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1372 RXON_FILTER_BCON_AWARE_MSK); 1372 RXON_FILTER_BCON_AWARE_MSK);
1373 1373
1374 if (priv->is_internal_short_scan) { 1374 if (priv->is_internal_short_scan) {
1375 scan->channel_count = 1375 scan->channel_count =
1376 iwl_get_single_channel_for_scan(priv, vif, band, 1376 iwl_get_single_channel_for_scan(priv, vif, band,
1377 (void *)&scan->data[le16_to_cpu( 1377 (void *)&scan->data[le16_to_cpu(
1378 scan->tx_cmd.len)]); 1378 scan->tx_cmd.len)]);
1379 } else { 1379 } else {
1380 scan->channel_count = 1380 scan->channel_count =
1381 iwl_get_channels_for_scan(priv, vif, band, 1381 iwl_get_channels_for_scan(priv, vif, band,
1382 is_active, n_probes, 1382 is_active, n_probes,
1383 (void *)&scan->data[le16_to_cpu( 1383 (void *)&scan->data[le16_to_cpu(
1384 scan->tx_cmd.len)]); 1384 scan->tx_cmd.len)]);
1385 } 1385 }
1386 if (scan->channel_count == 0) { 1386 if (scan->channel_count == 0) {
1387 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 1387 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1388 goto done; 1388 goto done;
1389 } 1389 }
1390 1390
1391 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 1391 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1392 scan->channel_count * sizeof(struct iwl_scan_channel); 1392 scan->channel_count * sizeof(struct iwl_scan_channel);
1393 cmd.data = scan; 1393 cmd.data = scan;
1394 scan->len = cpu_to_le16(cmd.len); 1394 scan->len = cpu_to_le16(cmd.len);
1395 1395
1396 set_bit(STATUS_SCAN_HW, &priv->status); 1396 set_bit(STATUS_SCAN_HW, &priv->status);
1397 if (iwl_send_cmd_sync(priv, &cmd)) 1397 if (iwl_send_cmd_sync(priv, &cmd))
1398 goto done; 1398 goto done;
1399 1399
1400 queue_delayed_work(priv->workqueue, &priv->scan_check, 1400 queue_delayed_work(priv->workqueue, &priv->scan_check,
1401 IWL_SCAN_CHECK_WATCHDOG); 1401 IWL_SCAN_CHECK_WATCHDOG);
1402 1402
1403 return; 1403 return;
1404 1404
1405 done: 1405 done:
1406 /* Cannot perform scan. Make sure we clear scanning 1406 /* Cannot perform scan. Make sure we clear scanning
1407 * bits from status so next scan request can be performed. 1407 * bits from status so next scan request can be performed.
1408 * If we don't clear the scanning status bit here, all 1408 * If we don't clear the scanning status bit here, all
1409 * subsequent scans will fail. 1409 * subsequent scans will fail.
1410 */ 1410 */
1411 clear_bit(STATUS_SCAN_HW, &priv->status); 1411 clear_bit(STATUS_SCAN_HW, &priv->status);
1412 clear_bit(STATUS_SCANNING, &priv->status); 1412 clear_bit(STATUS_SCANNING, &priv->status);
1413 /* inform mac80211 scan aborted */ 1413 /* inform mac80211 scan aborted */
1414 queue_work(priv->workqueue, &priv->scan_completed); 1414 queue_work(priv->workqueue, &priv->abort_scan);
1415 } 1415 }
1416 1416
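Note: the done: label above is the error path, and it is where this change lands: when the scan command cannot be issued, the driver clears STATUS_SCAN_HW and STATUS_SCANNING so later requests are not refused, and (after the fix) queues the abort work instead of the normal completion work, so mac80211 is told the scan was aborted rather than completed. A reduced sketch of that cleanup contract, with hypothetical stand-ins for the status bits and work item:

    /* Hypothetical status bits standing in for the driver's STATUS_* flags. */
    enum { STATUS_SCANNING = 1 << 0, STATUS_SCAN_HW = 1 << 1 };

    /* Error-path cleanup as in the done: label above: drop both scan bits so
     * a later scan request is accepted, then report an *abort* to mac80211
     * (the pre-fix code queued the normal completion work here instead). */
    static void scan_request_failed(unsigned long *status, void (*queue_abort_work)(void))
    {
            *status &= ~((unsigned long)(STATUS_SCAN_HW | STATUS_SCANNING));
            queue_abort_work();
    }
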
1417 int iwlagn_manage_ibss_station(struct iwl_priv *priv, 1417 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1418 struct ieee80211_vif *vif, bool add) 1418 struct ieee80211_vif *vif, bool add)
1419 { 1419 {
1420 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1420 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1421 1421
1422 if (add) 1422 if (add)
1423 return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true, 1423 return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
1424 &vif_priv->ibss_bssid_sta_id); 1424 &vif_priv->ibss_bssid_sta_id);
1425 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, 1425 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1426 vif->bss_conf.bssid); 1426 vif->bss_conf.bssid);
1427 } 1427 }
1428 1428
1429 void iwl_free_tfds_in_queue(struct iwl_priv *priv, 1429 void iwl_free_tfds_in_queue(struct iwl_priv *priv,
1430 int sta_id, int tid, int freed) 1430 int sta_id, int tid, int freed)
1431 { 1431 {
1432 lockdep_assert_held(&priv->sta_lock); 1432 lockdep_assert_held(&priv->sta_lock);
1433 1433
1434 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) 1434 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1435 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1435 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1436 else { 1436 else {
1437 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", 1437 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1438 priv->stations[sta_id].tid[tid].tfds_in_queue, 1438 priv->stations[sta_id].tid[tid].tfds_in_queue,
1439 freed); 1439 freed);
1440 priv->stations[sta_id].tid[tid].tfds_in_queue = 0; 1440 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1441 } 1441 }
1442 } 1442 }
1443 1443
1444 #define IWL_FLUSH_WAIT_MS 2000 1444 #define IWL_FLUSH_WAIT_MS 2000
1445 1445
1446 int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv) 1446 int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
1447 { 1447 {
1448 struct iwl_tx_queue *txq; 1448 struct iwl_tx_queue *txq;
1449 struct iwl_queue *q; 1449 struct iwl_queue *q;
1450 int cnt; 1450 int cnt;
1451 unsigned long now = jiffies; 1451 unsigned long now = jiffies;
1452 int ret = 0; 1452 int ret = 0;
1453 1453
1454 /* waiting for all the tx frames to complete might take a while */ 1454 /* waiting for all the tx frames to complete might take a while */
1455 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { 1455 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1456 if (cnt == IWL_CMD_QUEUE_NUM) 1456 if (cnt == IWL_CMD_QUEUE_NUM)
1457 continue; 1457 continue;
1458 txq = &priv->txq[cnt]; 1458 txq = &priv->txq[cnt];
1459 q = &txq->q; 1459 q = &txq->q;
1460 while (q->read_ptr != q->write_ptr && !time_after(jiffies, 1460 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1461 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) 1461 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1462 msleep(1); 1462 msleep(1);
1463 1463
1464 if (q->read_ptr != q->write_ptr) { 1464 if (q->read_ptr != q->write_ptr) {
1465 IWL_ERR(priv, "fail to flush all tx fifo queues\n"); 1465 IWL_ERR(priv, "fail to flush all tx fifo queues\n");
1466 ret = -ETIMEDOUT; 1466 ret = -ETIMEDOUT;
1467 break; 1467 break;
1468 } 1468 }
1469 } 1469 }
1470 return ret; 1470 return ret;
1471 } 1471 }
1472 1472
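Note: the wait loop above bounds its polling with a deadline taken once before iterating the queues (now + IWL_FLUSH_WAIT_MS) and sleeps a millisecond between checks; if a queue still has entries at the deadline it gives up with -ETIMEDOUT. A self-contained, user-space analogue of that poll-until-deadline idiom (clock_gettime/nanosleep stand in for the kernel's jiffies/msleep):

    #define _POSIX_C_SOURCE 200809L
    #include <stdbool.h>
    #include <time.h>
    #include <errno.h>

    /* Poll a condition roughly once per millisecond until it holds or the
     * timeout expires; the callback replaces the read_ptr != write_ptr check. */
    static int wait_for_condition(bool (*done)(void *), void *arg, int timeout_ms)
    {
            struct timespec delay = { .tv_sec = 0, .tv_nsec = 1 * 1000 * 1000 };
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (;;) {
                    if (done(arg))
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                                      (now.tv_nsec - start.tv_nsec) / 1000000;
                    if (elapsed_ms >= timeout_ms)
                            return -ETIMEDOUT;
                    nanosleep(&delay, NULL);    /* ~msleep(1) */
            }
    }
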
1473 #define IWL_TX_QUEUE_MSK 0xfffff 1473 #define IWL_TX_QUEUE_MSK 0xfffff
1474 1474
1475 /** 1475 /**
1476 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode 1476 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
1477 * 1477 *
1478 * pre-requirements: 1478 * pre-requirements:
1479 * 1. acquire mutex before calling 1479 * 1. acquire mutex before calling
1480 * 2. make sure rf is on and not in exit state 1480 * 2. make sure rf is on and not in exit state
1481 */ 1481 */
1482 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) 1482 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1483 { 1483 {
1484 struct iwl_txfifo_flush_cmd flush_cmd; 1484 struct iwl_txfifo_flush_cmd flush_cmd;
1485 struct iwl_host_cmd cmd = { 1485 struct iwl_host_cmd cmd = {
1486 .id = REPLY_TXFIFO_FLUSH, 1486 .id = REPLY_TXFIFO_FLUSH,
1487 .len = sizeof(struct iwl_txfifo_flush_cmd), 1487 .len = sizeof(struct iwl_txfifo_flush_cmd),
1488 .flags = CMD_SYNC, 1488 .flags = CMD_SYNC,
1489 .data = &flush_cmd, 1489 .data = &flush_cmd,
1490 }; 1490 };
1491 1491
1492 might_sleep(); 1492 might_sleep();
1493 1493
1494 memset(&flush_cmd, 0, sizeof(flush_cmd)); 1494 memset(&flush_cmd, 0, sizeof(flush_cmd));
1495 flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK | 1495 flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
1496 IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK; 1496 IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
1497 if (priv->cfg->sku & IWL_SKU_N) 1497 if (priv->cfg->sku & IWL_SKU_N)
1498 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; 1498 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
1499 1499
1500 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", 1500 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
1501 flush_cmd.fifo_control); 1501 flush_cmd.fifo_control);
1502 flush_cmd.flush_control = cpu_to_le16(flush_control); 1502 flush_cmd.flush_control = cpu_to_le16(flush_control);
1503 1503
1504 return iwl_send_cmd(priv, &cmd); 1504 return iwl_send_cmd(priv, &cmd);
1505 } 1505 }
1506 1506
1507 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control) 1507 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1508 { 1508 {
1509 mutex_lock(&priv->mutex); 1509 mutex_lock(&priv->mutex);
1510 ieee80211_stop_queues(priv->hw); 1510 ieee80211_stop_queues(priv->hw);
1511 if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) { 1511 if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
1512 IWL_ERR(priv, "flush request fail\n"); 1512 IWL_ERR(priv, "flush request fail\n");
1513 goto done; 1513 goto done;
1514 } 1514 }
1515 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); 1515 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
1516 iwlagn_wait_tx_queue_empty(priv); 1516 iwlagn_wait_tx_queue_empty(priv);
1517 done: 1517 done:
1518 ieee80211_wake_queues(priv->hw); 1518 ieee80211_wake_queues(priv->hw);
1519 mutex_unlock(&priv->mutex); 1519 mutex_unlock(&priv->mutex);
1520 } 1520 }
1521 1521
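Note: the flush helper above follows a fixed sequence under the mutex: stop the mac80211 queues, ask the uCode to flush the TX FIFOs, wait for the TX queues to drain, then wake the queues again on both the success and the failure path. A reduced sketch of that ordering, with hypothetical callbacks in place of the real operations:

    /* Hypothetical callbacks standing in for the mac80211/uCode operations
     * used by the flush helper above. */
    struct flush_ops {
            void (*stop_queues)(void);
            int  (*fifo_flush)(void);    /* returns nonzero on failure */
            void (*wait_empty)(void);
            void (*wake_queues)(void);
    };

    /* Same ordering as above: queues are always woken again, even when the
     * flush command itself fails (the wait step is skipped in that case). */
    static void dev_txfifo_flush(const struct flush_ops *ops)
    {
            ops->stop_queues();
            if (ops->fifo_flush() == 0)
                    ops->wait_empty();
            ops->wake_queues();
    }
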
drivers/net/wireless/iwlwifi/iwl3945-base.c
1 /****************************************************************************** 1 /******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as 9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT 12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details. 15 * more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License along with 17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution in the 21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 31
32 #include <linux/kernel.h> 32 #include <linux/kernel.h>
33 #include <linux/module.h> 33 #include <linux/module.h>
34 #include <linux/init.h> 34 #include <linux/init.h>
35 #include <linux/pci.h> 35 #include <linux/pci.h>
36 #include <linux/slab.h> 36 #include <linux/slab.h>
37 #include <linux/dma-mapping.h> 37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h> 38 #include <linux/delay.h>
39 #include <linux/sched.h> 39 #include <linux/sched.h>
40 #include <linux/skbuff.h> 40 #include <linux/skbuff.h>
41 #include <linux/netdevice.h> 41 #include <linux/netdevice.h>
42 #include <linux/wireless.h> 42 #include <linux/wireless.h>
43 #include <linux/firmware.h> 43 #include <linux/firmware.h>
44 #include <linux/etherdevice.h> 44 #include <linux/etherdevice.h>
45 #include <linux/if_arp.h> 45 #include <linux/if_arp.h>
46 46
47 #include <net/ieee80211_radiotap.h> 47 #include <net/ieee80211_radiotap.h>
48 #include <net/mac80211.h> 48 #include <net/mac80211.h>
49 49
50 #include <asm/div64.h> 50 #include <asm/div64.h>
51 51
52 #define DRV_NAME "iwl3945" 52 #define DRV_NAME "iwl3945"
53 53
54 #include "iwl-fh.h" 54 #include "iwl-fh.h"
55 #include "iwl-3945-fh.h" 55 #include "iwl-3945-fh.h"
56 #include "iwl-commands.h" 56 #include "iwl-commands.h"
57 #include "iwl-sta.h" 57 #include "iwl-sta.h"
58 #include "iwl-3945.h" 58 #include "iwl-3945.h"
59 #include "iwl-core.h" 59 #include "iwl-core.h"
60 #include "iwl-helpers.h" 60 #include "iwl-helpers.h"
61 #include "iwl-dev.h" 61 #include "iwl-dev.h"
62 #include "iwl-spectrum.h" 62 #include "iwl-spectrum.h"
63 63
64 /* 64 /*
65 * module name, copyright, version, etc. 65 * module name, copyright, version, etc.
66 */ 66 */
67 67
68 #define DRV_DESCRIPTION \ 68 #define DRV_DESCRIPTION \
69 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" 69 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
70 70
71 #ifdef CONFIG_IWLWIFI_DEBUG 71 #ifdef CONFIG_IWLWIFI_DEBUG
72 #define VD "d" 72 #define VD "d"
73 #else 73 #else
74 #define VD 74 #define VD
75 #endif 75 #endif
76 76
77 /* 77 /*
78 * add "s" to indicate spectrum measurement included. 78 * add "s" to indicate spectrum measurement included.
79 * we add it here to be consistent with previous releases in which 79 * we add it here to be consistent with previous releases in which
80 * this was configurable. 80 * this was configurable.
81 */ 81 */
82 #define DRV_VERSION IWLWIFI_VERSION VD "s" 82 #define DRV_VERSION IWLWIFI_VERSION VD "s"
83 #define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" 83 #define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
84 #define DRV_AUTHOR "<ilw@linux.intel.com>" 84 #define DRV_AUTHOR "<ilw@linux.intel.com>"
85 85
86 MODULE_DESCRIPTION(DRV_DESCRIPTION); 86 MODULE_DESCRIPTION(DRV_DESCRIPTION);
87 MODULE_VERSION(DRV_VERSION); 87 MODULE_VERSION(DRV_VERSION);
88 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 88 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
89 MODULE_LICENSE("GPL"); 89 MODULE_LICENSE("GPL");
90 90
91 /* module parameters */ 91 /* module parameters */
92 struct iwl_mod_params iwl3945_mod_params = { 92 struct iwl_mod_params iwl3945_mod_params = {
93 .sw_crypto = 1, 93 .sw_crypto = 1,
94 .restart_fw = 1, 94 .restart_fw = 1,
95 /* the rest are 0 by default */ 95 /* the rest are 0 by default */
96 }; 96 };
97 97
98 /** 98 /**
99 * iwl3945_get_antenna_flags - Get antenna flags for RXON command 99 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
100 * @priv: eeprom and antenna fields are used to determine antenna flags 100 * @priv: eeprom and antenna fields are used to determine antenna flags
101 * 101 *
102 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed 102 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
103 * iwl3945_mod_params.antenna specifies the antenna diversity mode: 103 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
104 * 104 *
105 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself 105 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
106 * IWL_ANTENNA_MAIN - Force MAIN antenna 106 * IWL_ANTENNA_MAIN - Force MAIN antenna
107 * IWL_ANTENNA_AUX - Force AUX antenna 107 * IWL_ANTENNA_AUX - Force AUX antenna
108 */ 108 */
109 __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) 109 __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
110 { 110 {
111 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; 111 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
112 112
113 switch (iwl3945_mod_params.antenna) { 113 switch (iwl3945_mod_params.antenna) {
114 case IWL_ANTENNA_DIVERSITY: 114 case IWL_ANTENNA_DIVERSITY:
115 return 0; 115 return 0;
116 116
117 case IWL_ANTENNA_MAIN: 117 case IWL_ANTENNA_MAIN:
118 if (eeprom->antenna_switch_type) 118 if (eeprom->antenna_switch_type)
119 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; 119 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
120 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; 120 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
121 121
122 case IWL_ANTENNA_AUX: 122 case IWL_ANTENNA_AUX:
123 if (eeprom->antenna_switch_type) 123 if (eeprom->antenna_switch_type)
124 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; 124 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
125 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; 125 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
126 } 126 }
127 127
128 /* bad antenna selector value */ 128 /* bad antenna selector value */
129 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n", 129 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
130 iwl3945_mod_params.antenna); 130 iwl3945_mod_params.antenna);
131 131
132 return 0; /* "diversity" is default if error */ 132 return 0; /* "diversity" is default if error */
133 } 133 }
134 134
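Note: the antenna selection above reduces to a small truth table: diversity returns no flags, while forcing MAIN or AUX disables diversity and picks antenna A or B, with the A/B choice inverted when the EEPROM reports the connectors as swapped (antenna_switch_type). A reduced sketch of the same mapping; the flag values below are placeholders, not the RXON_FLG_* constants:

    #include <stdint.h>

    /* Placeholder flag values; the real RXON_FLG_* constants are __le32 masks. */
    #define FLG_DIS_DIV (1u << 0)
    #define FLG_ANT_A   (1u << 1)
    #define FLG_ANT_B   (1u << 2)

    enum antenna_mode { ANT_DIVERSITY, ANT_MAIN, ANT_AUX };

    /* swapped mirrors eeprom->antenna_switch_type: nonzero means MAIN/AUX are
     * wired to the opposite RF inputs, so the A/B choice flips. */
    static uint32_t antenna_flags(enum antenna_mode mode, int swapped)
    {
            if (mode == ANT_DIVERSITY)
                    return 0;
            if (mode == ANT_MAIN)
                    return FLG_DIS_DIV | (swapped ? FLG_ANT_B : FLG_ANT_A);
            return FLG_DIS_DIV | (swapped ? FLG_ANT_A : FLG_ANT_B);
    }
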
135 static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, 135 static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
136 struct ieee80211_key_conf *keyconf, 136 struct ieee80211_key_conf *keyconf,
137 u8 sta_id) 137 u8 sta_id)
138 { 138 {
139 unsigned long flags; 139 unsigned long flags;
140 __le16 key_flags = 0; 140 __le16 key_flags = 0;
141 int ret; 141 int ret;
142 142
143 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); 143 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
144 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 144 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
145 145
146 if (sta_id == priv->hw_params.bcast_sta_id) 146 if (sta_id == priv->hw_params.bcast_sta_id)
147 key_flags |= STA_KEY_MULTICAST_MSK; 147 key_flags |= STA_KEY_MULTICAST_MSK;
148 148
149 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 149 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
150 keyconf->hw_key_idx = keyconf->keyidx; 150 keyconf->hw_key_idx = keyconf->keyidx;
151 key_flags &= ~STA_KEY_FLG_INVALID; 151 key_flags &= ~STA_KEY_FLG_INVALID;
152 152
153 spin_lock_irqsave(&priv->sta_lock, flags); 153 spin_lock_irqsave(&priv->sta_lock, flags);
154 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 154 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
155 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; 155 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
156 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 156 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
157 keyconf->keylen); 157 keyconf->keylen);
158 158
159 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 159 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
160 keyconf->keylen); 160 keyconf->keylen);
161 161
162 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 162 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
163 == STA_KEY_FLG_NO_ENC) 163 == STA_KEY_FLG_NO_ENC)
164 priv->stations[sta_id].sta.key.key_offset = 164 priv->stations[sta_id].sta.key.key_offset =
165 iwl_get_free_ucode_key_index(priv); 165 iwl_get_free_ucode_key_index(priv);
166 /* else, we are overriding an existing key => no need to allocate room 166 /* else, we are overriding an existing key => no need to allocate room
167 * in uCode. */ 167 * in uCode. */
168 168
169 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, 169 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
170 "no space for a new key"); 170 "no space for a new key");
171 171
172 priv->stations[sta_id].sta.key.key_flags = key_flags; 172 priv->stations[sta_id].sta.key.key_flags = key_flags;
173 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 173 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
174 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 174 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
175 175
176 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); 176 IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
177 177
178 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 178 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
179 179
180 spin_unlock_irqrestore(&priv->sta_lock, flags); 180 spin_unlock_irqrestore(&priv->sta_lock, flags);
181 181
182 return ret; 182 return ret;
183 } 183 }
184 184
185 static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv, 185 static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
186 struct ieee80211_key_conf *keyconf, 186 struct ieee80211_key_conf *keyconf,
187 u8 sta_id) 187 u8 sta_id)
188 { 188 {
189 return -EOPNOTSUPP; 189 return -EOPNOTSUPP;
190 } 190 }
191 191
192 static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, 192 static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
193 struct ieee80211_key_conf *keyconf, 193 struct ieee80211_key_conf *keyconf,
194 u8 sta_id) 194 u8 sta_id)
195 { 195 {
196 return -EOPNOTSUPP; 196 return -EOPNOTSUPP;
197 } 197 }
198 198
199 static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) 199 static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
200 { 200 {
201 unsigned long flags; 201 unsigned long flags;
202 struct iwl_addsta_cmd sta_cmd; 202 struct iwl_addsta_cmd sta_cmd;
203 203
204 spin_lock_irqsave(&priv->sta_lock, flags); 204 spin_lock_irqsave(&priv->sta_lock, flags);
205 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); 205 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
206 memset(&priv->stations[sta_id].sta.key, 0, 206 memset(&priv->stations[sta_id].sta.key, 0,
207 sizeof(struct iwl4965_keyinfo)); 207 sizeof(struct iwl4965_keyinfo));
208 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; 208 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
209 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 209 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
210 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 210 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
211 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 211 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
212 spin_unlock_irqrestore(&priv->sta_lock, flags); 212 spin_unlock_irqrestore(&priv->sta_lock, flags);
213 213
214 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); 214 IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
215 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 215 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
216 } 216 }
217 217
218 static int iwl3945_set_dynamic_key(struct iwl_priv *priv, 218 static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
219 struct ieee80211_key_conf *keyconf, u8 sta_id) 219 struct ieee80211_key_conf *keyconf, u8 sta_id)
220 { 220 {
221 int ret = 0; 221 int ret = 0;
222 222
223 keyconf->hw_key_idx = HW_KEY_DYNAMIC; 223 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
224 224
225 switch (keyconf->alg) { 225 switch (keyconf->alg) {
226 case ALG_CCMP: 226 case ALG_CCMP:
227 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); 227 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
228 break; 228 break;
229 case ALG_TKIP: 229 case ALG_TKIP:
230 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); 230 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
231 break; 231 break;
232 case ALG_WEP: 232 case ALG_WEP:
233 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); 233 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
234 break; 234 break;
235 default: 235 default:
236 IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg); 236 IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg);
237 ret = -EINVAL; 237 ret = -EINVAL;
238 } 238 }
239 239
240 IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n", 240 IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n",
241 keyconf->alg, keyconf->keylen, keyconf->keyidx, 241 keyconf->alg, keyconf->keylen, keyconf->keyidx,
242 sta_id, ret); 242 sta_id, ret);
243 243
244 return ret; 244 return ret;
245 } 245 }
246 246
247 static int iwl3945_remove_static_key(struct iwl_priv *priv) 247 static int iwl3945_remove_static_key(struct iwl_priv *priv)
248 { 248 {
249 int ret = -EOPNOTSUPP; 249 int ret = -EOPNOTSUPP;
250 250
251 return ret; 251 return ret;
252 } 252 }
253 253
254 static int iwl3945_set_static_key(struct iwl_priv *priv, 254 static int iwl3945_set_static_key(struct iwl_priv *priv,
255 struct ieee80211_key_conf *key) 255 struct ieee80211_key_conf *key)
256 { 256 {
257 if (key->alg == ALG_WEP) 257 if (key->alg == ALG_WEP)
258 return -EOPNOTSUPP; 258 return -EOPNOTSUPP;
259 259
260 IWL_ERR(priv, "Static key invalid: alg %d\n", key->alg); 260 IWL_ERR(priv, "Static key invalid: alg %d\n", key->alg);
261 return -EINVAL; 261 return -EINVAL;
262 } 262 }
263 263
264 static void iwl3945_clear_free_frames(struct iwl_priv *priv) 264 static void iwl3945_clear_free_frames(struct iwl_priv *priv)
265 { 265 {
266 struct list_head *element; 266 struct list_head *element;
267 267
268 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", 268 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
269 priv->frames_count); 269 priv->frames_count);
270 270
271 while (!list_empty(&priv->free_frames)) { 271 while (!list_empty(&priv->free_frames)) {
272 element = priv->free_frames.next; 272 element = priv->free_frames.next;
273 list_del(element); 273 list_del(element);
274 kfree(list_entry(element, struct iwl3945_frame, list)); 274 kfree(list_entry(element, struct iwl3945_frame, list));
275 priv->frames_count--; 275 priv->frames_count--;
276 } 276 }
277 277
278 if (priv->frames_count) { 278 if (priv->frames_count) {
279 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n", 279 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
280 priv->frames_count); 280 priv->frames_count);
281 priv->frames_count = 0; 281 priv->frames_count = 0;
282 } 282 }
283 } 283 }
284 284
285 static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv) 285 static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
286 { 286 {
287 struct iwl3945_frame *frame; 287 struct iwl3945_frame *frame;
288 struct list_head *element; 288 struct list_head *element;
289 if (list_empty(&priv->free_frames)) { 289 if (list_empty(&priv->free_frames)) {
290 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 290 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
291 if (!frame) { 291 if (!frame) {
292 IWL_ERR(priv, "Could not allocate frame!\n"); 292 IWL_ERR(priv, "Could not allocate frame!\n");
293 return NULL; 293 return NULL;
294 } 294 }
295 295
296 priv->frames_count++; 296 priv->frames_count++;
297 return frame; 297 return frame;
298 } 298 }
299 299
300 element = priv->free_frames.next; 300 element = priv->free_frames.next;
301 list_del(element); 301 list_del(element);
302 return list_entry(element, struct iwl3945_frame, list); 302 return list_entry(element, struct iwl3945_frame, list);
303 } 303 }
304 304
305 static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame) 305 static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
306 { 306 {
307 memset(frame, 0, sizeof(*frame)); 307 memset(frame, 0, sizeof(*frame));
308 list_add(&frame->list, &priv->free_frames); 308 list_add(&frame->list, &priv->free_frames);
309 } 309 }
310 310
311 unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 311 unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
312 struct ieee80211_hdr *hdr, 312 struct ieee80211_hdr *hdr,
313 int left) 313 int left)
314 { 314 {
315 315
316 if (!iwl_is_associated(priv) || !priv->ibss_beacon) 316 if (!iwl_is_associated(priv) || !priv->ibss_beacon)
317 return 0; 317 return 0;
318 318
319 if (priv->ibss_beacon->len > left) 319 if (priv->ibss_beacon->len > left)
320 return 0; 320 return 0;
321 321
322 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); 322 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
323 323
324 return priv->ibss_beacon->len; 324 return priv->ibss_beacon->len;
325 } 325 }
326 326
327 static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) 327 static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
328 { 328 {
329 struct iwl3945_frame *frame; 329 struct iwl3945_frame *frame;
330 unsigned int frame_size; 330 unsigned int frame_size;
331 int rc; 331 int rc;
332 u8 rate; 332 u8 rate;
333 333
334 frame = iwl3945_get_free_frame(priv); 334 frame = iwl3945_get_free_frame(priv);
335 335
336 if (!frame) { 336 if (!frame) {
337 IWL_ERR(priv, "Could not obtain free frame buffer for beacon " 337 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
338 "command.\n"); 338 "command.\n");
339 return -ENOMEM; 339 return -ENOMEM;
340 } 340 }
341 341
342 rate = iwl_rate_get_lowest_plcp(priv); 342 rate = iwl_rate_get_lowest_plcp(priv);
343 343
344 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); 344 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
345 345
346 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 346 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
347 &frame->u.cmd[0]); 347 &frame->u.cmd[0]);
348 348
349 iwl3945_free_frame(priv, frame); 349 iwl3945_free_frame(priv, frame);
350 350
351 return rc; 351 return rc;
352 } 352 }
353 353
354 static void iwl3945_unset_hw_params(struct iwl_priv *priv) 354 static void iwl3945_unset_hw_params(struct iwl_priv *priv)
355 { 355 {
356 if (priv->_3945.shared_virt) 356 if (priv->_3945.shared_virt)
357 dma_free_coherent(&priv->pci_dev->dev, 357 dma_free_coherent(&priv->pci_dev->dev,
358 sizeof(struct iwl3945_shared), 358 sizeof(struct iwl3945_shared),
359 priv->_3945.shared_virt, 359 priv->_3945.shared_virt,
360 priv->_3945.shared_phys); 360 priv->_3945.shared_phys);
361 } 361 }
362 362
363 static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, 363 static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
364 struct ieee80211_tx_info *info, 364 struct ieee80211_tx_info *info,
365 struct iwl_device_cmd *cmd, 365 struct iwl_device_cmd *cmd,
366 struct sk_buff *skb_frag, 366 struct sk_buff *skb_frag,
367 int sta_id) 367 int sta_id)
368 { 368 {
369 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 369 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
370 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; 370 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
371 371
372 switch (keyinfo->alg) { 372 switch (keyinfo->alg) {
373 case ALG_CCMP: 373 case ALG_CCMP:
374 tx_cmd->sec_ctl = TX_CMD_SEC_CCM; 374 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
375 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); 375 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
376 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 376 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
377 break; 377 break;
378 378
379 case ALG_TKIP: 379 case ALG_TKIP:
380 break; 380 break;
381 381
382 case ALG_WEP: 382 case ALG_WEP:
383 tx_cmd->sec_ctl = TX_CMD_SEC_WEP | 383 tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
384 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 384 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
385 385
386 if (keyinfo->keylen == 13) 386 if (keyinfo->keylen == 13)
387 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; 387 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
388 388
389 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); 389 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
390 390
391 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 391 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
392 "with key %d\n", info->control.hw_key->hw_key_idx); 392 "with key %d\n", info->control.hw_key->hw_key_idx);
393 break; 393 break;
394 394
395 default: 395 default:
396 IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg); 396 IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg);
397 break; 397 break;
398 } 398 }
399 } 399 }
400 400
401 /* 401 /*
402 * handle build REPLY_TX command notification. 402 * handle build REPLY_TX command notification.
403 */ 403 */
404 static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, 404 static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
405 struct iwl_device_cmd *cmd, 405 struct iwl_device_cmd *cmd,
406 struct ieee80211_tx_info *info, 406 struct ieee80211_tx_info *info,
407 struct ieee80211_hdr *hdr, u8 std_id) 407 struct ieee80211_hdr *hdr, u8 std_id)
408 { 408 {
409 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 409 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
410 __le32 tx_flags = tx_cmd->tx_flags; 410 __le32 tx_flags = tx_cmd->tx_flags;
411 __le16 fc = hdr->frame_control; 411 __le16 fc = hdr->frame_control;
412 412
413 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 413 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
414 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 414 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
415 tx_flags |= TX_CMD_FLG_ACK_MSK; 415 tx_flags |= TX_CMD_FLG_ACK_MSK;
416 if (ieee80211_is_mgmt(fc)) 416 if (ieee80211_is_mgmt(fc))
417 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 417 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
418 if (ieee80211_is_probe_resp(fc) && 418 if (ieee80211_is_probe_resp(fc) &&
419 !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) 419 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
420 tx_flags |= TX_CMD_FLG_TSF_MSK; 420 tx_flags |= TX_CMD_FLG_TSF_MSK;
421 } else { 421 } else {
422 tx_flags &= (~TX_CMD_FLG_ACK_MSK); 422 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
423 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 423 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
424 } 424 }
425 425
426 tx_cmd->sta_id = std_id; 426 tx_cmd->sta_id = std_id;
427 if (ieee80211_has_morefrags(fc)) 427 if (ieee80211_has_morefrags(fc))
428 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 428 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
429 429
430 if (ieee80211_is_data_qos(fc)) { 430 if (ieee80211_is_data_qos(fc)) {
431 u8 *qc = ieee80211_get_qos_ctl(hdr); 431 u8 *qc = ieee80211_get_qos_ctl(hdr);
432 tx_cmd->tid_tspec = qc[0] & 0xf; 432 tx_cmd->tid_tspec = qc[0] & 0xf;
433 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 433 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
434 } else { 434 } else {
435 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 435 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
436 } 436 }
437 437
438 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); 438 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
439 439
440 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 440 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
441 if (ieee80211_is_mgmt(fc)) { 441 if (ieee80211_is_mgmt(fc)) {
442 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 442 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
443 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); 443 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
444 else 444 else
445 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); 445 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
446 } else { 446 } else {
447 tx_cmd->timeout.pm_frame_timeout = 0; 447 tx_cmd->timeout.pm_frame_timeout = 0;
448 } 448 }
449 449
450 tx_cmd->driver_txop = 0; 450 tx_cmd->driver_txop = 0;
451 tx_cmd->tx_flags = tx_flags; 451 tx_cmd->tx_flags = tx_flags;
452 tx_cmd->next_frame_len = 0; 452 tx_cmd->next_frame_len = 0;
453 } 453 }
454 454
455 /* 455 /*
456 * start REPLY_TX command process 456 * start REPLY_TX command process
457 */ 457 */
458 static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) 458 static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
459 { 459 {
460 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 460 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
461 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 461 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
462 struct iwl3945_tx_cmd *tx_cmd; 462 struct iwl3945_tx_cmd *tx_cmd;
463 struct iwl_tx_queue *txq = NULL; 463 struct iwl_tx_queue *txq = NULL;
464 struct iwl_queue *q = NULL; 464 struct iwl_queue *q = NULL;
465 struct iwl_device_cmd *out_cmd; 465 struct iwl_device_cmd *out_cmd;
466 struct iwl_cmd_meta *out_meta; 466 struct iwl_cmd_meta *out_meta;
467 dma_addr_t phys_addr; 467 dma_addr_t phys_addr;
468 dma_addr_t txcmd_phys; 468 dma_addr_t txcmd_phys;
469 int txq_id = skb_get_queue_mapping(skb); 469 int txq_id = skb_get_queue_mapping(skb);
470 u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */ 470 u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */
471 u8 id; 471 u8 id;
472 u8 unicast; 472 u8 unicast;
473 u8 sta_id; 473 u8 sta_id;
474 u8 tid = 0; 474 u8 tid = 0;
475 __le16 fc; 475 __le16 fc;
476 u8 wait_write_ptr = 0; 476 u8 wait_write_ptr = 0;
477 unsigned long flags; 477 unsigned long flags;
478 478
479 spin_lock_irqsave(&priv->lock, flags); 479 spin_lock_irqsave(&priv->lock, flags);
480 if (iwl_is_rfkill(priv)) { 480 if (iwl_is_rfkill(priv)) {
481 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 481 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
482 goto drop_unlock; 482 goto drop_unlock;
483 } 483 }
484 484
485 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { 485 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
486 IWL_ERR(priv, "ERROR: No TX rate available.\n"); 486 IWL_ERR(priv, "ERROR: No TX rate available.\n");
487 goto drop_unlock; 487 goto drop_unlock;
488 } 488 }
489 489
490 unicast = !is_multicast_ether_addr(hdr->addr1); 490 unicast = !is_multicast_ether_addr(hdr->addr1);
491 id = 0; 491 id = 0;
492 492
493 fc = hdr->frame_control; 493 fc = hdr->frame_control;
494 494
495 #ifdef CONFIG_IWLWIFI_DEBUG 495 #ifdef CONFIG_IWLWIFI_DEBUG
496 if (ieee80211_is_auth(fc)) 496 if (ieee80211_is_auth(fc))
497 IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); 497 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
498 else if (ieee80211_is_assoc_req(fc)) 498 else if (ieee80211_is_assoc_req(fc))
499 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); 499 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
500 else if (ieee80211_is_reassoc_req(fc)) 500 else if (ieee80211_is_reassoc_req(fc))
501 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); 501 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
502 #endif 502 #endif
503 503
504 spin_unlock_irqrestore(&priv->lock, flags); 504 spin_unlock_irqrestore(&priv->lock, flags);
505 505
506 hdr_len = ieee80211_hdrlen(fc); 506 hdr_len = ieee80211_hdrlen(fc);
507 507
508 /* Find index into station table for destination station */ 508 /* Find index into station table for destination station */
509 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta); 509 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
510 if (sta_id == IWL_INVALID_STATION) { 510 if (sta_id == IWL_INVALID_STATION) {
511 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 511 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
512 hdr->addr1); 512 hdr->addr1);
513 goto drop; 513 goto drop;
514 } 514 }
515 515
516 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); 516 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
517 517
518 if (ieee80211_is_data_qos(fc)) { 518 if (ieee80211_is_data_qos(fc)) {
519 u8 *qc = ieee80211_get_qos_ctl(hdr); 519 u8 *qc = ieee80211_get_qos_ctl(hdr);
520 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 520 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
521 if (unlikely(tid >= MAX_TID_COUNT)) 521 if (unlikely(tid >= MAX_TID_COUNT))
522 goto drop; 522 goto drop;
523 } 523 }
524 524
525 /* Descriptor for chosen Tx queue */ 525 /* Descriptor for chosen Tx queue */
526 txq = &priv->txq[txq_id]; 526 txq = &priv->txq[txq_id];
527 q = &txq->q; 527 q = &txq->q;
528 528
529 if ((iwl_queue_space(q) < q->high_mark)) 529 if ((iwl_queue_space(q) < q->high_mark))
530 goto drop; 530 goto drop;
531 531
532 spin_lock_irqsave(&priv->lock, flags); 532 spin_lock_irqsave(&priv->lock, flags);
533 533
534 idx = get_cmd_index(q, q->write_ptr, 0); 534 idx = get_cmd_index(q, q->write_ptr, 0);
535 535
536 /* Set up driver data for this TFD */ 536 /* Set up driver data for this TFD */
537 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 537 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
538 txq->txb[q->write_ptr].skb = skb; 538 txq->txb[q->write_ptr].skb = skb;
539 539
540 /* Init first empty entry in queue's array of Tx/cmd buffers */ 540 /* Init first empty entry in queue's array of Tx/cmd buffers */
541 out_cmd = txq->cmd[idx]; 541 out_cmd = txq->cmd[idx];
542 out_meta = &txq->meta[idx]; 542 out_meta = &txq->meta[idx];
543 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; 543 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
544 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 544 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
545 memset(tx_cmd, 0, sizeof(*tx_cmd)); 545 memset(tx_cmd, 0, sizeof(*tx_cmd));
546 546
547 /* 547 /*
548 * Set up the Tx-command (not MAC!) header. 548 * Set up the Tx-command (not MAC!) header.
549 * Store the chosen Tx queue and TFD index within the sequence field; 549 * Store the chosen Tx queue and TFD index within the sequence field;
550 * after Tx, uCode's Tx response will return this value so driver can 550 * after Tx, uCode's Tx response will return this value so driver can
551 * locate the frame within the tx queue and do post-tx processing. 551 * locate the frame within the tx queue and do post-tx processing.
552 */ 552 */
553 out_cmd->hdr.cmd = REPLY_TX; 553 out_cmd->hdr.cmd = REPLY_TX;
554 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 554 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
555 INDEX_TO_SEQ(q->write_ptr))); 555 INDEX_TO_SEQ(q->write_ptr)));
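For illustration, a minimal user-space sketch of packing a Tx queue id and a TFD index into one 16-bit sequence value and recovering them later; the shift and mask values are assumptions for the example only, and the authoritative layout is whatever the driver's QUEUE_TO_SEQ/INDEX_TO_SEQ (and matching SEQ_TO_*) macros define.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for the sketch: queue id in bits 8..12, TFD index in bits 0..7. */
static uint16_t pack_seq(unsigned int txq_id, unsigned int write_ptr)
{
	return (uint16_t)(((txq_id & 0x1f) << 8) | (write_ptr & 0xff));
}

static void unpack_seq(uint16_t seq, unsigned int *txq_id, unsigned int *idx)
{
	*txq_id = (seq >> 8) & 0x1f;
	*idx = seq & 0xff;
}

int main(void)
{
	unsigned int q, idx;
	uint16_t seq = pack_seq(2, 137);	/* e.g. queue 2, write_ptr 137 */

	unpack_seq(seq, &q, &idx);
	printf("seq=0x%04x -> queue=%u index=%u\n", seq, q, idx);
	return 0;
}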
556 556
557 /* Copy MAC header from skb into command buffer */ 557 /* Copy MAC header from skb into command buffer */
558 memcpy(tx_cmd->hdr, hdr, hdr_len); 558 memcpy(tx_cmd->hdr, hdr, hdr_len);
559 559
560 560
561 if (info->control.hw_key) 561 if (info->control.hw_key)
562 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); 562 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
563 563
564 /* TODO need this for burst mode later on */ 564 /* TODO need this for burst mode later on */
565 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); 565 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
566 566
567 /* set is_hcca to 0; it probably will never be implemented */ 567 /* set is_hcca to 0; it probably will never be implemented */
568 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); 568 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
569 569
570 /* Total # bytes to be transmitted */ 570 /* Total # bytes to be transmitted */
571 len = (u16)skb->len; 571 len = (u16)skb->len;
572 tx_cmd->len = cpu_to_le16(len); 572 tx_cmd->len = cpu_to_le16(len);
573 573
574 iwl_dbg_log_tx_data_frame(priv, len, hdr); 574 iwl_dbg_log_tx_data_frame(priv, len, hdr);
575 iwl_update_stats(priv, true, fc, len); 575 iwl_update_stats(priv, true, fc, len);
576 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 576 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
577 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 577 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
578 578
579 if (!ieee80211_has_morefrags(hdr->frame_control)) { 579 if (!ieee80211_has_morefrags(hdr->frame_control)) {
580 txq->need_update = 1; 580 txq->need_update = 1;
581 } else { 581 } else {
582 wait_write_ptr = 1; 582 wait_write_ptr = 1;
583 txq->need_update = 0; 583 txq->need_update = 0;
584 } 584 }
585 585
586 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", 586 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
587 le16_to_cpu(out_cmd->hdr.sequence)); 587 le16_to_cpu(out_cmd->hdr.sequence));
588 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); 588 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
589 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); 589 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
590 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, 590 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
591 ieee80211_hdrlen(fc)); 591 ieee80211_hdrlen(fc));
592 592
593 /* 593 /*
594 * Use the first empty entry in this queue's command buffer array 594 * Use the first empty entry in this queue's command buffer array
595 * to contain the Tx command and MAC header concatenated together 595 * to contain the Tx command and MAC header concatenated together
596 * (payload data will be in another buffer). 596 * (payload data will be in another buffer).
597 * Size of this varies, due to varying MAC header length. 597 * Size of this varies, due to varying MAC header length.
598 * If end is not dword aligned, we'll have 2 extra bytes at the end 598 * If end is not dword aligned, we'll have 2 extra bytes at the end
599 * of the MAC header (device reads on dword boundaries). 599 * of the MAC header (device reads on dword boundaries).
600 * We'll tell device about this padding later. 600 * We'll tell device about this padding later.
601 */ 601 */
602 len = sizeof(struct iwl3945_tx_cmd) + 602 len = sizeof(struct iwl3945_tx_cmd) +
603 sizeof(struct iwl_cmd_header) + hdr_len; 603 sizeof(struct iwl_cmd_header) + hdr_len;
604 604
605 len_org = len; 605 len_org = len;
606 len = (len + 3) & ~3; 606 len = (len + 3) & ~3;
607 607
608 if (len_org != len) 608 if (len_org != len)
609 len_org = 1; 609 len_org = 1;
610 else 610 else
611 len_org = 0; 611 len_org = 0;
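A small stand-alone sketch of the dword round-up used just above; the input lengths are made-up examples, not the real sizeof() values of the command structures.

#include <stdio.h>

int main(void)
{
	unsigned int lengths[] = { 41, 42, 44, 45 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		unsigned int len_org = lengths[i];
		unsigned int len = (len_org + 3) & ~3u;	/* round up to a multiple of 4 */
		int padded = (len != len_org);		/* the driver reuses len_org as this flag */

		printf("len_org=%u -> len=%u padded=%d\n", len_org, len, padded);
	}
	return 0;
}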
612 612
613 /* Physical address of this Tx command's header (not MAC header!), 613 /* Physical address of this Tx command's header (not MAC header!),
614 * within command buffer array. */ 614 * within command buffer array. */
615 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, 615 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
616 len, PCI_DMA_TODEVICE); 616 len, PCI_DMA_TODEVICE);
617 /* We do not map the meta data ... so we can safely access the address to 617 /* We do not map the meta data ... so we can safely access the address to
618 * provide to the unmap command. */ 618 * provide to the unmap command. */
619 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 619 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
620 dma_unmap_len_set(out_meta, len, len); 620 dma_unmap_len_set(out_meta, len, len);
621 621
622 /* Add buffer containing Tx command and MAC(!) header to TFD's 622 /* Add buffer containing Tx command and MAC(!) header to TFD's
623 * first entry */ 623 * first entry */
624 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 624 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
625 txcmd_phys, len, 1, 0); 625 txcmd_phys, len, 1, 0);
626 626
627 627
628 /* Set up TFD's 2nd entry to point directly to remainder of skb, 628 /* Set up TFD's 2nd entry to point directly to remainder of skb,
629 * if any (802.11 null frames have no payload). */ 629 * if any (802.11 null frames have no payload). */
630 len = skb->len - hdr_len; 630 len = skb->len - hdr_len;
631 if (len) { 631 if (len) {
632 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 632 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
633 len, PCI_DMA_TODEVICE); 633 len, PCI_DMA_TODEVICE);
634 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 634 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
635 phys_addr, len, 635 phys_addr, len,
636 0, U32_PAD(len)); 636 0, U32_PAD(len));
637 } 637 }
638 638
639 639
640 /* Tell device the write index *just past* this latest filled TFD */ 640 /* Tell device the write index *just past* this latest filled TFD */
641 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 641 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
642 iwl_txq_update_write_ptr(priv, txq); 642 iwl_txq_update_write_ptr(priv, txq);
643 spin_unlock_irqrestore(&priv->lock, flags); 643 spin_unlock_irqrestore(&priv->lock, flags);
644 644
645 if ((iwl_queue_space(q) < q->high_mark) 645 if ((iwl_queue_space(q) < q->high_mark)
646 && priv->mac80211_registered) { 646 && priv->mac80211_registered) {
647 if (wait_write_ptr) { 647 if (wait_write_ptr) {
648 spin_lock_irqsave(&priv->lock, flags); 648 spin_lock_irqsave(&priv->lock, flags);
649 txq->need_update = 1; 649 txq->need_update = 1;
650 iwl_txq_update_write_ptr(priv, txq); 650 iwl_txq_update_write_ptr(priv, txq);
651 spin_unlock_irqrestore(&priv->lock, flags); 651 spin_unlock_irqrestore(&priv->lock, flags);
652 } 652 }
653 653
654 iwl_stop_queue(priv, skb_get_queue_mapping(skb)); 654 iwl_stop_queue(priv, skb_get_queue_mapping(skb));
655 } 655 }
656 656
657 return 0; 657 return 0;
658 658
659 drop_unlock: 659 drop_unlock:
660 spin_unlock_irqrestore(&priv->lock, flags); 660 spin_unlock_irqrestore(&priv->lock, flags);
661 drop: 661 drop:
662 return -1; 662 return -1;
663 } 663 }
664 664
665 static int iwl3945_get_measurement(struct iwl_priv *priv, 665 static int iwl3945_get_measurement(struct iwl_priv *priv,
666 struct ieee80211_measurement_params *params, 666 struct ieee80211_measurement_params *params,
667 u8 type) 667 u8 type)
668 { 668 {
669 struct iwl_spectrum_cmd spectrum; 669 struct iwl_spectrum_cmd spectrum;
670 struct iwl_rx_packet *pkt; 670 struct iwl_rx_packet *pkt;
671 struct iwl_host_cmd cmd = { 671 struct iwl_host_cmd cmd = {
672 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 672 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
673 .data = (void *)&spectrum, 673 .data = (void *)&spectrum,
674 .flags = CMD_WANT_SKB, 674 .flags = CMD_WANT_SKB,
675 }; 675 };
676 u32 add_time = le64_to_cpu(params->start_time); 676 u32 add_time = le64_to_cpu(params->start_time);
677 int rc; 677 int rc;
678 int spectrum_resp_status; 678 int spectrum_resp_status;
679 int duration = le16_to_cpu(params->duration); 679 int duration = le16_to_cpu(params->duration);
680 680
681 if (iwl_is_associated(priv)) 681 if (iwl_is_associated(priv))
682 add_time = iwl_usecs_to_beacons(priv, 682 add_time = iwl_usecs_to_beacons(priv,
683 le64_to_cpu(params->start_time) - priv->_3945.last_tsf, 683 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
684 le16_to_cpu(priv->rxon_timing.beacon_interval)); 684 le16_to_cpu(priv->rxon_timing.beacon_interval));
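As a rough, hedged illustration of what the conversion above does: iwl_usecs_to_beacons() turns a TSF delta in microseconds into elapsed beacon intervals. Assuming the standard 802.11 time unit of 1024 µs per TU (the real helper also keeps a sub-interval remainder and applies hardware-specific masks), the core arithmetic looks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t usec_delta = 512000;		/* made-up TSF delta since the last beacon time */
	uint32_t beacon_interval_tu = 100;	/* typical beacon interval in TU */
	uint32_t interval_usec = beacon_interval_tu * 1024;

	uint32_t beacons = usec_delta / interval_usec;	/* whole intervals elapsed */
	uint32_t rem = usec_delta % interval_usec;	/* leftover microseconds */

	printf("%u us = %u beacon intervals + %u us\n", usec_delta, beacons, rem);
	return 0;
}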
685 685
686 memset(&spectrum, 0, sizeof(spectrum)); 686 memset(&spectrum, 0, sizeof(spectrum));
687 687
688 spectrum.channel_count = cpu_to_le16(1); 688 spectrum.channel_count = cpu_to_le16(1);
689 spectrum.flags = 689 spectrum.flags =
690 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; 690 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
691 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; 691 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
692 cmd.len = sizeof(spectrum); 692 cmd.len = sizeof(spectrum);
693 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 693 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
694 694
695 if (iwl_is_associated(priv)) 695 if (iwl_is_associated(priv))
696 spectrum.start_time = 696 spectrum.start_time =
697 iwl_add_beacon_time(priv, 697 iwl_add_beacon_time(priv,
698 priv->_3945.last_beacon_time, add_time, 698 priv->_3945.last_beacon_time, add_time,
699 le16_to_cpu(priv->rxon_timing.beacon_interval)); 699 le16_to_cpu(priv->rxon_timing.beacon_interval));
700 else 700 else
701 spectrum.start_time = 0; 701 spectrum.start_time = 0;
702 702
703 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); 703 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
704 spectrum.channels[0].channel = params->channel; 704 spectrum.channels[0].channel = params->channel;
705 spectrum.channels[0].type = type; 705 spectrum.channels[0].type = type;
706 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) 706 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
707 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 707 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
708 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 708 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
709 709
710 rc = iwl_send_cmd_sync(priv, &cmd); 710 rc = iwl_send_cmd_sync(priv, &cmd);
711 if (rc) 711 if (rc)
712 return rc; 712 return rc;
713 713
714 pkt = (struct iwl_rx_packet *)cmd.reply_page; 714 pkt = (struct iwl_rx_packet *)cmd.reply_page;
715 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 715 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
716 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); 716 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
717 rc = -EIO; 717 rc = -EIO;
718 } 718 }
719 719
720 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); 720 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
721 switch (spectrum_resp_status) { 721 switch (spectrum_resp_status) {
722 case 0: /* Command will be handled */ 722 case 0: /* Command will be handled */
723 if (pkt->u.spectrum.id != 0xff) { 723 if (pkt->u.spectrum.id != 0xff) {
724 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", 724 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
725 pkt->u.spectrum.id); 725 pkt->u.spectrum.id);
726 priv->measurement_status &= ~MEASUREMENT_READY; 726 priv->measurement_status &= ~MEASUREMENT_READY;
727 } 727 }
728 priv->measurement_status |= MEASUREMENT_ACTIVE; 728 priv->measurement_status |= MEASUREMENT_ACTIVE;
729 rc = 0; 729 rc = 0;
730 break; 730 break;
731 731
732 case 1: /* Command will not be handled */ 732 case 1: /* Command will not be handled */
733 rc = -EAGAIN; 733 rc = -EAGAIN;
734 break; 734 break;
735 } 735 }
736 736
737 iwl_free_pages(priv, cmd.reply_page); 737 iwl_free_pages(priv, cmd.reply_page);
738 738
739 return rc; 739 return rc;
740 } 740 }
741 741
742 static void iwl3945_rx_reply_alive(struct iwl_priv *priv, 742 static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
743 struct iwl_rx_mem_buffer *rxb) 743 struct iwl_rx_mem_buffer *rxb)
744 { 744 {
745 struct iwl_rx_packet *pkt = rxb_addr(rxb); 745 struct iwl_rx_packet *pkt = rxb_addr(rxb);
746 struct iwl_alive_resp *palive; 746 struct iwl_alive_resp *palive;
747 struct delayed_work *pwork; 747 struct delayed_work *pwork;
748 748
749 palive = &pkt->u.alive_frame; 749 palive = &pkt->u.alive_frame;
750 750
751 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " 751 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
752 "0x%01X 0x%01X\n", 752 "0x%01X 0x%01X\n",
753 palive->is_valid, palive->ver_type, 753 palive->is_valid, palive->ver_type,
754 palive->ver_subtype); 754 palive->ver_subtype);
755 755
756 if (palive->ver_subtype == INITIALIZE_SUBTYPE) { 756 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
757 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); 757 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
758 memcpy(&priv->card_alive_init, &pkt->u.alive_frame, 758 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
759 sizeof(struct iwl_alive_resp)); 759 sizeof(struct iwl_alive_resp));
760 pwork = &priv->init_alive_start; 760 pwork = &priv->init_alive_start;
761 } else { 761 } else {
762 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 762 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
763 memcpy(&priv->card_alive, &pkt->u.alive_frame, 763 memcpy(&priv->card_alive, &pkt->u.alive_frame,
764 sizeof(struct iwl_alive_resp)); 764 sizeof(struct iwl_alive_resp));
765 pwork = &priv->alive_start; 765 pwork = &priv->alive_start;
766 iwl3945_disable_events(priv); 766 iwl3945_disable_events(priv);
767 } 767 }
768 768
769 /* We delay the ALIVE response by 5ms to 769 /* We delay the ALIVE response by 5ms to
770 * give the HW RF Kill time to activate... */ 770 * give the HW RF Kill time to activate... */
771 if (palive->is_valid == UCODE_VALID_OK) 771 if (palive->is_valid == UCODE_VALID_OK)
772 queue_delayed_work(priv->workqueue, pwork, 772 queue_delayed_work(priv->workqueue, pwork,
773 msecs_to_jiffies(5)); 773 msecs_to_jiffies(5));
774 else 774 else
775 IWL_WARN(priv, "uCode did not respond OK.\n"); 775 IWL_WARN(priv, "uCode did not respond OK.\n");
776 } 776 }
777 777
778 static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, 778 static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
779 struct iwl_rx_mem_buffer *rxb) 779 struct iwl_rx_mem_buffer *rxb)
780 { 780 {
781 #ifdef CONFIG_IWLWIFI_DEBUG 781 #ifdef CONFIG_IWLWIFI_DEBUG
782 struct iwl_rx_packet *pkt = rxb_addr(rxb); 782 struct iwl_rx_packet *pkt = rxb_addr(rxb);
783 #endif 783 #endif
784 784
785 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 785 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
786 } 786 }
787 787
788 static void iwl3945_bg_beacon_update(struct work_struct *work) 788 static void iwl3945_bg_beacon_update(struct work_struct *work)
789 { 789 {
790 struct iwl_priv *priv = 790 struct iwl_priv *priv =
791 container_of(work, struct iwl_priv, beacon_update); 791 container_of(work, struct iwl_priv, beacon_update);
792 struct sk_buff *beacon; 792 struct sk_buff *beacon;
793 793
794 /* Pull updated AP beacon from mac80211; this will fail if not in AP mode */ 794 /* Pull updated AP beacon from mac80211; this will fail if not in AP mode */
795 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 795 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
796 796
797 if (!beacon) { 797 if (!beacon) {
798 IWL_ERR(priv, "update beacon failed\n"); 798 IWL_ERR(priv, "update beacon failed\n");
799 return; 799 return;
800 } 800 }
801 801
802 mutex_lock(&priv->mutex); 802 mutex_lock(&priv->mutex);
803 /* new beacon skb is allocated every time; dispose previous.*/ 803 /* new beacon skb is allocated every time; dispose previous.*/
804 if (priv->ibss_beacon) 804 if (priv->ibss_beacon)
805 dev_kfree_skb(priv->ibss_beacon); 805 dev_kfree_skb(priv->ibss_beacon);
806 806
807 priv->ibss_beacon = beacon; 807 priv->ibss_beacon = beacon;
808 mutex_unlock(&priv->mutex); 808 mutex_unlock(&priv->mutex);
809 809
810 iwl3945_send_beacon_cmd(priv); 810 iwl3945_send_beacon_cmd(priv);
811 } 811 }
812 812
813 static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, 813 static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
814 struct iwl_rx_mem_buffer *rxb) 814 struct iwl_rx_mem_buffer *rxb)
815 { 815 {
816 #ifdef CONFIG_IWLWIFI_DEBUG 816 #ifdef CONFIG_IWLWIFI_DEBUG
817 struct iwl_rx_packet *pkt = rxb_addr(rxb); 817 struct iwl_rx_packet *pkt = rxb_addr(rxb);
818 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 818 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
819 u8 rate = beacon->beacon_notify_hdr.rate; 819 u8 rate = beacon->beacon_notify_hdr.rate;
820 820
821 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 821 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
822 "tsf %d %d rate %d\n", 822 "tsf %d %d rate %d\n",
823 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, 823 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
824 beacon->beacon_notify_hdr.failure_frame, 824 beacon->beacon_notify_hdr.failure_frame,
825 le32_to_cpu(beacon->ibss_mgr_status), 825 le32_to_cpu(beacon->ibss_mgr_status),
826 le32_to_cpu(beacon->high_tsf), 826 le32_to_cpu(beacon->high_tsf),
827 le32_to_cpu(beacon->low_tsf), rate); 827 le32_to_cpu(beacon->low_tsf), rate);
828 #endif 828 #endif
829 829
830 if ((priv->iw_mode == NL80211_IFTYPE_AP) && 830 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
831 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 831 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
832 queue_work(priv->workqueue, &priv->beacon_update); 832 queue_work(priv->workqueue, &priv->beacon_update);
833 } 833 }
834 834
835 /* Handle notification from uCode that card's power state is changing 835 /* Handle notification from uCode that card's power state is changing
836 * due to software, hardware, or critical temperature RFKILL */ 836 * due to software, hardware, or critical temperature RFKILL */
837 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, 837 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
838 struct iwl_rx_mem_buffer *rxb) 838 struct iwl_rx_mem_buffer *rxb)
839 { 839 {
840 struct iwl_rx_packet *pkt = rxb_addr(rxb); 840 struct iwl_rx_packet *pkt = rxb_addr(rxb);
841 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 841 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
842 unsigned long status = priv->status; 842 unsigned long status = priv->status;
843 843
844 IWL_WARN(priv, "Card state received: HW:%s SW:%s\n", 844 IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
845 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 845 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
846 (flags & SW_CARD_DISABLED) ? "Kill" : "On"); 846 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
847 847
848 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 848 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
849 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 849 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
850 850
851 if (flags & HW_CARD_DISABLED) 851 if (flags & HW_CARD_DISABLED)
852 set_bit(STATUS_RF_KILL_HW, &priv->status); 852 set_bit(STATUS_RF_KILL_HW, &priv->status);
853 else 853 else
854 clear_bit(STATUS_RF_KILL_HW, &priv->status); 854 clear_bit(STATUS_RF_KILL_HW, &priv->status);
855 855
856 856
857 iwl_scan_cancel(priv); 857 iwl_scan_cancel(priv);
858 858
859 if ((test_bit(STATUS_RF_KILL_HW, &status) != 859 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
860 test_bit(STATUS_RF_KILL_HW, &priv->status))) 860 test_bit(STATUS_RF_KILL_HW, &priv->status)))
861 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 861 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
862 test_bit(STATUS_RF_KILL_HW, &priv->status)); 862 test_bit(STATUS_RF_KILL_HW, &priv->status));
863 else 863 else
864 wake_up_interruptible(&priv->wait_command_queue); 864 wake_up_interruptible(&priv->wait_command_queue);
865 } 865 }
866 866
867 /** 867 /**
868 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks 868 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
869 * 869 *
870 * Setup the RX handlers for each of the reply types sent from the uCode 870 * Setup the RX handlers for each of the reply types sent from the uCode
871 * to the host. 871 * to the host.
872 * 872 *
873 * This function chains into the hardware specific files for them to setup 873 * This function chains into the hardware specific files for them to setup
874 * any hardware specific handlers as well. 874 * any hardware specific handlers as well.
875 */ 875 */
876 static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) 876 static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
877 { 877 {
878 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; 878 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
879 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; 879 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
880 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 880 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
881 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 881 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
882 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 882 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
883 iwl_rx_spectrum_measure_notif; 883 iwl_rx_spectrum_measure_notif;
884 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 884 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
885 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 885 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
886 iwl_rx_pm_debug_statistics_notif; 886 iwl_rx_pm_debug_statistics_notif;
887 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; 887 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
888 888
889 /* 889 /*
890 * The same handler is used for both the REPLY to a discrete 890 * The same handler is used for both the REPLY to a discrete
891 * statistics request from the host and for the periodic 891 * statistics request from the host and for the periodic
892 * statistics notifications (after received beacons) from the uCode. 892 * statistics notifications (after received beacons) from the uCode.
893 */ 893 */
894 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; 894 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
895 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 895 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
896 896
897 iwl_setup_rx_scan_handlers(priv); 897 iwl_setup_rx_scan_handlers(priv);
898 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; 898 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
899 899
900 /* Set up hardware specific Rx handlers */ 900 /* Set up hardware specific Rx handlers */
901 iwl3945_hw_rx_handler_setup(priv); 901 iwl3945_hw_rx_handler_setup(priv);
902 } 902 }
903 903
904 /************************** RX-FUNCTIONS ****************************/ 904 /************************** RX-FUNCTIONS ****************************/
905 /* 905 /*
906 * Rx theory of operation 906 * Rx theory of operation
907 * 907 *
908 * The host allocates 32 DMA target addresses and passes the host address 908 * The host allocates 32 DMA target addresses and passes the host address
909 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is 909 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
910 * 0 to 31 910 * 0 to 31
911 * 911 *
912 * Rx Queue Indexes 912 * Rx Queue Indexes
913 * The host/firmware share two index registers for managing the Rx buffers. 913 * The host/firmware share two index registers for managing the Rx buffers.
914 * 914 *
915 * The READ index maps to the first position that the firmware may be writing 915 * The READ index maps to the first position that the firmware may be writing
916 * to -- the driver can read up to (but not including) this position and get 916 * to -- the driver can read up to (but not including) this position and get
917 * good data. 917 * good data.
918 * The READ index is managed by the firmware once the card is enabled. 918 * The READ index is managed by the firmware once the card is enabled.
919 * 919 *
920 * The WRITE index maps to the last position the driver has read from -- the 920 * The WRITE index maps to the last position the driver has read from -- the
921 * position preceding WRITE is the last slot the firmware can place a packet. 921 * position preceding WRITE is the last slot the firmware can place a packet.
922 * 922 *
923 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 923 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
924 * WRITE = READ. 924 * WRITE = READ.
925 * 925 *
926 * During initialization, the host sets up the READ queue position to the first 926 * During initialization, the host sets up the READ queue position to the first
927 * INDEX position, and WRITE to the last (READ - 1 wrapped) 927 * INDEX position, and WRITE to the last (READ - 1 wrapped)
928 * 928 *
929 * When the firmware places a packet in a buffer, it will advance the READ index 929 * When the firmware places a packet in a buffer, it will advance the READ index
930 * and fire the RX interrupt. The driver can then query the READ index and 930 * and fire the RX interrupt. The driver can then query the READ index and
931 * process as many packets as possible, moving the WRITE index forward as it 931 * process as many packets as possible, moving the WRITE index forward as it
932 * resets the Rx queue buffers with new memory. 932 * resets the Rx queue buffers with new memory.
933 * 933 *
934 * The management in the driver is as follows: 934 * The management in the driver is as follows:
935 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 935 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
936 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 936 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
937 * to replenish the iwl->rxq->rx_free. 937 * to replenish the iwl->rxq->rx_free.
938 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the 938 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
939 * iwl->rxq is replenished and the READ INDEX is updated (updating the 939 * iwl->rxq is replenished and the READ INDEX is updated (updating the
940 * 'processed' and 'read' driver indexes as well) 940 * 'processed' and 'read' driver indexes as well)
941 * + A received packet is processed and handed to the kernel network stack, 941 * + A received packet is processed and handed to the kernel network stack,
942 * detached from the iwl->rxq. The driver 'processed' index is updated. 942 * detached from the iwl->rxq. The driver 'processed' index is updated.
943 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free 943 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
944 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ 944 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
945 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there 945 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
946 * were enough free buffers and RX_STALLED is set it is cleared. 946 * were enough free buffers and RX_STALLED is set it is cleared.
947 * 947 *
948 * 948 *
949 * Driver sequence: 949 * Driver sequence:
950 * 950 *
951 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls 951 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
952 * iwl3945_rx_queue_restock 952 * iwl3945_rx_queue_restock
953 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx 953 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
954 * queue, updates firmware pointers, and updates 954 * queue, updates firmware pointers, and updates
955 * the WRITE index. If insufficient rx_free buffers 955 * the WRITE index. If insufficient rx_free buffers
956 * are available, schedules iwl3945_rx_replenish 956 * are available, schedules iwl3945_rx_replenish
957 * 957 *
958 * -- enable interrupts -- 958 * -- enable interrupts --
959 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the 959 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
960 * READ INDEX, detaching the SKB from the pool. 960 * READ INDEX, detaching the SKB from the pool.
961 * Moves the packet buffer from queue to rx_used. 961 * Moves the packet buffer from queue to rx_used.
962 * Calls iwl3945_rx_queue_restock to refill any empty 962 * Calls iwl3945_rx_queue_restock to refill any empty
963 * slots. 963 * slots.
964 * ... 964 * ...
965 * 965 *
966 */ 966 */
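The READ/WRITE index rules described above can be checked with a tiny stand-alone model; QSIZE here is an arbitrary power of two chosen for the example, not the driver's actual RX_QUEUE_SIZE.

#include <stdio.h>

#define QSIZE 32	/* illustrative queue depth (power of two) */

/* Empty when WRITE == READ - 1 (mod QSIZE); full when WRITE == READ. */
static int rxq_empty(unsigned int read, unsigned int write)
{
	return write == ((read - 1) & (QSIZE - 1));
}

static int rxq_full(unsigned int read, unsigned int write)
{
	return write == read;
}

int main(void)
{
	printf("read=5  write=4  -> empty=%d full=%d\n", rxq_empty(5, 4), rxq_full(5, 4));
	printf("read=5  write=5  -> empty=%d full=%d\n", rxq_empty(5, 5), rxq_full(5, 5));
	printf("read=0  write=31 -> empty=%d full=%d\n", rxq_empty(0, 31), rxq_full(0, 31));
	return 0;
}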
967 967
968 /** 968 /**
969 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 969 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
970 */ 970 */
971 static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv, 971 static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
972 dma_addr_t dma_addr) 972 dma_addr_t dma_addr)
973 { 973 {
974 return cpu_to_le32((u32)dma_addr); 974 return cpu_to_le32((u32)dma_addr);
975 } 975 }
976 976
977 /** 977 /**
978 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool 978 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
979 * 979 *
980 * If there are slots in the RX queue that need to be restocked, 980 * If there are slots in the RX queue that need to be restocked,
981 * and we have free pre-allocated buffers, fill the ranks as much 981 * and we have free pre-allocated buffers, fill the ranks as much
982 * as we can, pulling from rx_free. 982 * as we can, pulling from rx_free.
983 * 983 *
984 * This moves the 'write' index forward to catch up with 'processed', and 984 * This moves the 'write' index forward to catch up with 'processed', and
985 * also updates the memory address in the firmware to reference the new 985 * also updates the memory address in the firmware to reference the new
986 * target buffer. 986 * target buffer.
987 */ 987 */
988 static void iwl3945_rx_queue_restock(struct iwl_priv *priv) 988 static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
989 { 989 {
990 struct iwl_rx_queue *rxq = &priv->rxq; 990 struct iwl_rx_queue *rxq = &priv->rxq;
991 struct list_head *element; 991 struct list_head *element;
992 struct iwl_rx_mem_buffer *rxb; 992 struct iwl_rx_mem_buffer *rxb;
993 unsigned long flags; 993 unsigned long flags;
994 int write; 994 int write;
995 995
996 spin_lock_irqsave(&rxq->lock, flags); 996 spin_lock_irqsave(&rxq->lock, flags);
997 write = rxq->write & ~0x7; 997 write = rxq->write & ~0x7;
998 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 998 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
999 /* Get next free Rx buffer, remove from free list */ 999 /* Get next free Rx buffer, remove from free list */
1000 element = rxq->rx_free.next; 1000 element = rxq->rx_free.next;
1001 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 1001 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1002 list_del(element); 1002 list_del(element);
1003 1003
1004 /* Point to Rx buffer via next RBD in circular buffer */ 1004 /* Point to Rx buffer via next RBD in circular buffer */
1005 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma); 1005 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
1006 rxq->queue[rxq->write] = rxb; 1006 rxq->queue[rxq->write] = rxb;
1007 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 1007 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
1008 rxq->free_count--; 1008 rxq->free_count--;
1009 } 1009 }
1010 spin_unlock_irqrestore(&rxq->lock, flags); 1010 spin_unlock_irqrestore(&rxq->lock, flags);
1011 /* If the pre-allocated buffer pool is dropping low, schedule to 1011 /* If the pre-allocated buffer pool is dropping low, schedule to
1012 * refill it */ 1012 * refill it */
1013 if (rxq->free_count <= RX_LOW_WATERMARK) 1013 if (rxq->free_count <= RX_LOW_WATERMARK)
1014 queue_work(priv->workqueue, &priv->rx_replenish); 1014 queue_work(priv->workqueue, &priv->rx_replenish);
1015 1015
1016 1016
1017 /* If we've added more space for the firmware to place data, tell it. 1017 /* If we've added more space for the firmware to place data, tell it.
1018 * Increment device's write pointer in multiples of 8. */ 1018 * Increment device's write pointer in multiples of 8. */
1019 if ((rxq->write_actual != (rxq->write & ~0x7)) 1019 if ((rxq->write_actual != (rxq->write & ~0x7))
1020 || (abs(rxq->write - rxq->read) > 7)) { 1020 || (abs(rxq->write - rxq->read) > 7)) {
1021 spin_lock_irqsave(&rxq->lock, flags); 1021 spin_lock_irqsave(&rxq->lock, flags);
1022 rxq->need_update = 1; 1022 rxq->need_update = 1;
1023 spin_unlock_irqrestore(&rxq->lock, flags); 1023 spin_unlock_irqrestore(&rxq->lock, flags);
1024 iwl_rx_queue_update_write_ptr(priv, rxq); 1024 iwl_rx_queue_update_write_ptr(priv, rxq);
1025 } 1025 }
1026 } 1026 }
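A quick sketch of the "multiples of 8" rule used above when deciding whether to bump the device write pointer; the index values are arbitrary examples.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* (write, read, write_actual last told to the device) */
	struct { unsigned int write, read, write_actual; } ex[] = {
		{ 13, 9, 8 },	/* rounds down to 8, already told, small gap -> no update */
		{ 16, 9, 8 },	/* rounds down to 16, differs                 -> update   */
		{ 13, 4, 8 },	/* gap between write and read exceeds 7       -> update   */
	};
	unsigned int i;

	for (i = 0; i < sizeof(ex) / sizeof(ex[0]); i++) {
		unsigned int rounded = ex[i].write & ~0x7u;
		int update = (ex[i].write_actual != rounded) ||
			     (abs((int)ex[i].write - (int)ex[i].read) > 7);

		printf("write=%2u read=%2u actual=%2u -> rounded=%2u update=%d\n",
		       ex[i].write, ex[i].read, ex[i].write_actual, rounded, update);
	}
	return 0;
}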
1027 1027
1028 /** 1028 /**
1029 * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free 1029 * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
1030 * 1030 *
1031 * When moving to rx_free an SKB is allocated for the slot. 1031 * When moving to rx_free an SKB is allocated for the slot.
1032 * 1032 *
1033 * Also restock the Rx queue via iwl3945_rx_queue_restock. 1033 * Also restock the Rx queue via iwl3945_rx_queue_restock.
1034 * This is called as a scheduled work item (except during initialization) 1034 * This is called as a scheduled work item (except during initialization)
1035 */ 1035 */
1036 static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority) 1036 static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1037 { 1037 {
1038 struct iwl_rx_queue *rxq = &priv->rxq; 1038 struct iwl_rx_queue *rxq = &priv->rxq;
1039 struct list_head *element; 1039 struct list_head *element;
1040 struct iwl_rx_mem_buffer *rxb; 1040 struct iwl_rx_mem_buffer *rxb;
1041 struct page *page; 1041 struct page *page;
1042 unsigned long flags; 1042 unsigned long flags;
1043 gfp_t gfp_mask = priority; 1043 gfp_t gfp_mask = priority;
1044 1044
1045 while (1) { 1045 while (1) {
1046 spin_lock_irqsave(&rxq->lock, flags); 1046 spin_lock_irqsave(&rxq->lock, flags);
1047 1047
1048 if (list_empty(&rxq->rx_used)) { 1048 if (list_empty(&rxq->rx_used)) {
1049 spin_unlock_irqrestore(&rxq->lock, flags); 1049 spin_unlock_irqrestore(&rxq->lock, flags);
1050 return; 1050 return;
1051 } 1051 }
1052 spin_unlock_irqrestore(&rxq->lock, flags); 1052 spin_unlock_irqrestore(&rxq->lock, flags);
1053 1053
1054 if (rxq->free_count > RX_LOW_WATERMARK) 1054 if (rxq->free_count > RX_LOW_WATERMARK)
1055 gfp_mask |= __GFP_NOWARN; 1055 gfp_mask |= __GFP_NOWARN;
1056 1056
1057 if (priv->hw_params.rx_page_order > 0) 1057 if (priv->hw_params.rx_page_order > 0)
1058 gfp_mask |= __GFP_COMP; 1058 gfp_mask |= __GFP_COMP;
1059 1059
1060 /* Alloc a new receive buffer */ 1060 /* Alloc a new receive buffer */
1061 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); 1061 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1062 if (!page) { 1062 if (!page) {
1063 if (net_ratelimit()) 1063 if (net_ratelimit())
1064 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 1064 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1065 if ((rxq->free_count <= RX_LOW_WATERMARK) && 1065 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1066 net_ratelimit()) 1066 net_ratelimit())
1067 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", 1067 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1068 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", 1068 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
1069 rxq->free_count); 1069 rxq->free_count);
1070 /* We don't reschedule replenish work here -- we will 1070 /* We don't reschedule replenish work here -- we will
1071 * call the restock method and if it still needs 1071 * call the restock method and if it still needs
1072 * more buffers it will schedule replenish */ 1072 * more buffers it will schedule replenish */
1073 break; 1073 break;
1074 } 1074 }
1075 1075
1076 spin_lock_irqsave(&rxq->lock, flags); 1076 spin_lock_irqsave(&rxq->lock, flags);
1077 if (list_empty(&rxq->rx_used)) { 1077 if (list_empty(&rxq->rx_used)) {
1078 spin_unlock_irqrestore(&rxq->lock, flags); 1078 spin_unlock_irqrestore(&rxq->lock, flags);
1079 __free_pages(page, priv->hw_params.rx_page_order); 1079 __free_pages(page, priv->hw_params.rx_page_order);
1080 return; 1080 return;
1081 } 1081 }
1082 element = rxq->rx_used.next; 1082 element = rxq->rx_used.next;
1083 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 1083 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1084 list_del(element); 1084 list_del(element);
1085 spin_unlock_irqrestore(&rxq->lock, flags); 1085 spin_unlock_irqrestore(&rxq->lock, flags);
1086 1086
1087 rxb->page = page; 1087 rxb->page = page;
1088 /* Get physical address of RB/SKB */ 1088 /* Get physical address of RB/SKB */
1089 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, 1089 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1090 PAGE_SIZE << priv->hw_params.rx_page_order, 1090 PAGE_SIZE << priv->hw_params.rx_page_order,
1091 PCI_DMA_FROMDEVICE); 1091 PCI_DMA_FROMDEVICE);
1092 1092
1093 spin_lock_irqsave(&rxq->lock, flags); 1093 spin_lock_irqsave(&rxq->lock, flags);
1094 1094
1095 list_add_tail(&rxb->list, &rxq->rx_free); 1095 list_add_tail(&rxb->list, &rxq->rx_free);
1096 rxq->free_count++; 1096 rxq->free_count++;
1097 priv->alloc_rxb_page++; 1097 priv->alloc_rxb_page++;
1098 1098
1099 spin_unlock_irqrestore(&rxq->lock, flags); 1099 spin_unlock_irqrestore(&rxq->lock, flags);
1100 } 1100 }
1101 } 1101 }
1102 1102
1103 void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 1103 void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1104 { 1104 {
1105 unsigned long flags; 1105 unsigned long flags;
1106 int i; 1106 int i;
1107 spin_lock_irqsave(&rxq->lock, flags); 1107 spin_lock_irqsave(&rxq->lock, flags);
1108 INIT_LIST_HEAD(&rxq->rx_free); 1108 INIT_LIST_HEAD(&rxq->rx_free);
1109 INIT_LIST_HEAD(&rxq->rx_used); 1109 INIT_LIST_HEAD(&rxq->rx_used);
1110 /* Fill the rx_used queue with _all_ of the Rx buffers */ 1110 /* Fill the rx_used queue with _all_ of the Rx buffers */
1111 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1111 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1112 /* In the reset function, these buffers may have been allocated 1112 /* In the reset function, these buffers may have been allocated
1113 * to an SKB, so we need to unmap and free potential storage */ 1113 * to an SKB, so we need to unmap and free potential storage */
1114 if (rxq->pool[i].page != NULL) { 1114 if (rxq->pool[i].page != NULL) {
1115 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1115 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1116 PAGE_SIZE << priv->hw_params.rx_page_order, 1116 PAGE_SIZE << priv->hw_params.rx_page_order,
1117 PCI_DMA_FROMDEVICE); 1117 PCI_DMA_FROMDEVICE);
1118 __iwl_free_pages(priv, rxq->pool[i].page); 1118 __iwl_free_pages(priv, rxq->pool[i].page);
1119 rxq->pool[i].page = NULL; 1119 rxq->pool[i].page = NULL;
1120 } 1120 }
1121 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1121 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1122 } 1122 }
1123 1123
1124 /* Set us so that we have processed and used all buffers, but have 1124 /* Set us so that we have processed and used all buffers, but have
1125 * not restocked the Rx queue with fresh buffers */ 1125 * not restocked the Rx queue with fresh buffers */
1126 rxq->read = rxq->write = 0; 1126 rxq->read = rxq->write = 0;
1127 rxq->write_actual = 0; 1127 rxq->write_actual = 0;
1128 rxq->free_count = 0; 1128 rxq->free_count = 0;
1129 spin_unlock_irqrestore(&rxq->lock, flags); 1129 spin_unlock_irqrestore(&rxq->lock, flags);
1130 } 1130 }
1131 1131
1132 void iwl3945_rx_replenish(void *data) 1132 void iwl3945_rx_replenish(void *data)
1133 { 1133 {
1134 struct iwl_priv *priv = data; 1134 struct iwl_priv *priv = data;
1135 unsigned long flags; 1135 unsigned long flags;
1136 1136
1137 iwl3945_rx_allocate(priv, GFP_KERNEL); 1137 iwl3945_rx_allocate(priv, GFP_KERNEL);
1138 1138
1139 spin_lock_irqsave(&priv->lock, flags); 1139 spin_lock_irqsave(&priv->lock, flags);
1140 iwl3945_rx_queue_restock(priv); 1140 iwl3945_rx_queue_restock(priv);
1141 spin_unlock_irqrestore(&priv->lock, flags); 1141 spin_unlock_irqrestore(&priv->lock, flags);
1142 } 1142 }
1143 1143
1144 static void iwl3945_rx_replenish_now(struct iwl_priv *priv) 1144 static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
1145 { 1145 {
1146 iwl3945_rx_allocate(priv, GFP_ATOMIC); 1146 iwl3945_rx_allocate(priv, GFP_ATOMIC);
1147 1147
1148 iwl3945_rx_queue_restock(priv); 1148 iwl3945_rx_queue_restock(priv);
1149 } 1149 }
1150 1150
1151 1151
1152 /* Assumes that the skb field of the buffers in 'pool' is kept accurate. 1152 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
1153 * If an SKB has been detached, the POOL needs to have its SKB set to NULL. 1153 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
1154 * This free routine walks the list of POOL entries, and if the SKB is 1154 * This free routine walks the list of POOL entries, and if the SKB is
1155 * non-NULL it is unmapped and freed. 1155 * non-NULL it is unmapped and freed.
1156 */ 1156 */
1157 static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 1157 static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1158 { 1158 {
1159 int i; 1159 int i;
1160 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 1160 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1161 if (rxq->pool[i].page != NULL) { 1161 if (rxq->pool[i].page != NULL) {
1162 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, 1162 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1163 PAGE_SIZE << priv->hw_params.rx_page_order, 1163 PAGE_SIZE << priv->hw_params.rx_page_order,
1164 PCI_DMA_FROMDEVICE); 1164 PCI_DMA_FROMDEVICE);
1165 __iwl_free_pages(priv, rxq->pool[i].page); 1165 __iwl_free_pages(priv, rxq->pool[i].page);
1166 rxq->pool[i].page = NULL; 1166 rxq->pool[i].page = NULL;
1167 } 1167 }
1168 } 1168 }
1169 1169
1170 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, 1170 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1171 rxq->bd_dma); 1171 rxq->bd_dma);
1172 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), 1172 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1173 rxq->rb_stts, rxq->rb_stts_dma); 1173 rxq->rb_stts, rxq->rb_stts_dma);
1174 rxq->bd = NULL; 1174 rxq->bd = NULL;
1175 rxq->rb_stts = NULL; 1175 rxq->rb_stts = NULL;
1176 } 1176 }
1177 1177
1178 1178
1179 /* Convert linear signal-to-noise ratio into dB */ 1179 /* Convert linear signal-to-noise ratio into dB */
1180 static u8 ratio2dB[100] = { 1180 static u8 ratio2dB[100] = {
1181 /* 0 1 2 3 4 5 6 7 8 9 */ 1181 /* 0 1 2 3 4 5 6 7 8 9 */
1182 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ 1182 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1183 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ 1183 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1184 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ 1184 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1185 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ 1185 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1186 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ 1186 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1187 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ 1187 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1188 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ 1188 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1189 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ 1189 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1190 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ 1190 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1191 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ 1191 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1192 }; 1192 };
1193 1193
1194 /* Calculates a relative dB value from a ratio of linear 1194 /* Calculates a relative dB value from a ratio of linear
1195 * (i.e. not dB) signal levels. 1195 * (i.e. not dB) signal levels.
1196 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ 1196 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1197 int iwl3945_calc_db_from_ratio(int sig_ratio) 1197 int iwl3945_calc_db_from_ratio(int sig_ratio)
1198 { 1198 {
1199 /* 1000:1 or higher just report as 60 dB */ 1199 /* 1000:1 or higher just report as 60 dB */
1200 if (sig_ratio >= 1000) 1200 if (sig_ratio >= 1000)
1201 return 60; 1201 return 60;
1202 1202
1203 /* 100:1 or higher, divide by 10 and use table, 1203 /* 100:1 or higher, divide by 10 and use table,
1204 * add 20 dB to make up for divide by 10 */ 1204 * add 20 dB to make up for divide by 10 */
1205 if (sig_ratio >= 100) 1205 if (sig_ratio >= 100)
1206 return 20 + (int)ratio2dB[sig_ratio/10]; 1206 return 20 + (int)ratio2dB[sig_ratio/10];
1207 1207
1208 /* We shouldn't see this */ 1208 /* We shouldn't see this */
1209 if (sig_ratio < 1) 1209 if (sig_ratio < 1)
1210 return 0; 1210 return 0;
1211 1211
1212 /* Use table for ratios 1:1 - 99:1 */ 1212 /* Use table for ratios 1:1 - 99:1 */
1213 return (int)ratio2dB[sig_ratio]; 1213 return (int)ratio2dB[sig_ratio];
1214 } 1214 }
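A quick user-space check of the lookup against the exact 20*log10() formula for two sample ratios (compile with -lm); the two table entries are copied from ratio2dB[] above.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* 250:1 is >= 100, so the driver divides by 10 and adds 20 dB: 20 + ratio2dB[25] */
	unsigned int tbl_250 = 20 + 28;		/* ratio2dB[25] == 28 */
	/* 10:1 uses the table directly */
	unsigned int tbl_10 = 20;		/* ratio2dB[10] == 20 */

	printf("250:1 table=%u dB  exact=%.1f dB\n", tbl_250, 20.0 * log10(250.0));
	printf(" 10:1 table=%u dB  exact=%.1f dB\n", tbl_10, 20.0 * log10(10.0));
	return 0;
}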
1215 1215
1216 /** 1216 /**
1217 * iwl3945_rx_handle - Main entry function for receiving responses from uCode 1217 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1218 * 1218 *
1219 * Uses the priv->rx_handlers callback function array to invoke 1219 * Uses the priv->rx_handlers callback function array to invoke
1220 * the appropriate handlers, including command responses, 1220 * the appropriate handlers, including command responses,
1221 * frame-received notifications, and other notifications. 1221 * frame-received notifications, and other notifications.
1222 */ 1222 */
1223 static void iwl3945_rx_handle(struct iwl_priv *priv) 1223 static void iwl3945_rx_handle(struct iwl_priv *priv)
1224 { 1224 {
1225 struct iwl_rx_mem_buffer *rxb; 1225 struct iwl_rx_mem_buffer *rxb;
1226 struct iwl_rx_packet *pkt; 1226 struct iwl_rx_packet *pkt;
1227 struct iwl_rx_queue *rxq = &priv->rxq; 1227 struct iwl_rx_queue *rxq = &priv->rxq;
1228 u32 r, i; 1228 u32 r, i;
1229 int reclaim; 1229 int reclaim;
1230 unsigned long flags; 1230 unsigned long flags;
1231 u8 fill_rx = 0; 1231 u8 fill_rx = 0;
1232 u32 count = 8; 1232 u32 count = 8;
1233 int total_empty = 0; 1233 int total_empty = 0;
1234 1234
1235 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1235 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1236 * buffer that the driver may process (last buffer filled by ucode). */ 1236 * buffer that the driver may process (last buffer filled by ucode). */
1237 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; 1237 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1238 i = rxq->read; 1238 i = rxq->read;
1239 1239
1240 /* calculate total frames that need to be restocked after handling RX */ 1240 /* calculate total frames that need to be restocked after handling RX */
1241 total_empty = r - rxq->write_actual; 1241 total_empty = r - rxq->write_actual;
1242 if (total_empty < 0) 1242 if (total_empty < 0)
1243 total_empty += RX_QUEUE_SIZE; 1243 total_empty += RX_QUEUE_SIZE;
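The wrap-around fix-up above can be illustrated with a tiny stand-alone example; the queue size and index values are made up.

#include <stdio.h>

#define QSIZE 32	/* illustrative; the driver uses RX_QUEUE_SIZE */

int main(void)
{
	int r = 3, write_actual = 28;		/* hardware index has wrapped past the driver's */
	int total_empty = r - write_actual;	/* -25 before the wrap fix-up */

	if (total_empty < 0)
		total_empty += QSIZE;		/* 7 slots actually need restocking */

	printf("total_empty=%d\n", total_empty);
	return 0;
}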
1244 1244
1245 if (total_empty > (RX_QUEUE_SIZE / 2)) 1245 if (total_empty > (RX_QUEUE_SIZE / 2))
1246 fill_rx = 1; 1246 fill_rx = 1;
1247 /* Rx interrupt, but nothing sent from uCode */ 1247 /* Rx interrupt, but nothing sent from uCode */
1248 if (i == r) 1248 if (i == r)
1249 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); 1249 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1250 1250
1251 while (i != r) { 1251 while (i != r) {
1252 int len; 1252 int len;
1253 1253
1254 rxb = rxq->queue[i]; 1254 rxb = rxq->queue[i];
1255 1255
1256 /* If an RXB doesn't have a Rx queue slot associated with it, 1256 /* If an RXB doesn't have a Rx queue slot associated with it,
1257 * then a bug has been introduced in the queue refilling 1257 * then a bug has been introduced in the queue refilling
1258 * routines -- catch it here */ 1258 * routines -- catch it here */
1259 BUG_ON(rxb == NULL); 1259 BUG_ON(rxb == NULL);
1260 1260
1261 rxq->queue[i] = NULL; 1261 rxq->queue[i] = NULL;
1262 1262
1263 pci_unmap_page(priv->pci_dev, rxb->page_dma, 1263 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1264 PAGE_SIZE << priv->hw_params.rx_page_order, 1264 PAGE_SIZE << priv->hw_params.rx_page_order,
1265 PCI_DMA_FROMDEVICE); 1265 PCI_DMA_FROMDEVICE);
1266 pkt = rxb_addr(rxb); 1266 pkt = rxb_addr(rxb);
1267 1267
1268 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 1268 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1269 len += sizeof(u32); /* account for status word */ 1269 len += sizeof(u32); /* account for status word */
1270 trace_iwlwifi_dev_rx(priv, pkt, len); 1270 trace_iwlwifi_dev_rx(priv, pkt, len);
1271 1271
1272 /* Reclaim a command buffer only if this packet is a response 1272 /* Reclaim a command buffer only if this packet is a response
1273 * to a (driver-originated) command. 1273 * to a (driver-originated) command.
1274 * If the packet (e.g. Rx frame) originated from uCode, 1274 * If the packet (e.g. Rx frame) originated from uCode,
1275 * there is no command buffer to reclaim. 1275 * there is no command buffer to reclaim.
1276 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1276 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1277 * but apparently a few don't get set; catch them here. */ 1277 * but apparently a few don't get set; catch them here. */
1278 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && 1278 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1279 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && 1279 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1280 (pkt->hdr.cmd != REPLY_TX); 1280 (pkt->hdr.cmd != REPLY_TX);
1281 1281
1282 /* Based on type of command response or notification, 1282 /* Based on type of command response or notification,
1283 * handle those that need handling via function in 1283 * handle those that need handling via function in
1284 * rx_handlers table. See iwl3945_setup_rx_handlers() */ 1284 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1285 if (priv->rx_handlers[pkt->hdr.cmd]) { 1285 if (priv->rx_handlers[pkt->hdr.cmd]) {
1286 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1286 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1287 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1287 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1288 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1288 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1289 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 1289 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1290 } else { 1290 } else {
1291 /* No handling needed */ 1291 /* No handling needed */
1292 IWL_DEBUG_RX(priv, 1292 IWL_DEBUG_RX(priv,
1293 "r %d i %d No handler needed for %s, 0x%02x\n", 1293 "r %d i %d No handler needed for %s, 0x%02x\n",
1294 r, i, get_cmd_string(pkt->hdr.cmd), 1294 r, i, get_cmd_string(pkt->hdr.cmd),
1295 pkt->hdr.cmd); 1295 pkt->hdr.cmd);
1296 } 1296 }
1297 1297
1298 /* 1298 /*
1299 * XXX: After here, we should always check rxb->page 1299 * XXX: After here, we should always check rxb->page
1300 * against NULL before touching it or its virtual 1300 * against NULL before touching it or its virtual
1301 * memory (pkt), because some rx_handler might have 1301 * memory (pkt), because some rx_handler might have
1302 * already taken or freed the pages. 1302 * already taken or freed the pages.
1303 */ 1303 */
1304 1304
1305 if (reclaim) { 1305 if (reclaim) {
1306 /* Invoke any callbacks, transfer the buffer to caller, 1306 /* Invoke any callbacks, transfer the buffer to caller,
1307 * and fire off the (possibly) blocking iwl_send_cmd() 1307 * and fire off the (possibly) blocking iwl_send_cmd()
1308 * as we reclaim the driver command queue */ 1308 * as we reclaim the driver command queue */
1309 if (rxb->page) 1309 if (rxb->page)
1310 iwl_tx_cmd_complete(priv, rxb); 1310 iwl_tx_cmd_complete(priv, rxb);
1311 else 1311 else
1312 IWL_WARN(priv, "Claim null rxb?\n"); 1312 IWL_WARN(priv, "Claim null rxb?\n");
1313 } 1313 }
1314 1314
1315 /* Reuse the page if possible. For notification packets and 1315 /* Reuse the page if possible. For notification packets and
1316 * SKBs that fail to Rx correctly, add them back into the 1316 * SKBs that fail to Rx correctly, add them back into the
1317 * rx_free list for reuse later. */ 1317 * rx_free list for reuse later. */
1318 spin_lock_irqsave(&rxq->lock, flags); 1318 spin_lock_irqsave(&rxq->lock, flags);
1319 if (rxb->page != NULL) { 1319 if (rxb->page != NULL) {
1320 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, 1320 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1321 0, PAGE_SIZE << priv->hw_params.rx_page_order, 1321 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1322 PCI_DMA_FROMDEVICE); 1322 PCI_DMA_FROMDEVICE);
1323 list_add_tail(&rxb->list, &rxq->rx_free); 1323 list_add_tail(&rxb->list, &rxq->rx_free);
1324 rxq->free_count++; 1324 rxq->free_count++;
1325 } else 1325 } else
1326 list_add_tail(&rxb->list, &rxq->rx_used); 1326 list_add_tail(&rxb->list, &rxq->rx_used);
1327 1327
1328 spin_unlock_irqrestore(&rxq->lock, flags); 1328 spin_unlock_irqrestore(&rxq->lock, flags);
1329 1329
1330 i = (i + 1) & RX_QUEUE_MASK; 1330 i = (i + 1) & RX_QUEUE_MASK;
1331 /* If there are a lot of unused frames, 1331 /* If there are a lot of unused frames,
1332 * restock the Rx queue so ucode won't assert. */ 1332 * restock the Rx queue so ucode won't assert. */
1333 if (fill_rx) { 1333 if (fill_rx) {
1334 count++; 1334 count++;
1335 if (count >= 8) { 1335 if (count >= 8) {
1336 rxq->read = i; 1336 rxq->read = i;
1337 iwl3945_rx_replenish_now(priv); 1337 iwl3945_rx_replenish_now(priv);
1338 count = 0; 1338 count = 0;
1339 } 1339 }
1340 } 1340 }
1341 } 1341 }
1342 1342
1343 /* Backtrack one entry */ 1343 /* Backtrack one entry */
1344 rxq->read = i; 1344 rxq->read = i;
1345 if (fill_rx) 1345 if (fill_rx)
1346 iwl3945_rx_replenish_now(priv); 1346 iwl3945_rx_replenish_now(priv);
1347 else 1347 else
1348 iwl3945_rx_queue_restock(priv); 1348 iwl3945_rx_queue_restock(priv);
1349 } 1349 }
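
The wrap-around arithmetic in iwl3945_rx_handle() above relies on the Rx queue size being a power of two, so the read index can be advanced with a mask rather than a modulo. Below is a minimal standalone sketch of that index math only; the RX_QUEUE_SIZE/RX_QUEUE_MASK values and the sample indices are made up for illustration and are not taken from the driver headers.

#include <stdio.h>

#define RX_QUEUE_SIZE 256                  /* illustrative, power of two */
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

int main(void)
{
	int r = 5;               /* uCode's closed_rb_num: last slot it filled */
	int write_actual = 250;  /* last slot the driver restocked */
	int i = 252;             /* driver's read index */
	int total_empty;

	/* same wrap-around subtraction as in iwl3945_rx_handle() */
	total_empty = r - write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;
	printf("slots to restock: %d\n", total_empty);   /* prints 11 */

	/* advance the read index with a mask, as the loop above does */
	while (i != r) {
		/* ...process rxq->queue[i] here... */
		i = (i + 1) & RX_QUEUE_MASK;
	}
	printf("read index caught up at %d\n", i);       /* prints 5 */
	return 0;
}
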
1350 1350
1351 /* call this function to flush any scheduled tasklet */ 1351 /* call this function to flush any scheduled tasklet */
1352 static inline void iwl_synchronize_irq(struct iwl_priv *priv) 1352 static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1353 { 1353 {
1354 /* wait to make sure we flush any pending tasklet */ 1354 /* wait to make sure we flush any pending tasklet */
1355 synchronize_irq(priv->pci_dev->irq); 1355 synchronize_irq(priv->pci_dev->irq);
1356 tasklet_kill(&priv->irq_tasklet); 1356 tasklet_kill(&priv->irq_tasklet);
1357 } 1357 }
1358 1358
1359 static const char *desc_lookup(int i) 1359 static const char *desc_lookup(int i)
1360 { 1360 {
1361 switch (i) { 1361 switch (i) {
1362 case 1: 1362 case 1:
1363 return "FAIL"; 1363 return "FAIL";
1364 case 2: 1364 case 2:
1365 return "BAD_PARAM"; 1365 return "BAD_PARAM";
1366 case 3: 1366 case 3:
1367 return "BAD_CHECKSUM"; 1367 return "BAD_CHECKSUM";
1368 case 4: 1368 case 4:
1369 return "NMI_INTERRUPT"; 1369 return "NMI_INTERRUPT";
1370 case 5: 1370 case 5:
1371 return "SYSASSERT"; 1371 return "SYSASSERT";
1372 case 6: 1372 case 6:
1373 return "FATAL_ERROR"; 1373 return "FATAL_ERROR";
1374 } 1374 }
1375 1375
1376 return "UNKNOWN"; 1376 return "UNKNOWN";
1377 } 1377 }
1378 1378
1379 #define ERROR_START_OFFSET (1 * sizeof(u32)) 1379 #define ERROR_START_OFFSET (1 * sizeof(u32))
1380 #define ERROR_ELEM_SIZE (7 * sizeof(u32)) 1380 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
1381 1381
1382 void iwl3945_dump_nic_error_log(struct iwl_priv *priv) 1382 void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1383 { 1383 {
1384 u32 i; 1384 u32 i;
1385 u32 desc, time, count, base, data1; 1385 u32 desc, time, count, base, data1;
1386 u32 blink1, blink2, ilink1, ilink2; 1386 u32 blink1, blink2, ilink1, ilink2;
1387 1387
1388 base = le32_to_cpu(priv->card_alive.error_event_table_ptr); 1388 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1389 1389
1390 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 1390 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1391 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); 1391 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
1392 return; 1392 return;
1393 } 1393 }
1394 1394
1395 1395
1396 count = iwl_read_targ_mem(priv, base); 1396 count = iwl_read_targ_mem(priv, base);
1397 1397
1398 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 1398 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1399 IWL_ERR(priv, "Start IWL Error Log Dump:\n"); 1399 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1400 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", 1400 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1401 priv->status, count); 1401 priv->status, count);
1402 } 1402 }
1403 1403
1404 IWL_ERR(priv, "Desc Time asrtPC blink2 " 1404 IWL_ERR(priv, "Desc Time asrtPC blink2 "
1405 "ilink1 nmiPC Line\n"); 1405 "ilink1 nmiPC Line\n");
1406 for (i = ERROR_START_OFFSET; 1406 for (i = ERROR_START_OFFSET;
1407 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; 1407 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
1408 i += ERROR_ELEM_SIZE) { 1408 i += ERROR_ELEM_SIZE) {
1409 desc = iwl_read_targ_mem(priv, base + i); 1409 desc = iwl_read_targ_mem(priv, base + i);
1410 time = 1410 time =
1411 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32)); 1411 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
1412 blink1 = 1412 blink1 =
1413 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32)); 1413 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
1414 blink2 = 1414 blink2 =
1415 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32)); 1415 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
1416 ilink1 = 1416 ilink1 =
1417 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32)); 1417 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
1418 ilink2 = 1418 ilink2 =
1419 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32)); 1419 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
1420 data1 = 1420 data1 =
1421 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); 1421 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
1422 1422
1423 IWL_ERR(priv, 1423 IWL_ERR(priv,
1424 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1424 "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1425 desc_lookup(desc), desc, time, blink1, blink2, 1425 desc_lookup(desc), desc, time, blink1, blink2,
1426 ilink1, ilink2, data1); 1426 ilink1, ilink2, data1);
1427 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, 1427 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
1428 0, blink1, blink2, ilink1, ilink2); 1428 0, blink1, blink2, ilink1, ilink2);
1429 } 1429 }
1430 } 1430 }
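
iwl3945_dump_nic_error_log() above implies a simple SRAM layout: one u32 record count at base, followed by count records of ERROR_ELEM_SIZE (seven u32s: desc, time, blink1, blink2, ilink1, ilink2, data1) starting at ERROR_START_OFFSET. A small host-only sketch of that offset arithmetic, with read_u32() as a hypothetical stand-in for iwl_read_targ_mem() and a fabricated table:

#include <stdio.h>
#include <stdint.h>

#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE    (7 * sizeof(uint32_t))

/* stand-in for iwl_read_targ_mem(): byte-addressed read from a fake table */
static uint32_t read_u32(const uint32_t *tbl, size_t byte_off)
{
	return tbl[byte_off / sizeof(uint32_t)];
}

int main(void)
{
	/* fabricated log: count = 2, then two 7-word records */
	uint32_t tbl[1 + 2 * 7] = {
		2,
		/* desc, time, blink1, blink2, ilink1, ilink2, data1 */
		5, 100, 0x11, 0x22, 0x33, 0x44, 0xdead,
		1, 200, 0x55, 0x66, 0x77, 0x88, 0xbeef,
	};
	uint32_t count = read_u32(tbl, 0);
	size_t i;

	/* same walk as the dump loop above */
	for (i = ERROR_START_OFFSET;
	     i < count * ERROR_ELEM_SIZE + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE)
		printf("desc=%u time=%u data1=0x%x\n",
		       (unsigned)read_u32(tbl, i),
		       (unsigned)read_u32(tbl, i + 1 * sizeof(uint32_t)),
		       (unsigned)read_u32(tbl, i + 6 * sizeof(uint32_t)));
	return 0;
}
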
1431 1431
1432 #define EVENT_START_OFFSET (6 * sizeof(u32)) 1432 #define EVENT_START_OFFSET (6 * sizeof(u32))
1433 1433
1434 /** 1434 /**
1435 * iwl3945_print_event_log - Dump error event log to syslog 1435 * iwl3945_print_event_log - Dump error event log to syslog
1436 * 1436 *
1437 */ 1437 */
1438 static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, 1438 static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1439 u32 num_events, u32 mode, 1439 u32 num_events, u32 mode,
1440 int pos, char **buf, size_t bufsz) 1440 int pos, char **buf, size_t bufsz)
1441 { 1441 {
1442 u32 i; 1442 u32 i;
1443 u32 base; /* SRAM byte address of event log header */ 1443 u32 base; /* SRAM byte address of event log header */
1444 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ 1444 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1445 u32 ptr; /* SRAM byte address of log data */ 1445 u32 ptr; /* SRAM byte address of log data */
1446 u32 ev, time, data; /* event log data */ 1446 u32 ev, time, data; /* event log data */
1447 unsigned long reg_flags; 1447 unsigned long reg_flags;
1448 1448
1449 if (num_events == 0) 1449 if (num_events == 0)
1450 return pos; 1450 return pos;
1451 1451
1452 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1452 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1453 1453
1454 if (mode == 0) 1454 if (mode == 0)
1455 event_size = 2 * sizeof(u32); 1455 event_size = 2 * sizeof(u32);
1456 else 1456 else
1457 event_size = 3 * sizeof(u32); 1457 event_size = 3 * sizeof(u32);
1458 1458
1459 ptr = base + EVENT_START_OFFSET + (start_idx * event_size); 1459 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1460 1460
1461 /* Make sure device is powered up for SRAM reads */ 1461 /* Make sure device is powered up for SRAM reads */
1462 spin_lock_irqsave(&priv->reg_lock, reg_flags); 1462 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1463 iwl_grab_nic_access(priv); 1463 iwl_grab_nic_access(priv);
1464 1464
1465 /* Set starting address; reads will auto-increment */ 1465 /* Set starting address; reads will auto-increment */
1466 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); 1466 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1467 rmb(); 1467 rmb();
1468 1468
1469 /* "time" is actually "data" for mode 0 (no timestamp). 1469 /* "time" is actually "data" for mode 0 (no timestamp).
1470 * place event id # at far right for easier visual parsing. */ 1470 * place event id # at far right for easier visual parsing. */
1471 for (i = 0; i < num_events; i++) { 1471 for (i = 0; i < num_events; i++) {
1472 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1472 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1473 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1473 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1474 if (mode == 0) { 1474 if (mode == 0) {
1475 /* data, ev */ 1475 /* data, ev */
1476 if (bufsz) { 1476 if (bufsz) {
1477 pos += scnprintf(*buf + pos, bufsz - pos, 1477 pos += scnprintf(*buf + pos, bufsz - pos,
1478 "0x%08x:%04u\n", 1478 "0x%08x:%04u\n",
1479 time, ev); 1479 time, ev);
1480 } else { 1480 } else {
1481 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1481 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1482 trace_iwlwifi_dev_ucode_event(priv, 0, 1482 trace_iwlwifi_dev_ucode_event(priv, 0,
1483 time, ev); 1483 time, ev);
1484 } 1484 }
1485 } else { 1485 } else {
1486 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1486 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1487 if (bufsz) { 1487 if (bufsz) {
1488 pos += scnprintf(*buf + pos, bufsz - pos, 1488 pos += scnprintf(*buf + pos, bufsz - pos,
1489 "%010u:0x%08x:%04u\n", 1489 "%010u:0x%08x:%04u\n",
1490 time, data, ev); 1490 time, data, ev);
1491 } else { 1491 } else {
1492 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", 1492 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
1493 time, data, ev); 1493 time, data, ev);
1494 trace_iwlwifi_dev_ucode_event(priv, time, 1494 trace_iwlwifi_dev_ucode_event(priv, time,
1495 data, ev); 1495 data, ev);
1496 } 1496 }
1497 } 1497 }
1498 } 1498 }
1499 1499
1500 /* Allow device to power down */ 1500 /* Allow device to power down */
1501 iwl_release_nic_access(priv); 1501 iwl_release_nic_access(priv);
1502 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 1502 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1503 return pos; 1503 return pos;
1504 } 1504 }
1505 1505
1506 /** 1506 /**
1507 * iwl3945_print_last_event_logs - Dump the newest # of event log entries to syslog 1507 * iwl3945_print_last_event_logs - Dump the newest # of event log entries to syslog
1508 */ 1508 */
1509 static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, 1509 static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1510 u32 num_wraps, u32 next_entry, 1510 u32 num_wraps, u32 next_entry,
1511 u32 size, u32 mode, 1511 u32 size, u32 mode,
1512 int pos, char **buf, size_t bufsz) 1512 int pos, char **buf, size_t bufsz)
1513 { 1513 {
1514 /* 1514 /*
1515 * display the newest DEFAULT_LOG_ENTRIES entries 1515 * display the newest DEFAULT_LOG_ENTRIES entries
1516 * i.e. the entries just before the next one that uCode would fill. 1516 * i.e. the entries just before the next one that uCode would fill.
1517 */ 1517 */
1518 if (num_wraps) { 1518 if (num_wraps) {
1519 if (next_entry < size) { 1519 if (next_entry < size) {
1520 pos = iwl3945_print_event_log(priv, 1520 pos = iwl3945_print_event_log(priv,
1521 capacity - (size - next_entry), 1521 capacity - (size - next_entry),
1522 size - next_entry, mode, 1522 size - next_entry, mode,
1523 pos, buf, bufsz); 1523 pos, buf, bufsz);
1524 pos = iwl3945_print_event_log(priv, 0, 1524 pos = iwl3945_print_event_log(priv, 0,
1525 next_entry, mode, 1525 next_entry, mode,
1526 pos, buf, bufsz); 1526 pos, buf, bufsz);
1527 } else 1527 } else
1528 pos = iwl3945_print_event_log(priv, next_entry - size, 1528 pos = iwl3945_print_event_log(priv, next_entry - size,
1529 size, mode, 1529 size, mode,
1530 pos, buf, bufsz); 1530 pos, buf, bufsz);
1531 } else { 1531 } else {
1532 if (next_entry < size) 1532 if (next_entry < size)
1533 pos = iwl3945_print_event_log(priv, 0, 1533 pos = iwl3945_print_event_log(priv, 0,
1534 next_entry, mode, 1534 next_entry, mode,
1535 pos, buf, bufsz); 1535 pos, buf, bufsz);
1536 else 1536 else
1537 pos = iwl3945_print_event_log(priv, next_entry - size, 1537 pos = iwl3945_print_event_log(priv, next_entry - size,
1538 size, mode, 1538 size, mode,
1539 pos, buf, bufsz); 1539 pos, buf, bufsz);
1540 } 1540 }
1541 return pos; 1541 return pos;
1542 } 1542 }
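
The wrap handling in iwl3945_print_last_event_logs() reduces to picking at most two contiguous ranges that cover the newest `size` entries of a circular log. A standalone sketch of just that range selection, using made-up capacity and write-pointer values to show the two-chunk case:

#include <stdio.h>

static void dump_range(unsigned start, unsigned count)
{
	printf("dump %u entries starting at index %u\n", count, start);
}

static void dump_newest(unsigned capacity, unsigned num_wraps,
			unsigned next_entry, unsigned size)
{
	if (num_wraps) {
		if (next_entry < size) {
			/* tail of the buffer, then the wrapped head */
			dump_range(capacity - (size - next_entry),
				   size - next_entry);
			dump_range(0, next_entry);
		} else {
			dump_range(next_entry - size, size);
		}
	} else {
		if (next_entry < size)
			dump_range(0, next_entry);
		else
			dump_range(next_entry - size, size);
	}
}

int main(void)
{
	/* capacity 128, wrapped once, writer at 5, want the newest 20:
	 * prints indices 113..127 followed by 0..4 */
	dump_newest(128, 1, 5, 20);
	return 0;
}
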
1543 1543
1544 #define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) 1544 #define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1545 1545
1546 int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 1546 int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1547 char **buf, bool display) 1547 char **buf, bool display)
1548 { 1548 {
1549 u32 base; /* SRAM byte address of event log header */ 1549 u32 base; /* SRAM byte address of event log header */
1550 u32 capacity; /* event log capacity in # entries */ 1550 u32 capacity; /* event log capacity in # entries */
1551 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ 1551 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1552 u32 num_wraps; /* # times uCode wrapped to top of log */ 1552 u32 num_wraps; /* # times uCode wrapped to top of log */
1553 u32 next_entry; /* index of next entry to be written by uCode */ 1553 u32 next_entry; /* index of next entry to be written by uCode */
1554 u32 size; /* # entries that we'll print */ 1554 u32 size; /* # entries that we'll print */
1555 int pos = 0; 1555 int pos = 0;
1556 size_t bufsz = 0; 1556 size_t bufsz = 0;
1557 1557
1558 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 1558 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1559 if (!iwl3945_hw_valid_rtc_data_addr(base)) { 1559 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
1560 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); 1560 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
1561 return -EINVAL; 1561 return -EINVAL;
1562 } 1562 }
1563 1563
1564 /* event log header */ 1564 /* event log header */
1565 capacity = iwl_read_targ_mem(priv, base); 1565 capacity = iwl_read_targ_mem(priv, base);
1566 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); 1566 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1567 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1567 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1568 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1568 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1569 1569
1570 if (capacity > priv->cfg->max_event_log_size) { 1570 if (capacity > priv->cfg->max_event_log_size) {
1571 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 1571 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1572 capacity, priv->cfg->max_event_log_size); 1572 capacity, priv->cfg->max_event_log_size);
1573 capacity = priv->cfg->max_event_log_size; 1573 capacity = priv->cfg->max_event_log_size;
1574 } 1574 }
1575 1575
1576 if (next_entry > priv->cfg->max_event_log_size) { 1576 if (next_entry > priv->cfg->max_event_log_size) {
1577 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", 1577 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1578 next_entry, priv->cfg->max_event_log_size); 1578 next_entry, priv->cfg->max_event_log_size);
1579 next_entry = priv->cfg->max_event_log_size; 1579 next_entry = priv->cfg->max_event_log_size;
1580 } 1580 }
1581 1581
1582 size = num_wraps ? capacity : next_entry; 1582 size = num_wraps ? capacity : next_entry;
1583 1583
1584 /* bail out if nothing in log */ 1584 /* bail out if nothing in log */
1585 if (size == 0) { 1585 if (size == 0) {
1586 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); 1586 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1587 return pos; 1587 return pos;
1588 } 1588 }
1589 1589
1590 #ifdef CONFIG_IWLWIFI_DEBUG 1590 #ifdef CONFIG_IWLWIFI_DEBUG
1591 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) 1591 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1592 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) 1592 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1593 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; 1593 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1594 #else 1594 #else
1595 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) 1595 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1596 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; 1596 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1597 #endif 1597 #endif
1598 1598
1599 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", 1599 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
1600 size); 1600 size);
1601 1601
1602 #ifdef CONFIG_IWLWIFI_DEBUG 1602 #ifdef CONFIG_IWLWIFI_DEBUG
1603 if (display) { 1603 if (display) {
1604 if (full_log) 1604 if (full_log)
1605 bufsz = capacity * 48; 1605 bufsz = capacity * 48;
1606 else 1606 else
1607 bufsz = size * 48; 1607 bufsz = size * 48;
1608 *buf = kmalloc(bufsz, GFP_KERNEL); 1608 *buf = kmalloc(bufsz, GFP_KERNEL);
1609 if (!*buf) 1609 if (!*buf)
1610 return -ENOMEM; 1610 return -ENOMEM;
1611 } 1611 }
1612 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { 1612 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1613 /* if uCode has wrapped back to top of log, 1613 /* if uCode has wrapped back to top of log,
1614 * start at the oldest entry, 1614 * start at the oldest entry,
1615 * i.e. the next one that uCode would fill. 1615 * i.e. the next one that uCode would fill.
1616 */ 1616 */
1617 if (num_wraps) 1617 if (num_wraps)
1618 pos = iwl3945_print_event_log(priv, next_entry, 1618 pos = iwl3945_print_event_log(priv, next_entry,
1619 capacity - next_entry, mode, 1619 capacity - next_entry, mode,
1620 pos, buf, bufsz); 1620 pos, buf, bufsz);
1621 1621
1622 /* (then/else) start at top of log */ 1622 /* (then/else) start at top of log */
1623 pos = iwl3945_print_event_log(priv, 0, next_entry, mode, 1623 pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
1624 pos, buf, bufsz); 1624 pos, buf, bufsz);
1625 } else 1625 } else
1626 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1626 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1627 next_entry, size, mode, 1627 next_entry, size, mode,
1628 pos, buf, bufsz); 1628 pos, buf, bufsz);
1629 #else 1629 #else
1630 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, 1630 pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1631 next_entry, size, mode, 1631 next_entry, size, mode,
1632 pos, buf, bufsz); 1632 pos, buf, bufsz);
1633 #endif 1633 #endif
1634 return pos; 1634 return pos;
1635 } 1635 }
1636 1636
1637 static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1637 static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1638 { 1638 {
1639 u32 inta, handled = 0; 1639 u32 inta, handled = 0;
1640 u32 inta_fh; 1640 u32 inta_fh;
1641 unsigned long flags; 1641 unsigned long flags;
1642 #ifdef CONFIG_IWLWIFI_DEBUG 1642 #ifdef CONFIG_IWLWIFI_DEBUG
1643 u32 inta_mask; 1643 u32 inta_mask;
1644 #endif 1644 #endif
1645 1645
1646 spin_lock_irqsave(&priv->lock, flags); 1646 spin_lock_irqsave(&priv->lock, flags);
1647 1647
1648 /* Ack/clear/reset pending uCode interrupts. 1648 /* Ack/clear/reset pending uCode interrupts.
1649 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1649 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1650 * and will clear only when CSR_FH_INT_STATUS gets cleared. */ 1650 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
1651 inta = iwl_read32(priv, CSR_INT); 1651 inta = iwl_read32(priv, CSR_INT);
1652 iwl_write32(priv, CSR_INT, inta); 1652 iwl_write32(priv, CSR_INT, inta);
1653 1653
1654 /* Ack/clear/reset pending flow-handler (DMA) interrupts. 1654 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
1655 * Any new interrupts that happen after this, either while we're 1655 * Any new interrupts that happen after this, either while we're
1656 * in this tasklet, or later, will show up in next ISR/tasklet. */ 1656 * in this tasklet, or later, will show up in next ISR/tasklet. */
1657 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1657 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1658 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); 1658 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
1659 1659
1660 #ifdef CONFIG_IWLWIFI_DEBUG 1660 #ifdef CONFIG_IWLWIFI_DEBUG
1661 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1661 if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
1662 /* just for debug */ 1662 /* just for debug */
1663 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1663 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1664 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 1664 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1665 inta, inta_mask, inta_fh); 1665 inta, inta_mask, inta_fh);
1666 } 1666 }
1667 #endif 1667 #endif
1668 1668
1669 spin_unlock_irqrestore(&priv->lock, flags); 1669 spin_unlock_irqrestore(&priv->lock, flags);
1670 1670
1671 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1671 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1672 * atomic, make sure that inta covers all the interrupts that 1672 * atomic, make sure that inta covers all the interrupts that
1673 * we've discovered, even if FH interrupt came in just after 1673 * we've discovered, even if FH interrupt came in just after
1674 * reading CSR_INT. */ 1674 * reading CSR_INT. */
1675 if (inta_fh & CSR39_FH_INT_RX_MASK) 1675 if (inta_fh & CSR39_FH_INT_RX_MASK)
1676 inta |= CSR_INT_BIT_FH_RX; 1676 inta |= CSR_INT_BIT_FH_RX;
1677 if (inta_fh & CSR39_FH_INT_TX_MASK) 1677 if (inta_fh & CSR39_FH_INT_TX_MASK)
1678 inta |= CSR_INT_BIT_FH_TX; 1678 inta |= CSR_INT_BIT_FH_TX;
1679 1679
1680 /* Now service all interrupt bits discovered above. */ 1680 /* Now service all interrupt bits discovered above. */
1681 if (inta & CSR_INT_BIT_HW_ERR) { 1681 if (inta & CSR_INT_BIT_HW_ERR) {
1682 IWL_ERR(priv, "Hardware error detected. Restarting.\n"); 1682 IWL_ERR(priv, "Hardware error detected. Restarting.\n");
1683 1683
1684 /* Tell the device to stop sending interrupts */ 1684 /* Tell the device to stop sending interrupts */
1685 iwl_disable_interrupts(priv); 1685 iwl_disable_interrupts(priv);
1686 1686
1687 priv->isr_stats.hw++; 1687 priv->isr_stats.hw++;
1688 iwl_irq_handle_error(priv); 1688 iwl_irq_handle_error(priv);
1689 1689
1690 handled |= CSR_INT_BIT_HW_ERR; 1690 handled |= CSR_INT_BIT_HW_ERR;
1691 1691
1692 return; 1692 return;
1693 } 1693 }
1694 1694
1695 #ifdef CONFIG_IWLWIFI_DEBUG 1695 #ifdef CONFIG_IWLWIFI_DEBUG
1696 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1696 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1697 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1697 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1698 if (inta & CSR_INT_BIT_SCD) { 1698 if (inta & CSR_INT_BIT_SCD) {
1699 IWL_DEBUG_ISR(priv, "Scheduler finished transmitting " 1699 IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
1700 "the frame(s).\n"); 1700 "the frame(s).\n");
1701 priv->isr_stats.sch++; 1701 priv->isr_stats.sch++;
1702 } 1702 }
1703 1703
1704 /* Alive notification via Rx interrupt will do the real work */ 1704 /* Alive notification via Rx interrupt will do the real work */
1705 if (inta & CSR_INT_BIT_ALIVE) { 1705 if (inta & CSR_INT_BIT_ALIVE) {
1706 IWL_DEBUG_ISR(priv, "Alive interrupt\n"); 1706 IWL_DEBUG_ISR(priv, "Alive interrupt\n");
1707 priv->isr_stats.alive++; 1707 priv->isr_stats.alive++;
1708 } 1708 }
1709 } 1709 }
1710 #endif 1710 #endif
1711 /* Safely ignore these bits for debug checks below */ 1711 /* Safely ignore these bits for debug checks below */
1712 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1712 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1713 1713
1714 /* Error detected by uCode */ 1714 /* Error detected by uCode */
1715 if (inta & CSR_INT_BIT_SW_ERR) { 1715 if (inta & CSR_INT_BIT_SW_ERR) {
1716 IWL_ERR(priv, "Microcode SW error detected. " 1716 IWL_ERR(priv, "Microcode SW error detected. "
1717 "Restarting 0x%X.\n", inta); 1717 "Restarting 0x%X.\n", inta);
1718 priv->isr_stats.sw++; 1718 priv->isr_stats.sw++;
1719 priv->isr_stats.sw_err = inta; 1719 priv->isr_stats.sw_err = inta;
1720 iwl_irq_handle_error(priv); 1720 iwl_irq_handle_error(priv);
1721 handled |= CSR_INT_BIT_SW_ERR; 1721 handled |= CSR_INT_BIT_SW_ERR;
1722 } 1722 }
1723 1723
1724 /* uCode wakes up after power-down sleep */ 1724 /* uCode wakes up after power-down sleep */
1725 if (inta & CSR_INT_BIT_WAKEUP) { 1725 if (inta & CSR_INT_BIT_WAKEUP) {
1726 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1726 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1727 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1727 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
1728 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1728 iwl_txq_update_write_ptr(priv, &priv->txq[0]);
1729 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1729 iwl_txq_update_write_ptr(priv, &priv->txq[1]);
1730 iwl_txq_update_write_ptr(priv, &priv->txq[2]); 1730 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
1731 iwl_txq_update_write_ptr(priv, &priv->txq[3]); 1731 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
1732 iwl_txq_update_write_ptr(priv, &priv->txq[4]); 1732 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
1733 iwl_txq_update_write_ptr(priv, &priv->txq[5]); 1733 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
1734 1734
1735 priv->isr_stats.wakeup++; 1735 priv->isr_stats.wakeup++;
1736 handled |= CSR_INT_BIT_WAKEUP; 1736 handled |= CSR_INT_BIT_WAKEUP;
1737 } 1737 }
1738 1738
1739 /* All uCode command responses, including Tx command responses, 1739 /* All uCode command responses, including Tx command responses,
1740 * Rx "responses" (frame-received notification), and other 1740 * Rx "responses" (frame-received notification), and other
1741 * notifications from uCode come through here */ 1741 * notifications from uCode come through here */
1742 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1742 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1743 iwl3945_rx_handle(priv); 1743 iwl3945_rx_handle(priv);
1744 priv->isr_stats.rx++; 1744 priv->isr_stats.rx++;
1745 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1745 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1746 } 1746 }
1747 1747
1748 if (inta & CSR_INT_BIT_FH_TX) { 1748 if (inta & CSR_INT_BIT_FH_TX) {
1749 IWL_DEBUG_ISR(priv, "Tx interrupt\n"); 1749 IWL_DEBUG_ISR(priv, "Tx interrupt\n");
1750 priv->isr_stats.tx++; 1750 priv->isr_stats.tx++;
1751 1751
1752 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); 1752 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
1753 iwl_write_direct32(priv, FH39_TCSR_CREDIT 1753 iwl_write_direct32(priv, FH39_TCSR_CREDIT
1754 (FH39_SRVC_CHNL), 0x0); 1754 (FH39_SRVC_CHNL), 0x0);
1755 handled |= CSR_INT_BIT_FH_TX; 1755 handled |= CSR_INT_BIT_FH_TX;
1756 } 1756 }
1757 1757
1758 if (inta & ~handled) { 1758 if (inta & ~handled) {
1759 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1759 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1760 priv->isr_stats.unhandled++; 1760 priv->isr_stats.unhandled++;
1761 } 1761 }
1762 1762
1763 if (inta & ~priv->inta_mask) { 1763 if (inta & ~priv->inta_mask) {
1764 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", 1764 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1765 inta & ~priv->inta_mask); 1765 inta & ~priv->inta_mask);
1766 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); 1766 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
1767 } 1767 }
1768 1768
1769 /* Re-enable all interrupts */ 1769 /* Re-enable all interrupts */
1770 /* only Re-enable if disabled by irq */ 1770 /* only Re-enable if disabled by irq */
1771 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1771 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1772 iwl_enable_interrupts(priv); 1772 iwl_enable_interrupts(priv);
1773 1773
1774 #ifdef CONFIG_IWLWIFI_DEBUG 1774 #ifdef CONFIG_IWLWIFI_DEBUG
1775 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1775 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1776 inta = iwl_read32(priv, CSR_INT); 1776 inta = iwl_read32(priv, CSR_INT);
1777 inta_mask = iwl_read32(priv, CSR_INT_MASK); 1777 inta_mask = iwl_read32(priv, CSR_INT_MASK);
1778 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1778 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1779 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " 1779 IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1780 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1780 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1781 } 1781 }
1782 #endif 1782 #endif
1783 } 1783 }
1784 1784
1785 static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv, 1785 static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
1786 struct ieee80211_vif *vif, 1786 struct ieee80211_vif *vif,
1787 enum ieee80211_band band, 1787 enum ieee80211_band band,
1788 struct iwl3945_scan_channel *scan_ch) 1788 struct iwl3945_scan_channel *scan_ch)
1789 { 1789 {
1790 const struct ieee80211_supported_band *sband; 1790 const struct ieee80211_supported_band *sband;
1791 u16 passive_dwell = 0; 1791 u16 passive_dwell = 0;
1792 u16 active_dwell = 0; 1792 u16 active_dwell = 0;
1793 int added = 0; 1793 int added = 0;
1794 u8 channel = 0; 1794 u8 channel = 0;
1795 1795
1796 sband = iwl_get_hw_mode(priv, band); 1796 sband = iwl_get_hw_mode(priv, band);
1797 if (!sband) { 1797 if (!sband) {
1798 IWL_ERR(priv, "invalid band\n"); 1798 IWL_ERR(priv, "invalid band\n");
1799 return added; 1799 return added;
1800 } 1800 }
1801 1801
1802 active_dwell = iwl_get_active_dwell_time(priv, band, 0); 1802 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
1803 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1803 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1804 1804
1805 if (passive_dwell <= active_dwell) 1805 if (passive_dwell <= active_dwell)
1806 passive_dwell = active_dwell + 1; 1806 passive_dwell = active_dwell + 1;
1807 1807
1808 1808
1809 channel = iwl_get_single_channel_number(priv, band); 1809 channel = iwl_get_single_channel_number(priv, band);
1810 1810
1811 if (channel) { 1811 if (channel) {
1812 scan_ch->channel = channel; 1812 scan_ch->channel = channel;
1813 scan_ch->type = 0; /* passive */ 1813 scan_ch->type = 0; /* passive */
1814 scan_ch->active_dwell = cpu_to_le16(active_dwell); 1814 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1815 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 1815 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1816 /* Set txpower levels to defaults */ 1816 /* Set txpower levels to defaults */
1817 scan_ch->tpc.dsp_atten = 110; 1817 scan_ch->tpc.dsp_atten = 110;
1818 if (band == IEEE80211_BAND_5GHZ) 1818 if (band == IEEE80211_BAND_5GHZ)
1819 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; 1819 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1820 else 1820 else
1821 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); 1821 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1822 added++; 1822 added++;
1823 } else 1823 } else
1824 IWL_ERR(priv, "no valid channel found\n"); 1824 IWL_ERR(priv, "no valid channel found\n");
1825 return added; 1825 return added;
1826 } 1826 }
1827 1827
1828 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, 1828 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1829 enum ieee80211_band band, 1829 enum ieee80211_band band,
1830 u8 is_active, u8 n_probes, 1830 u8 is_active, u8 n_probes,
1831 struct iwl3945_scan_channel *scan_ch, 1831 struct iwl3945_scan_channel *scan_ch,
1832 struct ieee80211_vif *vif) 1832 struct ieee80211_vif *vif)
1833 { 1833 {
1834 struct ieee80211_channel *chan; 1834 struct ieee80211_channel *chan;
1835 const struct ieee80211_supported_band *sband; 1835 const struct ieee80211_supported_band *sband;
1836 const struct iwl_channel_info *ch_info; 1836 const struct iwl_channel_info *ch_info;
1837 u16 passive_dwell = 0; 1837 u16 passive_dwell = 0;
1838 u16 active_dwell = 0; 1838 u16 active_dwell = 0;
1839 int added, i; 1839 int added, i;
1840 1840
1841 sband = iwl_get_hw_mode(priv, band); 1841 sband = iwl_get_hw_mode(priv, band);
1842 if (!sband) 1842 if (!sband)
1843 return 0; 1843 return 0;
1844 1844
1845 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); 1845 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
1846 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); 1846 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1847 1847
1848 if (passive_dwell <= active_dwell) 1848 if (passive_dwell <= active_dwell)
1849 passive_dwell = active_dwell + 1; 1849 passive_dwell = active_dwell + 1;
1850 1850
1851 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { 1851 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1852 chan = priv->scan_request->channels[i]; 1852 chan = priv->scan_request->channels[i];
1853 1853
1854 if (chan->band != band) 1854 if (chan->band != band)
1855 continue; 1855 continue;
1856 1856
1857 scan_ch->channel = chan->hw_value; 1857 scan_ch->channel = chan->hw_value;
1858 1858
1859 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); 1859 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
1860 if (!is_channel_valid(ch_info)) { 1860 if (!is_channel_valid(ch_info)) {
1861 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", 1861 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
1862 scan_ch->channel); 1862 scan_ch->channel);
1863 continue; 1863 continue;
1864 } 1864 }
1865 1865
1866 scan_ch->active_dwell = cpu_to_le16(active_dwell); 1866 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1867 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 1867 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1868 /* If passive, set up for auto-switch 1868 /* If passive, set up for auto-switch
1869 * and use long active_dwell time. 1869 * and use long active_dwell time.
1870 */ 1870 */
1871 if (!is_active || is_channel_passive(ch_info) || 1871 if (!is_active || is_channel_passive(ch_info) ||
1872 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { 1872 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
1873 scan_ch->type = 0; /* passive */ 1873 scan_ch->type = 0; /* passive */
1874 if (IWL_UCODE_API(priv->ucode_ver) == 1) 1874 if (IWL_UCODE_API(priv->ucode_ver) == 1)
1875 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); 1875 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
1876 } else { 1876 } else {
1877 scan_ch->type = 1; /* active */ 1877 scan_ch->type = 1; /* active */
1878 } 1878 }
1879 1879
1880 /* Set direct probe bits. These may be used both for active 1880 /* Set direct probe bits. These may be used both for active
1881 * scan channels (probes get sent right away), 1881 * scan channels (probes get sent right away),
1882 * or for passive channels (probes get sent only after 1882 * or for passive channels (probes get sent only after
1883 * hearing a clear Rx packet). */ 1883 * hearing a clear Rx packet). */
1884 if (IWL_UCODE_API(priv->ucode_ver) >= 2) { 1884 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
1885 if (n_probes) 1885 if (n_probes)
1886 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); 1886 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1887 } else { 1887 } else {
1888 /* uCode v1 does not allow setting direct probe bits on 1888 /* uCode v1 does not allow setting direct probe bits on
1889 * a passive channel. */ 1889 * a passive channel. */
1890 if ((scan_ch->type & 1) && n_probes) 1890 if ((scan_ch->type & 1) && n_probes)
1891 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); 1891 scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
1892 } 1892 }
1893 1893
1894 /* Set txpower levels to defaults */ 1894 /* Set txpower levels to defaults */
1895 scan_ch->tpc.dsp_atten = 110; 1895 scan_ch->tpc.dsp_atten = 110;
1896 /* scan_pwr_info->tpc.dsp_atten; */ 1896 /* scan_pwr_info->tpc.dsp_atten; */
1897 1897
1898 /*scan_pwr_info->tpc.tx_gain; */ 1898 /*scan_pwr_info->tpc.tx_gain; */
1899 if (band == IEEE80211_BAND_5GHZ) 1899 if (band == IEEE80211_BAND_5GHZ)
1900 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; 1900 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
1901 else { 1901 else {
1902 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); 1902 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
1903 /* NOTE: if we were doing 6Mb OFDM for scans we'd use 1903 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1904 * power level: 1904 * power level:
1905 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; 1905 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
1906 */ 1906 */
1907 } 1907 }
1908 1908
1909 IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n", 1909 IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
1910 scan_ch->channel, 1910 scan_ch->channel,
1911 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", 1911 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
1912 (scan_ch->type & 1) ? 1912 (scan_ch->type & 1) ?
1913 active_dwell : passive_dwell); 1913 active_dwell : passive_dwell);
1914 1914
1915 scan_ch++; 1915 scan_ch++;
1916 added++; 1916 added++;
1917 } 1917 }
1918 1918
1919 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); 1919 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1920 return added; 1920 return added;
1921 } 1921 }
1922 1922
1923 static void iwl3945_init_hw_rates(struct iwl_priv *priv, 1923 static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1924 struct ieee80211_rate *rates) 1924 struct ieee80211_rate *rates)
1925 { 1925 {
1926 int i; 1926 int i;
1927 1927
1928 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { 1928 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1929 rates[i].bitrate = iwl3945_rates[i].ieee * 5; 1929 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1930 rates[i].hw_value = i; /* Rate scaling will work on indexes */ 1930 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1931 rates[i].hw_value_short = i; 1931 rates[i].hw_value_short = i;
1932 rates[i].flags = 0; 1932 rates[i].flags = 0;
1933 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { 1933 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
1934 /* 1934 /*
1935 * If CCK != 1M then set short preamble rate flag. 1935 * If CCK != 1M then set short preamble rate flag.
1936 */ 1936 */
1937 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? 1937 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
1938 0 : IEEE80211_RATE_SHORT_PREAMBLE; 1938 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1939 } 1939 }
1940 } 1940 }
1941 } 1941 }
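
In iwl3945_init_hw_rates() the .ieee rate value is the 802.11 supported-rates encoding in 500 kb/s units, while mac80211 bitrates are expressed in 100 kb/s units, hence the multiply by 5. A quick check of that conversion with made-up table entries (not the driver's iwl3945_rates table):

#include <stdio.h>

struct rate { unsigned char ieee; const char *name; };

int main(void)
{
	/* illustrative entries: .ieee is the rate in 500 kb/s units */
	static const struct rate rates[] = {
		{ 2,   "1 Mb/s"  },	/* CCK 1M   */
		{ 22,  "11 Mb/s" },	/* CCK 11M  */
		{ 108, "54 Mb/s" },	/* OFDM 54M */
	};
	unsigned i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%-8s -> bitrate %u (x100 kb/s)\n",
		       rates[i].name, rates[i].ieee * 5);
	return 0;
}
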
1942 1942
1943 /****************************************************************************** 1943 /******************************************************************************
1944 * 1944 *
1945 * uCode download functions 1945 * uCode download functions
1946 * 1946 *
1947 ******************************************************************************/ 1947 ******************************************************************************/
1948 1948
1949 static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) 1949 static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
1950 { 1950 {
1951 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 1951 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1952 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 1952 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1953 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 1953 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1954 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); 1954 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1955 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); 1955 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1956 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 1956 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1957 } 1957 }
1958 1958
1959 /** 1959 /**
1960 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, 1960 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
1961 * looking at all data. 1961 * looking at all data.
1962 */ 1962 */
1963 static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len) 1963 static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
1964 { 1964 {
1965 u32 val; 1965 u32 val;
1966 u32 save_len = len; 1966 u32 save_len = len;
1967 int rc = 0; 1967 int rc = 0;
1968 u32 errcnt; 1968 u32 errcnt;
1969 1969
1970 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); 1970 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1971 1971
1972 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 1972 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1973 IWL39_RTC_INST_LOWER_BOUND); 1973 IWL39_RTC_INST_LOWER_BOUND);
1974 1974
1975 errcnt = 0; 1975 errcnt = 0;
1976 for (; len > 0; len -= sizeof(u32), image++) { 1976 for (; len > 0; len -= sizeof(u32), image++) {
1977 /* read data comes through single port, auto-incr addr */ 1977 /* read data comes through single port, auto-incr addr */
1978 /* NOTE: Use the debugless read so we don't flood kernel log 1978 /* NOTE: Use the debugless read so we don't flood kernel log
1979 * if IWL_DL_IO is set */ 1979 * if IWL_DL_IO is set */
1980 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 1980 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1981 if (val != le32_to_cpu(*image)) { 1981 if (val != le32_to_cpu(*image)) {
1982 IWL_ERR(priv, "uCode INST section is invalid at " 1982 IWL_ERR(priv, "uCode INST section is invalid at "
1983 "offset 0x%x, is 0x%x, s/b 0x%x\n", 1983 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1984 save_len - len, val, le32_to_cpu(*image)); 1984 save_len - len, val, le32_to_cpu(*image));
1985 rc = -EIO; 1985 rc = -EIO;
1986 errcnt++; 1986 errcnt++;
1987 if (errcnt >= 20) 1987 if (errcnt >= 20)
1988 break; 1988 break;
1989 } 1989 }
1990 } 1990 }
1991 1991
1992 1992
1993 if (!errcnt) 1993 if (!errcnt)
1994 IWL_DEBUG_INFO(priv, 1994 IWL_DEBUG_INFO(priv,
1995 "ucode image in INSTRUCTION memory is good\n"); 1995 "ucode image in INSTRUCTION memory is good\n");
1996 1996
1997 return rc; 1997 return rc;
1998 } 1998 }
1999 1999
2000 2000
2001 /** 2001 /**
2002 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host, 2002 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
2003 * using sample data 100 bytes apart. If these sample points are good, 2003 * using sample data 100 bytes apart. If these sample points are good,
2004 * it's a pretty good bet that everything between them is good, too. 2004 * it's a pretty good bet that everything between them is good, too.
2005 */ 2005 */
2006 static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) 2006 static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
2007 { 2007 {
2008 u32 val; 2008 u32 val;
2009 int rc = 0; 2009 int rc = 0;
2010 u32 errcnt = 0; 2010 u32 errcnt = 0;
2011 u32 i; 2011 u32 i;
2012 2012
2013 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); 2013 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
2014 2014
2015 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { 2015 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
2016 /* read data comes through single port, auto-incr addr */ 2016 /* read data comes through single port, auto-incr addr */
2017 /* NOTE: Use the debugless read so we don't flood kernel log 2017 /* NOTE: Use the debugless read so we don't flood kernel log
2018 * if IWL_DL_IO is set */ 2018 * if IWL_DL_IO is set */
2019 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 2019 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2020 i + IWL39_RTC_INST_LOWER_BOUND); 2020 i + IWL39_RTC_INST_LOWER_BOUND);
2021 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 2021 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
2022 if (val != le32_to_cpu(*image)) { 2022 if (val != le32_to_cpu(*image)) {
2023 #if 0 /* Enable this if you want to see details */ 2023 #if 0 /* Enable this if you want to see details */
2024 IWL_ERR(priv, "uCode INST section is invalid at " 2024 IWL_ERR(priv, "uCode INST section is invalid at "
2025 "offset 0x%x, is 0x%x, s/b 0x%x\n", 2025 "offset 0x%x, is 0x%x, s/b 0x%x\n",
2026 i, val, *image); 2026 i, val, *image);
2027 #endif 2027 #endif
2028 rc = -EIO; 2028 rc = -EIO;
2029 errcnt++; 2029 errcnt++;
2030 if (errcnt >= 3) 2030 if (errcnt >= 3)
2031 break; 2031 break;
2032 } 2032 }
2033 } 2033 }
2034 2034
2035 return rc; 2035 return rc;
2036 } 2036 }
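
iwl3945_verify_inst_sparse() above trusts spot checks: compare one 32-bit word every 100 bytes and give up after a few mismatches. A host-only sketch of that sampling strategy, where read_device_word() is a hypothetical stand-in for the HBUS_TARG_MEM_RDAT device reads and the le32_to_cpu conversion is omitted for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the device read: fetch a word from a second buffer */
static uint32_t read_device_word(const uint8_t *dev, uint32_t byte_off)
{
	uint32_t v;
	memcpy(&v, dev + byte_off, sizeof(v));
	return v;
}

static int verify_sparse(const uint8_t *dev, const uint32_t *image, uint32_t len)
{
	uint32_t i, errcnt = 0;
	int rc = 0;

	/* sample one 32-bit word every 100 bytes, as the driver does */
	for (i = 0; i < len; i += 100, image += 100 / sizeof(uint32_t)) {
		if (read_device_word(dev, i) != *image) {
			rc = -1;
			if (++errcnt >= 3)
				break;
		}
	}
	return rc;
}

int main(void)
{
	uint32_t image[64] = { 0 };
	uint8_t dev[sizeof(image)];

	image[25] = 0xabcd;		/* word at byte offset 100 */
	memcpy(dev, image, sizeof(image));

	printf("match: %d\n", verify_sparse(dev, image, sizeof(image)));
	dev[100] ^= 0xff;		/* corrupt the sampled word */
	printf("after corruption: %d\n",
	       verify_sparse(dev, image, sizeof(image)));
	return 0;
}
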
2037 2037
2038 2038
2039 /** 2039 /**
2040 * iwl3945_verify_ucode - determine which instruction image is in SRAM, 2040 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
2041 * and verify its contents 2041 * and verify its contents
2042 */ 2042 */
2043 static int iwl3945_verify_ucode(struct iwl_priv *priv) 2043 static int iwl3945_verify_ucode(struct iwl_priv *priv)
2044 { 2044 {
2045 __le32 *image; 2045 __le32 *image;
2046 u32 len; 2046 u32 len;
2047 int rc = 0; 2047 int rc = 0;
2048 2048
2049 /* Try bootstrap */ 2049 /* Try bootstrap */
2050 image = (__le32 *)priv->ucode_boot.v_addr; 2050 image = (__le32 *)priv->ucode_boot.v_addr;
2051 len = priv->ucode_boot.len; 2051 len = priv->ucode_boot.len;
2052 rc = iwl3945_verify_inst_sparse(priv, image, len); 2052 rc = iwl3945_verify_inst_sparse(priv, image, len);
2053 if (rc == 0) { 2053 if (rc == 0) {
2054 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); 2054 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
2055 return 0; 2055 return 0;
2056 } 2056 }
2057 2057
2058 /* Try initialize */ 2058 /* Try initialize */
2059 image = (__le32 *)priv->ucode_init.v_addr; 2059 image = (__le32 *)priv->ucode_init.v_addr;
2060 len = priv->ucode_init.len; 2060 len = priv->ucode_init.len;
2061 rc = iwl3945_verify_inst_sparse(priv, image, len); 2061 rc = iwl3945_verify_inst_sparse(priv, image, len);
2062 if (rc == 0) { 2062 if (rc == 0) {
2063 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); 2063 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
2064 return 0; 2064 return 0;
2065 } 2065 }
2066 2066
2067 /* Try runtime/protocol */ 2067 /* Try runtime/protocol */
2068 image = (__le32 *)priv->ucode_code.v_addr; 2068 image = (__le32 *)priv->ucode_code.v_addr;
2069 len = priv->ucode_code.len; 2069 len = priv->ucode_code.len;
2070 rc = iwl3945_verify_inst_sparse(priv, image, len); 2070 rc = iwl3945_verify_inst_sparse(priv, image, len);
2071 if (rc == 0) { 2071 if (rc == 0) {
2072 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); 2072 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
2073 return 0; 2073 return 0;
2074 } 2074 }
2075 2075
2076 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); 2076 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
2077 2077
2078 /* Since nothing seems to match, show first several data entries in 2078 /* Since nothing seems to match, show first several data entries in
2079 * instruction SRAM, so maybe visual inspection will give a clue. 2079 * instruction SRAM, so maybe visual inspection will give a clue.
2080 * Selection of bootstrap image (vs. other images) is arbitrary. */ 2080 * Selection of bootstrap image (vs. other images) is arbitrary. */
2081 image = (__le32 *)priv->ucode_boot.v_addr; 2081 image = (__le32 *)priv->ucode_boot.v_addr;
2082 len = priv->ucode_boot.len; 2082 len = priv->ucode_boot.len;
2083 rc = iwl3945_verify_inst_full(priv, image, len); 2083 rc = iwl3945_verify_inst_full(priv, image, len);
2084 2084
2085 return rc; 2085 return rc;
2086 } 2086 }
2087 2087
2088 static void iwl3945_nic_start(struct iwl_priv *priv) 2088 static void iwl3945_nic_start(struct iwl_priv *priv)
2089 { 2089 {
2090 /* Remove all resets to allow NIC to operate */ 2090 /* Remove all resets to allow NIC to operate */
2091 iwl_write32(priv, CSR_RESET, 0); 2091 iwl_write32(priv, CSR_RESET, 0);
2092 } 2092 }
2093 2093
2094 #define IWL3945_UCODE_GET(item) \ 2094 #define IWL3945_UCODE_GET(item) \
2095 static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ 2095 static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
2096 { \ 2096 { \
2097 return le32_to_cpu(ucode->u.v1.item); \ 2097 return le32_to_cpu(ucode->u.v1.item); \
2098 } 2098 }
2099 2099
2100 static u32 iwl3945_ucode_get_header_size(u32 api_ver) 2100 static u32 iwl3945_ucode_get_header_size(u32 api_ver)
2101 { 2101 {
2102 return 24; 2102 return 24;
2103 } 2103 }
2104 2104
2105 static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) 2105 static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
2106 { 2106 {
2107 return (u8 *) ucode->u.v1.data; 2107 return (u8 *) ucode->u.v1.data;
2108 } 2108 }
2109 2109
2110 IWL3945_UCODE_GET(inst_size); 2110 IWL3945_UCODE_GET(inst_size);
2111 IWL3945_UCODE_GET(data_size); 2111 IWL3945_UCODE_GET(data_size);
2112 IWL3945_UCODE_GET(init_size); 2112 IWL3945_UCODE_GET(init_size);
2113 IWL3945_UCODE_GET(init_data_size); 2113 IWL3945_UCODE_GET(init_data_size);
2114 IWL3945_UCODE_GET(boot_size); 2114 IWL3945_UCODE_GET(boot_size);
2115 2115
2116 /** 2116 /**
2117 * iwl3945_read_ucode - Read uCode images from disk file. 2117 * iwl3945_read_ucode - Read uCode images from disk file.
2118 * 2118 *
2119 * Copy into buffers for card to fetch via bus-mastering 2119 * Copy into buffers for card to fetch via bus-mastering
2120 */ 2120 */
2121 static int iwl3945_read_ucode(struct iwl_priv *priv) 2121 static int iwl3945_read_ucode(struct iwl_priv *priv)
2122 { 2122 {
2123 const struct iwl_ucode_header *ucode; 2123 const struct iwl_ucode_header *ucode;
2124 int ret = -EINVAL, index; 2124 int ret = -EINVAL, index;
2125 const struct firmware *ucode_raw; 2125 const struct firmware *ucode_raw;
2126 /* firmware file name contains uCode/driver compatibility version */ 2126 /* firmware file name contains uCode/driver compatibility version */
2127 const char *name_pre = priv->cfg->fw_name_pre; 2127 const char *name_pre = priv->cfg->fw_name_pre;
2128 const unsigned int api_max = priv->cfg->ucode_api_max; 2128 const unsigned int api_max = priv->cfg->ucode_api_max;
2129 const unsigned int api_min = priv->cfg->ucode_api_min; 2129 const unsigned int api_min = priv->cfg->ucode_api_min;
2130 char buf[25]; 2130 char buf[25];
2131 u8 *src; 2131 u8 *src;
2132 size_t len; 2132 size_t len;
2133 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; 2133 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
2134 2134
2135 /* Ask kernel firmware_class module to get the boot firmware off disk. 2135 /* Ask kernel firmware_class module to get the boot firmware off disk.
2136 * request_firmware() is synchronous, file is in memory on return. */ 2136 * request_firmware() is synchronous, file is in memory on return. */
2137 for (index = api_max; index >= api_min; index--) { 2137 for (index = api_max; index >= api_min; index--) {
2138 sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); 2138 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
2139 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); 2139 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
2140 if (ret < 0) { 2140 if (ret < 0) {
2141 IWL_ERR(priv, "%s firmware file req failed: %d\n", 2141 IWL_ERR(priv, "%s firmware file req failed: %d\n",
2142 buf, ret); 2142 buf, ret);
2143 if (ret == -ENOENT) 2143 if (ret == -ENOENT)
2144 continue; 2144 continue;
2145 else 2145 else
2146 goto error; 2146 goto error;
2147 } else { 2147 } else {
2148 if (index < api_max) 2148 if (index < api_max)
2149 IWL_ERR(priv, "Loaded firmware %s, " 2149 IWL_ERR(priv, "Loaded firmware %s, "
2150 "which is deprecated. " 2150 "which is deprecated. "
2151 " Please use API v%u instead.\n", 2151 " Please use API v%u instead.\n",
2152 buf, api_max); 2152 buf, api_max);
2153 IWL_DEBUG_INFO(priv, "Got firmware '%s' file " 2153 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
2154 "(%zd bytes) from disk\n", 2154 "(%zd bytes) from disk\n",
2155 buf, ucode_raw->size); 2155 buf, ucode_raw->size);
2156 break; 2156 break;
2157 } 2157 }
2158 } 2158 }
2159 2159
2160 if (ret < 0) 2160 if (ret < 0)
2161 goto error; 2161 goto error;
2162 2162
2163 /* Make sure that we got at least our header! */ 2163 /* Make sure that we got at least our header! */
2164 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) { 2164 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
2165 IWL_ERR(priv, "File size way too small!\n"); 2165 IWL_ERR(priv, "File size way too small!\n");
2166 ret = -EINVAL; 2166 ret = -EINVAL;
2167 goto err_release; 2167 goto err_release;
2168 } 2168 }
2169 2169
2170 /* Data from ucode file: header followed by uCode images */ 2170 /* Data from ucode file: header followed by uCode images */
2171 ucode = (struct iwl_ucode_header *)ucode_raw->data; 2171 ucode = (struct iwl_ucode_header *)ucode_raw->data;
2172 2172
2173 priv->ucode_ver = le32_to_cpu(ucode->ver); 2173 priv->ucode_ver = le32_to_cpu(ucode->ver);
2174 api_ver = IWL_UCODE_API(priv->ucode_ver); 2174 api_ver = IWL_UCODE_API(priv->ucode_ver);
2175 inst_size = iwl3945_ucode_get_inst_size(ucode); 2175 inst_size = iwl3945_ucode_get_inst_size(ucode);
2176 data_size = iwl3945_ucode_get_data_size(ucode); 2176 data_size = iwl3945_ucode_get_data_size(ucode);
2177 init_size = iwl3945_ucode_get_init_size(ucode); 2177 init_size = iwl3945_ucode_get_init_size(ucode);
2178 init_data_size = iwl3945_ucode_get_init_data_size(ucode); 2178 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
2179 boot_size = iwl3945_ucode_get_boot_size(ucode); 2179 boot_size = iwl3945_ucode_get_boot_size(ucode);
2180 src = iwl3945_ucode_get_data(ucode); 2180 src = iwl3945_ucode_get_data(ucode);
2181 2181
2182 /* api_ver should match the api version forming part of the 2182 /* api_ver should match the api version forming part of the
2183 * firmware filename ... but we don't check for that and only rely 2183 * firmware filename ... but we don't check for that and only rely
2184 * on the API version read from firmware header from here on forward */ 2184 * on the API version read from firmware header from here on forward */
2185 2185
2186 if (api_ver < api_min || api_ver > api_max) { 2186 if (api_ver < api_min || api_ver > api_max) {
2187 IWL_ERR(priv, "Driver unable to support your firmware API. " 2187 IWL_ERR(priv, "Driver unable to support your firmware API. "
2188 "Driver supports v%u, firmware is v%u.\n", 2188 "Driver supports v%u, firmware is v%u.\n",
2189 api_max, api_ver); 2189 api_max, api_ver);
2190 priv->ucode_ver = 0; 2190 priv->ucode_ver = 0;
2191 ret = -EINVAL; 2191 ret = -EINVAL;
2192 goto err_release; 2192 goto err_release;
2193 } 2193 }
2194 if (api_ver != api_max) 2194 if (api_ver != api_max)
2195 IWL_ERR(priv, "Firmware has old API version. Expected %u, " 2195 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
2196 "got %u. New firmware can be obtained " 2196 "got %u. New firmware can be obtained "
2197 "from http://www.intellinuxwireless.org.\n", 2197 "from http://www.intellinuxwireless.org.\n",
2198 api_max, api_ver); 2198 api_max, api_ver);
2199 2199
2200 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", 2200 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
2201 IWL_UCODE_MAJOR(priv->ucode_ver), 2201 IWL_UCODE_MAJOR(priv->ucode_ver),
2202 IWL_UCODE_MINOR(priv->ucode_ver), 2202 IWL_UCODE_MINOR(priv->ucode_ver),
2203 IWL_UCODE_API(priv->ucode_ver), 2203 IWL_UCODE_API(priv->ucode_ver),
2204 IWL_UCODE_SERIAL(priv->ucode_ver)); 2204 IWL_UCODE_SERIAL(priv->ucode_ver));
2205 2205
2206 snprintf(priv->hw->wiphy->fw_version, 2206 snprintf(priv->hw->wiphy->fw_version,
2207 sizeof(priv->hw->wiphy->fw_version), 2207 sizeof(priv->hw->wiphy->fw_version),
2208 "%u.%u.%u.%u", 2208 "%u.%u.%u.%u",
2209 IWL_UCODE_MAJOR(priv->ucode_ver), 2209 IWL_UCODE_MAJOR(priv->ucode_ver),
2210 IWL_UCODE_MINOR(priv->ucode_ver), 2210 IWL_UCODE_MINOR(priv->ucode_ver),
2211 IWL_UCODE_API(priv->ucode_ver), 2211 IWL_UCODE_API(priv->ucode_ver),
2212 IWL_UCODE_SERIAL(priv->ucode_ver)); 2212 IWL_UCODE_SERIAL(priv->ucode_ver));
2213 2213
2214 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", 2214 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
2215 priv->ucode_ver); 2215 priv->ucode_ver);
2216 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", 2216 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
2217 inst_size); 2217 inst_size);
2218 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", 2218 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
2219 data_size); 2219 data_size);
2220 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", 2220 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
2221 init_size); 2221 init_size);
2222 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", 2222 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
2223 init_data_size); 2223 init_data_size);
2224 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", 2224 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
2225 boot_size); 2225 boot_size);
2226 2226
2227 2227
2228 /* Verify size of file vs. image size info in file's header */ 2228 /* Verify size of file vs. image size info in file's header */
2229 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) + 2229 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
2230 inst_size + data_size + init_size + 2230 inst_size + data_size + init_size +
2231 init_data_size + boot_size) { 2231 init_data_size + boot_size) {
2232 2232
2233 IWL_DEBUG_INFO(priv, 2233 IWL_DEBUG_INFO(priv,
2234 "uCode file size %zd does not match expected size\n", 2234 "uCode file size %zd does not match expected size\n",
2235 ucode_raw->size); 2235 ucode_raw->size);
2236 ret = -EINVAL; 2236 ret = -EINVAL;
2237 goto err_release; 2237 goto err_release;
2238 } 2238 }
2239 2239
2240 /* Verify that uCode images will fit in card's SRAM */ 2240 /* Verify that uCode images will fit in card's SRAM */
2241 if (inst_size > IWL39_MAX_INST_SIZE) { 2241 if (inst_size > IWL39_MAX_INST_SIZE) {
2242 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in SRAM\n", 2242 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in SRAM\n",
2243 inst_size); 2243 inst_size);
2244 ret = -EINVAL; 2244 ret = -EINVAL;
2245 goto err_release; 2245 goto err_release;
2246 } 2246 }
2247 2247
2248 if (data_size > IWL39_MAX_DATA_SIZE) { 2248 if (data_size > IWL39_MAX_DATA_SIZE) {
2249 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in SRAM\n", 2249 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in SRAM\n",
2250 data_size); 2250 data_size);
2251 ret = -EINVAL; 2251 ret = -EINVAL;
2252 goto err_release; 2252 goto err_release;
2253 } 2253 }
2254 if (init_size > IWL39_MAX_INST_SIZE) { 2254 if (init_size > IWL39_MAX_INST_SIZE) {
2255 IWL_DEBUG_INFO(priv, 2255 IWL_DEBUG_INFO(priv,
2256 "uCode init instr len %d too large to fit in\n", 2256 "uCode init instr len %d too large to fit in\n",
2257 init_size); 2257 init_size);
2258 ret = -EINVAL; 2258 ret = -EINVAL;
2259 goto err_release; 2259 goto err_release;
2260 } 2260 }
2261 if (init_data_size > IWL39_MAX_DATA_SIZE) { 2261 if (init_data_size > IWL39_MAX_DATA_SIZE) {
2262 IWL_DEBUG_INFO(priv, 2262 IWL_DEBUG_INFO(priv,
2263 "uCode init data len %d too large to fit in\n", 2263 "uCode init data len %d too large to fit in\n",
2264 init_data_size); 2264 init_data_size);
2265 ret = -EINVAL; 2265 ret = -EINVAL;
2266 goto err_release; 2266 goto err_release;
2267 } 2267 }
2268 if (boot_size > IWL39_MAX_BSM_SIZE) { 2268 if (boot_size > IWL39_MAX_BSM_SIZE) {
2269 IWL_DEBUG_INFO(priv, 2269 IWL_DEBUG_INFO(priv,
2270 "uCode boot instr len %d too large to fit in\n", 2270 "uCode boot instr len %d too large to fit in\n",
2271 boot_size); 2271 boot_size);
2272 ret = -EINVAL; 2272 ret = -EINVAL;
2273 goto err_release; 2273 goto err_release;
2274 } 2274 }
2275 2275
2276 /* Allocate ucode buffers for card's bus-master loading ... */ 2276 /* Allocate ucode buffers for card's bus-master loading ... */
2277 2277
2278 /* Runtime instructions and 2 copies of data: 2278 /* Runtime instructions and 2 copies of data:
2279 * 1) unmodified from disk 2279 * 1) unmodified from disk
2280 * 2) backup cache for save/restore during power-downs */ 2280 * 2) backup cache for save/restore during power-downs */
2281 priv->ucode_code.len = inst_size; 2281 priv->ucode_code.len = inst_size;
2282 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); 2282 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2283 2283
2284 priv->ucode_data.len = data_size; 2284 priv->ucode_data.len = data_size;
2285 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); 2285 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2286 2286
2287 priv->ucode_data_backup.len = data_size; 2287 priv->ucode_data_backup.len = data_size;
2288 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 2288 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2289 2289
2290 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || 2290 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2291 !priv->ucode_data_backup.v_addr) 2291 !priv->ucode_data_backup.v_addr)
2292 goto err_pci_alloc; 2292 goto err_pci_alloc;
2293 2293
2294 /* Initialization instructions and data */ 2294 /* Initialization instructions and data */
2295 if (init_size && init_data_size) { 2295 if (init_size && init_data_size) {
2296 priv->ucode_init.len = init_size; 2296 priv->ucode_init.len = init_size;
2297 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); 2297 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2298 2298
2299 priv->ucode_init_data.len = init_data_size; 2299 priv->ucode_init_data.len = init_data_size;
2300 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); 2300 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2301 2301
2302 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) 2302 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2303 goto err_pci_alloc; 2303 goto err_pci_alloc;
2304 } 2304 }
2305 2305
2306 /* Bootstrap (instructions only, no data) */ 2306 /* Bootstrap (instructions only, no data) */
2307 if (boot_size) { 2307 if (boot_size) {
2308 priv->ucode_boot.len = boot_size; 2308 priv->ucode_boot.len = boot_size;
2309 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); 2309 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2310 2310
2311 if (!priv->ucode_boot.v_addr) 2311 if (!priv->ucode_boot.v_addr)
2312 goto err_pci_alloc; 2312 goto err_pci_alloc;
2313 } 2313 }
2314 2314
2315 /* Copy images into buffers for card's bus-master reads ... */ 2315 /* Copy images into buffers for card's bus-master reads ... */
2316 2316
2317 /* Runtime instructions (first block of data in file) */ 2317 /* Runtime instructions (first block of data in file) */
2318 len = inst_size; 2318 len = inst_size;
2319 IWL_DEBUG_INFO(priv, 2319 IWL_DEBUG_INFO(priv,
2320 "Copying (but not loading) uCode instr len %zd\n", len); 2320 "Copying (but not loading) uCode instr len %zd\n", len);
2321 memcpy(priv->ucode_code.v_addr, src, len); 2321 memcpy(priv->ucode_code.v_addr, src, len);
2322 src += len; 2322 src += len;
2323 2323
2324 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", 2324 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2325 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); 2325 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
2326 2326
2327 /* Runtime data (2nd block) 2327 /* Runtime data (2nd block)
2328 * NOTE: Copy into backup buffer will be done in iwl3945_up() */ 2328 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
2329 len = data_size; 2329 len = data_size;
2330 IWL_DEBUG_INFO(priv, 2330 IWL_DEBUG_INFO(priv,
2331 "Copying (but not loading) uCode data len %zd\n", len); 2331 "Copying (but not loading) uCode data len %zd\n", len);
2332 memcpy(priv->ucode_data.v_addr, src, len); 2332 memcpy(priv->ucode_data.v_addr, src, len);
2333 memcpy(priv->ucode_data_backup.v_addr, src, len); 2333 memcpy(priv->ucode_data_backup.v_addr, src, len);
2334 src += len; 2334 src += len;
2335 2335
2336 /* Initialization instructions (3rd block) */ 2336 /* Initialization instructions (3rd block) */
2337 if (init_size) { 2337 if (init_size) {
2338 len = init_size; 2338 len = init_size;
2339 IWL_DEBUG_INFO(priv, 2339 IWL_DEBUG_INFO(priv,
2340 "Copying (but not loading) init instr len %zd\n", len); 2340 "Copying (but not loading) init instr len %zd\n", len);
2341 memcpy(priv->ucode_init.v_addr, src, len); 2341 memcpy(priv->ucode_init.v_addr, src, len);
2342 src += len; 2342 src += len;
2343 } 2343 }
2344 2344
2345 /* Initialization data (4th block) */ 2345 /* Initialization data (4th block) */
2346 if (init_data_size) { 2346 if (init_data_size) {
2347 len = init_data_size; 2347 len = init_data_size;
2348 IWL_DEBUG_INFO(priv, 2348 IWL_DEBUG_INFO(priv,
2349 "Copying (but not loading) init data len %zd\n", len); 2349 "Copying (but not loading) init data len %zd\n", len);
2350 memcpy(priv->ucode_init_data.v_addr, src, len); 2350 memcpy(priv->ucode_init_data.v_addr, src, len);
2351 src += len; 2351 src += len;
2352 } 2352 }
2353 2353
2354 /* Bootstrap instructions (5th block) */ 2354 /* Bootstrap instructions (5th block) */
2355 len = boot_size; 2355 len = boot_size;
2356 IWL_DEBUG_INFO(priv, 2356 IWL_DEBUG_INFO(priv,
2357 "Copying (but not loading) boot instr len %zd\n", len); 2357 "Copying (but not loading) boot instr len %zd\n", len);
2358 memcpy(priv->ucode_boot.v_addr, src, len); 2358 memcpy(priv->ucode_boot.v_addr, src, len);
2359 2359
2360 /* We have our copies now, allow the OS to release its copies */ 2360 /* We have our copies now, allow the OS to release its copies */
2361 release_firmware(ucode_raw); 2361 release_firmware(ucode_raw);
2362 return 0; 2362 return 0;
2363 2363
2364 err_pci_alloc: 2364 err_pci_alloc:
2365 IWL_ERR(priv, "failed to allocate pci memory\n"); 2365 IWL_ERR(priv, "failed to allocate pci memory\n");
2366 ret = -ENOMEM; 2366 ret = -ENOMEM;
2367 iwl3945_dealloc_ucode_pci(priv); 2367 iwl3945_dealloc_ucode_pci(priv);
2368 2368
2369 err_release: 2369 err_release:
2370 release_firmware(ucode_raw); 2370 release_firmware(ucode_raw);
2371 2371
2372 error: 2372 error:
2373 return ret; 2373 return ret;
2374 } 2374 }
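/*
 * Sketch of the on-disk uCode image layout that the function above parses;
 * the block order and size fields follow the reads made there, but the
 * diagram itself is an illustration, not driver source:
 *
 *   +------------------------------+
 *   | header (ver + the five sizes)|  iwl3945_ucode_get_header_size(api_ver)
 *   +------------------------------+
 *   | runtime instructions         |  inst_size bytes
 *   | runtime data                 |  data_size bytes
 *   | init instructions            |  init_size bytes (may be absent)
 *   | init data                    |  init_data_size bytes (may be absent)
 *   | bootstrap instructions       |  boot_size bytes
 *   +------------------------------+
 *
 * The total file size must equal the header size plus the sum of the five
 * block sizes, which is exactly what the size check above enforces.
 */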
2375 2375
2376 2376
2377 /** 2377 /**
2378 * iwl3945_set_ucode_ptrs - Set uCode address location 2378 * iwl3945_set_ucode_ptrs - Set uCode address location
2379 * 2379 *
2380 * Tell initialization uCode where to find runtime uCode. 2380 * Tell initialization uCode where to find runtime uCode.
2381 * 2381 *
2382 * BSM registers initially contain pointers to initialization uCode. 2382 * BSM registers initially contain pointers to initialization uCode.
2383 * We need to replace them to load runtime uCode inst and data, 2383 * We need to replace them to load runtime uCode inst and data,
2384 * and to save runtime data when powering down. 2384 * and to save runtime data when powering down.
2385 */ 2385 */
2386 static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) 2386 static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2387 { 2387 {
2388 dma_addr_t pinst; 2388 dma_addr_t pinst;
2389 dma_addr_t pdata; 2389 dma_addr_t pdata;
2390 2390
2391 /* bits 31:0 for 3945 */ 2391 /* bits 31:0 for 3945 */
2392 pinst = priv->ucode_code.p_addr; 2392 pinst = priv->ucode_code.p_addr;
2393 pdata = priv->ucode_data_backup.p_addr; 2393 pdata = priv->ucode_data_backup.p_addr;
2394 2394
2395 /* Tell bootstrap uCode where to find image to load */ 2395 /* Tell bootstrap uCode where to find image to load */
2396 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 2396 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2397 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 2397 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2398 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 2398 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2399 priv->ucode_data.len); 2399 priv->ucode_data.len);
2400 2400
2401 /* Inst byte count must be last to set up, bit 31 signals uCode 2401 /* Inst byte count must be last to set up, bit 31 signals uCode
2402 * that all new ptr/size info is in place */ 2402 * that all new ptr/size info is in place */
2403 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 2403 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2404 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 2404 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2405 2405
2406 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); 2406 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
2407 2407
2408 return 0; 2408 return 0;
2409 } 2409 }
2410 2410
2411 /** 2411 /**
2412 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received 2412 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
2413 * 2413 *
2414 * Called after REPLY_ALIVE notification received from "initialize" uCode. 2414 * Called after REPLY_ALIVE notification received from "initialize" uCode.
2415 * 2415 *
2416 * Tell "initialize" uCode to go ahead and load the runtime uCode. 2416 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2417 */ 2417 */
2418 static void iwl3945_init_alive_start(struct iwl_priv *priv) 2418 static void iwl3945_init_alive_start(struct iwl_priv *priv)
2419 { 2419 {
2420 /* Check alive response for "valid" sign from uCode */ 2420 /* Check alive response for "valid" sign from uCode */
2421 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { 2421 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
2422 /* We had an error bringing up the hardware, so take it 2422 /* We had an error bringing up the hardware, so take it
2423 * all the way back down so we can try again */ 2423 * all the way back down so we can try again */
2424 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); 2424 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
2425 goto restart; 2425 goto restart;
2426 } 2426 }
2427 2427
2428 /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 2428 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2429 * This is a paranoid check, because we would not have gotten the 2429 * This is a paranoid check, because we would not have gotten the
2430 * "initialize" alive if code weren't properly loaded. */ 2430 * "initialize" alive if code weren't properly loaded. */
2431 if (iwl3945_verify_ucode(priv)) { 2431 if (iwl3945_verify_ucode(priv)) {
2432 /* Runtime instruction load was bad; 2432 /* Runtime instruction load was bad;
2433 * take it all the way back down so we can try again */ 2433 * take it all the way back down so we can try again */
2434 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); 2434 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
2435 goto restart; 2435 goto restart;
2436 } 2436 }
2437 2437
2438 /* Send pointers to protocol/runtime uCode image ... init code will 2438 /* Send pointers to protocol/runtime uCode image ... init code will
2439 * load and launch runtime uCode, which will send us another "Alive" 2439 * load and launch runtime uCode, which will send us another "Alive"
2440 * notification. */ 2440 * notification. */
2441 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); 2441 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2442 if (iwl3945_set_ucode_ptrs(priv)) { 2442 if (iwl3945_set_ucode_ptrs(priv)) {
2443 /* Runtime instruction load won't happen; 2443 /* Runtime instruction load won't happen;
2444 * take it all the way back down so we can try again */ 2444 * take it all the way back down so we can try again */
2445 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); 2445 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
2446 goto restart; 2446 goto restart;
2447 } 2447 }
2448 return; 2448 return;
2449 2449
2450 restart: 2450 restart:
2451 queue_work(priv->workqueue, &priv->restart); 2451 queue_work(priv->workqueue, &priv->restart);
2452 } 2452 }
2453 2453
2454 /** 2454 /**
2455 * iwl3945_alive_start - called after REPLY_ALIVE notification received 2455 * iwl3945_alive_start - called after REPLY_ALIVE notification received
2456 * from protocol/runtime uCode (initialization uCode's 2456 * from protocol/runtime uCode (initialization uCode's
2457 * Alive gets handled by iwl3945_init_alive_start()). 2457 * Alive gets handled by iwl3945_init_alive_start()).
2458 */ 2458 */
2459 static void iwl3945_alive_start(struct iwl_priv *priv) 2459 static void iwl3945_alive_start(struct iwl_priv *priv)
2460 { 2460 {
2461 int thermal_spin = 0; 2461 int thermal_spin = 0;
2462 u32 rfkill; 2462 u32 rfkill;
2463 2463
2464 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2464 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2465 2465
2466 if (priv->card_alive.is_valid != UCODE_VALID_OK) { 2466 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2467 /* We had an error bringing up the hardware, so take it 2467 /* We had an error bringing up the hardware, so take it
2468 * all the way back down so we can try again */ 2468 * all the way back down so we can try again */
2469 IWL_DEBUG_INFO(priv, "Alive failed.\n"); 2469 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2470 goto restart; 2470 goto restart;
2471 } 2471 }
2472 2472
2473 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 2473 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2474 * This is a paranoid check, because we would not have gotten the 2474 * This is a paranoid check, because we would not have gotten the
2475 * "runtime" alive if code weren't properly loaded. */ 2475 * "runtime" alive if code weren't properly loaded. */
2476 if (iwl3945_verify_ucode(priv)) { 2476 if (iwl3945_verify_ucode(priv)) {
2477 /* Runtime instruction load was bad; 2477 /* Runtime instruction load was bad;
2478 * take it all the way back down so we can try again */ 2478 * take it all the way back down so we can try again */
2479 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); 2479 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2480 goto restart; 2480 goto restart;
2481 } 2481 }
2482 2482
2483 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); 2483 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
2484 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); 2484 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2485 2485
2486 if (rfkill & 0x1) { 2486 if (rfkill & 0x1) {
2487 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2487 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2488 /* if RFKILL is not on, then wait for thermal 2488 /* if RFKILL is not on, then wait for thermal
2489 * sensor in adapter to kick in */ 2489 * sensor in adapter to kick in */
2490 while (iwl3945_hw_get_temperature(priv) == 0) { 2490 while (iwl3945_hw_get_temperature(priv) == 0) {
2491 thermal_spin++; 2491 thermal_spin++;
2492 udelay(10); 2492 udelay(10);
2493 } 2493 }
2494 2494
2495 if (thermal_spin) 2495 if (thermal_spin)
2496 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n", 2496 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
2497 thermal_spin * 10); 2497 thermal_spin * 10);
2498 } else 2498 } else
2499 set_bit(STATUS_RF_KILL_HW, &priv->status); 2499 set_bit(STATUS_RF_KILL_HW, &priv->status);
2500 2500
2501 /* After the ALIVE response, we can send commands to 3945 uCode */ 2501 /* After the ALIVE response, we can send commands to 3945 uCode */
2502 set_bit(STATUS_ALIVE, &priv->status); 2502 set_bit(STATUS_ALIVE, &priv->status);
2503 2503
2504 if (priv->cfg->ops->lib->recover_from_tx_stall) { 2504 if (priv->cfg->ops->lib->recover_from_tx_stall) {
2505 /* Enable timer to monitor the driver queues */ 2505 /* Enable timer to monitor the driver queues */
2506 mod_timer(&priv->monitor_recover, 2506 mod_timer(&priv->monitor_recover,
2507 jiffies + 2507 jiffies +
2508 msecs_to_jiffies(priv->cfg->monitor_recover_period)); 2508 msecs_to_jiffies(priv->cfg->monitor_recover_period));
2509 } 2509 }
2510 2510
2511 if (iwl_is_rfkill(priv)) 2511 if (iwl_is_rfkill(priv))
2512 return; 2512 return;
2513 2513
2514 ieee80211_wake_queues(priv->hw); 2514 ieee80211_wake_queues(priv->hw);
2515 2515
2516 priv->active_rate = IWL_RATES_MASK; 2516 priv->active_rate = IWL_RATES_MASK;
2517 2517
2518 iwl_power_update_mode(priv, true); 2518 iwl_power_update_mode(priv, true);
2519 2519
2520 if (iwl_is_associated(priv)) { 2520 if (iwl_is_associated(priv)) {
2521 struct iwl3945_rxon_cmd *active_rxon = 2521 struct iwl3945_rxon_cmd *active_rxon =
2522 (struct iwl3945_rxon_cmd *)(&priv->active_rxon); 2522 (struct iwl3945_rxon_cmd *)(&priv->active_rxon);
2523 2523
2524 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2524 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
2525 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2525 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2526 } else { 2526 } else {
2527 /* Initialize our rx_config data */ 2527 /* Initialize our rx_config data */
2528 iwl_connection_init_rx_config(priv, NULL); 2528 iwl_connection_init_rx_config(priv, NULL);
2529 } 2529 }
2530 2530
2531 /* Configure Bluetooth device coexistence support */ 2531 /* Configure Bluetooth device coexistence support */
2532 priv->cfg->ops->hcmd->send_bt_config(priv); 2532 priv->cfg->ops->hcmd->send_bt_config(priv);
2533 2533
2534 /* Configure the adapter for unassociated operation */ 2534 /* Configure the adapter for unassociated operation */
2535 iwlcore_commit_rxon(priv); 2535 iwlcore_commit_rxon(priv);
2536 2536
2537 iwl3945_reg_txpower_periodic(priv); 2537 iwl3945_reg_txpower_periodic(priv);
2538 2538
2539 iwl_leds_init(priv); 2539 iwl_leds_init(priv);
2540 2540
2541 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2541 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2542 set_bit(STATUS_READY, &priv->status); 2542 set_bit(STATUS_READY, &priv->status);
2543 wake_up_interruptible(&priv->wait_command_queue); 2543 wake_up_interruptible(&priv->wait_command_queue);
2544 2544
2545 return; 2545 return;
2546 2546
2547 restart: 2547 restart:
2548 queue_work(priv->workqueue, &priv->restart); 2548 queue_work(priv->workqueue, &priv->restart);
2549 } 2549 }
2550 2550
2551 static void iwl3945_cancel_deferred_work(struct iwl_priv *priv); 2551 static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2552 2552
2553 static void __iwl3945_down(struct iwl_priv *priv) 2553 static void __iwl3945_down(struct iwl_priv *priv)
2554 { 2554 {
2555 unsigned long flags; 2555 unsigned long flags;
2556 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2556 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
2557 struct ieee80211_conf *conf = NULL; 2557 struct ieee80211_conf *conf = NULL;
2558 2558
2559 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2559 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2560 2560
2561 conf = ieee80211_get_hw_conf(priv->hw); 2561 conf = ieee80211_get_hw_conf(priv->hw);
2562 2562
2563 if (!exit_pending) 2563 if (!exit_pending)
2564 set_bit(STATUS_EXIT_PENDING, &priv->status); 2564 set_bit(STATUS_EXIT_PENDING, &priv->status);
2565 2565
2566 /* Station information will now be cleared in device */ 2566 /* Station information will now be cleared in device */
2567 iwl_clear_ucode_stations(priv); 2567 iwl_clear_ucode_stations(priv);
2568 iwl_dealloc_bcast_station(priv); 2568 iwl_dealloc_bcast_station(priv);
2569 iwl_clear_driver_stations(priv); 2569 iwl_clear_driver_stations(priv);
2570 2570
2571 /* Unblock any waiting calls */ 2571 /* Unblock any waiting calls */
2572 wake_up_interruptible_all(&priv->wait_command_queue); 2572 wake_up_interruptible_all(&priv->wait_command_queue);
2573 2573
2574 /* Wipe out the EXIT_PENDING status bit if we are not actually 2574 /* Wipe out the EXIT_PENDING status bit if we are not actually
2575 * exiting the module */ 2575 * exiting the module */
2576 if (!exit_pending) 2576 if (!exit_pending)
2577 clear_bit(STATUS_EXIT_PENDING, &priv->status); 2577 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2578 2578
2579 /* stop and reset the on-board processor */ 2579 /* stop and reset the on-board processor */
2580 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 2580 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2581 2581
2582 /* tell the device to stop sending interrupts */ 2582 /* tell the device to stop sending interrupts */
2583 spin_lock_irqsave(&priv->lock, flags); 2583 spin_lock_irqsave(&priv->lock, flags);
2584 iwl_disable_interrupts(priv); 2584 iwl_disable_interrupts(priv);
2585 spin_unlock_irqrestore(&priv->lock, flags); 2585 spin_unlock_irqrestore(&priv->lock, flags);
2586 iwl_synchronize_irq(priv); 2586 iwl_synchronize_irq(priv);
2587 2587
2588 if (priv->mac80211_registered) 2588 if (priv->mac80211_registered)
2589 ieee80211_stop_queues(priv->hw); 2589 ieee80211_stop_queues(priv->hw);
2590 2590
2591 /* If we have not previously called iwl3945_init() then 2591 /* If we have not previously called iwl3945_init() then
2592 * clear all bits but the RF Kill bits and return */ 2592 * clear all bits but the RF Kill bits and return */
2593 if (!iwl_is_init(priv)) { 2593 if (!iwl_is_init(priv)) {
2594 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 2594 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2595 STATUS_RF_KILL_HW | 2595 STATUS_RF_KILL_HW |
2596 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2596 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2597 STATUS_GEO_CONFIGURED | 2597 STATUS_GEO_CONFIGURED |
2598 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2598 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2599 STATUS_EXIT_PENDING; 2599 STATUS_EXIT_PENDING;
2600 goto exit; 2600 goto exit;
2601 } 2601 }
2602 2602
2603 /* ...otherwise clear out all the status bits but the RF Kill 2603 /* ...otherwise clear out all the status bits but the RF Kill
2604 * bit and continue taking the NIC down. */ 2604 * bit and continue taking the NIC down. */
2605 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 2605 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2606 STATUS_RF_KILL_HW | 2606 STATUS_RF_KILL_HW |
2607 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 2607 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2608 STATUS_GEO_CONFIGURED | 2608 STATUS_GEO_CONFIGURED |
2609 test_bit(STATUS_FW_ERROR, &priv->status) << 2609 test_bit(STATUS_FW_ERROR, &priv->status) <<
2610 STATUS_FW_ERROR | 2610 STATUS_FW_ERROR |
2611 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2611 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2612 STATUS_EXIT_PENDING; 2612 STATUS_EXIT_PENDING;
2613 2613
2614 iwl3945_hw_txq_ctx_stop(priv); 2614 iwl3945_hw_txq_ctx_stop(priv);
2615 iwl3945_hw_rxq_stop(priv); 2615 iwl3945_hw_rxq_stop(priv);
2616 2616
2617 /* Power-down device's busmaster DMA clocks */ 2617 /* Power-down device's busmaster DMA clocks */
2618 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2618 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2619 udelay(5); 2619 udelay(5);
2620 2620
2621 /* Stop the device, and put it in low power state */ 2621 /* Stop the device, and put it in low power state */
2622 priv->cfg->ops->lib->apm_ops.stop(priv); 2622 priv->cfg->ops->lib->apm_ops.stop(priv);
2623 2623
2624 exit: 2624 exit:
2625 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2625 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2626 2626
2627 if (priv->ibss_beacon) 2627 if (priv->ibss_beacon)
2628 dev_kfree_skb(priv->ibss_beacon); 2628 dev_kfree_skb(priv->ibss_beacon);
2629 priv->ibss_beacon = NULL; 2629 priv->ibss_beacon = NULL;
2630 2630
2631 /* clear out any free frames */ 2631 /* clear out any free frames */
2632 iwl3945_clear_free_frames(priv); 2632 iwl3945_clear_free_frames(priv);
2633 } 2633 }
2634 2634
2635 static void iwl3945_down(struct iwl_priv *priv) 2635 static void iwl3945_down(struct iwl_priv *priv)
2636 { 2636 {
2637 mutex_lock(&priv->mutex); 2637 mutex_lock(&priv->mutex);
2638 __iwl3945_down(priv); 2638 __iwl3945_down(priv);
2639 mutex_unlock(&priv->mutex); 2639 mutex_unlock(&priv->mutex);
2640 2640
2641 iwl3945_cancel_deferred_work(priv); 2641 iwl3945_cancel_deferred_work(priv);
2642 } 2642 }
2643 2643
2644 #define MAX_HW_RESTARTS 5 2644 #define MAX_HW_RESTARTS 5
2645 2645
2646 static int __iwl3945_up(struct iwl_priv *priv) 2646 static int __iwl3945_up(struct iwl_priv *priv)
2647 { 2647 {
2648 int rc, i; 2648 int rc, i;
2649 2649
2650 rc = iwl_alloc_bcast_station(priv, false); 2650 rc = iwl_alloc_bcast_station(priv, false);
2651 if (rc) 2651 if (rc)
2652 return rc; 2652 return rc;
2653 2653
2654 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2654 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2655 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); 2655 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2656 return -EIO; 2656 return -EIO;
2657 } 2657 }
2658 2658
2659 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 2659 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2660 IWL_ERR(priv, "ucode not available for device bring up\n"); 2660 IWL_ERR(priv, "ucode not available for device bring up\n");
2661 return -EIO; 2661 return -EIO;
2662 } 2662 }
2663 2663
2664 /* If platform's RF_KILL switch is NOT set to KILL */ 2664 /* If platform's RF_KILL switch is NOT set to KILL */
2665 if (iwl_read32(priv, CSR_GP_CNTRL) & 2665 if (iwl_read32(priv, CSR_GP_CNTRL) &
2666 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2666 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2667 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2667 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2668 else { 2668 else {
2669 set_bit(STATUS_RF_KILL_HW, &priv->status); 2669 set_bit(STATUS_RF_KILL_HW, &priv->status);
2670 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); 2670 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2671 return -ENODEV; 2671 return -ENODEV;
2672 } 2672 }
2673 2673
2674 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2674 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2675 2675
2676 rc = iwl3945_hw_nic_init(priv); 2676 rc = iwl3945_hw_nic_init(priv);
2677 if (rc) { 2677 if (rc) {
2678 IWL_ERR(priv, "Unable to init nic\n"); 2678 IWL_ERR(priv, "Unable to init nic\n");
2679 return rc; 2679 return rc;
2680 } 2680 }
2681 2681
2682 /* make sure rfkill handshake bits are cleared */ 2682 /* make sure rfkill handshake bits are cleared */
2683 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2683 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2684 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 2684 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2685 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2685 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2686 2686
2687 /* clear (again), then enable host interrupts */ 2687 /* clear (again), then enable host interrupts */
2688 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2688 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2689 iwl_enable_interrupts(priv); 2689 iwl_enable_interrupts(priv);
2690 2690
2691 /* really make sure rfkill handshake bits are cleared */ 2691 /* really make sure rfkill handshake bits are cleared */
2692 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2692 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2693 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2693 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2694 2694
2695 /* Copy original ucode data image from disk into backup cache. 2695 /* Copy original ucode data image from disk into backup cache.
2696 * This will be used to initialize the on-board processor's 2696 * This will be used to initialize the on-board processor's
2697 * data SRAM for a clean start when the runtime program first loads. */ 2697 * data SRAM for a clean start when the runtime program first loads. */
2698 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, 2698 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2699 priv->ucode_data.len); 2699 priv->ucode_data.len);
2700 2700
2701 /* We return success when we resume from suspend and rf_kill is on. */ 2701 /* We return success when we resume from suspend and rf_kill is on. */
2702 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) 2702 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
2703 return 0; 2703 return 0;
2704 2704
2705 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2705 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2706 2706
2707 /* load bootstrap state machine, 2707 /* load bootstrap state machine,
2708 * load bootstrap program into processor's memory, 2708 * load bootstrap program into processor's memory,
2709 * prepare to load the "initialize" uCode */ 2709 * prepare to load the "initialize" uCode */
2710 rc = priv->cfg->ops->lib->load_ucode(priv); 2710 rc = priv->cfg->ops->lib->load_ucode(priv);
2711 2711
2712 if (rc) { 2712 if (rc) {
2713 IWL_ERR(priv, 2713 IWL_ERR(priv,
2714 "Unable to set up bootstrap uCode: %d\n", rc); 2714 "Unable to set up bootstrap uCode: %d\n", rc);
2715 continue; 2715 continue;
2716 } 2716 }
2717 2717
2718 /* start card; "initialize" will load runtime ucode */ 2718 /* start card; "initialize" will load runtime ucode */
2719 iwl3945_nic_start(priv); 2719 iwl3945_nic_start(priv);
2720 2720
2721 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); 2721 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2722 2722
2723 return 0; 2723 return 0;
2724 } 2724 }
2725 2725
2726 set_bit(STATUS_EXIT_PENDING, &priv->status); 2726 set_bit(STATUS_EXIT_PENDING, &priv->status);
2727 __iwl3945_down(priv); 2727 __iwl3945_down(priv);
2728 clear_bit(STATUS_EXIT_PENDING, &priv->status); 2728 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2729 2729
2730 /* Tried to restart and configure the device for as long as 2730 /* Tried to restart and configure the device for as long as
2731 * our patience allowed */ 2731 * our patience allowed */
2732 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); 2732 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2733 return -EIO; 2733 return -EIO;
2734 } 2734 }
2735 2735
2736 2736
2737 /***************************************************************************** 2737 /*****************************************************************************
2738 * 2738 *
2739 * Workqueue callbacks 2739 * Workqueue callbacks
2740 * 2740 *
2741 *****************************************************************************/ 2741 *****************************************************************************/
2742 2742
2743 static void iwl3945_bg_init_alive_start(struct work_struct *data) 2743 static void iwl3945_bg_init_alive_start(struct work_struct *data)
2744 { 2744 {
2745 struct iwl_priv *priv = 2745 struct iwl_priv *priv =
2746 container_of(data, struct iwl_priv, init_alive_start.work); 2746 container_of(data, struct iwl_priv, init_alive_start.work);
2747 2747
2748 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2748 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2749 return; 2749 return;
2750 2750
2751 mutex_lock(&priv->mutex); 2751 mutex_lock(&priv->mutex);
2752 iwl3945_init_alive_start(priv); 2752 iwl3945_init_alive_start(priv);
2753 mutex_unlock(&priv->mutex); 2753 mutex_unlock(&priv->mutex);
2754 } 2754 }
2755 2755
2756 static void iwl3945_bg_alive_start(struct work_struct *data) 2756 static void iwl3945_bg_alive_start(struct work_struct *data)
2757 { 2757 {
2758 struct iwl_priv *priv = 2758 struct iwl_priv *priv =
2759 container_of(data, struct iwl_priv, alive_start.work); 2759 container_of(data, struct iwl_priv, alive_start.work);
2760 2760
2761 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2761 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2762 return; 2762 return;
2763 2763
2764 mutex_lock(&priv->mutex); 2764 mutex_lock(&priv->mutex);
2765 iwl3945_alive_start(priv); 2765 iwl3945_alive_start(priv);
2766 mutex_unlock(&priv->mutex); 2766 mutex_unlock(&priv->mutex);
2767 } 2767 }
2768 2768
2769 /* 2769 /*
2770 * 3945 cannot interrupt driver when hardware rf kill switch toggles; 2770 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2771 * driver must poll CSR_GP_CNTRL_REG register for change. This register 2771 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2772 * *is* readable even when device has been SW_RESET into low power mode 2772 * *is* readable even when device has been SW_RESET into low power mode
2773 * (e.g. during RF KILL). 2773 * (e.g. during RF KILL).
2774 */ 2774 */
2775 static void iwl3945_rfkill_poll(struct work_struct *data) 2775 static void iwl3945_rfkill_poll(struct work_struct *data)
2776 { 2776 {
2777 struct iwl_priv *priv = 2777 struct iwl_priv *priv =
2778 container_of(data, struct iwl_priv, _3945.rfkill_poll.work); 2778 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2779 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); 2779 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2780 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) 2780 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2781 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 2781 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2782 2782
2783 if (new_rfkill != old_rfkill) { 2783 if (new_rfkill != old_rfkill) {
2784 if (new_rfkill) 2784 if (new_rfkill)
2785 set_bit(STATUS_RF_KILL_HW, &priv->status); 2785 set_bit(STATUS_RF_KILL_HW, &priv->status);
2786 else 2786 else
2787 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2787 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2788 2788
2789 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill); 2789 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2790 2790
2791 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n", 2791 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2792 new_rfkill ? "disable radio" : "enable radio"); 2792 new_rfkill ? "disable radio" : "enable radio");
2793 } 2793 }
2794 2794
2795 /* Keep this running, even if radio now enabled. This will be 2795 /* Keep this running, even if radio now enabled. This will be
2796 * cancelled in mac_start() if system decides to start again */ 2796 * cancelled in mac_start() if system decides to start again */
2797 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, 2797 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2798 round_jiffies_relative(2 * HZ)); 2798 round_jiffies_relative(2 * HZ));
2799 2799
2800 } 2800 }
2801 2801
2802 void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) 2802 void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2803 { 2803 {
2804 struct iwl_host_cmd cmd = { 2804 struct iwl_host_cmd cmd = {
2805 .id = REPLY_SCAN_CMD, 2805 .id = REPLY_SCAN_CMD,
2806 .len = sizeof(struct iwl3945_scan_cmd), 2806 .len = sizeof(struct iwl3945_scan_cmd),
2807 .flags = CMD_SIZE_HUGE, 2807 .flags = CMD_SIZE_HUGE,
2808 }; 2808 };
2809 struct iwl3945_scan_cmd *scan; 2809 struct iwl3945_scan_cmd *scan;
2810 struct ieee80211_conf *conf = NULL; 2810 struct ieee80211_conf *conf = NULL;
2811 u8 n_probes = 0; 2811 u8 n_probes = 0;
2812 enum ieee80211_band band; 2812 enum ieee80211_band band;
2813 bool is_active = false; 2813 bool is_active = false;
2814 2814
2815 conf = ieee80211_get_hw_conf(priv->hw); 2815 conf = ieee80211_get_hw_conf(priv->hw);
2816 2816
2817 cancel_delayed_work(&priv->scan_check); 2817 cancel_delayed_work(&priv->scan_check);
2818 2818
2819 if (!iwl_is_ready(priv)) { 2819 if (!iwl_is_ready(priv)) {
2820 IWL_WARN(priv, "request scan called when driver not ready.\n"); 2820 IWL_WARN(priv, "request scan called when driver not ready.\n");
2821 goto done; 2821 goto done;
2822 } 2822 }
2823 2823
2824 /* Make sure the scan wasn't canceled before this queued work 2824 /* Make sure the scan wasn't canceled before this queued work
2825 * was given the chance to run... */ 2825 * was given the chance to run... */
2826 if (!test_bit(STATUS_SCANNING, &priv->status)) 2826 if (!test_bit(STATUS_SCANNING, &priv->status))
2827 goto done; 2827 goto done;
2828 2828
2829 /* This should never be called or scheduled if there is currently 2829 /* This should never be called or scheduled if there is currently
2830 * a scan active in the hardware. */ 2830 * a scan active in the hardware. */
2831 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 2831 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
2832 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests. " 2832 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests. "
2833 "Ignoring second request.\n"); 2833 "Ignoring second request.\n");
2834 goto done; 2834 goto done;
2835 } 2835 }
2836 2836
2837 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2837 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2838 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n"); 2838 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
2839 goto done; 2839 goto done;
2840 } 2840 }
2841 2841
2842 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 2842 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2843 IWL_DEBUG_HC(priv, 2843 IWL_DEBUG_HC(priv,
2844 "Scan request while abort pending. Queuing.\n"); 2844 "Scan request while abort pending. Queuing.\n");
2845 goto done; 2845 goto done;
2846 } 2846 }
2847 2847
2848 if (iwl_is_rfkill(priv)) { 2848 if (iwl_is_rfkill(priv)) {
2849 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n"); 2849 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
2850 goto done; 2850 goto done;
2851 } 2851 }
2852 2852
2853 if (!test_bit(STATUS_READY, &priv->status)) { 2853 if (!test_bit(STATUS_READY, &priv->status)) {
2854 IWL_DEBUG_HC(priv, 2854 IWL_DEBUG_HC(priv,
2855 "Scan request while uninitialized. Queuing.\n"); 2855 "Scan request while uninitialized. Queuing.\n");
2856 goto done; 2856 goto done;
2857 } 2857 }
2858 2858
2859 if (!priv->scan_cmd) { 2859 if (!priv->scan_cmd) {
2860 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + 2860 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2861 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 2861 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2862 if (!priv->scan_cmd) { 2862 if (!priv->scan_cmd) {
2863 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); 2863 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
2864 goto done; 2864 goto done;
2865 } 2865 }
2866 } 2866 }
2867 scan = priv->scan_cmd; 2867 scan = priv->scan_cmd;
2868 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); 2868 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
2869 2869
2870 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2870 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2871 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 2871 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2872 2872
2873 if (iwl_is_associated(priv)) { 2873 if (iwl_is_associated(priv)) {
2874 u16 interval = 0; 2874 u16 interval = 0;
2875 u32 extra; 2875 u32 extra;
2876 u32 suspend_time = 100; 2876 u32 suspend_time = 100;
2877 u32 scan_suspend_time = 100; 2877 u32 scan_suspend_time = 100;
2878 unsigned long flags; 2878 unsigned long flags;
2879 2879
2880 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 2880 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2881 2881
2882 spin_lock_irqsave(&priv->lock, flags); 2882 spin_lock_irqsave(&priv->lock, flags);
2883 if (priv->is_internal_short_scan) 2883 if (priv->is_internal_short_scan)
2884 interval = 0; 2884 interval = 0;
2885 else 2885 else
2886 interval = vif->bss_conf.beacon_int; 2886 interval = vif->bss_conf.beacon_int;
2887 spin_unlock_irqrestore(&priv->lock, flags); 2887 spin_unlock_irqrestore(&priv->lock, flags);
2888 2888
2889 scan->suspend_time = 0; 2889 scan->suspend_time = 0;
2890 scan->max_out_time = cpu_to_le32(200 * 1024); 2890 scan->max_out_time = cpu_to_le32(200 * 1024);
2891 if (!interval) 2891 if (!interval)
2892 interval = suspend_time; 2892 interval = suspend_time;
2893 /* 2893 /*
2894 * suspend time format: 2894 * suspend time format:
2895 * 0-19: beacon interval in usec (time before exec.) 2895 * 0-19: beacon interval in usec (time before exec.)
2896 * 20-23: 0 2896 * 20-23: 0
2897 * 24-31: number of beacons (suspend between channels) 2897 * 24-31: number of beacons (suspend between channels)
2898 */ 2898 */
2899 2899
2900 extra = (suspend_time / interval) << 24; 2900 extra = (suspend_time / interval) << 24;
2901 scan_suspend_time = 0xFF0FFFFF & 2901 scan_suspend_time = 0xFF0FFFFF &
2902 (extra | ((suspend_time % interval) * 1024)); 2902 (extra | ((suspend_time % interval) * 1024));
2903 2903
2904 scan->suspend_time = cpu_to_le32(scan_suspend_time); 2904 scan->suspend_time = cpu_to_le32(scan_suspend_time);
2905 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", 2905 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
2906 scan_suspend_time, interval); 2906 scan_suspend_time, interval);
2907 } 2907 }
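/*
 * Worked example of the suspend-time packing above, with illustrative
 * numbers not taken from the source: for suspend_time = 100 and a beacon
 * interval of 70, extra = (100 / 70) << 24 = 0x01000000 and the remainder
 * 100 % 70 = 30 becomes 30 * 1024 = 0x7800 (the * 1024 presumably converts
 * beacon-interval units to microseconds), so scan_suspend_time = 0x01007800:
 * roughly one beacon interval plus 30720 usec of suspend between channels.
 */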
2908 2908
2909 if (priv->is_internal_short_scan) { 2909 if (priv->is_internal_short_scan) {
2910 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); 2910 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
2911 } else if (priv->scan_request->n_ssids) { 2911 } else if (priv->scan_request->n_ssids) {
2912 int i, p = 0; 2912 int i, p = 0;
2913 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); 2913 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
2914 for (i = 0; i < priv->scan_request->n_ssids; i++) { 2914 for (i = 0; i < priv->scan_request->n_ssids; i++) {
2915 /* always does wildcard anyway */ 2915 /* always does wildcard anyway */
2916 if (!priv->scan_request->ssids[i].ssid_len) 2916 if (!priv->scan_request->ssids[i].ssid_len)
2917 continue; 2917 continue;
2918 scan->direct_scan[p].id = WLAN_EID_SSID; 2918 scan->direct_scan[p].id = WLAN_EID_SSID;
2919 scan->direct_scan[p].len = 2919 scan->direct_scan[p].len =
2920 priv->scan_request->ssids[i].ssid_len; 2920 priv->scan_request->ssids[i].ssid_len;
2921 memcpy(scan->direct_scan[p].ssid, 2921 memcpy(scan->direct_scan[p].ssid,
2922 priv->scan_request->ssids[i].ssid, 2922 priv->scan_request->ssids[i].ssid,
2923 priv->scan_request->ssids[i].ssid_len); 2923 priv->scan_request->ssids[i].ssid_len);
2924 n_probes++; 2924 n_probes++;
2925 p++; 2925 p++;
2926 } 2926 }
2927 is_active = true; 2927 is_active = true;
2928 } else 2928 } else
2929 IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n"); 2929 IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
2930 2930
2931 /* We don't build a direct scan probe request; the uCode will do 2931 /* We don't build a direct scan probe request; the uCode will do
2932 * that based on the direct_mask added to each channel entry */ 2932 * that based on the direct_mask added to each channel entry */
2933 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 2933 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2934 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; 2934 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
2935 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2935 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2936 2936
2937 /* flags + rate selection */ 2937 /* flags + rate selection */
2938 2938
2939 switch (priv->scan_band) { 2939 switch (priv->scan_band) {
2940 case IEEE80211_BAND_2GHZ: 2940 case IEEE80211_BAND_2GHZ:
2941 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 2941 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2942 scan->tx_cmd.rate = IWL_RATE_1M_PLCP; 2942 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
2943 scan->good_CRC_th = 0; 2943 scan->good_CRC_th = 0;
2944 band = IEEE80211_BAND_2GHZ; 2944 band = IEEE80211_BAND_2GHZ;
2945 break; 2945 break;
2946 case IEEE80211_BAND_5GHZ: 2946 case IEEE80211_BAND_5GHZ:
2947 scan->tx_cmd.rate = IWL_RATE_6M_PLCP; 2947 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
2948 /* 2948 /*
2949 * If active scanning is requested but a certain channel 2949 * If active scanning is requested but a certain channel
2950 * is marked passive, we can do active scanning if we 2950 * is marked passive, we can do active scanning if we
2951 * detect transmissions. 2951 * detect transmissions.
2952 */ 2952 */
2953 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : 2953 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2954 IWL_GOOD_CRC_TH_DISABLED; 2954 IWL_GOOD_CRC_TH_DISABLED;
2955 band = IEEE80211_BAND_5GHZ; 2955 band = IEEE80211_BAND_5GHZ;
2956 break; 2956 break;
2957 default: 2957 default:
2958 IWL_WARN(priv, "Invalid scan band\n"); 2958 IWL_WARN(priv, "Invalid scan band\n");
2959 goto done; 2959 goto done;
2960 } 2960 }
2961 2961
2962 if (!priv->is_internal_short_scan) { 2962 if (!priv->is_internal_short_scan) {
2963 scan->tx_cmd.len = cpu_to_le16( 2963 scan->tx_cmd.len = cpu_to_le16(
2964 iwl_fill_probe_req(priv, 2964 iwl_fill_probe_req(priv,
2965 (struct ieee80211_mgmt *)scan->data, 2965 (struct ieee80211_mgmt *)scan->data,
2966 vif->addr, 2966 vif->addr,
2967 priv->scan_request->ie, 2967 priv->scan_request->ie,
2968 priv->scan_request->ie_len, 2968 priv->scan_request->ie_len,
2969 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2969 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2970 } else { 2970 } else {
2971 /* use bcast addr, will not be transmitted but must be valid */ 2971 /* use bcast addr, will not be transmitted but must be valid */
2972 scan->tx_cmd.len = cpu_to_le16( 2972 scan->tx_cmd.len = cpu_to_le16(
2973 iwl_fill_probe_req(priv, 2973 iwl_fill_probe_req(priv,
2974 (struct ieee80211_mgmt *)scan->data, 2974 (struct ieee80211_mgmt *)scan->data,
2975 iwl_bcast_addr, NULL, 0, 2975 iwl_bcast_addr, NULL, 0,
2976 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2976 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2977 } 2977 }
2978 /* select Rx antennas */ 2978 /* select Rx antennas */
2979 scan->flags |= iwl3945_get_antenna_flags(priv); 2979 scan->flags |= iwl3945_get_antenna_flags(priv);
2980 2980
2981 if (priv->is_internal_short_scan) { 2981 if (priv->is_internal_short_scan) {
2982 scan->channel_count = 2982 scan->channel_count =
2983 iwl3945_get_single_channel_for_scan(priv, vif, band, 2983 iwl3945_get_single_channel_for_scan(priv, vif, band,
2984 (void *)&scan->data[le16_to_cpu( 2984 (void *)&scan->data[le16_to_cpu(
2985 scan->tx_cmd.len)]); 2985 scan->tx_cmd.len)]);
2986 } else { 2986 } else {
2987 scan->channel_count = 2987 scan->channel_count =
2988 iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, 2988 iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
2989 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif); 2989 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
2990 } 2990 }
2991 2991
2992 if (scan->channel_count == 0) { 2992 if (scan->channel_count == 0) {
2993 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 2993 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
2994 goto done; 2994 goto done;
2995 } 2995 }
2996 2996
2997 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 2997 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
2998 scan->channel_count * sizeof(struct iwl3945_scan_channel); 2998 scan->channel_count * sizeof(struct iwl3945_scan_channel);
2999 cmd.data = scan; 2999 cmd.data = scan;
3000 scan->len = cpu_to_le16(cmd.len); 3000 scan->len = cpu_to_le16(cmd.len);
3001 3001
3002 set_bit(STATUS_SCAN_HW, &priv->status); 3002 set_bit(STATUS_SCAN_HW, &priv->status);
3003 if (iwl_send_cmd_sync(priv, &cmd)) 3003 if (iwl_send_cmd_sync(priv, &cmd))
3004 goto done; 3004 goto done;
3005 3005
3006 queue_delayed_work(priv->workqueue, &priv->scan_check, 3006 queue_delayed_work(priv->workqueue, &priv->scan_check,
3007 IWL_SCAN_CHECK_WATCHDOG); 3007 IWL_SCAN_CHECK_WATCHDOG);
3008 3008
3009 return; 3009 return;
3010 3010
3011 done: 3011 done:
3012 /* Cannot perform scan; make sure we clear the scanning 3012 /* Cannot perform scan; make sure we clear the scanning
3013 * bits from status so the next scan request can be performed. 3013 * bits from status so the next scan request can be performed.
3014 * If we don't clear the scanning status bits here, all 3014 * If we don't clear the scanning status bits here, all
3015 * subsequent scans will fail. 3015 * subsequent scans will fail.
3016 */ 3016 */
3017 clear_bit(STATUS_SCAN_HW, &priv->status); 3017 clear_bit(STATUS_SCAN_HW, &priv->status);
3018 clear_bit(STATUS_SCANNING, &priv->status); 3018 clear_bit(STATUS_SCANNING, &priv->status);
3019 3019
3020 /* inform mac80211 scan aborted */ 3020 /* inform mac80211 scan aborted */
3021 queue_work(priv->workqueue, &priv->scan_completed); 3021 queue_work(priv->workqueue, &priv->abort_scan);
3022 } 3022 }
3023 3023
3024 static void iwl3945_bg_restart(struct work_struct *data) 3024 static void iwl3945_bg_restart(struct work_struct *data)
3025 { 3025 {
3026 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); 3026 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
3027 3027
3028 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3028 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3029 return; 3029 return;
3030 3030
3031 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 3031 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
3032 mutex_lock(&priv->mutex); 3032 mutex_lock(&priv->mutex);
3033 priv->vif = NULL; 3033 priv->vif = NULL;
3034 priv->is_open = 0; 3034 priv->is_open = 0;
3035 mutex_unlock(&priv->mutex); 3035 mutex_unlock(&priv->mutex);
3036 iwl3945_down(priv); 3036 iwl3945_down(priv);
3037 ieee80211_restart_hw(priv->hw); 3037 ieee80211_restart_hw(priv->hw);
3038 } else { 3038 } else {
3039 iwl3945_down(priv); 3039 iwl3945_down(priv);
3040 3040
3041 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3041 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3042 return; 3042 return;
3043 3043
3044 mutex_lock(&priv->mutex); 3044 mutex_lock(&priv->mutex);
3045 __iwl3945_up(priv); 3045 __iwl3945_up(priv);
3046 mutex_unlock(&priv->mutex); 3046 mutex_unlock(&priv->mutex);
3047 } 3047 }
3048 } 3048 }
3049 3049
3050 static void iwl3945_bg_rx_replenish(struct work_struct *data) 3050 static void iwl3945_bg_rx_replenish(struct work_struct *data)
3051 { 3051 {
3052 struct iwl_priv *priv = 3052 struct iwl_priv *priv =
3053 container_of(data, struct iwl_priv, rx_replenish); 3053 container_of(data, struct iwl_priv, rx_replenish);
3054 3054
3055 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3055 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3056 return; 3056 return;
3057 3057
3058 mutex_lock(&priv->mutex); 3058 mutex_lock(&priv->mutex);
3059 iwl3945_rx_replenish(priv); 3059 iwl3945_rx_replenish(priv);
3060 mutex_unlock(&priv->mutex); 3060 mutex_unlock(&priv->mutex);
3061 } 3061 }
3062 3062
3063 void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif) 3063 void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3064 { 3064 {
3065 int rc = 0; 3065 int rc = 0;
3066 struct ieee80211_conf *conf = NULL; 3066 struct ieee80211_conf *conf = NULL;
3067 3067
3068 if (!vif || !priv->is_open) 3068 if (!vif || !priv->is_open)
3069 return; 3069 return;
3070 3070
3071 if (vif->type == NL80211_IFTYPE_AP) { 3071 if (vif->type == NL80211_IFTYPE_AP) {
3072 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 3072 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3073 return; 3073 return;
3074 } 3074 }
3075 3075
3076 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3076 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3077 vif->bss_conf.aid, priv->active_rxon.bssid_addr); 3077 vif->bss_conf.aid, priv->active_rxon.bssid_addr);
3078 3078
3079 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3079 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3080 return; 3080 return;
3081 3081
3082 iwl_scan_cancel_timeout(priv, 200); 3082 iwl_scan_cancel_timeout(priv, 200);
3083 3083
3084 conf = ieee80211_get_hw_conf(priv->hw); 3084 conf = ieee80211_get_hw_conf(priv->hw);
3085 3085
3086 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3086 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3087 iwlcore_commit_rxon(priv); 3087 iwlcore_commit_rxon(priv);
3088 3088
3089 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3089 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
3090 iwl_setup_rxon_timing(priv, vif); 3090 iwl_setup_rxon_timing(priv, vif);
3091 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 3091 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3092 sizeof(priv->rxon_timing), &priv->rxon_timing); 3092 sizeof(priv->rxon_timing), &priv->rxon_timing);
3093 if (rc) 3093 if (rc)
3094 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3094 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3095 "Attempting to continue.\n"); 3095 "Attempting to continue.\n");
3096 3096
3097 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3097 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
3098 3098
3099 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid); 3099 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3100 3100
3101 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3101 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3102 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3102 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3103 3103
3104 if (vif->bss_conf.use_short_preamble) 3104 if (vif->bss_conf.use_short_preamble)
3105 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3105 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3106 else 3106 else
3107 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3107 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3108 3108
3109 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3109 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
3110 if (vif->bss_conf.use_short_slot) 3110 if (vif->bss_conf.use_short_slot)
3111 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3111 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
3112 else 3112 else
3113 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3113 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3114 } 3114 }
3115 3115
3116 iwlcore_commit_rxon(priv); 3116 iwlcore_commit_rxon(priv);
3117 3117
3118 switch (vif->type) { 3118 switch (vif->type) {
3119 case NL80211_IFTYPE_STATION: 3119 case NL80211_IFTYPE_STATION:
3120 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); 3120 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
3121 break; 3121 break;
3122 case NL80211_IFTYPE_ADHOC: 3122 case NL80211_IFTYPE_ADHOC:
3123 iwl3945_send_beacon_cmd(priv); 3123 iwl3945_send_beacon_cmd(priv);
3124 break; 3124 break;
3125 default: 3125 default:
3126 IWL_ERR(priv, "%s Should not be called in %d mode\n", 3126 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3127 __func__, vif->type); 3127 __func__, vif->type);
3128 break; 3128 break;
3129 } 3129 }
3130 } 3130 }
3131 3131
3132 /***************************************************************************** 3132 /*****************************************************************************
3133 * 3133 *
3134 * mac80211 entry point functions 3134 * mac80211 entry point functions
3135 * 3135 *
3136 *****************************************************************************/ 3136 *****************************************************************************/
3137 3137
3138 #define UCODE_READY_TIMEOUT (2 * HZ) 3138 #define UCODE_READY_TIMEOUT (2 * HZ)
3139 3139
3140 static int iwl3945_mac_start(struct ieee80211_hw *hw) 3140 static int iwl3945_mac_start(struct ieee80211_hw *hw)
3141 { 3141 {
3142 struct iwl_priv *priv = hw->priv; 3142 struct iwl_priv *priv = hw->priv;
3143 int ret; 3143 int ret;
3144 3144
3145 IWL_DEBUG_MAC80211(priv, "enter\n"); 3145 IWL_DEBUG_MAC80211(priv, "enter\n");
3146 3146
3147 /* we should be verifying the device is ready to be opened */ 3147 /* we should be verifying the device is ready to be opened */
3148 mutex_lock(&priv->mutex); 3148 mutex_lock(&priv->mutex);
3149 3149
3150 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 3150 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
3151 * ucode filename and max sizes are card-specific. */ 3151 * ucode filename and max sizes are card-specific. */
3152 3152
3153 if (!priv->ucode_code.len) { 3153 if (!priv->ucode_code.len) {
3154 ret = iwl3945_read_ucode(priv); 3154 ret = iwl3945_read_ucode(priv);
3155 if (ret) { 3155 if (ret) {
3156 IWL_ERR(priv, "Could not read microcode: %d\n", ret); 3156 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
3157 mutex_unlock(&priv->mutex); 3157 mutex_unlock(&priv->mutex);
3158 goto out_release_irq; 3158 goto out_release_irq;
3159 } 3159 }
3160 } 3160 }
3161 3161
3162 ret = __iwl3945_up(priv); 3162 ret = __iwl3945_up(priv);
3163 3163
3164 mutex_unlock(&priv->mutex); 3164 mutex_unlock(&priv->mutex);
3165 3165
3166 if (ret) 3166 if (ret)
3167 goto out_release_irq; 3167 goto out_release_irq;
3168 3168
3169 IWL_DEBUG_INFO(priv, "Start UP work.\n"); 3169 IWL_DEBUG_INFO(priv, "Start UP work.\n");
3170 3170
3171 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 3171 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
3172 * mac80211 will not be run successfully. */ 3172 * mac80211 will not be run successfully. */
3173 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 3173 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
3174 test_bit(STATUS_READY, &priv->status), 3174 test_bit(STATUS_READY, &priv->status),
3175 UCODE_READY_TIMEOUT); 3175 UCODE_READY_TIMEOUT);
3176 if (!ret) { 3176 if (!ret) {
3177 if (!test_bit(STATUS_READY, &priv->status)) { 3177 if (!test_bit(STATUS_READY, &priv->status)) {
3178 IWL_ERR(priv, 3178 IWL_ERR(priv,
3179 "Wait for START_ALIVE timeout after %dms.\n", 3179 "Wait for START_ALIVE timeout after %dms.\n",
3180 jiffies_to_msecs(UCODE_READY_TIMEOUT)); 3180 jiffies_to_msecs(UCODE_READY_TIMEOUT));
3181 ret = -ETIMEDOUT; 3181 ret = -ETIMEDOUT;
3182 goto out_release_irq; 3182 goto out_release_irq;
3183 } 3183 }
3184 } 3184 }
3185 3185
3186 /* ucode is running and will send rfkill notifications, 3186 /* ucode is running and will send rfkill notifications,
3187 * no need to poll the killswitch state anymore */ 3187 * no need to poll the killswitch state anymore */
3188 cancel_delayed_work(&priv->_3945.rfkill_poll); 3188 cancel_delayed_work(&priv->_3945.rfkill_poll);
3189 3189
3190 iwl_led_start(priv); 3190 iwl_led_start(priv);
3191 3191
3192 priv->is_open = 1; 3192 priv->is_open = 1;
3193 IWL_DEBUG_MAC80211(priv, "leave\n"); 3193 IWL_DEBUG_MAC80211(priv, "leave\n");
3194 return 0; 3194 return 0;
3195 3195
3196 out_release_irq: 3196 out_release_irq:
3197 priv->is_open = 0; 3197 priv->is_open = 0;
3198 IWL_DEBUG_MAC80211(priv, "leave - failed\n"); 3198 IWL_DEBUG_MAC80211(priv, "leave - failed\n");
3199 return ret; 3199 return ret;
3200 } 3200 }
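
One subtlety in the START_ALIVE wait above: wait_event_interruptible_timeout() returns a negative value if interrupted by a signal, 0 if the timeout elapsed, and the remaining jiffies otherwise, which is why the code only treats !ret as a failure after re-testing STATUS_READY. A condensed, hypothetical equivalent of that check (dropping the error message) would be:

            ret = wait_event_interruptible_timeout(priv->wait_command_queue,
                                    test_bit(STATUS_READY, &priv->status),
                                    UCODE_READY_TIMEOUT);
            if (!ret && !test_bit(STATUS_READY, &priv->status)) {
                    /* genuine timeout: the ucode never reported ALIVE */
                    ret = -ETIMEDOUT;
                    goto out_release_irq;
            }
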
3201 3201
3202 static void iwl3945_mac_stop(struct ieee80211_hw *hw) 3202 static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3203 { 3203 {
3204 struct iwl_priv *priv = hw->priv; 3204 struct iwl_priv *priv = hw->priv;
3205 3205
3206 IWL_DEBUG_MAC80211(priv, "enter\n"); 3206 IWL_DEBUG_MAC80211(priv, "enter\n");
3207 3207
3208 if (!priv->is_open) { 3208 if (!priv->is_open) {
3209 IWL_DEBUG_MAC80211(priv, "leave - skip\n"); 3209 IWL_DEBUG_MAC80211(priv, "leave - skip\n");
3210 return; 3210 return;
3211 } 3211 }
3212 3212
3213 priv->is_open = 0; 3213 priv->is_open = 0;
3214 3214
3215 if (iwl_is_ready_rf(priv)) { 3215 if (iwl_is_ready_rf(priv)) {
3216 /* stop mac, cancel any scan request and clear 3216 /* stop mac, cancel any scan request and clear
3217 * RXON_FILTER_ASSOC_MSK BIT 3217 * RXON_FILTER_ASSOC_MSK BIT
3218 */ 3218 */
3219 mutex_lock(&priv->mutex); 3219 mutex_lock(&priv->mutex);
3220 iwl_scan_cancel_timeout(priv, 100); 3220 iwl_scan_cancel_timeout(priv, 100);
3221 mutex_unlock(&priv->mutex); 3221 mutex_unlock(&priv->mutex);
3222 } 3222 }
3223 3223
3224 iwl3945_down(priv); 3224 iwl3945_down(priv);
3225 3225
3226 flush_workqueue(priv->workqueue); 3226 flush_workqueue(priv->workqueue);
3227 3227
3228 /* start polling the killswitch state again */ 3228 /* start polling the killswitch state again */
3229 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, 3229 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
3230 round_jiffies_relative(2 * HZ)); 3230 round_jiffies_relative(2 * HZ));
3231 3231
3232 IWL_DEBUG_MAC80211(priv, "leave\n"); 3232 IWL_DEBUG_MAC80211(priv, "leave\n");
3233 } 3233 }
3234 3234
3235 static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3235 static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3236 { 3236 {
3237 struct iwl_priv *priv = hw->priv; 3237 struct iwl_priv *priv = hw->priv;
3238 3238
3239 IWL_DEBUG_MAC80211(priv, "enter\n"); 3239 IWL_DEBUG_MAC80211(priv, "enter\n");
3240 3240
3241 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 3241 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
3242 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 3242 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
3243 3243
3244 if (iwl3945_tx_skb(priv, skb)) 3244 if (iwl3945_tx_skb(priv, skb))
3245 dev_kfree_skb_any(skb); 3245 dev_kfree_skb_any(skb);
3246 3246
3247 IWL_DEBUG_MAC80211(priv, "leave\n"); 3247 IWL_DEBUG_MAC80211(priv, "leave\n");
3248 return NETDEV_TX_OK; 3248 return NETDEV_TX_OK;
3249 } 3249 }
3250 3250
3251 void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3251 void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3252 { 3252 {
3253 int rc = 0; 3253 int rc = 0;
3254 3254
3255 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3255 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3256 return; 3256 return;
3257 3257
3258 /* The following should be done only at AP bring up */ 3258 /* The following should be done only at AP bring up */
3259 if (!(iwl_is_associated(priv))) { 3259 if (!(iwl_is_associated(priv))) {
3260 3260
3261 /* RXON - unassoc (to set timing command) */ 3261 /* RXON - unassoc (to set timing command) */
3262 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3262 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3263 iwlcore_commit_rxon(priv); 3263 iwlcore_commit_rxon(priv);
3264 3264
3265 /* RXON Timing */ 3265 /* RXON Timing */
3266 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3266 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
3267 iwl_setup_rxon_timing(priv, vif); 3267 iwl_setup_rxon_timing(priv, vif);
3268 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 3268 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3269 sizeof(priv->rxon_timing), 3269 sizeof(priv->rxon_timing),
3270 &priv->rxon_timing); 3270 &priv->rxon_timing);
3271 if (rc) 3271 if (rc)
3272 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3272 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3273 "Attempting to continue.\n"); 3273 "Attempting to continue.\n");
3274 3274
3275 priv->staging_rxon.assoc_id = 0; 3275 priv->staging_rxon.assoc_id = 0;
3276 3276
3277 if (vif->bss_conf.use_short_preamble) 3277 if (vif->bss_conf.use_short_preamble)
3278 priv->staging_rxon.flags |= 3278 priv->staging_rxon.flags |=
3279 RXON_FLG_SHORT_PREAMBLE_MSK; 3279 RXON_FLG_SHORT_PREAMBLE_MSK;
3280 else 3280 else
3281 priv->staging_rxon.flags &= 3281 priv->staging_rxon.flags &=
3282 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3282 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3283 3283
3284 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3284 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
3285 if (vif->bss_conf.use_short_slot) 3285 if (vif->bss_conf.use_short_slot)
3286 priv->staging_rxon.flags |= 3286 priv->staging_rxon.flags |=
3287 RXON_FLG_SHORT_SLOT_MSK; 3287 RXON_FLG_SHORT_SLOT_MSK;
3288 else 3288 else
3289 priv->staging_rxon.flags &= 3289 priv->staging_rxon.flags &=
3290 ~RXON_FLG_SHORT_SLOT_MSK; 3290 ~RXON_FLG_SHORT_SLOT_MSK;
3291 } 3291 }
3292 /* restore RXON assoc */ 3292 /* restore RXON assoc */
3293 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3293 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
3294 iwlcore_commit_rxon(priv); 3294 iwlcore_commit_rxon(priv);
3295 } 3295 }
3296 iwl3945_send_beacon_cmd(priv); 3296 iwl3945_send_beacon_cmd(priv);
3297 3297
3298 /* FIXME - we need to add code here to detect a totally new 3298 /* FIXME - we need to add code here to detect a totally new
3299 * configuration, reset the AP, unassoc, rxon timing, assoc, 3299 * configuration, reset the AP, unassoc, rxon timing, assoc,
3300 * clear sta table, add BCAST sta... */ 3300 * clear sta table, add BCAST sta... */
3301 } 3301 }
3302 3302
3303 static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3303 static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3304 struct ieee80211_vif *vif, 3304 struct ieee80211_vif *vif,
3305 struct ieee80211_sta *sta, 3305 struct ieee80211_sta *sta,
3306 struct ieee80211_key_conf *key) 3306 struct ieee80211_key_conf *key)
3307 { 3307 {
3308 struct iwl_priv *priv = hw->priv; 3308 struct iwl_priv *priv = hw->priv;
3309 int ret = 0; 3309 int ret = 0;
3310 u8 sta_id = IWL_INVALID_STATION; 3310 u8 sta_id = IWL_INVALID_STATION;
3311 u8 static_key; 3311 u8 static_key;
3312 3312
3313 IWL_DEBUG_MAC80211(priv, "enter\n"); 3313 IWL_DEBUG_MAC80211(priv, "enter\n");
3314 3314
3315 if (iwl3945_mod_params.sw_crypto) { 3315 if (iwl3945_mod_params.sw_crypto) {
3316 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); 3316 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
3317 return -EOPNOTSUPP; 3317 return -EOPNOTSUPP;
3318 } 3318 }
3319 3319
3320 static_key = !iwl_is_associated(priv); 3320 static_key = !iwl_is_associated(priv);
3321 3321
3322 if (!static_key) { 3322 if (!static_key) {
3323 sta_id = iwl_sta_id_or_broadcast(priv, sta); 3323 sta_id = iwl_sta_id_or_broadcast(priv, sta);
3324 if (sta_id == IWL_INVALID_STATION) 3324 if (sta_id == IWL_INVALID_STATION)
3325 return -EINVAL; 3325 return -EINVAL;
3326 } 3326 }
3327 3327
3328 mutex_lock(&priv->mutex); 3328 mutex_lock(&priv->mutex);
3329 iwl_scan_cancel_timeout(priv, 100); 3329 iwl_scan_cancel_timeout(priv, 100);
3330 3330
3331 switch (cmd) { 3331 switch (cmd) {
3332 case SET_KEY: 3332 case SET_KEY:
3333 if (static_key) 3333 if (static_key)
3334 ret = iwl3945_set_static_key(priv, key); 3334 ret = iwl3945_set_static_key(priv, key);
3335 else 3335 else
3336 ret = iwl3945_set_dynamic_key(priv, key, sta_id); 3336 ret = iwl3945_set_dynamic_key(priv, key, sta_id);
3337 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); 3337 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3338 break; 3338 break;
3339 case DISABLE_KEY: 3339 case DISABLE_KEY:
3340 if (static_key) 3340 if (static_key)
3341 ret = iwl3945_remove_static_key(priv); 3341 ret = iwl3945_remove_static_key(priv);
3342 else 3342 else
3343 ret = iwl3945_clear_sta_key_info(priv, sta_id); 3343 ret = iwl3945_clear_sta_key_info(priv, sta_id);
3344 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); 3344 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3345 break; 3345 break;
3346 default: 3346 default:
3347 ret = -EINVAL; 3347 ret = -EINVAL;
3348 } 3348 }
3349 3349
3350 mutex_unlock(&priv->mutex); 3350 mutex_unlock(&priv->mutex);
3351 IWL_DEBUG_MAC80211(priv, "leave\n"); 3351 IWL_DEBUG_MAC80211(priv, "leave\n");
3352 3352
3353 return ret; 3353 return ret;
3354 } 3354 }
3355 3355
3356 static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, 3356 static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3357 struct ieee80211_vif *vif, 3357 struct ieee80211_vif *vif,
3358 struct ieee80211_sta *sta) 3358 struct ieee80211_sta *sta)
3359 { 3359 {
3360 struct iwl_priv *priv = hw->priv; 3360 struct iwl_priv *priv = hw->priv;
3361 struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv; 3361 struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3362 int ret; 3362 int ret;
3363 bool is_ap = vif->type == NL80211_IFTYPE_STATION; 3363 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3364 u8 sta_id; 3364 u8 sta_id;
3365 3365
3366 IWL_DEBUG_INFO(priv, "received request to add station %pM\n", 3366 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3367 sta->addr); 3367 sta->addr);
3368 mutex_lock(&priv->mutex); 3368 mutex_lock(&priv->mutex);
3369 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", 3369 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3370 sta->addr); 3370 sta->addr);
3371 sta_priv->common.sta_id = IWL_INVALID_STATION; 3371 sta_priv->common.sta_id = IWL_INVALID_STATION;
3372 3372
3373 3373
3374 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap, 3374 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
3375 &sta_id); 3375 &sta_id);
3376 if (ret) { 3376 if (ret) {
3377 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3377 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3378 sta->addr, ret); 3378 sta->addr, ret);
3379 /* Should we return success if the return code is EEXIST? */ 3379 /* Should we return success if the return code is EEXIST? */
3380 mutex_unlock(&priv->mutex); 3380 mutex_unlock(&priv->mutex);
3381 return ret; 3381 return ret;
3382 } 3382 }
3383 3383
3384 sta_priv->common.sta_id = sta_id; 3384 sta_priv->common.sta_id = sta_id;
3385 3385
3386 /* Initialize rate scaling */ 3386 /* Initialize rate scaling */
3387 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", 3387 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3388 sta->addr); 3388 sta->addr);
3389 iwl3945_rs_rate_init(priv, sta, sta_id); 3389 iwl3945_rs_rate_init(priv, sta, sta_id);
3390 mutex_unlock(&priv->mutex); 3390 mutex_unlock(&priv->mutex);
3391 3391
3392 return 0; 3392 return 0;
3393 } 3393 }
3394 3394
3395 static void iwl3945_configure_filter(struct ieee80211_hw *hw, 3395 static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3396 unsigned int changed_flags, 3396 unsigned int changed_flags,
3397 unsigned int *total_flags, 3397 unsigned int *total_flags,
3398 u64 multicast) 3398 u64 multicast)
3399 { 3399 {
3400 struct iwl_priv *priv = hw->priv; 3400 struct iwl_priv *priv = hw->priv;
3401 __le32 filter_or = 0, filter_nand = 0; 3401 __le32 filter_or = 0, filter_nand = 0;
3402 3402
3403 #define CHK(test, flag) do { \ 3403 #define CHK(test, flag) do { \
3404 if (*total_flags & (test)) \ 3404 if (*total_flags & (test)) \
3405 filter_or |= (flag); \ 3405 filter_or |= (flag); \
3406 else \ 3406 else \
3407 filter_nand |= (flag); \ 3407 filter_nand |= (flag); \
3408 } while (0) 3408 } while (0)
3409 3409
3410 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", 3410 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3411 changed_flags, *total_flags); 3411 changed_flags, *total_flags);
3412 3412
3413 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); 3413 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3414 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); 3414 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3415 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); 3415 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3416 3416
3417 #undef CHK 3417 #undef CHK
3418 3418
3419 mutex_lock(&priv->mutex); 3419 mutex_lock(&priv->mutex);
3420 3420
3421 priv->staging_rxon.filter_flags &= ~filter_nand; 3421 priv->staging_rxon.filter_flags &= ~filter_nand;
3422 priv->staging_rxon.filter_flags |= filter_or; 3422 priv->staging_rxon.filter_flags |= filter_or;
3423 3423
3424 /* 3424 /*
3425 * Committing directly here breaks for some reason, 3425 * Committing directly here breaks for some reason,
3426 * but we'll eventually commit the filter flags 3426 * but we'll eventually commit the filter flags
3427 * change anyway. 3427 * change anyway.
3428 */ 3428 */
3429 3429
3430 mutex_unlock(&priv->mutex); 3430 mutex_unlock(&priv->mutex);
3431 3431
3432 /* 3432 /*
3433 * Receiving all multicast frames is always enabled by the 3433 * Receiving all multicast frames is always enabled by the
3434 * default flags setup in iwl_connection_init_rx_config() 3434 * default flags setup in iwl_connection_init_rx_config()
3435 * since we currently do not support programming multicast 3435 * since we currently do not support programming multicast
3436 * filters into the device. 3436 * filters into the device.
3437 */ 3437 */
3438 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | 3438 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3439 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 3439 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3440 } 3440 }
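
The CHK macro above splits mac80211's requested filter flags into a set-mask (filter_or) and a clear-mask (filter_nand) that are then applied to the staged RXON filter flags. A self-contained userspace illustration of the same split-and-apply pattern (the constants are invented for the example) is:

    #include <stdio.h>
    #include <stdint.h>

    #define FIF_PROMISC   0x1
    #define FIF_CONTROL   0x2
    #define HW_PROMISC    0x10
    #define HW_CTL2HOST   0x20

    int main(void)
    {
            uint32_t total_flags = FIF_CONTROL;          /* what the stack asked for */
            uint32_t set = 0, clear = 0, staged = HW_PROMISC;

    #define CHK(test, flag) do { \
            if (total_flags & (test)) set |= (flag); else clear |= (flag); \
    } while (0)

            CHK(FIF_PROMISC, HW_PROMISC);
            CHK(FIF_CONTROL, HW_CTL2HOST);
    #undef CHK

            staged &= ~clear;                            /* drop bits no longer wanted */
            staged |= set;                               /* raise bits now wanted */
            printf("staged = 0x%x\n", staged);           /* prints 0x20 */
            return 0;
    }
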
3441 3441
3442 3442
3443 /***************************************************************************** 3443 /*****************************************************************************
3444 * 3444 *
3445 * sysfs attributes 3445 * sysfs attributes
3446 * 3446 *
3447 *****************************************************************************/ 3447 *****************************************************************************/
3448 3448
3449 #ifdef CONFIG_IWLWIFI_DEBUG 3449 #ifdef CONFIG_IWLWIFI_DEBUG
3450 3450
3451 /* 3451 /*
3452 * The following adds a new attribute to the sysfs representation 3452 * The following adds a new attribute to the sysfs representation
3453 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) 3453 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3454 * used for controlling the debug level. 3454 * used for controlling the debug level.
3455 * 3455 *
3456 * See the level definitions in iwl for details. 3456 * See the level definitions in iwl for details.
3457 * 3457 *
3458 * The debug_level being managed using sysfs below is a per device debug 3458 * The debug_level being managed using sysfs below is a per device debug
3459 * level that is used instead of the global debug level if it (the per 3459 * level that is used instead of the global debug level if it (the per
3460 * device debug level) is set. 3460 * device debug level) is set.
3461 */ 3461 */
3462 static ssize_t show_debug_level(struct device *d, 3462 static ssize_t show_debug_level(struct device *d,
3463 struct device_attribute *attr, char *buf) 3463 struct device_attribute *attr, char *buf)
3464 { 3464 {
3465 struct iwl_priv *priv = dev_get_drvdata(d); 3465 struct iwl_priv *priv = dev_get_drvdata(d);
3466 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); 3466 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
3467 } 3467 }
3468 static ssize_t store_debug_level(struct device *d, 3468 static ssize_t store_debug_level(struct device *d,
3469 struct device_attribute *attr, 3469 struct device_attribute *attr,
3470 const char *buf, size_t count) 3470 const char *buf, size_t count)
3471 { 3471 {
3472 struct iwl_priv *priv = dev_get_drvdata(d); 3472 struct iwl_priv *priv = dev_get_drvdata(d);
3473 unsigned long val; 3473 unsigned long val;
3474 int ret; 3474 int ret;
3475 3475
3476 ret = strict_strtoul(buf, 0, &val); 3476 ret = strict_strtoul(buf, 0, &val);
3477 if (ret) 3477 if (ret)
3478 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); 3478 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3479 else { 3479 else {
3480 priv->debug_level = val; 3480 priv->debug_level = val;
3481 if (iwl_alloc_traffic_mem(priv)) 3481 if (iwl_alloc_traffic_mem(priv))
3482 IWL_ERR(priv, 3482 IWL_ERR(priv,
3483 "Not enough memory to generate traffic log\n"); 3483 "Not enough memory to generate traffic log\n");
3484 } 3484 }
3485 return strnlen(buf, count); 3485 return strnlen(buf, count);
3486 } 3486 }
3487 3487
3488 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, 3488 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3489 show_debug_level, store_debug_level); 3489 show_debug_level, store_debug_level);
3490 3490
3491 #endif /* CONFIG_IWLWIFI_DEBUG */ 3491 #endif /* CONFIG_IWLWIFI_DEBUG */
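
store_debug_level() above accepts the value in decimal or hex, so the attribute can be driven from userspace with an ordinary write. The example program below is a sketch; the sysfs path and the bitmask value are assumptions that depend on the NIC's PCI address and the debug flags you actually want.

    #include <stdio.h>

    int main(void)
    {
            /* assumed path; substitute the real driver directory and PCI address */
            const char *path =
                    "/sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "0x47\n");   /* arbitrary example bitmask */
            fclose(f);
            return 0;
    }
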
3492 3492
3493 static ssize_t show_temperature(struct device *d, 3493 static ssize_t show_temperature(struct device *d,
3494 struct device_attribute *attr, char *buf) 3494 struct device_attribute *attr, char *buf)
3495 { 3495 {
3496 struct iwl_priv *priv = dev_get_drvdata(d); 3496 struct iwl_priv *priv = dev_get_drvdata(d);
3497 3497
3498 if (!iwl_is_alive(priv)) 3498 if (!iwl_is_alive(priv))
3499 return -EAGAIN; 3499 return -EAGAIN;
3500 3500
3501 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); 3501 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3502 } 3502 }
3503 3503
3504 static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 3504 static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
3505 3505
3506 static ssize_t show_tx_power(struct device *d, 3506 static ssize_t show_tx_power(struct device *d,
3507 struct device_attribute *attr, char *buf) 3507 struct device_attribute *attr, char *buf)
3508 { 3508 {
3509 struct iwl_priv *priv = dev_get_drvdata(d); 3509 struct iwl_priv *priv = dev_get_drvdata(d);
3510 return sprintf(buf, "%d\n", priv->tx_power_user_lmt); 3510 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3511 } 3511 }
3512 3512
3513 static ssize_t store_tx_power(struct device *d, 3513 static ssize_t store_tx_power(struct device *d,
3514 struct device_attribute *attr, 3514 struct device_attribute *attr,
3515 const char *buf, size_t count) 3515 const char *buf, size_t count)
3516 { 3516 {
3517 struct iwl_priv *priv = dev_get_drvdata(d); 3517 struct iwl_priv *priv = dev_get_drvdata(d);
3518 char *p = (char *)buf; 3518 char *p = (char *)buf;
3519 u32 val; 3519 u32 val;
3520 3520
3521 val = simple_strtoul(p, &p, 10); 3521 val = simple_strtoul(p, &p, 10);
3522 if (p == buf) 3522 if (p == buf)
3523 IWL_INFO(priv, ": %s is not in decimal form.\n", buf); 3523 IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
3524 else 3524 else
3525 iwl3945_hw_reg_set_txpower(priv, val); 3525 iwl3945_hw_reg_set_txpower(priv, val);
3526 3526
3527 return count; 3527 return count;
3528 } 3528 }
3529 3529
3530 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 3530 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
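
store_tx_power() uses the classic strtoul end-pointer check to detect input that parsed to nothing. The same idiom in portable userspace C (names invented for the example):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *good = "15", *bad = "abc";
            char *end;
            unsigned long val;

            val = strtoul(good, &end, 10);
            printf("good: val=%lu parsed=%s\n", val, end != good ? "yes" : "no");

            val = strtoul(bad, &end, 10);
            printf("bad:  val=%lu parsed=%s\n", val, end != bad ? "yes" : "no");
            return 0;
    }
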
3531 3531
3532 static ssize_t show_flags(struct device *d, 3532 static ssize_t show_flags(struct device *d,
3533 struct device_attribute *attr, char *buf) 3533 struct device_attribute *attr, char *buf)
3534 { 3534 {
3535 struct iwl_priv *priv = dev_get_drvdata(d); 3535 struct iwl_priv *priv = dev_get_drvdata(d);
3536 3536
3537 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); 3537 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
3538 } 3538 }
3539 3539
3540 static ssize_t store_flags(struct device *d, 3540 static ssize_t store_flags(struct device *d,
3541 struct device_attribute *attr, 3541 struct device_attribute *attr,
3542 const char *buf, size_t count) 3542 const char *buf, size_t count)
3543 { 3543 {
3544 struct iwl_priv *priv = dev_get_drvdata(d); 3544 struct iwl_priv *priv = dev_get_drvdata(d);
3545 u32 flags = simple_strtoul(buf, NULL, 0); 3545 u32 flags = simple_strtoul(buf, NULL, 0);
3546 3546
3547 mutex_lock(&priv->mutex); 3547 mutex_lock(&priv->mutex);
3548 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 3548 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
3549 /* Cancel any currently running scans... */ 3549 /* Cancel any currently running scans... */
3550 if (iwl_scan_cancel_timeout(priv, 100)) 3550 if (iwl_scan_cancel_timeout(priv, 100))
3551 IWL_WARN(priv, "Could not cancel scan.\n"); 3551 IWL_WARN(priv, "Could not cancel scan.\n");
3552 else { 3552 else {
3553 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", 3553 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3554 flags); 3554 flags);
3555 priv->staging_rxon.flags = cpu_to_le32(flags); 3555 priv->staging_rxon.flags = cpu_to_le32(flags);
3556 iwlcore_commit_rxon(priv); 3556 iwlcore_commit_rxon(priv);
3557 } 3557 }
3558 } 3558 }
3559 mutex_unlock(&priv->mutex); 3559 mutex_unlock(&priv->mutex);
3560 3560
3561 return count; 3561 return count;
3562 } 3562 }
3563 3563
3564 static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); 3564 static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
3565 3565
3566 static ssize_t show_filter_flags(struct device *d, 3566 static ssize_t show_filter_flags(struct device *d,
3567 struct device_attribute *attr, char *buf) 3567 struct device_attribute *attr, char *buf)
3568 { 3568 {
3569 struct iwl_priv *priv = dev_get_drvdata(d); 3569 struct iwl_priv *priv = dev_get_drvdata(d);
3570 3570
3571 return sprintf(buf, "0x%04X\n", 3571 return sprintf(buf, "0x%04X\n",
3572 le32_to_cpu(priv->active_rxon.filter_flags)); 3572 le32_to_cpu(priv->active_rxon.filter_flags));
3573 } 3573 }
3574 3574
3575 static ssize_t store_filter_flags(struct device *d, 3575 static ssize_t store_filter_flags(struct device *d,
3576 struct device_attribute *attr, 3576 struct device_attribute *attr,
3577 const char *buf, size_t count) 3577 const char *buf, size_t count)
3578 { 3578 {
3579 struct iwl_priv *priv = dev_get_drvdata(d); 3579 struct iwl_priv *priv = dev_get_drvdata(d);
3580 u32 filter_flags = simple_strtoul(buf, NULL, 0); 3580 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3581 3581
3582 mutex_lock(&priv->mutex); 3582 mutex_lock(&priv->mutex);
3583 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 3583 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
3584 /* Cancel any currently running scans... */ 3584 /* Cancel any currently running scans... */
3585 if (iwl_scan_cancel_timeout(priv, 100)) 3585 if (iwl_scan_cancel_timeout(priv, 100))
3586 IWL_WARN(priv, "Could not cancel scan.\n"); 3586 IWL_WARN(priv, "Could not cancel scan.\n");
3587 else { 3587 else {
3588 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " 3588 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3589 "0x%04X\n", filter_flags); 3589 "0x%04X\n", filter_flags);
3590 priv->staging_rxon.filter_flags = 3590 priv->staging_rxon.filter_flags =
3591 cpu_to_le32(filter_flags); 3591 cpu_to_le32(filter_flags);
3592 iwlcore_commit_rxon(priv); 3592 iwlcore_commit_rxon(priv);
3593 } 3593 }
3594 } 3594 }
3595 mutex_unlock(&priv->mutex); 3595 mutex_unlock(&priv->mutex);
3596 3596
3597 return count; 3597 return count;
3598 } 3598 }
3599 3599
3600 static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3600 static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3601 store_filter_flags); 3601 store_filter_flags);
3602 3602
3603 static ssize_t show_measurement(struct device *d, 3603 static ssize_t show_measurement(struct device *d,
3604 struct device_attribute *attr, char *buf) 3604 struct device_attribute *attr, char *buf)
3605 { 3605 {
3606 struct iwl_priv *priv = dev_get_drvdata(d); 3606 struct iwl_priv *priv = dev_get_drvdata(d);
3607 struct iwl_spectrum_notification measure_report; 3607 struct iwl_spectrum_notification measure_report;
3608 u32 size = sizeof(measure_report), len = 0, ofs = 0; 3608 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3609 u8 *data = (u8 *)&measure_report; 3609 u8 *data = (u8 *)&measure_report;
3610 unsigned long flags; 3610 unsigned long flags;
3611 3611
3612 spin_lock_irqsave(&priv->lock, flags); 3612 spin_lock_irqsave(&priv->lock, flags);
3613 if (!(priv->measurement_status & MEASUREMENT_READY)) { 3613 if (!(priv->measurement_status & MEASUREMENT_READY)) {
3614 spin_unlock_irqrestore(&priv->lock, flags); 3614 spin_unlock_irqrestore(&priv->lock, flags);
3615 return 0; 3615 return 0;
3616 } 3616 }
3617 memcpy(&measure_report, &priv->measure_report, size); 3617 memcpy(&measure_report, &priv->measure_report, size);
3618 priv->measurement_status = 0; 3618 priv->measurement_status = 0;
3619 spin_unlock_irqrestore(&priv->lock, flags); 3619 spin_unlock_irqrestore(&priv->lock, flags);
3620 3620
3621 while (size && (PAGE_SIZE - len)) { 3621 while (size && (PAGE_SIZE - len)) {
3622 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, 3622 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3623 PAGE_SIZE - len, 1); 3623 PAGE_SIZE - len, 1);
3624 len = strlen(buf); 3624 len = strlen(buf);
3625 if (PAGE_SIZE - len) 3625 if (PAGE_SIZE - len)
3626 buf[len++] = '\n'; 3626 buf[len++] = '\n';
3627 3627
3628 ofs += 16; 3628 ofs += 16;
3629 size -= min(size, 16U); 3629 size -= min(size, 16U);
3630 } 3630 }
3631 3631
3632 return len; 3632 return len;
3633 } 3633 }
3634 3634
3635 static ssize_t store_measurement(struct device *d, 3635 static ssize_t store_measurement(struct device *d,
3636 struct device_attribute *attr, 3636 struct device_attribute *attr,
3637 const char *buf, size_t count) 3637 const char *buf, size_t count)
3638 { 3638 {
3639 struct iwl_priv *priv = dev_get_drvdata(d); 3639 struct iwl_priv *priv = dev_get_drvdata(d);
3640 struct ieee80211_measurement_params params = { 3640 struct ieee80211_measurement_params params = {
3641 .channel = le16_to_cpu(priv->active_rxon.channel), 3641 .channel = le16_to_cpu(priv->active_rxon.channel),
3642 .start_time = cpu_to_le64(priv->_3945.last_tsf), 3642 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3643 .duration = cpu_to_le16(1), 3643 .duration = cpu_to_le16(1),
3644 }; 3644 };
3645 u8 type = IWL_MEASURE_BASIC; 3645 u8 type = IWL_MEASURE_BASIC;
3646 u8 buffer[32]; 3646 u8 buffer[32];
3647 u8 channel; 3647 u8 channel;
3648 3648
3649 if (count) { 3649 if (count) {
3650 char *p = buffer; 3650 char *p = buffer;
3651 strncpy(buffer, buf, min(sizeof(buffer), count)); 3651 strncpy(buffer, buf, min(sizeof(buffer), count));
3652 channel = simple_strtoul(p, NULL, 0); 3652 channel = simple_strtoul(p, NULL, 0);
3653 if (channel) 3653 if (channel)
3654 params.channel = channel; 3654 params.channel = channel;
3655 3655
3656 p = buffer; 3656 p = buffer;
3657 while (*p && *p != ' ') 3657 while (*p && *p != ' ')
3658 p++; 3658 p++;
3659 if (*p) 3659 if (*p)
3660 type = simple_strtoul(p + 1, NULL, 0); 3660 type = simple_strtoul(p + 1, NULL, 0);
3661 } 3661 }
3662 3662
3663 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on " 3663 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
3664 "channel %d (for '%s')\n", type, params.channel, buf); 3664 "channel %d (for '%s')\n", type, params.channel, buf);
3665 iwl3945_get_measurement(priv, &params, type); 3665 iwl3945_get_measurement(priv, &params, type);
3666 3666
3667 return count; 3667 return count;
3668 } 3668 }
3669 3669
3670 static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, 3670 static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3671 show_measurement, store_measurement); 3671 show_measurement, store_measurement);
3672 3672
3673 static ssize_t store_retry_rate(struct device *d, 3673 static ssize_t store_retry_rate(struct device *d,
3674 struct device_attribute *attr, 3674 struct device_attribute *attr,
3675 const char *buf, size_t count) 3675 const char *buf, size_t count)
3676 { 3676 {
3677 struct iwl_priv *priv = dev_get_drvdata(d); 3677 struct iwl_priv *priv = dev_get_drvdata(d);
3678 3678
3679 priv->retry_rate = simple_strtoul(buf, NULL, 0); 3679 priv->retry_rate = simple_strtoul(buf, NULL, 0);
3680 if (priv->retry_rate <= 0) 3680 if (priv->retry_rate <= 0)
3681 priv->retry_rate = 1; 3681 priv->retry_rate = 1;
3682 3682
3683 return count; 3683 return count;
3684 } 3684 }
3685 3685
3686 static ssize_t show_retry_rate(struct device *d, 3686 static ssize_t show_retry_rate(struct device *d,
3687 struct device_attribute *attr, char *buf) 3687 struct device_attribute *attr, char *buf)
3688 { 3688 {
3689 struct iwl_priv *priv = dev_get_drvdata(d); 3689 struct iwl_priv *priv = dev_get_drvdata(d);
3690 return sprintf(buf, "%d", priv->retry_rate); 3690 return sprintf(buf, "%d", priv->retry_rate);
3691 } 3691 }
3692 3692
3693 static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, 3693 static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
3694 store_retry_rate); 3694 store_retry_rate);
3695 3695
3696 3696
3697 static ssize_t show_channels(struct device *d, 3697 static ssize_t show_channels(struct device *d,
3698 struct device_attribute *attr, char *buf) 3698 struct device_attribute *attr, char *buf)
3699 { 3699 {
3700 /* none of this belongs in sysfs anyway */ 3700 /* none of this belongs in sysfs anyway */
3701 return 0; 3701 return 0;
3702 } 3702 }
3703 3703
3704 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 3704 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
3705 3705
3706 static ssize_t show_antenna(struct device *d, 3706 static ssize_t show_antenna(struct device *d,
3707 struct device_attribute *attr, char *buf) 3707 struct device_attribute *attr, char *buf)
3708 { 3708 {
3709 struct iwl_priv *priv = dev_get_drvdata(d); 3709 struct iwl_priv *priv = dev_get_drvdata(d);
3710 3710
3711 if (!iwl_is_alive(priv)) 3711 if (!iwl_is_alive(priv))
3712 return -EAGAIN; 3712 return -EAGAIN;
3713 3713
3714 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); 3714 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3715 } 3715 }
3716 3716
3717 static ssize_t store_antenna(struct device *d, 3717 static ssize_t store_antenna(struct device *d,
3718 struct device_attribute *attr, 3718 struct device_attribute *attr,
3719 const char *buf, size_t count) 3719 const char *buf, size_t count)
3720 { 3720 {
3721 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d); 3721 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
3722 int ant; 3722 int ant;
3723 3723
3724 if (count == 0) 3724 if (count == 0)
3725 return 0; 3725 return 0;
3726 3726
3727 if (sscanf(buf, "%1i", &ant) != 1) { 3727 if (sscanf(buf, "%1i", &ant) != 1) {
3728 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n"); 3728 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
3729 return count; 3729 return count;
3730 } 3730 }
3731 3731
3732 if ((ant >= 0) && (ant <= 2)) { 3732 if ((ant >= 0) && (ant <= 2)) {
3733 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant); 3733 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
3734 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant; 3734 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
3735 } else 3735 } else
3736 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant); 3736 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
3737 3737
3738 3738
3739 return count; 3739 return count;
3740 } 3740 }
3741 3741
3742 static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); 3742 static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
3743 3743
3744 static ssize_t show_status(struct device *d, 3744 static ssize_t show_status(struct device *d,
3745 struct device_attribute *attr, char *buf) 3745 struct device_attribute *attr, char *buf)
3746 { 3746 {
3747 struct iwl_priv *priv = dev_get_drvdata(d); 3747 struct iwl_priv *priv = dev_get_drvdata(d);
3748 if (!iwl_is_alive(priv)) 3748 if (!iwl_is_alive(priv))
3749 return -EAGAIN; 3749 return -EAGAIN;
3750 return sprintf(buf, "0x%08x\n", (int)priv->status); 3750 return sprintf(buf, "0x%08x\n", (int)priv->status);
3751 } 3751 }
3752 3752
3753 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 3753 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
3754 3754
3755 static ssize_t dump_error_log(struct device *d, 3755 static ssize_t dump_error_log(struct device *d,
3756 struct device_attribute *attr, 3756 struct device_attribute *attr,
3757 const char *buf, size_t count) 3757 const char *buf, size_t count)
3758 { 3758 {
3759 struct iwl_priv *priv = dev_get_drvdata(d); 3759 struct iwl_priv *priv = dev_get_drvdata(d);
3760 char *p = (char *)buf; 3760 char *p = (char *)buf;
3761 3761
3762 if (p[0] == '1') 3762 if (p[0] == '1')
3763 iwl3945_dump_nic_error_log(priv); 3763 iwl3945_dump_nic_error_log(priv);
3764 3764
3765 return strnlen(buf, count); 3765 return strnlen(buf, count);
3766 } 3766 }
3767 3767
3768 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); 3768 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
3769 3769
3770 /***************************************************************************** 3770 /*****************************************************************************
3771 * 3771 *
3772 * driver setup and tear down 3772 * driver setup and tear down
3773 * 3773 *
3774 *****************************************************************************/ 3774 *****************************************************************************/
3775 3775
3776 static void iwl3945_setup_deferred_work(struct iwl_priv *priv) 3776 static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3777 { 3777 {
3778 priv->workqueue = create_singlethread_workqueue(DRV_NAME); 3778 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3779 3779
3780 init_waitqueue_head(&priv->wait_command_queue); 3780 init_waitqueue_head(&priv->wait_command_queue);
3781 3781
3782 INIT_WORK(&priv->restart, iwl3945_bg_restart); 3782 INIT_WORK(&priv->restart, iwl3945_bg_restart);
3783 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); 3783 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
3784 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 3784 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
3785 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3785 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3786 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3786 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3787 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); 3787 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3788 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 3788 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
3789 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 3789 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
3790 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); 3790 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
3791 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 3791 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
3792 3792
3793 iwl3945_hw_setup_deferred_work(priv); 3793 iwl3945_hw_setup_deferred_work(priv);
3794 3794
3795 if (priv->cfg->ops->lib->recover_from_tx_stall) { 3795 if (priv->cfg->ops->lib->recover_from_tx_stall) {
3796 init_timer(&priv->monitor_recover); 3796 init_timer(&priv->monitor_recover);
3797 priv->monitor_recover.data = (unsigned long)priv; 3797 priv->monitor_recover.data = (unsigned long)priv;
3798 priv->monitor_recover.function = 3798 priv->monitor_recover.function =
3799 priv->cfg->ops->lib->recover_from_tx_stall; 3799 priv->cfg->ops->lib->recover_from_tx_stall;
3800 } 3800 }
3801 3801
3802 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3802 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3803 iwl3945_irq_tasklet, (unsigned long)priv); 3803 iwl3945_irq_tasklet, (unsigned long)priv);
3804 } 3804 }
3805 3805
3806 static void iwl3945_cancel_deferred_work(struct iwl_priv *priv) 3806 static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3807 { 3807 {
3808 iwl3945_hw_cancel_deferred_work(priv); 3808 iwl3945_hw_cancel_deferred_work(priv);
3809 3809
3810 cancel_delayed_work_sync(&priv->init_alive_start); 3810 cancel_delayed_work_sync(&priv->init_alive_start);
3811 cancel_delayed_work(&priv->scan_check); 3811 cancel_delayed_work(&priv->scan_check);
3812 cancel_delayed_work(&priv->alive_start); 3812 cancel_delayed_work(&priv->alive_start);
3813 cancel_work_sync(&priv->start_internal_scan); 3813 cancel_work_sync(&priv->start_internal_scan);
3814 cancel_work_sync(&priv->beacon_update); 3814 cancel_work_sync(&priv->beacon_update);
3815 if (priv->cfg->ops->lib->recover_from_tx_stall) 3815 if (priv->cfg->ops->lib->recover_from_tx_stall)
3816 del_timer_sync(&priv->monitor_recover); 3816 del_timer_sync(&priv->monitor_recover);
3817 } 3817 }
3818 3818
3819 static struct attribute *iwl3945_sysfs_entries[] = { 3819 static struct attribute *iwl3945_sysfs_entries[] = {
3820 &dev_attr_antenna.attr, 3820 &dev_attr_antenna.attr,
3821 &dev_attr_channels.attr, 3821 &dev_attr_channels.attr,
3822 &dev_attr_dump_errors.attr, 3822 &dev_attr_dump_errors.attr,
3823 &dev_attr_flags.attr, 3823 &dev_attr_flags.attr,
3824 &dev_attr_filter_flags.attr, 3824 &dev_attr_filter_flags.attr,
3825 &dev_attr_measurement.attr, 3825 &dev_attr_measurement.attr,
3826 &dev_attr_retry_rate.attr, 3826 &dev_attr_retry_rate.attr,
3827 &dev_attr_status.attr, 3827 &dev_attr_status.attr,
3828 &dev_attr_temperature.attr, 3828 &dev_attr_temperature.attr,
3829 &dev_attr_tx_power.attr, 3829 &dev_attr_tx_power.attr,
3830 #ifdef CONFIG_IWLWIFI_DEBUG 3830 #ifdef CONFIG_IWLWIFI_DEBUG
3831 &dev_attr_debug_level.attr, 3831 &dev_attr_debug_level.attr,
3832 #endif 3832 #endif
3833 NULL 3833 NULL
3834 }; 3834 };
3835 3835
3836 static struct attribute_group iwl3945_attribute_group = { 3836 static struct attribute_group iwl3945_attribute_group = {
3837 .name = NULL, /* put in device directory */ 3837 .name = NULL, /* put in device directory */
3838 .attrs = iwl3945_sysfs_entries, 3838 .attrs = iwl3945_sysfs_entries,
3839 }; 3839 };
3840 3840
3841 static struct ieee80211_ops iwl3945_hw_ops = { 3841 static struct ieee80211_ops iwl3945_hw_ops = {
3842 .tx = iwl3945_mac_tx, 3842 .tx = iwl3945_mac_tx,
3843 .start = iwl3945_mac_start, 3843 .start = iwl3945_mac_start,
3844 .stop = iwl3945_mac_stop, 3844 .stop = iwl3945_mac_stop,
3845 .add_interface = iwl_mac_add_interface, 3845 .add_interface = iwl_mac_add_interface,
3846 .remove_interface = iwl_mac_remove_interface, 3846 .remove_interface = iwl_mac_remove_interface,
3847 .config = iwl_mac_config, 3847 .config = iwl_mac_config,
3848 .configure_filter = iwl3945_configure_filter, 3848 .configure_filter = iwl3945_configure_filter,
3849 .set_key = iwl3945_mac_set_key, 3849 .set_key = iwl3945_mac_set_key,
3850 .conf_tx = iwl_mac_conf_tx, 3850 .conf_tx = iwl_mac_conf_tx,
3851 .reset_tsf = iwl_mac_reset_tsf, 3851 .reset_tsf = iwl_mac_reset_tsf,
3852 .bss_info_changed = iwl_bss_info_changed, 3852 .bss_info_changed = iwl_bss_info_changed,
3853 .hw_scan = iwl_mac_hw_scan, 3853 .hw_scan = iwl_mac_hw_scan,
3854 .sta_add = iwl3945_mac_sta_add, 3854 .sta_add = iwl3945_mac_sta_add,
3855 .sta_remove = iwl_mac_sta_remove, 3855 .sta_remove = iwl_mac_sta_remove,
3856 }; 3856 };
3857 3857
3858 static int iwl3945_init_drv(struct iwl_priv *priv) 3858 static int iwl3945_init_drv(struct iwl_priv *priv)
3859 { 3859 {
3860 int ret; 3860 int ret;
3861 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; 3861 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
3862 3862
3863 priv->retry_rate = 1; 3863 priv->retry_rate = 1;
3864 priv->ibss_beacon = NULL; 3864 priv->ibss_beacon = NULL;
3865 3865
3866 spin_lock_init(&priv->sta_lock); 3866 spin_lock_init(&priv->sta_lock);
3867 spin_lock_init(&priv->hcmd_lock); 3867 spin_lock_init(&priv->hcmd_lock);
3868 3868
3869 INIT_LIST_HEAD(&priv->free_frames); 3869 INIT_LIST_HEAD(&priv->free_frames);
3870 3870
3871 mutex_init(&priv->mutex); 3871 mutex_init(&priv->mutex);
3872 mutex_init(&priv->sync_cmd_mutex); 3872 mutex_init(&priv->sync_cmd_mutex);
3873 3873
3874 priv->ieee_channels = NULL; 3874 priv->ieee_channels = NULL;
3875 priv->ieee_rates = NULL; 3875 priv->ieee_rates = NULL;
3876 priv->band = IEEE80211_BAND_2GHZ; 3876 priv->band = IEEE80211_BAND_2GHZ;
3877 3877
3878 priv->iw_mode = NL80211_IFTYPE_STATION; 3878 priv->iw_mode = NL80211_IFTYPE_STATION;
3879 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 3879 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3880 3880
3881 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; 3881 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3882 3882
3883 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3883 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3884 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", 3884 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
3885 eeprom->version); 3885 eeprom->version);
3886 ret = -EINVAL; 3886 ret = -EINVAL;
3887 goto err; 3887 goto err;
3888 } 3888 }
3889 ret = iwl_init_channel_map(priv); 3889 ret = iwl_init_channel_map(priv);
3890 if (ret) { 3890 if (ret) {
3891 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); 3891 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3892 goto err; 3892 goto err;
3893 } 3893 }
3894 3894
3895 /* Set up txpower settings in driver for all channels */ 3895 /* Set up txpower settings in driver for all channels */
3896 if (iwl3945_txpower_set_from_eeprom(priv)) { 3896 if (iwl3945_txpower_set_from_eeprom(priv)) {
3897 ret = -EIO; 3897 ret = -EIO;
3898 goto err_free_channel_map; 3898 goto err_free_channel_map;
3899 } 3899 }
3900 3900
3901 ret = iwlcore_init_geos(priv); 3901 ret = iwlcore_init_geos(priv);
3902 if (ret) { 3902 if (ret) {
3903 IWL_ERR(priv, "initializing geos failed: %d\n", ret); 3903 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3904 goto err_free_channel_map; 3904 goto err_free_channel_map;
3905 } 3905 }
3906 iwl3945_init_hw_rates(priv, priv->ieee_rates); 3906 iwl3945_init_hw_rates(priv, priv->ieee_rates);
3907 3907
3908 return 0; 3908 return 0;
3909 3909
3910 err_free_channel_map: 3910 err_free_channel_map:
3911 iwl_free_channel_map(priv); 3911 iwl_free_channel_map(priv);
3912 err: 3912 err:
3913 return ret; 3913 return ret;
3914 } 3914 }
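
iwl3945_init_drv() uses the kernel's usual goto-unwind pattern: each failure jumps to a label that frees exactly what was set up before the failure point. A compact userspace illustration of the same structure (the resources are stand-ins for the example):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int ret = 0;
            char *channel_map = NULL, *geos = NULL;

            channel_map = malloc(64);
            if (!channel_map) {
                    ret = -1;
                    goto err;
            }

            geos = malloc(64);
            if (!geos) {
                    ret = -1;
                    goto err_free_channel_map;      /* undo only what succeeded */
            }

            puts("init ok");
            free(geos);
            free(channel_map);
            return 0;

    err_free_channel_map:
            free(channel_map);
    err:
            return ret;
    }
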
3915 3915
3916 #define IWL3945_MAX_PROBE_REQUEST 200 3916 #define IWL3945_MAX_PROBE_REQUEST 200
3917 3917
3918 static int iwl3945_setup_mac(struct iwl_priv *priv) 3918 static int iwl3945_setup_mac(struct iwl_priv *priv)
3919 { 3919 {
3920 int ret; 3920 int ret;
3921 struct ieee80211_hw *hw = priv->hw; 3921 struct ieee80211_hw *hw = priv->hw;
3922 3922
3923 hw->rate_control_algorithm = "iwl-3945-rs"; 3923 hw->rate_control_algorithm = "iwl-3945-rs";
3924 hw->sta_data_size = sizeof(struct iwl3945_sta_priv); 3924 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
3925 hw->vif_data_size = sizeof(struct iwl_vif_priv); 3925 hw->vif_data_size = sizeof(struct iwl_vif_priv);
3926 3926
3927 /* Tell mac80211 our characteristics */ 3927 /* Tell mac80211 our characteristics */
3928 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3928 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3929 IEEE80211_HW_SPECTRUM_MGMT; 3929 IEEE80211_HW_SPECTRUM_MGMT;
3930 3930
3931 if (!priv->cfg->broken_powersave) 3931 if (!priv->cfg->broken_powersave)
3932 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 3932 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3933 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3933 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3934 3934
3935 hw->wiphy->interface_modes = 3935 hw->wiphy->interface_modes =
3936 BIT(NL80211_IFTYPE_STATION) | 3936 BIT(NL80211_IFTYPE_STATION) |
3937 BIT(NL80211_IFTYPE_ADHOC); 3937 BIT(NL80211_IFTYPE_ADHOC);
3938 3938
3939 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3939 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3940 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3940 WIPHY_FLAG_DISABLE_BEACON_HINTS;
3941 3941
3942 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3942 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3943 /* we create the 802.11 header and a zero-length SSID element */ 3943 /* we create the 802.11 header and a zero-length SSID element */
3944 hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2; 3944 hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
3945 3945
3946 /* Default value; 4 EDCA QOS priorities */ 3946 /* Default value; 4 EDCA QOS priorities */
3947 hw->queues = 4; 3947 hw->queues = 4;
3948 3948
3949 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) 3949 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
3950 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 3950 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3951 &priv->bands[IEEE80211_BAND_2GHZ]; 3951 &priv->bands[IEEE80211_BAND_2GHZ];
3952 3952
3953 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) 3953 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
3954 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3954 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3955 &priv->bands[IEEE80211_BAND_5GHZ]; 3955 &priv->bands[IEEE80211_BAND_5GHZ];
3956 3956
3957 ret = ieee80211_register_hw(priv->hw); 3957 ret = ieee80211_register_hw(priv->hw);
3958 if (ret) { 3958 if (ret) {
3959 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 3959 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
3960 return ret; 3960 return ret;
3961 } 3961 }
3962 priv->mac80211_registered = 1; 3962 priv->mac80211_registered = 1;
3963 3963
3964 return 0; 3964 return 0;
3965 } 3965 }
3966 3966
3967 static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3967 static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3968 { 3968 {
3969 int err = 0; 3969 int err = 0;
3970 struct iwl_priv *priv; 3970 struct iwl_priv *priv;
3971 struct ieee80211_hw *hw; 3971 struct ieee80211_hw *hw;
3972 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 3972 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
3973 struct iwl3945_eeprom *eeprom; 3973 struct iwl3945_eeprom *eeprom;
3974 unsigned long flags; 3974 unsigned long flags;
3975 3975
3976 /*********************** 3976 /***********************
3977 * 1. Allocating HW data 3977 * 1. Allocating HW data
3978 * ********************/ 3978 * ********************/
3979 3979
3980 /* mac80211 allocates memory for this device instance, including 3980 /* mac80211 allocates memory for this device instance, including
3981 * space for this driver's private structure */ 3981 * space for this driver's private structure */
3982 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops); 3982 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
3983 if (hw == NULL) { 3983 if (hw == NULL) {
3984 pr_err("Can not allocate network device\n"); 3984 pr_err("Can not allocate network device\n");
3985 err = -ENOMEM; 3985 err = -ENOMEM;
3986 goto out; 3986 goto out;
3987 } 3987 }
3988 priv = hw->priv; 3988 priv = hw->priv;
3989 SET_IEEE80211_DEV(hw, &pdev->dev); 3989 SET_IEEE80211_DEV(hw, &pdev->dev);
3990 3990
3991 /* 3991 /*
3992 * Disabling hardware scan means that mac80211 will perform scans 3992 * Disabling hardware scan means that mac80211 will perform scans
3993 * "the hard way", rather than using device's scan. 3993 * "the hard way", rather than using device's scan.
3994 */ 3994 */
3995 if (iwl3945_mod_params.disable_hw_scan) { 3995 if (iwl3945_mod_params.disable_hw_scan) {
3996 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); 3996 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
3997 iwl3945_hw_ops.hw_scan = NULL; 3997 iwl3945_hw_ops.hw_scan = NULL;
3998 } 3998 }
3999 3999
4000 4000
4001 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 4001 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
4002 priv->cfg = cfg; 4002 priv->cfg = cfg;
4003 priv->pci_dev = pdev; 4003 priv->pci_dev = pdev;
4004 priv->inta_mask = CSR_INI_SET_MASK; 4004 priv->inta_mask = CSR_INI_SET_MASK;
4005 4005
4006 if (iwl_alloc_traffic_mem(priv)) 4006 if (iwl_alloc_traffic_mem(priv))
4007 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 4007 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
4008 4008
4009 /*************************** 4009 /***************************
4010 * 2. Initializing PCI bus 4010 * 2. Initializing PCI bus
4011 * *************************/ 4011 * *************************/
4012 if (pci_enable_device(pdev)) { 4012 if (pci_enable_device(pdev)) {
4013 err = -ENODEV; 4013 err = -ENODEV;
4014 goto out_ieee80211_free_hw; 4014 goto out_ieee80211_free_hw;
4015 } 4015 }
4016 4016
4017 pci_set_master(pdev); 4017 pci_set_master(pdev);
4018 4018
4019 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4019 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4020 if (!err) 4020 if (!err)
4021 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4021 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4022 if (err) { 4022 if (err) {
4023 IWL_WARN(priv, "No suitable DMA available.\n"); 4023 IWL_WARN(priv, "No suitable DMA available.\n");
4024 goto out_pci_disable_device; 4024 goto out_pci_disable_device;
4025 } 4025 }
4026 4026
4027 pci_set_drvdata(pdev, priv); 4027 pci_set_drvdata(pdev, priv);
4028 err = pci_request_regions(pdev, DRV_NAME); 4028 err = pci_request_regions(pdev, DRV_NAME);
4029 if (err) 4029 if (err)
4030 goto out_pci_disable_device; 4030 goto out_pci_disable_device;
4031 4031
4032 /*********************** 4032 /***********************
4033 * 3. Read REV Register 4033 * 3. Read REV Register
4034 * ********************/ 4034 * ********************/
4035 priv->hw_base = pci_iomap(pdev, 0, 0); 4035 priv->hw_base = pci_iomap(pdev, 0, 0);
4036 if (!priv->hw_base) { 4036 if (!priv->hw_base) {
4037 err = -ENODEV; 4037 err = -ENODEV;
4038 goto out_pci_release_regions; 4038 goto out_pci_release_regions;
4039 } 4039 }
4040 4040
4041 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", 4041 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
4042 (unsigned long long) pci_resource_len(pdev, 0)); 4042 (unsigned long long) pci_resource_len(pdev, 0));
4043 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); 4043 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
4044 4044
4045 /* We disable the RETRY_TIMEOUT register (0x41) to keep 4045 /* We disable the RETRY_TIMEOUT register (0x41) to keep
4046 * PCI Tx retries from interfering with C3 CPU state */ 4046 * PCI Tx retries from interfering with C3 CPU state */
4047 pci_write_config_byte(pdev, 0x41, 0x00); 4047 pci_write_config_byte(pdev, 0x41, 0x00);
4048 4048
4049 /* these spin locks will be used in apm_ops.init and EEPROM access 4049 /* these spin locks will be used in apm_ops.init and EEPROM access
4050 * we should init now 4050 * we should init now
4051 */ 4051 */
4052 spin_lock_init(&priv->reg_lock); 4052 spin_lock_init(&priv->reg_lock);
4053 spin_lock_init(&priv->lock); 4053 spin_lock_init(&priv->lock);
4054 4054
4055 /* 4055 /*
4056 * stop and reset the on-board processor just in case it is in a 4056 * stop and reset the on-board processor just in case it is in a
4057 * strange state ... like being left stranded by a primary kernel 4057 * strange state ... like being left stranded by a primary kernel
4058 * and this is now the kdump kernel trying to start up 4058 * and this is now the kdump kernel trying to start up
4059 */ 4059 */
4060 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 4060 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4061 4061
4062 /*********************** 4062 /***********************
4063 * 4. Read EEPROM 4063 * 4. Read EEPROM
4064 * ********************/ 4064 * ********************/
4065 4065
4066 /* Read the EEPROM */ 4066 /* Read the EEPROM */
4067 err = iwl_eeprom_init(priv); 4067 err = iwl_eeprom_init(priv);
4068 if (err) { 4068 if (err) {
4069 IWL_ERR(priv, "Unable to init EEPROM\n"); 4069 IWL_ERR(priv, "Unable to init EEPROM\n");
4070 goto out_iounmap; 4070 goto out_iounmap;
4071 } 4071 }
4072 /* MAC Address location in EEPROM same for 3945/4965 */ 4072 /* MAC Address location in EEPROM same for 3945/4965 */
4073 eeprom = (struct iwl3945_eeprom *)priv->eeprom; 4073 eeprom = (struct iwl3945_eeprom *)priv->eeprom;
4074 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address); 4074 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
4075 SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address); 4075 SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
4076 4076
4077 /*********************** 4077 /***********************
4078 * 5. Setup HW Constants 4078 * 5. Setup HW Constants
4079 * ********************/ 4079 * ********************/
4080 /* Device-specific setup */ 4080 /* Device-specific setup */
4081 if (iwl3945_hw_set_hw_params(priv)) { 4081 if (iwl3945_hw_set_hw_params(priv)) {
4082 IWL_ERR(priv, "failed to set hw settings\n"); 4082 IWL_ERR(priv, "failed to set hw settings\n");
4083 goto out_eeprom_free; 4083 goto out_eeprom_free;
4084 } 4084 }
4085 4085
4086 /*********************** 4086 /***********************
4087 * 6. Setup priv 4087 * 6. Setup priv
4088 * ********************/ 4088 * ********************/
4089 4089
4090 err = iwl3945_init_drv(priv); 4090 err = iwl3945_init_drv(priv);
4091 if (err) { 4091 if (err) {
4092 IWL_ERR(priv, "initializing driver failed\n"); 4092 IWL_ERR(priv, "initializing driver failed\n");
4093 goto out_unset_hw_params; 4093 goto out_unset_hw_params;
4094 } 4094 }
4095 4095
4096 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n", 4096 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
4097 priv->cfg->name); 4097 priv->cfg->name);
4098 4098
4099 /*********************** 4099 /***********************
4100 * 7. Setup Services 4100 * 7. Setup Services
4101 * ********************/ 4101 * ********************/
4102 4102
4103 spin_lock_irqsave(&priv->lock, flags); 4103 spin_lock_irqsave(&priv->lock, flags);
4104 iwl_disable_interrupts(priv); 4104 iwl_disable_interrupts(priv);
4105 spin_unlock_irqrestore(&priv->lock, flags); 4105 spin_unlock_irqrestore(&priv->lock, flags);
4106 4106
4107 pci_enable_msi(priv->pci_dev); 4107 pci_enable_msi(priv->pci_dev);
4108 4108
4109 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, 4109 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
4110 IRQF_SHARED, DRV_NAME, priv); 4110 IRQF_SHARED, DRV_NAME, priv);
4111 if (err) { 4111 if (err) {
4112 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4112 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
4113 goto out_disable_msi; 4113 goto out_disable_msi;
4114 } 4114 }
4115 4115
4116 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4116 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4117 if (err) { 4117 if (err) {
4118 IWL_ERR(priv, "failed to create sysfs device attributes\n"); 4118 IWL_ERR(priv, "failed to create sysfs device attributes\n");
4119 goto out_release_irq; 4119 goto out_release_irq;
4120 } 4120 }
4121 4121
4122 iwl_set_rxon_channel(priv, 4122 iwl_set_rxon_channel(priv,
4123 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); 4123 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
4124 iwl3945_setup_deferred_work(priv); 4124 iwl3945_setup_deferred_work(priv);
4125 iwl3945_setup_rx_handlers(priv); 4125 iwl3945_setup_rx_handlers(priv);
4126 iwl_power_initialize(priv); 4126 iwl_power_initialize(priv);
4127 4127
4128 /********************************* 4128 /*********************************
4129 * 8. Setup and Register mac80211 4129 * 8. Setup and Register mac80211
4130 * *******************************/ 4130 * *******************************/
4131 4131
4132 iwl_enable_interrupts(priv); 4132 iwl_enable_interrupts(priv);
4133 4133
4134 err = iwl3945_setup_mac(priv); 4134 err = iwl3945_setup_mac(priv);
4135 if (err) 4135 if (err)
4136 goto out_remove_sysfs; 4136 goto out_remove_sysfs;
4137 4137
4138 err = iwl_dbgfs_register(priv, DRV_NAME); 4138 err = iwl_dbgfs_register(priv, DRV_NAME);
4139 if (err) 4139 if (err)
4140 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); 4140 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4141 4141
4142 /* Start monitoring the killswitch */ 4142 /* Start monitoring the killswitch */
4143 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, 4143 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
4144 2 * HZ); 4144 2 * HZ);
4145 4145
4146 return 0; 4146 return 0;
4147 4147
4148 out_remove_sysfs: 4148 out_remove_sysfs:
4149 destroy_workqueue(priv->workqueue); 4149 destroy_workqueue(priv->workqueue);
4150 priv->workqueue = NULL; 4150 priv->workqueue = NULL;
4151 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4151 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4152 out_release_irq: 4152 out_release_irq:
4153 free_irq(priv->pci_dev->irq, priv); 4153 free_irq(priv->pci_dev->irq, priv);
4154 out_disable_msi: 4154 out_disable_msi:
4155 pci_disable_msi(priv->pci_dev); 4155 pci_disable_msi(priv->pci_dev);
4156 iwlcore_free_geos(priv); 4156 iwlcore_free_geos(priv);
4157 iwl_free_channel_map(priv); 4157 iwl_free_channel_map(priv);
4158 out_unset_hw_params: 4158 out_unset_hw_params:
4159 iwl3945_unset_hw_params(priv); 4159 iwl3945_unset_hw_params(priv);
4160 out_eeprom_free: 4160 out_eeprom_free:
4161 iwl_eeprom_free(priv); 4161 iwl_eeprom_free(priv);
4162 out_iounmap: 4162 out_iounmap:
4163 pci_iounmap(pdev, priv->hw_base); 4163 pci_iounmap(pdev, priv->hw_base);
4164 out_pci_release_regions: 4164 out_pci_release_regions:
4165 pci_release_regions(pdev); 4165 pci_release_regions(pdev);
4166 out_pci_disable_device: 4166 out_pci_disable_device:
4167 pci_set_drvdata(pdev, NULL); 4167 pci_set_drvdata(pdev, NULL);
4168 pci_disable_device(pdev); 4168 pci_disable_device(pdev);
4169 out_ieee80211_free_hw: 4169 out_ieee80211_free_hw:
4170 iwl_free_traffic_mem(priv); 4170 iwl_free_traffic_mem(priv);
4171 ieee80211_free_hw(priv->hw); 4171 ieee80211_free_hw(priv->hw);
4172 out: 4172 out:
4173 return err; 4173 return err;
4174 } 4174 }
4175 4175
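The probe above uses the kernel's goto-based unwind idiom: every failure jumps to a label that releases, in reverse order, whatever the earlier steps acquired, so each error path undoes exactly what has been set up so far. A minimal, self-contained sketch of that pattern follows; every identifier in it is ours, not taken from the driver.

/* Sketch only -- all names below are hypothetical. */
static int example_setup_irq(void) { return 0; }
static void example_free_irq(void) { }
static int example_setup_sysfs(void) { return -1; }

static int example_probe(void)
{
	int err;

	err = example_setup_irq();
	if (err)
		goto out;		/* nothing acquired yet, nothing to undo */
	err = example_setup_sysfs();
	if (err)
		goto out_free_irq;	/* undo only what the first step acquired */
	return 0;

out_free_irq:
	example_free_irq();
out:
	return err;
}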
4176 static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) 4176 static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4177 { 4177 {
4178 struct iwl_priv *priv = pci_get_drvdata(pdev); 4178 struct iwl_priv *priv = pci_get_drvdata(pdev);
4179 unsigned long flags; 4179 unsigned long flags;
4180 4180
4181 if (!priv) 4181 if (!priv)
4182 return; 4182 return;
4183 4183
4184 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 4184 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
4185 4185
4186 iwl_dbgfs_unregister(priv); 4186 iwl_dbgfs_unregister(priv);
4187 4187
4188 set_bit(STATUS_EXIT_PENDING, &priv->status); 4188 set_bit(STATUS_EXIT_PENDING, &priv->status);
4189 4189
4190 if (priv->mac80211_registered) { 4190 if (priv->mac80211_registered) {
4191 ieee80211_unregister_hw(priv->hw); 4191 ieee80211_unregister_hw(priv->hw);
4192 priv->mac80211_registered = 0; 4192 priv->mac80211_registered = 0;
4193 } else { 4193 } else {
4194 iwl3945_down(priv); 4194 iwl3945_down(priv);
4195 } 4195 }
4196 4196
4197 /* 4197 /*
4198 * Make sure device is reset to low power before unloading driver. 4198 * Make sure device is reset to low power before unloading driver.
4199 * This may be redundant with iwl_down(), but there are paths to 4199 * This may be redundant with iwl_down(), but there are paths to
4200 * run iwl_down() without calling apm_ops.stop(), and there are 4200 * run iwl_down() without calling apm_ops.stop(), and there are
4201 * paths to avoid running iwl_down() at all before leaving driver. 4201 * paths to avoid running iwl_down() at all before leaving driver.
4202 * This (inexpensive) call *makes sure* device is reset. 4202 * This (inexpensive) call *makes sure* device is reset.
4203 */ 4203 */
4204 priv->cfg->ops->lib->apm_ops.stop(priv); 4204 priv->cfg->ops->lib->apm_ops.stop(priv);
4205 4205
4206 /* make sure we flush any pending irq or 4206 /* make sure we flush any pending irq or
4207 * tasklet for the driver 4207 * tasklet for the driver
4208 */ 4208 */
4209 spin_lock_irqsave(&priv->lock, flags); 4209 spin_lock_irqsave(&priv->lock, flags);
4210 iwl_disable_interrupts(priv); 4210 iwl_disable_interrupts(priv);
4211 spin_unlock_irqrestore(&priv->lock, flags); 4211 spin_unlock_irqrestore(&priv->lock, flags);
4212 4212
4213 iwl_synchronize_irq(priv); 4213 iwl_synchronize_irq(priv);
4214 4214
4215 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4215 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4216 4216
4217 cancel_delayed_work_sync(&priv->_3945.rfkill_poll); 4217 cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
4218 4218
4219 iwl3945_dealloc_ucode_pci(priv); 4219 iwl3945_dealloc_ucode_pci(priv);
4220 4220
4221 if (priv->rxq.bd) 4221 if (priv->rxq.bd)
4222 iwl3945_rx_queue_free(priv, &priv->rxq); 4222 iwl3945_rx_queue_free(priv, &priv->rxq);
4223 iwl3945_hw_txq_ctx_free(priv); 4223 iwl3945_hw_txq_ctx_free(priv);
4224 4224
4225 iwl3945_unset_hw_params(priv); 4225 iwl3945_unset_hw_params(priv);
4226 4226
4227 /*netif_stop_queue(dev); */ 4227 /*netif_stop_queue(dev); */
4228 flush_workqueue(priv->workqueue); 4228 flush_workqueue(priv->workqueue);
4229 4229
4230 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes 4230 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
4231 * priv->workqueue... so we can't take down the workqueue 4231 * priv->workqueue... so we can't take down the workqueue
4232 * until now... */ 4232 * until now... */
4233 destroy_workqueue(priv->workqueue); 4233 destroy_workqueue(priv->workqueue);
4234 priv->workqueue = NULL; 4234 priv->workqueue = NULL;
4235 iwl_free_traffic_mem(priv); 4235 iwl_free_traffic_mem(priv);
4236 4236
4237 free_irq(pdev->irq, priv); 4237 free_irq(pdev->irq, priv);
4238 pci_disable_msi(pdev); 4238 pci_disable_msi(pdev);
4239 4239
4240 pci_iounmap(pdev, priv->hw_base); 4240 pci_iounmap(pdev, priv->hw_base);
4241 pci_release_regions(pdev); 4241 pci_release_regions(pdev);
4242 pci_disable_device(pdev); 4242 pci_disable_device(pdev);
4243 pci_set_drvdata(pdev, NULL); 4243 pci_set_drvdata(pdev, NULL);
4244 4244
4245 iwl_free_channel_map(priv); 4245 iwl_free_channel_map(priv);
4246 iwlcore_free_geos(priv); 4246 iwlcore_free_geos(priv);
4247 kfree(priv->scan_cmd); 4247 kfree(priv->scan_cmd);
4248 if (priv->ibss_beacon) 4248 if (priv->ibss_beacon)
4249 dev_kfree_skb(priv->ibss_beacon); 4249 dev_kfree_skb(priv->ibss_beacon);
4250 4250
4251 ieee80211_free_hw(priv->hw); 4251 ieee80211_free_hw(priv->hw);
4252 } 4252 }
4253 4253
4254 4254
4255 /***************************************************************************** 4255 /*****************************************************************************
4256 * 4256 *
4257 * driver and module entry point 4257 * driver and module entry point
4258 * 4258 *
4259 *****************************************************************************/ 4259 *****************************************************************************/
4260 4260
4261 static struct pci_driver iwl3945_driver = { 4261 static struct pci_driver iwl3945_driver = {
4262 .name = DRV_NAME, 4262 .name = DRV_NAME,
4263 .id_table = iwl3945_hw_card_ids, 4263 .id_table = iwl3945_hw_card_ids,
4264 .probe = iwl3945_pci_probe, 4264 .probe = iwl3945_pci_probe,
4265 .remove = __devexit_p(iwl3945_pci_remove), 4265 .remove = __devexit_p(iwl3945_pci_remove),
4266 #ifdef CONFIG_PM 4266 #ifdef CONFIG_PM
4267 .suspend = iwl_pci_suspend, 4267 .suspend = iwl_pci_suspend,
4268 .resume = iwl_pci_resume, 4268 .resume = iwl_pci_resume,
4269 #endif 4269 #endif
4270 }; 4270 };
4271 4271
4272 static int __init iwl3945_init(void) 4272 static int __init iwl3945_init(void)
4273 { 4273 {
4274 4274
4275 int ret; 4275 int ret;
4276 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 4276 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
4277 pr_info(DRV_COPYRIGHT "\n"); 4277 pr_info(DRV_COPYRIGHT "\n");
4278 4278
4279 ret = iwl3945_rate_control_register(); 4279 ret = iwl3945_rate_control_register();
4280 if (ret) { 4280 if (ret) {
4281 pr_err("Unable to register rate control algorithm: %d\n", ret); 4281 pr_err("Unable to register rate control algorithm: %d\n", ret);
4282 return ret; 4282 return ret;
4283 } 4283 }
4284 4284
4285 ret = pci_register_driver(&iwl3945_driver); 4285 ret = pci_register_driver(&iwl3945_driver);
4286 if (ret) { 4286 if (ret) {
4287 pr_err("Unable to initialize PCI module\n"); 4287 pr_err("Unable to initialize PCI module\n");
4288 goto error_register; 4288 goto error_register;
4289 } 4289 }
4290 4290
4291 return ret; 4291 return ret;
4292 4292
4293 error_register: 4293 error_register:
4294 iwl3945_rate_control_unregister(); 4294 iwl3945_rate_control_unregister();
4295 return ret; 4295 return ret;
4296 } 4296 }
4297 4297
4298 static void __exit iwl3945_exit(void) 4298 static void __exit iwl3945_exit(void)
4299 { 4299 {
4300 pci_unregister_driver(&iwl3945_driver); 4300 pci_unregister_driver(&iwl3945_driver);
4301 iwl3945_rate_control_unregister(); 4301 iwl3945_rate_control_unregister();
4302 } 4302 }
4303 4303
4304 MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); 4304 MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
4305 4305
4306 module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); 4306 module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4307 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 4307 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4308 module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); 4308 module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4309 MODULE_PARM_DESC(swcrypto, 4309 MODULE_PARM_DESC(swcrypto,
4310 "using software crypto (default 1 [software])\n"); 4310 "using software crypto (default 1 [software])\n");
4311 #ifdef CONFIG_IWLWIFI_DEBUG 4311 #ifdef CONFIG_IWLWIFI_DEBUG
4312 module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); 4312 module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4313 MODULE_PARM_DESC(debug, "debug output mask"); 4313 MODULE_PARM_DESC(debug, "debug output mask");
4314 #endif 4314 #endif
4315 module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, 4315 module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4316 int, S_IRUGO); 4316 int, S_IRUGO);
4317 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 4317 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4318 module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); 4318 module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4319 MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); 4319 MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
4320 4320
4321 module_exit(iwl3945_exit); 4321 module_exit(iwl3945_exit);
4322 module_init(iwl3945_init); 4322 module_init(iwl3945_init);
4323 4323
net/8021q/vlan_core.c
1 #include <linux/skbuff.h> 1 #include <linux/skbuff.h>
2 #include <linux/netdevice.h> 2 #include <linux/netdevice.h>
3 #include <linux/if_vlan.h> 3 #include <linux/if_vlan.h>
4 #include <linux/netpoll.h> 4 #include <linux/netpoll.h>
5 #include "vlan.h" 5 #include "vlan.h"
6 6
7 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ 7 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
8 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 8 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
9 u16 vlan_tci, int polling) 9 u16 vlan_tci, int polling)
10 { 10 {
11 struct net_device *vlan_dev; 11 struct net_device *vlan_dev;
12 u16 vlan_id; 12 u16 vlan_id;
13 13
14 if (netpoll_rx(skb)) 14 if (netpoll_rx(skb))
15 return NET_RX_DROP; 15 return NET_RX_DROP;
16 16
17 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) 17 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
18 skb->deliver_no_wcard = 1; 18 skb->deliver_no_wcard = 1;
19 19
20 skb->skb_iif = skb->dev->ifindex; 20 skb->skb_iif = skb->dev->ifindex;
21 __vlan_hwaccel_put_tag(skb, vlan_tci); 21 __vlan_hwaccel_put_tag(skb, vlan_tci);
22 vlan_id = vlan_tci & VLAN_VID_MASK; 22 vlan_id = vlan_tci & VLAN_VID_MASK;
23 vlan_dev = vlan_group_get_device(grp, vlan_id); 23 vlan_dev = vlan_group_get_device(grp, vlan_id);
24 24
25 if (vlan_dev) 25 if (vlan_dev)
26 skb->dev = vlan_dev; 26 skb->dev = vlan_dev;
27 else if (vlan_id) 27 else if (vlan_id) {
28 goto drop; 28 if (!(skb->dev->flags & IFF_PROMISC))
29 goto drop;
30 skb->pkt_type = PACKET_OTHERHOST;
31 }
29 32
30 return (polling ? netif_receive_skb(skb) : netif_rx(skb)); 33 return (polling ? netif_receive_skb(skb) : netif_rx(skb));
31 34
32 drop: 35 drop:
33 dev_kfree_skb_any(skb); 36 dev_kfree_skb_any(skb);
34 return NET_RX_DROP; 37 return NET_RX_DROP;
35 } 38 }
36 EXPORT_SYMBOL(__vlan_hwaccel_rx); 39 EXPORT_SYMBOL(__vlan_hwaccel_rx);
37 40
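The hunk above changes how a frame is handled when no VLAN interface exists for the received VID: rather than always being dropped, it is now kept when the underlying device is promiscuous and marked PACKET_OTHERHOST so the stack does not treat it as local traffic. The same edit is repeated in vlan_gro_common() further down. A minimal sketch of that decision, pulled out into a helper purely for illustration (the helper name is ours):

/* Sketch only: classification of a tagged frame with no matching vlan_dev. */
static bool example_keep_unknown_vlan(struct sk_buff *skb, u16 vlan_id)
{
	if (!vlan_id)				/* priority-tagged (VID 0): always kept */
		return true;
	if (!(skb->dev->flags & IFF_PROMISC))	/* unknown VID, not promiscuous: drop */
		return false;
	skb->pkt_type = PACKET_OTHERHOST;	/* kept for sniffers, not delivered as local */
	return true;
}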
38 int vlan_hwaccel_do_receive(struct sk_buff *skb) 41 int vlan_hwaccel_do_receive(struct sk_buff *skb)
39 { 42 {
40 struct net_device *dev = skb->dev; 43 struct net_device *dev = skb->dev;
41 struct vlan_rx_stats *rx_stats; 44 struct vlan_rx_stats *rx_stats;
42 45
43 skb->dev = vlan_dev_info(dev)->real_dev; 46 skb->dev = vlan_dev_info(dev)->real_dev;
44 netif_nit_deliver(skb); 47 netif_nit_deliver(skb);
45 48
46 skb->dev = dev; 49 skb->dev = dev;
47 skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); 50 skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
48 skb->vlan_tci = 0; 51 skb->vlan_tci = 0;
49 52
50 rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats); 53 rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);
51 54
52 u64_stats_update_begin(&rx_stats->syncp); 55 u64_stats_update_begin(&rx_stats->syncp);
53 rx_stats->rx_packets++; 56 rx_stats->rx_packets++;
54 rx_stats->rx_bytes += skb->len; 57 rx_stats->rx_bytes += skb->len;
55 58
56 switch (skb->pkt_type) { 59 switch (skb->pkt_type) {
57 case PACKET_BROADCAST: 60 case PACKET_BROADCAST:
58 break; 61 break;
59 case PACKET_MULTICAST: 62 case PACKET_MULTICAST:
60 rx_stats->rx_multicast++; 63 rx_stats->rx_multicast++;
61 break; 64 break;
62 case PACKET_OTHERHOST: 65 case PACKET_OTHERHOST:
63 /* Our lower layer thinks this is not local, let's make sure. 66 /* Our lower layer thinks this is not local, let's make sure.
64 * This allows the VLAN to have a different MAC than the 67 * This allows the VLAN to have a different MAC than the
65 * underlying device, and still route correctly. */ 68 * underlying device, and still route correctly. */
66 if (!compare_ether_addr(eth_hdr(skb)->h_dest, 69 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
67 dev->dev_addr)) 70 dev->dev_addr))
68 skb->pkt_type = PACKET_HOST; 71 skb->pkt_type = PACKET_HOST;
69 break; 72 break;
70 } 73 }
71 u64_stats_update_end(&rx_stats->syncp); 74 u64_stats_update_end(&rx_stats->syncp);
72 return 0; 75 return 0;
73 } 76 }
74 77
75 struct net_device *vlan_dev_real_dev(const struct net_device *dev) 78 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
76 { 79 {
77 return vlan_dev_info(dev)->real_dev; 80 return vlan_dev_info(dev)->real_dev;
78 } 81 }
79 EXPORT_SYMBOL(vlan_dev_real_dev); 82 EXPORT_SYMBOL(vlan_dev_real_dev);
80 83
81 u16 vlan_dev_vlan_id(const struct net_device *dev) 84 u16 vlan_dev_vlan_id(const struct net_device *dev)
82 { 85 {
83 return vlan_dev_info(dev)->vlan_id; 86 return vlan_dev_info(dev)->vlan_id;
84 } 87 }
85 EXPORT_SYMBOL(vlan_dev_vlan_id); 88 EXPORT_SYMBOL(vlan_dev_vlan_id);
86 89
87 static gro_result_t 90 static gro_result_t
88 vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, 91 vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
89 unsigned int vlan_tci, struct sk_buff *skb) 92 unsigned int vlan_tci, struct sk_buff *skb)
90 { 93 {
91 struct sk_buff *p; 94 struct sk_buff *p;
92 struct net_device *vlan_dev; 95 struct net_device *vlan_dev;
93 u16 vlan_id; 96 u16 vlan_id;
94 97
95 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) 98 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
96 skb->deliver_no_wcard = 1; 99 skb->deliver_no_wcard = 1;
97 100
98 skb->skb_iif = skb->dev->ifindex; 101 skb->skb_iif = skb->dev->ifindex;
99 __vlan_hwaccel_put_tag(skb, vlan_tci); 102 __vlan_hwaccel_put_tag(skb, vlan_tci);
100 vlan_id = vlan_tci & VLAN_VID_MASK; 103 vlan_id = vlan_tci & VLAN_VID_MASK;
101 vlan_dev = vlan_group_get_device(grp, vlan_id); 104 vlan_dev = vlan_group_get_device(grp, vlan_id);
102 105
103 if (vlan_dev) 106 if (vlan_dev)
104 skb->dev = vlan_dev; 107 skb->dev = vlan_dev;
105 else if (vlan_id) 108 else if (vlan_id) {
106 goto drop; 109 if (!(skb->dev->flags & IFF_PROMISC))
110 goto drop;
111 skb->pkt_type = PACKET_OTHERHOST;
112 }
107 113
108 for (p = napi->gro_list; p; p = p->next) { 114 for (p = napi->gro_list; p; p = p->next) {
109 NAPI_GRO_CB(p)->same_flow = 115 NAPI_GRO_CB(p)->same_flow =
110 p->dev == skb->dev && !compare_ether_header( 116 p->dev == skb->dev && !compare_ether_header(
111 skb_mac_header(p), skb_gro_mac_header(skb)); 117 skb_mac_header(p), skb_gro_mac_header(skb));
112 NAPI_GRO_CB(p)->flush = 0; 118 NAPI_GRO_CB(p)->flush = 0;
113 } 119 }
114 120
115 return dev_gro_receive(napi, skb); 121 return dev_gro_receive(napi, skb);
116 122
117 drop: 123 drop:
118 return GRO_DROP; 124 return GRO_DROP;
119 } 125 }
120 126
121 gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, 127 gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
122 unsigned int vlan_tci, struct sk_buff *skb) 128 unsigned int vlan_tci, struct sk_buff *skb)
123 { 129 {
124 if (netpoll_rx_on(skb)) 130 if (netpoll_rx_on(skb))
125 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci) 131 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
126 ? GRO_DROP : GRO_NORMAL; 132 ? GRO_DROP : GRO_NORMAL;
127 133
128 skb_gro_reset_offset(skb); 134 skb_gro_reset_offset(skb);
129 135
130 return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb); 136 return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
131 } 137 }
132 EXPORT_SYMBOL(vlan_gro_receive); 138 EXPORT_SYMBOL(vlan_gro_receive);
133 139
134 gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, 140 gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
135 unsigned int vlan_tci) 141 unsigned int vlan_tci)
136 { 142 {
137 struct sk_buff *skb = napi_frags_skb(napi); 143 struct sk_buff *skb = napi_frags_skb(napi);
138 144
139 if (!skb) 145 if (!skb)
140 return GRO_DROP; 146 return GRO_DROP;
141 147
142 if (netpoll_rx_on(skb)) { 148 if (netpoll_rx_on(skb)) {
143 skb->protocol = eth_type_trans(skb, skb->dev); 149 skb->protocol = eth_type_trans(skb, skb->dev);
144 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci) 150 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
145 ? GRO_DROP : GRO_NORMAL; 151 ? GRO_DROP : GRO_NORMAL;
146 } 152 }
147 153
148 return napi_frags_finish(napi, skb, 154 return napi_frags_finish(napi, skb,
149 vlan_gro_common(napi, grp, vlan_tci, skb)); 155 vlan_gro_common(napi, grp, vlan_tci, skb));
150 } 156 }
151 EXPORT_SYMBOL(vlan_gro_frags); 157 EXPORT_SYMBOL(vlan_gro_frags);
152 158
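For context, a rough sketch of how a NIC driver with VLAN rx acceleration would hand frames to the helpers in this file; the device structure and its fields are illustrative, while the vlan_gro_receive() call matches the signature exported above.

/* Sketch only -- example_nic and its fields are hypothetical. */
struct example_nic {
	struct net_device *netdev;
	struct napi_struct napi;
	struct vlan_group *vlgrp;	/* set when 8021q registers with the driver */
};

static void example_rx_frame(struct example_nic *nic, struct sk_buff *skb,
			     bool tag_stripped, u16 vlan_tci)
{
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if (tag_stripped && nic->vlgrp)
		/* with the change above, frames for unknown VIDs survive here
		 * when the device is promiscuous instead of being dropped */
		vlan_gro_receive(&nic->napi, nic->vlgrp, vlan_tci, skb);
	else
		napi_gro_receive(&nic->napi, skb);
}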
net/ipv4/Kconfig
1 # 1 #
2 # IP configuration 2 # IP configuration
3 # 3 #
4 config IP_MULTICAST 4 config IP_MULTICAST
5 bool "IP: multicasting" 5 bool "IP: multicasting"
6 help 6 help
7 This is code for addressing several networked computers at once, 7 This is code for addressing several networked computers at once,
8 enlarging your kernel by about 2 KB. You need multicasting if you 8 enlarging your kernel by about 2 KB. You need multicasting if you
9 intend to participate in the MBONE, a high bandwidth network on top 9 intend to participate in the MBONE, a high bandwidth network on top
10 of the Internet which carries audio and video broadcasts. More 10 of the Internet which carries audio and video broadcasts. More
11 information about the MBONE is on the WWW at 11 information about the MBONE is on the WWW at
12 <http://www.savetz.com/mbone/>. Information about the multicast 12 <http://www.savetz.com/mbone/>. Information about the multicast
13 capabilities of the various network cards is contained in 13 capabilities of the various network cards is contained in
14 <file:Documentation/networking/multicast.txt>. For most people, it's 14 <file:Documentation/networking/multicast.txt>. For most people, it's
15 safe to say N. 15 safe to say N.
16 16
17 config IP_ADVANCED_ROUTER 17 config IP_ADVANCED_ROUTER
18 bool "IP: advanced router" 18 bool "IP: advanced router"
19 ---help--- 19 ---help---
20 If you intend to run your Linux box mostly as a router, i.e. as a 20 If you intend to run your Linux box mostly as a router, i.e. as a
21 computer that forwards and redistributes network packets, say Y; you 21 computer that forwards and redistributes network packets, say Y; you
22 will then be presented with several options that allow more precise 22 will then be presented with several options that allow more precise
23 control about the routing process. 23 control about the routing process.
24 24
25 The answer to this question won't directly affect the kernel: 25 The answer to this question won't directly affect the kernel:
26 answering N will just cause the configurator to skip all the 26 answering N will just cause the configurator to skip all the
27 questions about advanced routing. 27 questions about advanced routing.
28 28
29 Note that your box can only act as a router if you enable IP 29 Note that your box can only act as a router if you enable IP
30 forwarding in your kernel; you can do that by saying Y to "/proc 30 forwarding in your kernel; you can do that by saying Y to "/proc
31 file system support" and "Sysctl support" below and executing the 31 file system support" and "Sysctl support" below and executing the
32 line 32 line
33 33
34 echo "1" > /proc/sys/net/ipv4/ip_forward 34 echo "1" > /proc/sys/net/ipv4/ip_forward
35 35
36 at boot time after the /proc file system has been mounted. 36 at boot time after the /proc file system has been mounted.
37 37
38 If you turn on IP forwarding, you should consider the rp_filter, which 38 If you turn on IP forwarding, you should consider the rp_filter, which
39 automatically rejects incoming packets if the routing table entry 39 automatically rejects incoming packets if the routing table entry
40 for their source address doesn't match the network interface they're 40 for their source address doesn't match the network interface they're
41 arriving on. This has security advantages because it prevents the 41 arriving on. This has security advantages because it prevents the
42 so-called IP spoofing, however it can pose problems if you use 42 so-called IP spoofing, however it can pose problems if you use
43 asymmetric routing (packets from you to a host take a different path 43 asymmetric routing (packets from you to a host take a different path
44 than packets from that host to you) or if you operate a non-routing 44 than packets from that host to you) or if you operate a non-routing
45 host which has several IP addresses on different interfaces. To turn 45 host which has several IP addresses on different interfaces. To turn
46 rp_filter on use: 46 rp_filter on use:
47 47
48 echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter 48 echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
49 or 49 or
50 echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter 50 echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
51 51
52 Note that some distributions enable it in startup scripts. 52 Note that some distributions enable it in startup scripts.
53 For details about rp_filter strict and loose mode read 53 For details about rp_filter strict and loose mode read
54 <file:Documentation/networking/ip-sysctl.txt>. 54 <file:Documentation/networking/ip-sysctl.txt>.
55 55
56 If unsure, say N here. 56 If unsure, say N here.
57 57
58 choice 58 choice
59 prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)" 59 prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
60 depends on IP_ADVANCED_ROUTER 60 depends on IP_ADVANCED_ROUTER
61 default ASK_IP_FIB_HASH 61 default ASK_IP_FIB_HASH
62 62
63 config ASK_IP_FIB_HASH 63 config ASK_IP_FIB_HASH
64 bool "FIB_HASH" 64 bool "FIB_HASH"
65 ---help--- 65 ---help---
66 Current FIB is very proven and good enough for most users. 66 Current FIB is very proven and good enough for most users.
67 67
68 config IP_FIB_TRIE 68 config IP_FIB_TRIE
69 bool "FIB_TRIE" 69 bool "FIB_TRIE"
70 ---help--- 70 ---help---
71 Use new experimental LC-trie as FIB lookup algorithm. 71 Use new experimental LC-trie as FIB lookup algorithm.
72 This improves lookup performance if you have a large 72 This improves lookup performance if you have a large
73 number of routes. 73 number of routes.
74 74
75 LC-trie is a longest matching prefix lookup algorithm which 75 LC-trie is a longest matching prefix lookup algorithm which
76 performs better than FIB_HASH for large routing tables. 76 performs better than FIB_HASH for large routing tables.
77 But, it consumes more memory and is more complex. 77 But, it consumes more memory and is more complex.
78 78
79 LC-trie is described in: 79 LC-trie is described in:
80 80
81 IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson 81 IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
82 IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, 82 IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
83 June 1999 83 June 1999
84 84
85 An experimental study of compression methods for dynamic tries 85 An experimental study of compression methods for dynamic tries
86 Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. 86 Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
87 http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/ 87 http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
88 88
89 endchoice 89 endchoice
90 90
91 config IP_FIB_HASH 91 config IP_FIB_HASH
92 def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER 92 def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
93 93
94 config IP_FIB_TRIE_STATS 94 config IP_FIB_TRIE_STATS
95 bool "FIB TRIE statistics" 95 bool "FIB TRIE statistics"
96 depends on IP_FIB_TRIE 96 depends on IP_FIB_TRIE
97 ---help--- 97 ---help---
98 Keep track of statistics on structure of FIB TRIE table. 98 Keep track of statistics on structure of FIB TRIE table.
99 Useful for testing and measuring TRIE performance. 99 Useful for testing and measuring TRIE performance.
100 100
101 config IP_MULTIPLE_TABLES 101 config IP_MULTIPLE_TABLES
102 bool "IP: policy routing" 102 bool "IP: policy routing"
103 depends on IP_ADVANCED_ROUTER 103 depends on IP_ADVANCED_ROUTER
104 select FIB_RULES 104 select FIB_RULES
105 ---help--- 105 ---help---
106 Normally, a router decides what to do with a received packet based 106 Normally, a router decides what to do with a received packet based
107 solely on the packet's final destination address. If you say Y here, 107 solely on the packet's final destination address. If you say Y here,
108 the Linux router will also be able to take the packet's source 108 the Linux router will also be able to take the packet's source
109 address into account. Furthermore, the TOS (Type-Of-Service) field 109 address into account. Furthermore, the TOS (Type-Of-Service) field
110 of the packet can be used for routing decisions as well. 110 of the packet can be used for routing decisions as well.
111 111
112 If you are interested in this, please see the preliminary 112 If you are interested in this, please see the preliminary
113 documentation at <http://www.compendium.com.ar/policy-routing.txt> 113 documentation at <http://www.compendium.com.ar/policy-routing.txt>
114 and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>. 114 and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>.
115 You will need supporting software from 115 You will need supporting software from
116 <ftp://ftp.tux.org/pub/net/ip-routing/>. 116 <ftp://ftp.tux.org/pub/net/ip-routing/>.
117 117
118 If unsure, say N. 118 If unsure, say N.
119 119
120 config IP_ROUTE_MULTIPATH 120 config IP_ROUTE_MULTIPATH
121 bool "IP: equal cost multipath" 121 bool "IP: equal cost multipath"
122 depends on IP_ADVANCED_ROUTER 122 depends on IP_ADVANCED_ROUTER
123 help 123 help
124 Normally, the routing tables specify a single action to be taken in 124 Normally, the routing tables specify a single action to be taken in
125 a deterministic manner for a given packet. If you say Y here 125 a deterministic manner for a given packet. If you say Y here
126 however, it becomes possible to attach several actions to a packet 126 however, it becomes possible to attach several actions to a packet
127 pattern, in effect specifying several alternative paths to travel 127 pattern, in effect specifying several alternative paths to travel
128 for those packets. The router considers all these paths to be of 128 for those packets. The router considers all these paths to be of
129 equal "cost" and chooses one of them in a non-deterministic fashion 129 equal "cost" and chooses one of them in a non-deterministic fashion
130 if a matching packet arrives. 130 if a matching packet arrives.
131 131
132 config IP_ROUTE_VERBOSE 132 config IP_ROUTE_VERBOSE
133 bool "IP: verbose route monitoring" 133 bool "IP: verbose route monitoring"
134 depends on IP_ADVANCED_ROUTER 134 depends on IP_ADVANCED_ROUTER
135 help 135 help
136 If you say Y here, which is recommended, then the kernel will print 136 If you say Y here, which is recommended, then the kernel will print
137 verbose messages regarding the routing, for example warnings about 137 verbose messages regarding the routing, for example warnings about
138 received packets which look strange and could be evidence of an 138 received packets which look strange and could be evidence of an
139 attack or a misconfigured system somewhere. The information is 139 attack or a misconfigured system somewhere. The information is
140 handled by the klogd daemon which is responsible for kernel messages 140 handled by the klogd daemon which is responsible for kernel messages
141 ("man klogd"). 141 ("man klogd").
142 142
143 config IP_PNP 143 config IP_PNP
144 bool "IP: kernel level autoconfiguration" 144 bool "IP: kernel level autoconfiguration"
145 help 145 help
146 This enables automatic configuration of IP addresses of devices and 146 This enables automatic configuration of IP addresses of devices and
147 of the routing table during kernel boot, based on either information 147 of the routing table during kernel boot, based on either information
148 supplied on the kernel command line or by BOOTP or RARP protocols. 148 supplied on the kernel command line or by BOOTP or RARP protocols.
149 You need to say Y only for diskless machines requiring network 149 You need to say Y only for diskless machines requiring network
150 access to boot (in which case you want to say Y to "Root file system 150 access to boot (in which case you want to say Y to "Root file system
151 on NFS" as well), because all other machines configure the network 151 on NFS" as well), because all other machines configure the network
152 in their startup scripts. 152 in their startup scripts.
153 153
154 config IP_PNP_DHCP 154 config IP_PNP_DHCP
155 bool "IP: DHCP support" 155 bool "IP: DHCP support"
156 depends on IP_PNP 156 depends on IP_PNP
157 ---help--- 157 ---help---
158 If you want your Linux box to mount its whole root file system (the 158 If you want your Linux box to mount its whole root file system (the
159 one containing the directory /) from some other computer over the 159 one containing the directory /) from some other computer over the
160 net via NFS and you want the IP address of your computer to be 160 net via NFS and you want the IP address of your computer to be
161 discovered automatically at boot time using the DHCP protocol (a 161 discovered automatically at boot time using the DHCP protocol (a
162 special protocol designed for doing this job), say Y here. In case 162 special protocol designed for doing this job), say Y here. In case
163 the boot ROM of your network card was designed for booting Linux and 163 the boot ROM of your network card was designed for booting Linux and
164 does DHCP itself, providing all necessary information on the kernel 164 does DHCP itself, providing all necessary information on the kernel
165 command line, you can say N here. 165 command line, you can say N here.
166 166
167 If unsure, say Y. Note that if you want to use DHCP, a DHCP server 167 If unsure, say Y. Note that if you want to use DHCP, a DHCP server
168 must be operating on your network. Read 168 must be operating on your network. Read
169 <file:Documentation/filesystems/nfs/nfsroot.txt> for details. 169 <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
170 170
171 config IP_PNP_BOOTP 171 config IP_PNP_BOOTP
172 bool "IP: BOOTP support" 172 bool "IP: BOOTP support"
173 depends on IP_PNP 173 depends on IP_PNP
174 ---help--- 174 ---help---
175 If you want your Linux box to mount its whole root file system (the 175 If you want your Linux box to mount its whole root file system (the
176 one containing the directory /) from some other computer over the 176 one containing the directory /) from some other computer over the
177 net via NFS and you want the IP address of your computer to be 177 net via NFS and you want the IP address of your computer to be
178 discovered automatically at boot time using the BOOTP protocol (a 178 discovered automatically at boot time using the BOOTP protocol (a
179 special protocol designed for doing this job), say Y here. In case 179 special protocol designed for doing this job), say Y here. In case
180 the boot ROM of your network card was designed for booting Linux and 180 the boot ROM of your network card was designed for booting Linux and
181 does BOOTP itself, providing all necessary information on the kernel 181 does BOOTP itself, providing all necessary information on the kernel
182 command line, you can say N here. If unsure, say Y. Note that if you 182 command line, you can say N here. If unsure, say Y. Note that if you
183 want to use BOOTP, a BOOTP server must be operating on your network. 183 want to use BOOTP, a BOOTP server must be operating on your network.
184 Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details. 184 Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
185 185
186 config IP_PNP_RARP 186 config IP_PNP_RARP
187 bool "IP: RARP support" 187 bool "IP: RARP support"
188 depends on IP_PNP 188 depends on IP_PNP
189 help 189 help
190 If you want your Linux box to mount its whole root file system (the 190 If you want your Linux box to mount its whole root file system (the
191 one containing the directory /) from some other computer over the 191 one containing the directory /) from some other computer over the
192 net via NFS and you want the IP address of your computer to be 192 net via NFS and you want the IP address of your computer to be
193 discovered automatically at boot time using the RARP protocol (an 193 discovered automatically at boot time using the RARP protocol (an
194 older protocol which is being obsoleted by BOOTP and DHCP), say Y 194 older protocol which is being obsoleted by BOOTP and DHCP), say Y
195 here. Note that if you want to use RARP, a RARP server must be 195 here. Note that if you want to use RARP, a RARP server must be
196 operating on your network. Read 196 operating on your network. Read
197 <file:Documentation/filesystems/nfs/nfsroot.txt> for details. 197 <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
198 198
199 # not yet ready.. 199 # not yet ready..
200 # bool ' IP: ARP support' CONFIG_IP_PNP_ARP 200 # bool ' IP: ARP support' CONFIG_IP_PNP_ARP
201 config NET_IPIP 201 config NET_IPIP
202 tristate "IP: tunneling" 202 tristate "IP: tunneling"
203 select INET_TUNNEL 203 select INET_TUNNEL
204 ---help--- 204 ---help---
205 Tunneling means encapsulating data of one protocol type within 205 Tunneling means encapsulating data of one protocol type within
206 another protocol and sending it over a channel that understands the 206 another protocol and sending it over a channel that understands the
207 encapsulating protocol. This particular tunneling driver implements 207 encapsulating protocol. This particular tunneling driver implements
208 encapsulation of IP within IP, which sounds kind of pointless, but 208 encapsulation of IP within IP, which sounds kind of pointless, but
209 can be useful if you want to make your (or some other) machine 209 can be useful if you want to make your (or some other) machine
210 appear on a different network than it physically is, or to use 210 appear on a different network than it physically is, or to use
211 mobile-IP facilities (allowing laptops to seamlessly move between 211 mobile-IP facilities (allowing laptops to seamlessly move between
212 networks without changing their IP addresses). 212 networks without changing their IP addresses).
213 213
214 Saying Y to this option will produce two modules ( = code which can 214 Saying Y to this option will produce two modules ( = code which can
215 be inserted in and removed from the running kernel whenever you 215 be inserted in and removed from the running kernel whenever you
216 want). Most people won't need this and can say N. 216 want). Most people won't need this and can say N.
217 217
218 config NET_IPGRE 218 config NET_IPGRE
219 tristate "IP: GRE tunnels over IP" 219 tristate "IP: GRE tunnels over IP"
220 depends on IPV6 || IPV6=n
220 help 221 help
221 Tunneling means encapsulating data of one protocol type within 222 Tunneling means encapsulating data of one protocol type within
222 another protocol and sending it over a channel that understands the 223 another protocol and sending it over a channel that understands the
223 encapsulating protocol. This particular tunneling driver implements 224 encapsulating protocol. This particular tunneling driver implements
224 GRE (Generic Routing Encapsulation) and at this time allows 225 GRE (Generic Routing Encapsulation) and at this time allows
225 encapsulating of IPv4 or IPv6 over existing IPv4 infrastructure. 226 encapsulating of IPv4 or IPv6 over existing IPv4 infrastructure.
226 This driver is useful if the other endpoint is a Cisco router: Cisco 227 This driver is useful if the other endpoint is a Cisco router: Cisco
227 likes GRE much better than the other Linux tunneling driver ("IP 228 likes GRE much better than the other Linux tunneling driver ("IP
228 tunneling" above). In addition, GRE allows multicast redistribution 229 tunneling" above). In addition, GRE allows multicast redistribution
229 through the tunnel. 230 through the tunnel.
230 231
231 config NET_IPGRE_BROADCAST 232 config NET_IPGRE_BROADCAST
232 bool "IP: broadcast GRE over IP" 233 bool "IP: broadcast GRE over IP"
233 depends on IP_MULTICAST && NET_IPGRE 234 depends on IP_MULTICAST && NET_IPGRE
234 help 235 help
235 One application of GRE/IP is to construct a broadcast WAN (Wide Area 236 One application of GRE/IP is to construct a broadcast WAN (Wide Area
236 Network), which looks like a normal Ethernet LAN (Local Area 237 Network), which looks like a normal Ethernet LAN (Local Area
237 Network), but can be distributed all over the Internet. If you want 238 Network), but can be distributed all over the Internet. If you want
238 to do that, say Y here and to "IP multicast routing" below. 239 to do that, say Y here and to "IP multicast routing" below.
239 240
240 config IP_MROUTE 241 config IP_MROUTE
241 bool "IP: multicast routing" 242 bool "IP: multicast routing"
242 depends on IP_MULTICAST 243 depends on IP_MULTICAST
243 help 244 help
244 This is used if you want your machine to act as a router for IP 245 This is used if you want your machine to act as a router for IP
245 packets that have several destination addresses. It is needed on the 246 packets that have several destination addresses. It is needed on the
246 MBONE, a high bandwidth network on top of the Internet which carries 247 MBONE, a high bandwidth network on top of the Internet which carries
247 audio and video broadcasts. In order to do that, you would most 248 audio and video broadcasts. In order to do that, you would most
248 likely run the program mrouted. Information about the multicast 249 likely run the program mrouted. Information about the multicast
249 capabilities of the various network cards is contained in 250 capabilities of the various network cards is contained in
250 <file:Documentation/networking/multicast.txt>. If you haven't heard 251 <file:Documentation/networking/multicast.txt>. If you haven't heard
251 about it, you don't need it. 252 about it, you don't need it.
252 253
253 config IP_MROUTE_MULTIPLE_TABLES 254 config IP_MROUTE_MULTIPLE_TABLES
254 bool "IP: multicast policy routing" 255 bool "IP: multicast policy routing"
255 depends on IP_MROUTE && IP_ADVANCED_ROUTER 256 depends on IP_MROUTE && IP_ADVANCED_ROUTER
256 select FIB_RULES 257 select FIB_RULES
257 help 258 help
258 Normally, a multicast router runs a userspace daemon and decides 259 Normally, a multicast router runs a userspace daemon and decides
259 what to do with a multicast packet based on the source and 260 what to do with a multicast packet based on the source and
260 destination addresses. If you say Y here, the multicast router 261 destination addresses. If you say Y here, the multicast router
261 will also be able to take interfaces and packet marks into 262 will also be able to take interfaces and packet marks into
262 account and run multiple instances of userspace daemons 263 account and run multiple instances of userspace daemons
263 simultaneously, each one handling a single table. 264 simultaneously, each one handling a single table.
264 265
265 If unsure, say N. 266 If unsure, say N.
266 267
267 config IP_PIMSM_V1 268 config IP_PIMSM_V1
268 bool "IP: PIM-SM version 1 support" 269 bool "IP: PIM-SM version 1 support"
269 depends on IP_MROUTE 270 depends on IP_MROUTE
270 help 271 help
271 Kernel side support for Sparse Mode PIM (Protocol Independent 272 Kernel side support for Sparse Mode PIM (Protocol Independent
272 Multicast) version 1. This multicast routing protocol is used widely 273 Multicast) version 1. This multicast routing protocol is used widely
273 because Cisco supports it. You need special software to use it 274 because Cisco supports it. You need special software to use it
274 (pimd-v1). Please see <http://netweb.usc.edu/pim/> for more 275 (pimd-v1). Please see <http://netweb.usc.edu/pim/> for more
275 information about PIM. 276 information about PIM.
276 277
277 Say Y if you want to use PIM-SM v1. Note that you can say N here if 278 Say Y if you want to use PIM-SM v1. Note that you can say N here if
278 you just want to use Dense Mode PIM. 279 you just want to use Dense Mode PIM.
279 280
280 config IP_PIMSM_V2 281 config IP_PIMSM_V2
281 bool "IP: PIM-SM version 2 support" 282 bool "IP: PIM-SM version 2 support"
282 depends on IP_MROUTE 283 depends on IP_MROUTE
283 help 284 help
284 Kernel side support for Sparse Mode PIM version 2. In order to use 285 Kernel side support for Sparse Mode PIM version 2. In order to use
285 this, you need an experimental routing daemon supporting it (pimd or 286 this, you need an experimental routing daemon supporting it (pimd or
286 gated-5). This routing protocol is not used widely, so say N unless 287 gated-5). This routing protocol is not used widely, so say N unless
287 you want to play with it. 288 you want to play with it.
288 289
289 config ARPD 290 config ARPD
290 bool "IP: ARP daemon support" 291 bool "IP: ARP daemon support"
291 ---help--- 292 ---help---
292 The kernel maintains an internal cache which maps IP addresses to 293 The kernel maintains an internal cache which maps IP addresses to
293 hardware addresses on the local network, so that Ethernet/Token Ring/ 294 hardware addresses on the local network, so that Ethernet/Token Ring/
294 etc. frames are sent to the proper address on the physical networking 295 etc. frames are sent to the proper address on the physical networking
295 layer. Normally, the kernel uses the ARP protocol to resolve these 296 layer. Normally, the kernel uses the ARP protocol to resolve these
296 mappings. 297 mappings.
297 298
298 Saying Y here adds support to have a user space daemon to do this 299 Saying Y here adds support to have a user space daemon to do this
299 resolution instead. This is useful for implementing an alternate 300 resolution instead. This is useful for implementing an alternate
300 address resolution protocol (e.g. NHRP on mGRE tunnels) and also for 301 address resolution protocol (e.g. NHRP on mGRE tunnels) and also for
301 testing purposes. 302 testing purposes.
302 303
303 If unsure, say N. 304 If unsure, say N.
304 305
305 config SYN_COOKIES 306 config SYN_COOKIES
306 bool "IP: TCP syncookie support" 307 bool "IP: TCP syncookie support"
307 ---help--- 308 ---help---
308 Normal TCP/IP networking is open to an attack known as "SYN 309 Normal TCP/IP networking is open to an attack known as "SYN
309 flooding". This denial-of-service attack prevents legitimate remote 310 flooding". This denial-of-service attack prevents legitimate remote
310 users from being able to connect to your computer during an ongoing 311 users from being able to connect to your computer during an ongoing
311 attack and requires very little work from the attacker, who can 312 attack and requires very little work from the attacker, who can
312 operate from anywhere on the Internet. 313 operate from anywhere on the Internet.
313 314
314 SYN cookies provide protection against this type of attack. If you 315 SYN cookies provide protection against this type of attack. If you
315 say Y here, the TCP/IP stack will use a cryptographic challenge 316 say Y here, the TCP/IP stack will use a cryptographic challenge
316 protocol known as "SYN cookies" to enable legitimate users to 317 protocol known as "SYN cookies" to enable legitimate users to
317 continue to connect, even when your machine is under attack. There 318 continue to connect, even when your machine is under attack. There
318 is no need for the legitimate users to change their TCP/IP software; 319 is no need for the legitimate users to change their TCP/IP software;
319 SYN cookies work transparently to them. For technical information 320 SYN cookies work transparently to them. For technical information
320 about SYN cookies, check out <http://cr.yp.to/syncookies.html>. 321 about SYN cookies, check out <http://cr.yp.to/syncookies.html>.
321 322
322 If you are SYN flooded, the source address reported by the kernel is 323 If you are SYN flooded, the source address reported by the kernel is
323 likely to have been forged by the attacker; it is only reported as 324 likely to have been forged by the attacker; it is only reported as
324 an aid in tracing the packets to their actual source and should not 325 an aid in tracing the packets to their actual source and should not
325 be taken as absolute truth. 326 be taken as absolute truth.
326 327
327 SYN cookies may prevent correct error reporting on clients when the 328 SYN cookies may prevent correct error reporting on clients when the
328 server is really overloaded. If this happens frequently better turn 329 server is really overloaded. If this happens frequently better turn
329 them off. 330 them off.
330 331
331 If you say Y here, you can disable SYN cookies at run time by 332 If you say Y here, you can disable SYN cookies at run time by
332 saying Y to "/proc file system support" and 333 saying Y to "/proc file system support" and
333 "Sysctl support" below and executing the command 334 "Sysctl support" below and executing the command
334 335
335 echo 0 > /proc/sys/net/ipv4/tcp_syncookies 336 echo 0 > /proc/sys/net/ipv4/tcp_syncookies
336 337
337 after the /proc file system has been mounted. 338 after the /proc file system has been mounted.
338 339
339 If unsure, say N. 340 If unsure, say N.
340 341
341 config INET_AH 342 config INET_AH
342 tristate "IP: AH transformation" 343 tristate "IP: AH transformation"
343 select XFRM 344 select XFRM
344 select CRYPTO 345 select CRYPTO
345 select CRYPTO_HMAC 346 select CRYPTO_HMAC
346 select CRYPTO_MD5 347 select CRYPTO_MD5
347 select CRYPTO_SHA1 348 select CRYPTO_SHA1
348 ---help--- 349 ---help---
349 Support for IPsec AH. 350 Support for IPsec AH.
350 351
351 If unsure, say Y. 352 If unsure, say Y.
352 353
353 config INET_ESP 354 config INET_ESP
354 tristate "IP: ESP transformation" 355 tristate "IP: ESP transformation"
355 select XFRM 356 select XFRM
356 select CRYPTO 357 select CRYPTO
357 select CRYPTO_AUTHENC 358 select CRYPTO_AUTHENC
358 select CRYPTO_HMAC 359 select CRYPTO_HMAC
359 select CRYPTO_MD5 360 select CRYPTO_MD5
360 select CRYPTO_CBC 361 select CRYPTO_CBC
361 select CRYPTO_SHA1 362 select CRYPTO_SHA1
362 select CRYPTO_DES 363 select CRYPTO_DES
363 ---help--- 364 ---help---
364 Support for IPsec ESP. 365 Support for IPsec ESP.
365 366
366 If unsure, say Y. 367 If unsure, say Y.
367 368
368 config INET_IPCOMP 369 config INET_IPCOMP
369 tristate "IP: IPComp transformation" 370 tristate "IP: IPComp transformation"
370 select INET_XFRM_TUNNEL 371 select INET_XFRM_TUNNEL
371 select XFRM_IPCOMP 372 select XFRM_IPCOMP
372 ---help--- 373 ---help---
373 Support for IP Payload Compression Protocol (IPComp) (RFC3173), 374 Support for IP Payload Compression Protocol (IPComp) (RFC3173),
374 typically needed for IPsec. 375 typically needed for IPsec.
375 376
376 If unsure, say Y. 377 If unsure, say Y.
377 378
378 config INET_XFRM_TUNNEL 379 config INET_XFRM_TUNNEL
379 tristate 380 tristate
380 select INET_TUNNEL 381 select INET_TUNNEL
381 default n 382 default n
382 383
383 config INET_TUNNEL 384 config INET_TUNNEL
384 tristate 385 tristate
385 default n 386 default n
386 387
387 config INET_XFRM_MODE_TRANSPORT 388 config INET_XFRM_MODE_TRANSPORT
388 tristate "IP: IPsec transport mode" 389 tristate "IP: IPsec transport mode"
389 default y 390 default y
390 select XFRM 391 select XFRM
391 ---help--- 392 ---help---
392 Support for IPsec transport mode. 393 Support for IPsec transport mode.
393 394
394 If unsure, say Y. 395 If unsure, say Y.
395 396
396 config INET_XFRM_MODE_TUNNEL 397 config INET_XFRM_MODE_TUNNEL
397 tristate "IP: IPsec tunnel mode" 398 tristate "IP: IPsec tunnel mode"
398 default y 399 default y
399 select XFRM 400 select XFRM
400 ---help--- 401 ---help---
401 Support for IPsec tunnel mode. 402 Support for IPsec tunnel mode.
402 403
403 If unsure, say Y. 404 If unsure, say Y.
404 405
405 config INET_XFRM_MODE_BEET 406 config INET_XFRM_MODE_BEET
406 tristate "IP: IPsec BEET mode" 407 tristate "IP: IPsec BEET mode"
407 default y 408 default y
408 select XFRM 409 select XFRM
409 ---help--- 410 ---help---
410 Support for IPsec BEET mode. 411 Support for IPsec BEET mode.
411 412
412 If unsure, say Y. 413 If unsure, say Y.
413 414
414 config INET_LRO 415 config INET_LRO
415 bool "Large Receive Offload (ipv4/tcp)" 416 bool "Large Receive Offload (ipv4/tcp)"
416 default y 417 default y
417 ---help--- 418 ---help---
418 Support for Large Receive Offload (ipv4/tcp). 419 Support for Large Receive Offload (ipv4/tcp).
419 420
420 If unsure, say Y. 421 If unsure, say Y.
421 422
422 config INET_DIAG 423 config INET_DIAG
423 tristate "INET: socket monitoring interface" 424 tristate "INET: socket monitoring interface"
424 default y 425 default y
425 ---help--- 426 ---help---
426 Support for INET (TCP, DCCP, etc) socket monitoring interface used by 427 Support for INET (TCP, DCCP, etc) socket monitoring interface used by
427 native Linux tools such as ss. ss is included in iproute2, currently 428 native Linux tools such as ss. ss is included in iproute2, currently
428 downloadable at <http://linux-net.osdl.org/index.php/Iproute2>. 429 downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
429 430
430 If unsure, say Y. 431 If unsure, say Y.
431 432
432 config INET_TCP_DIAG 433 config INET_TCP_DIAG
433 depends on INET_DIAG 434 depends on INET_DIAG
434 def_tristate INET_DIAG 435 def_tristate INET_DIAG
435 436
436 menuconfig TCP_CONG_ADVANCED 437 menuconfig TCP_CONG_ADVANCED
437 bool "TCP: advanced congestion control" 438 bool "TCP: advanced congestion control"
438 ---help--- 439 ---help---
439 Support for selection of various TCP congestion control 440 Support for selection of various TCP congestion control
440 modules. 441 modules.
441 442
442 Nearly all users can safely say no here, and a safe default 443 Nearly all users can safely say no here, and a safe default
443 selection will be made (CUBIC with new Reno as a fallback). 444 selection will be made (CUBIC with new Reno as a fallback).
444 445
445 If unsure, say N. 446 If unsure, say N.
446 447
447 if TCP_CONG_ADVANCED 448 if TCP_CONG_ADVANCED
448 449
449 config TCP_CONG_BIC 450 config TCP_CONG_BIC
450 tristate "Binary Increase Congestion (BIC) control" 451 tristate "Binary Increase Congestion (BIC) control"
451 default m 452 default m
452 ---help--- 453 ---help---
453 BIC-TCP is a sender-side only change that ensures a linear RTT 454 BIC-TCP is a sender-side only change that ensures a linear RTT
454 fairness under large windows while offering both scalability and 455 fairness under large windows while offering both scalability and
455 bounded TCP-friendliness. The protocol combines two schemes 456 bounded TCP-friendliness. The protocol combines two schemes
456 called additive increase and binary search increase. When the 457 called additive increase and binary search increase. When the
457 congestion window is large, additive increase with a large 458 congestion window is large, additive increase with a large
458 increment ensures linear RTT fairness as well as good 459 increment ensures linear RTT fairness as well as good
459 scalability. Under small congestion windows, binary search 460 scalability. Under small congestion windows, binary search
460 increase provides TCP friendliness. 461 increase provides TCP friendliness.
461 See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/ 462 See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/
462 463
463 config TCP_CONG_CUBIC 464 config TCP_CONG_CUBIC
464 tristate "CUBIC TCP" 465 tristate "CUBIC TCP"
465 default y 466 default y
466 ---help--- 467 ---help---
467 This is version 2.0 of BIC-TCP which uses a cubic growth function 468 This is version 2.0 of BIC-TCP which uses a cubic growth function
468 among other techniques. 469 among other techniques.
469 See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf 470 See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf
470 471
471 config TCP_CONG_WESTWOOD 472 config TCP_CONG_WESTWOOD
472 tristate "TCP Westwood+" 473 tristate "TCP Westwood+"
473 default m 474 default m
474 ---help--- 475 ---help---
475 TCP Westwood+ is a sender-side only modification of the TCP Reno 476 TCP Westwood+ is a sender-side only modification of the TCP Reno
476 protocol stack that optimizes the performance of TCP congestion 477 protocol stack that optimizes the performance of TCP congestion
477 control. It is based on end-to-end bandwidth estimation to set 478 control. It is based on end-to-end bandwidth estimation to set
478 congestion window and slow start threshold after a congestion 479 congestion window and slow start threshold after a congestion
479 episode. Using this estimation, TCP Westwood+ adaptively sets a 480 episode. Using this estimation, TCP Westwood+ adaptively sets a
480 slow start threshold and a congestion window which takes into 481 slow start threshold and a congestion window which takes into
481 account the bandwidth used at the time congestion is experienced. 482 account the bandwidth used at the time congestion is experienced.
482 TCP Westwood+ significantly increases fairness wrt TCP Reno in 483 TCP Westwood+ significantly increases fairness wrt TCP Reno in
483 wired networks and throughput over wireless links. 484 wired networks and throughput over wireless links.
484 485
485 config TCP_CONG_HTCP 486 config TCP_CONG_HTCP
486 tristate "H-TCP" 487 tristate "H-TCP"
487 default m 488 default m
488 ---help--- 489 ---help---
489 H-TCP is a sender-side only modification of the TCP Reno 490 H-TCP is a sender-side only modification of the TCP Reno
490 protocol stack that optimizes the performance of TCP 491 protocol stack that optimizes the performance of TCP
491 congestion control for high speed network links. It uses a 492 congestion control for high speed network links. It uses a
492 modeswitch to change the alpha and beta parameters of TCP Reno 493 modeswitch to change the alpha and beta parameters of TCP Reno
493 based on network conditions and in a way so as to be fair with 494 based on network conditions and in a way so as to be fair with
494 other Reno and H-TCP flows. 495 other Reno and H-TCP flows.
495 496
496 config TCP_CONG_HSTCP 497 config TCP_CONG_HSTCP
497 tristate "High Speed TCP" 498 tristate "High Speed TCP"
498 depends on EXPERIMENTAL 499 depends on EXPERIMENTAL
499 default n 500 default n
500 ---help--- 501 ---help---
501 Sally Floyd's High Speed TCP (RFC 3649) congestion control. 502 Sally Floyd's High Speed TCP (RFC 3649) congestion control.
502 A modification to TCP's congestion control mechanism for use 503 A modification to TCP's congestion control mechanism for use
503 with large congestion windows. A table indicates how much to 504 with large congestion windows. A table indicates how much to
504 increase the congestion window by when an ACK is received. 505 increase the congestion window by when an ACK is received.
505 For more detail see http://www.icir.org/floyd/hstcp.html 506 For more detail see http://www.icir.org/floyd/hstcp.html
506 507
507 config TCP_CONG_HYBLA 508 config TCP_CONG_HYBLA
508 tristate "TCP-Hybla congestion control algorithm" 509 tristate "TCP-Hybla congestion control algorithm"
509 depends on EXPERIMENTAL 510 depends on EXPERIMENTAL
510 default n 511 default n
511 ---help--- 512 ---help---
512 TCP-Hybla is a sender-side only change that eliminates penalization of 513 TCP-Hybla is a sender-side only change that eliminates penalization of
513 long-RTT, large-bandwidth connections, like when satellite legs are 514 long-RTT, large-bandwidth connections, like when satellite legs are
514 involved, especially when sharing a common bottleneck with normal 515 involved, especially when sharing a common bottleneck with normal
515 terrestrial connections. 516 terrestrial connections.
516 517
517 config TCP_CONG_VEGAS 518 config TCP_CONG_VEGAS
518 tristate "TCP Vegas" 519 tristate "TCP Vegas"
519 depends on EXPERIMENTAL 520 depends on EXPERIMENTAL
520 default n 521 default n
521 ---help--- 522 ---help---
522 TCP Vegas is a sender-side only change to TCP that anticipates 523 TCP Vegas is a sender-side only change to TCP that anticipates
523 the onset of congestion by estimating the bandwidth. TCP Vegas 524 the onset of congestion by estimating the bandwidth. TCP Vegas
524 adjusts the sending rate by modifying the congestion 525 adjusts the sending rate by modifying the congestion
525 window. TCP Vegas should provide less packet loss, but it is 526 window. TCP Vegas should provide less packet loss, but it is
526 not as aggressive as TCP Reno. 527 not as aggressive as TCP Reno.
527 528
528 config TCP_CONG_SCALABLE 529 config TCP_CONG_SCALABLE
529 tristate "Scalable TCP" 530 tristate "Scalable TCP"
530 depends on EXPERIMENTAL 531 depends on EXPERIMENTAL
531 default n 532 default n
532 ---help--- 533 ---help---
533 Scalable TCP is a sender-side only change to TCP which uses a 534 Scalable TCP is a sender-side only change to TCP which uses a
534 MIMD congestion control algorithm which has some nice scaling 535 MIMD congestion control algorithm which has some nice scaling
535 properties, though it is known to have fairness issues. 536 properties, though it is known to have fairness issues.
536 See http://www.deneholme.net/tom/scalable/ 537 See http://www.deneholme.net/tom/scalable/
537 538
538 config TCP_CONG_LP 539 config TCP_CONG_LP
539 tristate "TCP Low Priority" 540 tristate "TCP Low Priority"
540 depends on EXPERIMENTAL 541 depends on EXPERIMENTAL
541 default n 542 default n
542 ---help--- 543 ---help---
543 TCP Low Priority (TCP-LP), a distributed algorithm whose goal is 544 TCP Low Priority (TCP-LP), a distributed algorithm whose goal is
544 to utilize only the excess network bandwidth as compared to the 545 to utilize only the excess network bandwidth as compared to the
545 ``fair share`` of bandwidth as targeted by TCP. 546 ``fair share`` of bandwidth as targeted by TCP.
546 See http://www-ece.rice.edu/networks/TCP-LP/ 547 See http://www-ece.rice.edu/networks/TCP-LP/
547 548
548 config TCP_CONG_VENO 549 config TCP_CONG_VENO
549 tristate "TCP Veno" 550 tristate "TCP Veno"
550 depends on EXPERIMENTAL 551 depends on EXPERIMENTAL
551 default n 552 default n
552 ---help--- 553 ---help---
553 TCP Veno is a sender-side only enhancement of TCP to obtain better 554 TCP Veno is a sender-side only enhancement of TCP to obtain better
554 throughput over wireless networks. TCP Veno makes use of state 555 throughput over wireless networks. TCP Veno makes use of state
555 distinguishing to circumvent the difficult judgment of the packet loss 556 distinguishing to circumvent the difficult judgment of the packet loss
556 type. TCP Veno reduces the congestion window less in response to random 557 type. TCP Veno reduces the congestion window less in response to random
557 packet loss. 558 packet loss.
558 See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf 559 See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
559 560
560 config TCP_CONG_YEAH 561 config TCP_CONG_YEAH
561 tristate "YeAH TCP" 562 tristate "YeAH TCP"
562 depends on EXPERIMENTAL 563 depends on EXPERIMENTAL
563 select TCP_CONG_VEGAS 564 select TCP_CONG_VEGAS
564 default n 565 default n
565 ---help--- 566 ---help---
566 YeAH-TCP is a sender-side high-speed enabled TCP congestion control 567 YeAH-TCP is a sender-side high-speed enabled TCP congestion control
567 algorithm, which uses a mixed loss/delay approach to compute the 568 algorithm, which uses a mixed loss/delay approach to compute the
568 congestion window. Its design goals target high efficiency, 569 congestion window. Its design goals target high efficiency,
569 internal, RTT and Reno fairness, resilience to link loss while 570 internal, RTT and Reno fairness, resilience to link loss while
570 keeping the load on network elements as low as possible. 571 keeping the load on network elements as low as possible.
571 572
572 For further details look here: 573 For further details look here:
573 http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf 574 http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
574 575
575 config TCP_CONG_ILLINOIS 576 config TCP_CONG_ILLINOIS
576 tristate "TCP Illinois" 577 tristate "TCP Illinois"
577 depends on EXPERIMENTAL 578 depends on EXPERIMENTAL
578 default n 579 default n
579 ---help--- 580 ---help---
580 TCP-Illinois is a sender-side modification of TCP Reno for 581 TCP-Illinois is a sender-side modification of TCP Reno for
581 high speed long delay links. It uses round-trip-time to 582 high speed long delay links. It uses round-trip-time to
582 adjust the alpha and beta parameters to achieve a higher average 583 adjust the alpha and beta parameters to achieve a higher average
583 throughput and maintain fairness. 584 throughput and maintain fairness.
584 585
585 For further details see: 586 For further details see:
586 http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html 587 http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
587 588
588 choice 589 choice
589 prompt "Default TCP congestion control" 590 prompt "Default TCP congestion control"
590 default DEFAULT_CUBIC 591 default DEFAULT_CUBIC
591 help 592 help
592 Select the TCP congestion control that will be used by default 593 Select the TCP congestion control that will be used by default
593 for all connections. 594 for all connections.
594 595
595 config DEFAULT_BIC 596 config DEFAULT_BIC
596 bool "Bic" if TCP_CONG_BIC=y 597 bool "Bic" if TCP_CONG_BIC=y
597 598
598 config DEFAULT_CUBIC 599 config DEFAULT_CUBIC
599 bool "Cubic" if TCP_CONG_CUBIC=y 600 bool "Cubic" if TCP_CONG_CUBIC=y
600 601
601 config DEFAULT_HTCP 602 config DEFAULT_HTCP
602 bool "Htcp" if TCP_CONG_HTCP=y 603 bool "Htcp" if TCP_CONG_HTCP=y
603 604
604 config DEFAULT_HYBLA 605 config DEFAULT_HYBLA
605 bool "Hybla" if TCP_CONG_HYBLA=y 606 bool "Hybla" if TCP_CONG_HYBLA=y
606 607
607 config DEFAULT_VEGAS 608 config DEFAULT_VEGAS
608 bool "Vegas" if TCP_CONG_VEGAS=y 609 bool "Vegas" if TCP_CONG_VEGAS=y
609 610
610 config DEFAULT_VENO 611 config DEFAULT_VENO
611 bool "Veno" if TCP_CONG_VENO=y 612 bool "Veno" if TCP_CONG_VENO=y
612 613
613 config DEFAULT_WESTWOOD 614 config DEFAULT_WESTWOOD
614 bool "Westwood" if TCP_CONG_WESTWOOD=y 615 bool "Westwood" if TCP_CONG_WESTWOOD=y
615 616
616 config DEFAULT_RENO 617 config DEFAULT_RENO
617 bool "Reno" 618 bool "Reno"
618 619
619 endchoice 620 endchoice
620 621
621 endif 622 endif
622 623
623 config TCP_CONG_CUBIC 624 config TCP_CONG_CUBIC
624 tristate 625 tristate
625 depends on !TCP_CONG_ADVANCED 626 depends on !TCP_CONG_ADVANCED
626 default y 627 default y
627 628
628 config DEFAULT_TCP_CONG 629 config DEFAULT_TCP_CONG
629 string 630 string
630 default "bic" if DEFAULT_BIC 631 default "bic" if DEFAULT_BIC
631 default "cubic" if DEFAULT_CUBIC 632 default "cubic" if DEFAULT_CUBIC
632 default "htcp" if DEFAULT_HTCP 633 default "htcp" if DEFAULT_HTCP
633 default "hybla" if DEFAULT_HYBLA 634 default "hybla" if DEFAULT_HYBLA
634 default "vegas" if DEFAULT_VEGAS 635 default "vegas" if DEFAULT_VEGAS
635 default "westwood" if DEFAULT_WESTWOOD 636 default "westwood" if DEFAULT_WESTWOOD
636 default "veno" if DEFAULT_VENO 637 default "veno" if DEFAULT_VENO
637 default "reno" if DEFAULT_RENO 638 default "reno" if DEFAULT_RENO
638 default "cubic" 639 default "cubic"
639 640
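The DEFAULT_TCP_CONG string above only fixes the system-wide default; applications may still select any built-in or loadable algorithm per socket. A minimal user-space sketch, assuming a Linux host and the standard TCP_CONGESTION socket option (the fallback define of 13 is an assumption for older headers):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_CONGESTION
#define TCP_CONGESTION 13	/* assumed value; present in recent headers */
#endif

int main(void)
{
	char name[16] = "cubic";	/* any algorithm built in or loadable */
	socklen_t len = sizeof(name);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* Pick a congestion control for this socket only. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)) < 0)
		perror("setsockopt(TCP_CONGESTION)");
	/* Read back what the kernel actually attached to the socket. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, name);
	return 0;
}

The system-wide default chosen by DEFAULT_TCP_CONG can likewise be changed at run time through /proc/sys/net/ipv4/tcp_congestion_control.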
640 config TCP_MD5SIG 641 config TCP_MD5SIG
641 bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)" 642 bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)"
642 depends on EXPERIMENTAL 643 depends on EXPERIMENTAL
643 select CRYPTO 644 select CRYPTO
644 select CRYPTO_MD5 645 select CRYPTO_MD5
645 ---help--- 646 ---help---
646 RFC2385 specifies a method of giving MD5 protection to TCP sessions. 647 RFC2385 specifies a method of giving MD5 protection to TCP sessions.
647 Its main (only?) use is to protect BGP sessions between core routers 648 Its main (only?) use is to protect BGP sessions between core routers
648 on the Internet. 649 on the Internet.
649 650
650 If unsure, say N. 651 If unsure, say N.
651 652
652 653
net/ipv4/tcp_timer.c
1 /* 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX 2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket 3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Authors: Ross Biro 8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net>
12 * Florian La Roche, <flla@stud.uni-sb.de> 12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Matthew Dillon, <dillon@apollo.west.oic.com> 16 * Matthew Dillon, <dillon@apollo.west.oic.com>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 * Jorge Cwik, <jorge@laser.satlink.net> 18 * Jorge Cwik, <jorge@laser.satlink.net>
19 */ 19 */
20 20
21 #include <linux/module.h> 21 #include <linux/module.h>
22 #include <linux/gfp.h> 22 #include <linux/gfp.h>
23 #include <net/tcp.h> 23 #include <net/tcp.h>
24 24
25 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; 25 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
26 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES; 26 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
27 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME; 27 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
28 int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES; 28 int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
29 int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL; 29 int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
30 int sysctl_tcp_retries1 __read_mostly = TCP_RETR1; 30 int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
31 int sysctl_tcp_retries2 __read_mostly = TCP_RETR2; 31 int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
32 int sysctl_tcp_orphan_retries __read_mostly; 32 int sysctl_tcp_orphan_retries __read_mostly;
33 int sysctl_tcp_thin_linear_timeouts __read_mostly; 33 int sysctl_tcp_thin_linear_timeouts __read_mostly;
34 34
35 static void tcp_write_timer(unsigned long); 35 static void tcp_write_timer(unsigned long);
36 static void tcp_delack_timer(unsigned long); 36 static void tcp_delack_timer(unsigned long);
37 static void tcp_keepalive_timer (unsigned long data); 37 static void tcp_keepalive_timer (unsigned long data);
38 38
39 void tcp_init_xmit_timers(struct sock *sk) 39 void tcp_init_xmit_timers(struct sock *sk)
40 { 40 {
41 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, 41 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
42 &tcp_keepalive_timer); 42 &tcp_keepalive_timer);
43 } 43 }
44 EXPORT_SYMBOL(tcp_init_xmit_timers); 44 EXPORT_SYMBOL(tcp_init_xmit_timers);
45 45
46 static void tcp_write_err(struct sock *sk) 46 static void tcp_write_err(struct sock *sk)
47 { 47 {
48 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; 48 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
49 sk->sk_error_report(sk); 49 sk->sk_error_report(sk);
50 50
51 tcp_done(sk); 51 tcp_done(sk);
52 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); 52 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
53 } 53 }
54 54
55 /* Do not allow orphaned sockets to eat all our resources. 55 /* Do not allow orphaned sockets to eat all our resources.
56 * This is direct violation of TCP specs, but it is required 56 * This is direct violation of TCP specs, but it is required
57 * to prevent DoS attacks. It is called when a retransmission timeout 57 * to prevent DoS attacks. It is called when a retransmission timeout
58 * or zero probe timeout occurs on orphaned socket. 58 * or zero probe timeout occurs on orphaned socket.
59 * 59 *
60 * These criteria are still not confirmed experimentally and may change. 60 * These criteria are still not confirmed experimentally and may change.
61 * We kill the socket, if: 61 * We kill the socket, if:
62 * 1. If number of orphaned sockets exceeds an administratively configured 62 * 1. If number of orphaned sockets exceeds an administratively configured
63 * limit. 63 * limit.
64 * 2. If we have strong memory pressure. 64 * 2. If we have strong memory pressure.
65 */ 65 */
66 static int tcp_out_of_resources(struct sock *sk, int do_reset) 66 static int tcp_out_of_resources(struct sock *sk, int do_reset)
67 { 67 {
68 struct tcp_sock *tp = tcp_sk(sk); 68 struct tcp_sock *tp = tcp_sk(sk);
69 int shift = 0; 69 int shift = 0;
70 70
71 /* If peer does not open window for long time, or did not transmit 71 /* If peer does not open window for long time, or did not transmit
72 * anything for long time, penalize it. */ 72 * anything for long time, penalize it. */
73 if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) 73 if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
74 shift++; 74 shift++;
75 75
76 /* If some dubious ICMP arrived, penalize even more. */ 76 /* If some dubious ICMP arrived, penalize even more. */
77 if (sk->sk_err_soft) 77 if (sk->sk_err_soft)
78 shift++; 78 shift++;
79 79
80 if (tcp_too_many_orphans(sk, shift)) { 80 if (tcp_too_many_orphans(sk, shift)) {
81 if (net_ratelimit()) 81 if (net_ratelimit())
82 printk(KERN_INFO "Out of socket memory\n"); 82 printk(KERN_INFO "Out of socket memory\n");
83 83
84 /* Catch exceptional cases, when connection requires reset. 84 /* Catch exceptional cases, when connection requires reset.
85 * 1. Last segment was sent recently. */ 85 * 1. Last segment was sent recently. */
86 if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN || 86 if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
87 /* 2. Window is closed. */ 87 /* 2. Window is closed. */
88 (!tp->snd_wnd && !tp->packets_out)) 88 (!tp->snd_wnd && !tp->packets_out))
89 do_reset = 1; 89 do_reset = 1;
90 if (do_reset) 90 if (do_reset)
91 tcp_send_active_reset(sk, GFP_ATOMIC); 91 tcp_send_active_reset(sk, GFP_ATOMIC);
92 tcp_done(sk); 92 tcp_done(sk);
93 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); 93 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
94 return 1; 94 return 1;
95 } 95 }
96 return 0; 96 return 0;
97 } 97 }
98 98
99 /* Calculate maximal number of retries on an orphaned socket. */ 99 /* Calculate maximal number of retries on an orphaned socket. */
100 static int tcp_orphan_retries(struct sock *sk, int alive) 100 static int tcp_orphan_retries(struct sock *sk, int alive)
101 { 101 {
102 int retries = sysctl_tcp_orphan_retries; /* May be zero. */ 102 int retries = sysctl_tcp_orphan_retries; /* May be zero. */
103 103
104 /* We know from an ICMP that something is wrong. */ 104 /* We know from an ICMP that something is wrong. */
105 if (sk->sk_err_soft && !alive) 105 if (sk->sk_err_soft && !alive)
106 retries = 0; 106 retries = 0;
107 107
108 /* However, if socket sent something recently, select some safe 108 /* However, if socket sent something recently, select some safe
109 * number of retries. 8 corresponds to >100 seconds with minimal 109 * number of retries. 8 corresponds to >100 seconds with minimal
110 * RTO of 200msec. */ 110 * RTO of 200msec. */
111 if (retries == 0 && alive) 111 if (retries == 0 && alive)
112 retries = 8; 112 retries = 8;
113 return retries; 113 return retries;
114 } 114 }
115 115
116 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) 116 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
117 { 117 {
118 /* Black hole detection */ 118 /* Black hole detection */
119 if (sysctl_tcp_mtu_probing) { 119 if (sysctl_tcp_mtu_probing) {
120 if (!icsk->icsk_mtup.enabled) { 120 if (!icsk->icsk_mtup.enabled) {
121 icsk->icsk_mtup.enabled = 1; 121 icsk->icsk_mtup.enabled = 1;
122 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 122 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
123 } else { 123 } else {
124 struct tcp_sock *tp = tcp_sk(sk); 124 struct tcp_sock *tp = tcp_sk(sk);
125 int mss; 125 int mss;
126 126
127 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; 127 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
128 mss = min(sysctl_tcp_base_mss, mss); 128 mss = min(sysctl_tcp_base_mss, mss);
129 mss = max(mss, 68 - tp->tcp_header_len); 129 mss = max(mss, 68 - tp->tcp_header_len);
130 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 130 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
131 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 131 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
132 } 132 }
133 } 133 }
134 } 134 }
135 135
136 /* This function calculates a "timeout" which is equivalent to the timeout of a 136 /* This function calculates a "timeout" which is equivalent to the timeout of a
137 * TCP connection after "boundary" unsuccessful, exponentially backed-off 137 * TCP connection after "boundary" unsuccessful, exponentially backed-off
138 * retransmissions with an initial RTO of TCP_RTO_MIN. 138 * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
139 * syn_set flag is set.
139 */ 140 */
140 static bool retransmits_timed_out(struct sock *sk, 141 static bool retransmits_timed_out(struct sock *sk,
141 unsigned int boundary) 142 unsigned int boundary,
143 bool syn_set)
142 { 144 {
143 unsigned int timeout, linear_backoff_thresh; 145 unsigned int timeout, linear_backoff_thresh;
144 unsigned int start_ts; 146 unsigned int start_ts;
147 unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
145 148
146 if (!inet_csk(sk)->icsk_retransmits) 149 if (!inet_csk(sk)->icsk_retransmits)
147 return false; 150 return false;
148 151
149 if (unlikely(!tcp_sk(sk)->retrans_stamp)) 152 if (unlikely(!tcp_sk(sk)->retrans_stamp))
150 start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; 153 start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
151 else 154 else
152 start_ts = tcp_sk(sk)->retrans_stamp; 155 start_ts = tcp_sk(sk)->retrans_stamp;
153 156
154 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); 157 linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
155 158
156 if (boundary <= linear_backoff_thresh) 159 if (boundary <= linear_backoff_thresh)
157 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; 160 timeout = ((2 << boundary) - 1) * rto_base;
158 else 161 else
159 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + 162 timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
160 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; 163 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
161 164
162 return (tcp_time_stamp - start_ts) >= timeout; 165 return (tcp_time_stamp - start_ts) >= timeout;
163 } 166 }
164 167
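The new syn_set parameter only changes the base RTO fed into the timeout computation; the shape of the formula is unchanged. A stand-alone sketch of that computation, assuming the conventional values TCP_RTO_MIN = 200 ms, TCP_TIMEOUT_INIT = 3 s and TCP_RTO_MAX = 120 s (the real constants live in include/net/tcp.h and are expressed in jiffies):

#include <stdbool.h>
#include <stdio.h>

#define RTO_MIN_MS      200	/* assumed TCP_RTO_MIN (HZ/5) */
#define TIMEOUT_INIT_MS 3000	/* assumed TCP_TIMEOUT_INIT (3*HZ) */
#define RTO_MAX_MS      120000	/* assumed TCP_RTO_MAX (120*HZ) */

/* Mirrors the shape of retransmits_timed_out(): the total time covered by
 * "boundary" exponentially backed-off retransmissions. */
static unsigned int timeout_ms(unsigned int boundary, bool syn_set)
{
	unsigned int rto_base = syn_set ? TIMEOUT_INIT_MS : RTO_MIN_MS;
	unsigned int thresh = 0;

	/* floor(log2(RTO_MAX / rto_base)), i.e. ilog2() in the kernel code */
	while ((rto_base << (thresh + 1)) <= RTO_MAX_MS)
		thresh++;

	if (boundary <= thresh)
		return ((2u << boundary) - 1) * rto_base;
	return ((2u << thresh) - 1) * rto_base +
	       (boundary - thresh) * RTO_MAX_MS;
}

int main(void)
{
	/* Data socket, default tcp_retries1 = 3: 0.2+0.4+0.8+1.6 = 3 s. */
	printf("retries1=3, data socket: %u ms\n", timeout_ms(3, false));
	/* SYN, default tcp_syn_retries = 5: 3+6+12+24+48+96 = 189 s. */
	printf("retries=5,  SYN:         %u ms\n", timeout_ms(5, true));
	/* boundary = 8 with the 200 ms base gives ~102 s, roughly the
	 * ">100 seconds" mentioned in tcp_orphan_retries() below. */
	printf("retries=8,  data socket: %u ms\n", timeout_ms(8, false));
	return 0;
}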
165 /* A write timeout has occurred. Process the after effects. */ 168 /* A write timeout has occurred. Process the after effects. */
166 static int tcp_write_timeout(struct sock *sk) 169 static int tcp_write_timeout(struct sock *sk)
167 { 170 {
168 struct inet_connection_sock *icsk = inet_csk(sk); 171 struct inet_connection_sock *icsk = inet_csk(sk);
169 int retry_until; 172 int retry_until;
170 bool do_reset; 173 bool do_reset, syn_set = 0;
171 174
172 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 175 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
173 if (icsk->icsk_retransmits) 176 if (icsk->icsk_retransmits)
174 dst_negative_advice(sk); 177 dst_negative_advice(sk);
175 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 178 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
179 syn_set = 1;
176 } else { 180 } else {
177 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { 181 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
178 /* Black hole detection */ 182 /* Black hole detection */
179 tcp_mtu_probing(icsk, sk); 183 tcp_mtu_probing(icsk, sk);
180 184
181 dst_negative_advice(sk); 185 dst_negative_advice(sk);
182 } 186 }
183 187
184 retry_until = sysctl_tcp_retries2; 188 retry_until = sysctl_tcp_retries2;
185 if (sock_flag(sk, SOCK_DEAD)) { 189 if (sock_flag(sk, SOCK_DEAD)) {
186 const int alive = (icsk->icsk_rto < TCP_RTO_MAX); 190 const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
187 191
188 retry_until = tcp_orphan_retries(sk, alive); 192 retry_until = tcp_orphan_retries(sk, alive);
189 do_reset = alive || 193 do_reset = alive ||
190 !retransmits_timed_out(sk, retry_until); 194 !retransmits_timed_out(sk, retry_until, 0);
191 195
192 if (tcp_out_of_resources(sk, do_reset)) 196 if (tcp_out_of_resources(sk, do_reset))
193 return 1; 197 return 1;
194 } 198 }
195 } 199 }
196 200
197 if (retransmits_timed_out(sk, retry_until)) { 201 if (retransmits_timed_out(sk, retry_until, syn_set)) {
198 /* Has it gone just too far? */ 202 /* Has it gone just too far? */
199 tcp_write_err(sk); 203 tcp_write_err(sk);
200 return 1; 204 return 1;
201 } 205 }
202 return 0; 206 return 0;
203 } 207 }
204 208
205 static void tcp_delack_timer(unsigned long data) 209 static void tcp_delack_timer(unsigned long data)
206 { 210 {
207 struct sock *sk = (struct sock *)data; 211 struct sock *sk = (struct sock *)data;
208 struct tcp_sock *tp = tcp_sk(sk); 212 struct tcp_sock *tp = tcp_sk(sk);
209 struct inet_connection_sock *icsk = inet_csk(sk); 213 struct inet_connection_sock *icsk = inet_csk(sk);
210 214
211 bh_lock_sock(sk); 215 bh_lock_sock(sk);
212 if (sock_owned_by_user(sk)) { 216 if (sock_owned_by_user(sk)) {
213 /* Try again later. */ 217 /* Try again later. */
214 icsk->icsk_ack.blocked = 1; 218 icsk->icsk_ack.blocked = 1;
215 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); 219 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
216 sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); 220 sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
217 goto out_unlock; 221 goto out_unlock;
218 } 222 }
219 223
220 sk_mem_reclaim_partial(sk); 224 sk_mem_reclaim_partial(sk);
221 225
222 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) 226 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
223 goto out; 227 goto out;
224 228
225 if (time_after(icsk->icsk_ack.timeout, jiffies)) { 229 if (time_after(icsk->icsk_ack.timeout, jiffies)) {
226 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); 230 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
227 goto out; 231 goto out;
228 } 232 }
229 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; 233 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
230 234
231 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 235 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
232 struct sk_buff *skb; 236 struct sk_buff *skb;
233 237
234 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED); 238 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
235 239
236 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 240 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
237 sk_backlog_rcv(sk, skb); 241 sk_backlog_rcv(sk, skb);
238 242
239 tp->ucopy.memory = 0; 243 tp->ucopy.memory = 0;
240 } 244 }
241 245
242 if (inet_csk_ack_scheduled(sk)) { 246 if (inet_csk_ack_scheduled(sk)) {
243 if (!icsk->icsk_ack.pingpong) { 247 if (!icsk->icsk_ack.pingpong) {
244 /* Delayed ACK missed: inflate ATO. */ 248 /* Delayed ACK missed: inflate ATO. */
245 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); 249 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
246 } else { 250 } else {
247 /* Delayed ACK missed: leave pingpong mode and 251 /* Delayed ACK missed: leave pingpong mode and
248 * deflate ATO. 252 * deflate ATO.
249 */ 253 */
250 icsk->icsk_ack.pingpong = 0; 254 icsk->icsk_ack.pingpong = 0;
251 icsk->icsk_ack.ato = TCP_ATO_MIN; 255 icsk->icsk_ack.ato = TCP_ATO_MIN;
252 } 256 }
253 tcp_send_ack(sk); 257 tcp_send_ack(sk);
254 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); 258 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
255 } 259 }
256 TCP_CHECK_TIMER(sk); 260 TCP_CHECK_TIMER(sk);
257 261
258 out: 262 out:
259 if (tcp_memory_pressure) 263 if (tcp_memory_pressure)
260 sk_mem_reclaim(sk); 264 sk_mem_reclaim(sk);
261 out_unlock: 265 out_unlock:
262 bh_unlock_sock(sk); 266 bh_unlock_sock(sk);
263 sock_put(sk); 267 sock_put(sk);
264 } 268 }
265 269
266 static void tcp_probe_timer(struct sock *sk) 270 static void tcp_probe_timer(struct sock *sk)
267 { 271 {
268 struct inet_connection_sock *icsk = inet_csk(sk); 272 struct inet_connection_sock *icsk = inet_csk(sk);
269 struct tcp_sock *tp = tcp_sk(sk); 273 struct tcp_sock *tp = tcp_sk(sk);
270 int max_probes; 274 int max_probes;
271 275
272 if (tp->packets_out || !tcp_send_head(sk)) { 276 if (tp->packets_out || !tcp_send_head(sk)) {
273 icsk->icsk_probes_out = 0; 277 icsk->icsk_probes_out = 0;
274 return; 278 return;
275 } 279 }
276 280
277 /* *WARNING* RFC 1122 forbids this 281 /* *WARNING* RFC 1122 forbids this
278 * 282 *
279 * It doesn't AFAIK, because we kill the retransmit timer -AK 283 * It doesn't AFAIK, because we kill the retransmit timer -AK
280 * 284 *
281 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing 285 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
282 * this behaviour in Solaris down as a bug fix. [AC] 286 * this behaviour in Solaris down as a bug fix. [AC]
283 * 287 *
284 * Let me explain. icsk_probes_out is zeroed by incoming ACKs 288 * Let me explain. icsk_probes_out is zeroed by incoming ACKs
285 * even if they advertise zero window. Hence, connection is killed only 289 * even if they advertise zero window. Hence, connection is killed only
286 * if we received no ACKs for normal connection timeout. It is not killed 290 * if we received no ACKs for normal connection timeout. It is not killed
287 * only because window stays zero for some time, window may be zero 291 * only because window stays zero for some time, window may be zero
288 * until armageddon and even later. We are in full accordance 292 * until armageddon and even later. We are in full accordance
289 * with RFCs, only probe timer combines both retransmission timeout 293 * with RFCs, only probe timer combines both retransmission timeout
290 * and probe timeout in one bottle. --ANK 294 * and probe timeout in one bottle. --ANK
291 */ 295 */
292 max_probes = sysctl_tcp_retries2; 296 max_probes = sysctl_tcp_retries2;
293 297
294 if (sock_flag(sk, SOCK_DEAD)) { 298 if (sock_flag(sk, SOCK_DEAD)) {
295 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); 299 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
296 300
297 max_probes = tcp_orphan_retries(sk, alive); 301 max_probes = tcp_orphan_retries(sk, alive);
298 302
299 if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes)) 303 if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
300 return; 304 return;
301 } 305 }
302 306
303 if (icsk->icsk_probes_out > max_probes) { 307 if (icsk->icsk_probes_out > max_probes) {
304 tcp_write_err(sk); 308 tcp_write_err(sk);
305 } else { 309 } else {
306 /* Only send another probe if we didn't close things up. */ 310 /* Only send another probe if we didn't close things up. */
307 tcp_send_probe0(sk); 311 tcp_send_probe0(sk);
308 } 312 }
309 } 313 }
310 314
311 /* 315 /*
312 * The TCP retransmit timer. 316 * The TCP retransmit timer.
313 */ 317 */
314 318
315 void tcp_retransmit_timer(struct sock *sk) 319 void tcp_retransmit_timer(struct sock *sk)
316 { 320 {
317 struct tcp_sock *tp = tcp_sk(sk); 321 struct tcp_sock *tp = tcp_sk(sk);
318 struct inet_connection_sock *icsk = inet_csk(sk); 322 struct inet_connection_sock *icsk = inet_csk(sk);
319 323
320 if (!tp->packets_out) 324 if (!tp->packets_out)
321 goto out; 325 goto out;
322 326
323 WARN_ON(tcp_write_queue_empty(sk)); 327 WARN_ON(tcp_write_queue_empty(sk));
324 328
325 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && 329 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
326 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { 330 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
327 /* Receiver dastardly shrinks window. Our retransmits 331 /* Receiver dastardly shrinks window. Our retransmits
328 * become zero probes, but we should not timeout this 332 * become zero probes, but we should not timeout this
329 * connection. If the socket is an orphan, time it out, 333 * connection. If the socket is an orphan, time it out,
330 * we cannot allow such beasts to hang infinitely. 334 * we cannot allow such beasts to hang infinitely.
331 */ 335 */
332 #ifdef TCP_DEBUG 336 #ifdef TCP_DEBUG
333 struct inet_sock *inet = inet_sk(sk); 337 struct inet_sock *inet = inet_sk(sk);
334 if (sk->sk_family == AF_INET) { 338 if (sk->sk_family == AF_INET) {
335 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", 339 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
336 &inet->inet_daddr, ntohs(inet->inet_dport), 340 &inet->inet_daddr, ntohs(inet->inet_dport),
337 inet->inet_num, tp->snd_una, tp->snd_nxt); 341 inet->inet_num, tp->snd_una, tp->snd_nxt);
338 } 342 }
339 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 343 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
340 else if (sk->sk_family == AF_INET6) { 344 else if (sk->sk_family == AF_INET6) {
341 struct ipv6_pinfo *np = inet6_sk(sk); 345 struct ipv6_pinfo *np = inet6_sk(sk);
342 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", 346 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
343 &np->daddr, ntohs(inet->inet_dport), 347 &np->daddr, ntohs(inet->inet_dport),
344 inet->inet_num, tp->snd_una, tp->snd_nxt); 348 inet->inet_num, tp->snd_una, tp->snd_nxt);
345 } 349 }
346 #endif 350 #endif
347 #endif 351 #endif
348 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { 352 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
349 tcp_write_err(sk); 353 tcp_write_err(sk);
350 goto out; 354 goto out;
351 } 355 }
352 tcp_enter_loss(sk, 0); 356 tcp_enter_loss(sk, 0);
353 tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); 357 tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
354 __sk_dst_reset(sk); 358 __sk_dst_reset(sk);
355 goto out_reset_timer; 359 goto out_reset_timer;
356 } 360 }
357 361
358 if (tcp_write_timeout(sk)) 362 if (tcp_write_timeout(sk))
359 goto out; 363 goto out;
360 364
361 if (icsk->icsk_retransmits == 0) { 365 if (icsk->icsk_retransmits == 0) {
362 int mib_idx; 366 int mib_idx;
363 367
364 if (icsk->icsk_ca_state == TCP_CA_Disorder) { 368 if (icsk->icsk_ca_state == TCP_CA_Disorder) {
365 if (tcp_is_sack(tp)) 369 if (tcp_is_sack(tp))
366 mib_idx = LINUX_MIB_TCPSACKFAILURES; 370 mib_idx = LINUX_MIB_TCPSACKFAILURES;
367 else 371 else
368 mib_idx = LINUX_MIB_TCPRENOFAILURES; 372 mib_idx = LINUX_MIB_TCPRENOFAILURES;
369 } else if (icsk->icsk_ca_state == TCP_CA_Recovery) { 373 } else if (icsk->icsk_ca_state == TCP_CA_Recovery) {
370 if (tcp_is_sack(tp)) 374 if (tcp_is_sack(tp))
371 mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL; 375 mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
372 else 376 else
373 mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL; 377 mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
374 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { 378 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
375 mib_idx = LINUX_MIB_TCPLOSSFAILURES; 379 mib_idx = LINUX_MIB_TCPLOSSFAILURES;
376 } else { 380 } else {
377 mib_idx = LINUX_MIB_TCPTIMEOUTS; 381 mib_idx = LINUX_MIB_TCPTIMEOUTS;
378 } 382 }
379 NET_INC_STATS_BH(sock_net(sk), mib_idx); 383 NET_INC_STATS_BH(sock_net(sk), mib_idx);
380 } 384 }
381 385
382 if (tcp_use_frto(sk)) { 386 if (tcp_use_frto(sk)) {
383 tcp_enter_frto(sk); 387 tcp_enter_frto(sk);
384 } else { 388 } else {
385 tcp_enter_loss(sk, 0); 389 tcp_enter_loss(sk, 0);
386 } 390 }
387 391
388 if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) { 392 if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
389 /* Retransmission failed because of local congestion, 393 /* Retransmission failed because of local congestion,
390 * do not backoff. 394 * do not backoff.
391 */ 395 */
392 if (!icsk->icsk_retransmits) 396 if (!icsk->icsk_retransmits)
393 icsk->icsk_retransmits = 1; 397 icsk->icsk_retransmits = 1;
394 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 398 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
395 min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL), 399 min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
396 TCP_RTO_MAX); 400 TCP_RTO_MAX);
397 goto out; 401 goto out;
398 } 402 }
399 403
400 /* Increase the timeout each time we retransmit. Note that 404 /* Increase the timeout each time we retransmit. Note that
401 * we do not increase the rtt estimate. rto is initialized 405 * we do not increase the rtt estimate. rto is initialized
402 * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests 406 * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
403 * that doubling rto each time is the least we can get away with. 407 * that doubling rto each time is the least we can get away with.
404 * In KA9Q, Karn uses this for the first few times, and then 408 * In KA9Q, Karn uses this for the first few times, and then
405 * goes to quadratic. netBSD doubles, but only goes up to *64, 409 * goes to quadratic. netBSD doubles, but only goes up to *64,
406 * and clamps at 1 to 64 sec afterwards. Note that 120 sec is 410 * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
407 * defined in the protocol as the maximum possible RTT. I guess 411 * defined in the protocol as the maximum possible RTT. I guess
408 * we'll have to use something other than TCP to talk to the 412 * we'll have to use something other than TCP to talk to the
409 * University of Mars. 413 * University of Mars.
410 * 414 *
411 * PAWS allows us longer timeouts and large windows, so once 415 * PAWS allows us longer timeouts and large windows, so once
412 * implemented ftp to mars will work nicely. We will have to fix 416 * implemented ftp to mars will work nicely. We will have to fix
413 * the 120 second clamps though! 417 * the 120 second clamps though!
414 */ 418 */
415 icsk->icsk_backoff++; 419 icsk->icsk_backoff++;
416 icsk->icsk_retransmits++; 420 icsk->icsk_retransmits++;
417 421
418 out_reset_timer: 422 out_reset_timer:
419 /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is 423 /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
420 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this 424 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
421 * might be increased if the stream oscillates between thin and thick, 425 * might be increased if the stream oscillates between thin and thick,
422 * thus the old value might already be too high compared to the value 426 * thus the old value might already be too high compared to the value
423 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without 427 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
424 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating 428 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
425 * exponential backoff behaviour to avoid continuing to hammer 429 * exponential backoff behaviour to avoid continuing to hammer
426 * linear-timeout retransmissions into a black hole 430 * linear-timeout retransmissions into a black hole
427 */ 431 */
428 if (sk->sk_state == TCP_ESTABLISHED && 432 if (sk->sk_state == TCP_ESTABLISHED &&
429 (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) && 433 (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
430 tcp_stream_is_thin(tp) && 434 tcp_stream_is_thin(tp) &&
431 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { 435 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
432 icsk->icsk_backoff = 0; 436 icsk->icsk_backoff = 0;
433 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); 437 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
434 } else { 438 } else {
435 /* Use normal (exponential) backoff */ 439 /* Use normal (exponential) backoff */
436 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 440 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
437 } 441 }
438 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 442 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
439 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 443 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
440 __sk_dst_reset(sk); 444 __sk_dst_reset(sk);
441 445
442 out:; 446 out:;
443 } 447 }
444 448
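The linear-timeout branch in out_reset_timer above is opt-in: it triggers either through the tcp_thin_linear_timeouts sysctl or per socket via tp->thin_lto. A hedged user-space sketch of the per-socket switch, assuming the TCP_THIN_LINEAR_TIMEOUTS option (value 16 on kernels that provide it):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_THIN_LINEAR_TIMEOUTS
#define TCP_THIN_LINEAR_TIMEOUTS 16	/* assumed value; missing in older headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;

	if (fd < 0)
		return 1;
	/* Ask for linear (non-backed-off) retransmission timeouts while the
	 * stream is thin (roughly, fewer than four segments in flight). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &one, sizeof(one)) < 0)
		perror("setsockopt(TCP_THIN_LINEAR_TIMEOUTS)");
	return 0;
}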
445 static void tcp_write_timer(unsigned long data) 449 static void tcp_write_timer(unsigned long data)
446 { 450 {
447 struct sock *sk = (struct sock *)data; 451 struct sock *sk = (struct sock *)data;
448 struct inet_connection_sock *icsk = inet_csk(sk); 452 struct inet_connection_sock *icsk = inet_csk(sk);
449 int event; 453 int event;
450 454
451 bh_lock_sock(sk); 455 bh_lock_sock(sk);
452 if (sock_owned_by_user(sk)) { 456 if (sock_owned_by_user(sk)) {
453 /* Try again later */ 457 /* Try again later */
454 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); 458 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
455 goto out_unlock; 459 goto out_unlock;
456 } 460 }
457 461
458 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) 462 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
459 goto out; 463 goto out;
460 464
461 if (time_after(icsk->icsk_timeout, jiffies)) { 465 if (time_after(icsk->icsk_timeout, jiffies)) {
462 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); 466 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
463 goto out; 467 goto out;
464 } 468 }
465 469
466 event = icsk->icsk_pending; 470 event = icsk->icsk_pending;
467 icsk->icsk_pending = 0; 471 icsk->icsk_pending = 0;
468 472
469 switch (event) { 473 switch (event) {
470 case ICSK_TIME_RETRANS: 474 case ICSK_TIME_RETRANS:
471 tcp_retransmit_timer(sk); 475 tcp_retransmit_timer(sk);
472 break; 476 break;
473 case ICSK_TIME_PROBE0: 477 case ICSK_TIME_PROBE0:
474 tcp_probe_timer(sk); 478 tcp_probe_timer(sk);
475 break; 479 break;
476 } 480 }
477 TCP_CHECK_TIMER(sk); 481 TCP_CHECK_TIMER(sk);
478 482
479 out: 483 out:
480 sk_mem_reclaim(sk); 484 sk_mem_reclaim(sk);
481 out_unlock: 485 out_unlock:
482 bh_unlock_sock(sk); 486 bh_unlock_sock(sk);
483 sock_put(sk); 487 sock_put(sk);
484 } 488 }
485 489
486 /* 490 /*
487 * Timer for listening sockets 491 * Timer for listening sockets
488 */ 492 */
489 493
490 static void tcp_synack_timer(struct sock *sk) 494 static void tcp_synack_timer(struct sock *sk)
491 { 495 {
492 inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, 496 inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
493 TCP_TIMEOUT_INIT, TCP_RTO_MAX); 497 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
494 } 498 }
495 499
496 void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req) 500 void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
497 { 501 {
498 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS); 502 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
499 } 503 }
500 EXPORT_SYMBOL(tcp_syn_ack_timeout); 504 EXPORT_SYMBOL(tcp_syn_ack_timeout);
501 505
502 void tcp_set_keepalive(struct sock *sk, int val) 506 void tcp_set_keepalive(struct sock *sk, int val)
503 { 507 {
504 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 508 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
505 return; 509 return;
506 510
507 if (val && !sock_flag(sk, SOCK_KEEPOPEN)) 511 if (val && !sock_flag(sk, SOCK_KEEPOPEN))
508 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); 512 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
509 else if (!val) 513 else if (!val)
510 inet_csk_delete_keepalive_timer(sk); 514 inet_csk_delete_keepalive_timer(sk);
511 } 515 }
512 516
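tcp_set_keepalive() above only arms or disarms the timer; the timing itself comes from the per-socket keepalive parameters, falling back to the sysctl defaults read by keepalive_time_when() and friends. A minimal user-space sketch using the standard Linux socket options:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	if (fd < 0)
		return 1;
	/* Arm the keepalive timer for this socket (sets SOCK_KEEPOPEN). */
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	/* First probe after 60 s of idle time... */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	/* ...then every 10 s... */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	/* ...and give up (reset + ETIMEDOUT, as in tcp_keepalive_timer) after
	 * five unanswered probes. */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
	return 0;
}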
513 517
514 static void tcp_keepalive_timer (unsigned long data) 518 static void tcp_keepalive_timer (unsigned long data)
515 { 519 {
516 struct sock *sk = (struct sock *) data; 520 struct sock *sk = (struct sock *) data;
517 struct inet_connection_sock *icsk = inet_csk(sk); 521 struct inet_connection_sock *icsk = inet_csk(sk);
518 struct tcp_sock *tp = tcp_sk(sk); 522 struct tcp_sock *tp = tcp_sk(sk);
519 u32 elapsed; 523 u32 elapsed;
520 524
521 /* Only process if socket is not in use. */ 525 /* Only process if socket is not in use. */
522 bh_lock_sock(sk); 526 bh_lock_sock(sk);
523 if (sock_owned_by_user(sk)) { 527 if (sock_owned_by_user(sk)) {
524 /* Try again later. */ 528 /* Try again later. */
525 inet_csk_reset_keepalive_timer (sk, HZ/20); 529 inet_csk_reset_keepalive_timer (sk, HZ/20);
526 goto out; 530 goto out;
527 } 531 }
528 532
529 if (sk->sk_state == TCP_LISTEN) { 533 if (sk->sk_state == TCP_LISTEN) {
530 tcp_synack_timer(sk); 534 tcp_synack_timer(sk);
531 goto out; 535 goto out;
532 } 536 }
533 537
534 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { 538 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
535 if (tp->linger2 >= 0) { 539 if (tp->linger2 >= 0) {
536 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; 540 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
537 541
538 if (tmo > 0) { 542 if (tmo > 0) {
539 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 543 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
540 goto out; 544 goto out;
541 } 545 }
542 } 546 }
543 tcp_send_active_reset(sk, GFP_ATOMIC); 547 tcp_send_active_reset(sk, GFP_ATOMIC);
544 goto death; 548 goto death;
545 } 549 }
546 550
547 if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) 551 if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
548 goto out; 552 goto out;
549 553
550 elapsed = keepalive_time_when(tp); 554 elapsed = keepalive_time_when(tp);
551 555
552 /* It is alive without keepalive 8) */ 556 /* It is alive without keepalive 8) */
553 if (tp->packets_out || tcp_send_head(sk)) 557 if (tp->packets_out || tcp_send_head(sk))
554 goto resched; 558 goto resched;
555 559
556 elapsed = keepalive_time_elapsed(tp); 560 elapsed = keepalive_time_elapsed(tp);
557 561
558 if (elapsed >= keepalive_time_when(tp)) { 562 if (elapsed >= keepalive_time_when(tp)) {
559 if (icsk->icsk_probes_out >= keepalive_probes(tp)) { 563 if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
560 tcp_send_active_reset(sk, GFP_ATOMIC); 564 tcp_send_active_reset(sk, GFP_ATOMIC);
561 tcp_write_err(sk); 565 tcp_write_err(sk);
562 goto out; 566 goto out;
563 } 567 }
564 if (tcp_write_wakeup(sk) <= 0) { 568 if (tcp_write_wakeup(sk) <= 0) {
565 icsk->icsk_probes_out++; 569 icsk->icsk_probes_out++;
566 elapsed = keepalive_intvl_when(tp); 570 elapsed = keepalive_intvl_when(tp);
567 } else { 571 } else {
568 /* If keepalive was lost due to local congestion, 572 /* If keepalive was lost due to local congestion,
569 * try harder. 573 * try harder.
570 */ 574 */
571 elapsed = TCP_RESOURCE_PROBE_INTERVAL; 575 elapsed = TCP_RESOURCE_PROBE_INTERVAL;
572 } 576 }
573 } else { 577 } else {
574 /* It is tp->rcv_tstamp + keepalive_time_when(tp) */ 578 /* It is tp->rcv_tstamp + keepalive_time_when(tp) */
575 elapsed = keepalive_time_when(tp) - elapsed; 579 elapsed = keepalive_time_when(tp) - elapsed;
576 } 580 }
577 581
578 TCP_CHECK_TIMER(sk); 582 TCP_CHECK_TIMER(sk);
579 sk_mem_reclaim(sk); 583 sk_mem_reclaim(sk);
580 584
581 resched: 585 resched:
582 inet_csk_reset_keepalive_timer (sk, elapsed); 586 inet_csk_reset_keepalive_timer (sk, elapsed);
583 goto out; 587 goto out;
584 588
585 death: 589 death:
586 tcp_done(sk); 590 tcp_done(sk);
587 591
588 out: 592 out:
589 bh_unlock_sock(sk); 593 bh_unlock_sock(sk);
590 sock_put(sk); 594 sock_put(sk);
591 } 595 }
592 596
net/mac80211/rx.c
1 /* 1 /*
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12 #include <linux/jiffies.h> 12 #include <linux/jiffies.h>
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/skbuff.h> 15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h> 16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h> 17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h> 18 #include <linux/rcupdate.h>
19 #include <net/mac80211.h> 19 #include <net/mac80211.h>
20 #include <net/ieee80211_radiotap.h> 20 #include <net/ieee80211_radiotap.h>
21 21
22 #include "ieee80211_i.h" 22 #include "ieee80211_i.h"
23 #include "driver-ops.h" 23 #include "driver-ops.h"
24 #include "led.h" 24 #include "led.h"
25 #include "mesh.h" 25 #include "mesh.h"
26 #include "wep.h" 26 #include "wep.h"
27 #include "wpa.h" 27 #include "wpa.h"
28 #include "tkip.h" 28 #include "tkip.h"
29 #include "wme.h" 29 #include "wme.h"
30 30
31 /* 31 /*
32 * monitor mode reception 32 * monitor mode reception
33 * 33 *
34 * This function cleans up the SKB, i.e. it removes all the stuff 34 * This function cleans up the SKB, i.e. it removes all the stuff
35 * only useful for monitoring. 35 * only useful for monitoring.
36 */ 36 */
37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, 37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
38 struct sk_buff *skb) 38 struct sk_buff *skb)
39 { 39 {
40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
41 if (likely(skb->len > FCS_LEN)) 41 if (likely(skb->len > FCS_LEN))
42 __pskb_trim(skb, skb->len - FCS_LEN); 42 __pskb_trim(skb, skb->len - FCS_LEN);
43 else { 43 else {
44 /* driver bug */ 44 /* driver bug */
45 WARN_ON(1); 45 WARN_ON(1);
46 dev_kfree_skb(skb); 46 dev_kfree_skb(skb);
47 skb = NULL; 47 skb = NULL;
48 } 48 }
49 } 49 }
50 50
51 return skb; 51 return skb;
52 } 52 }
53 53
54 static inline int should_drop_frame(struct sk_buff *skb, 54 static inline int should_drop_frame(struct sk_buff *skb,
55 int present_fcs_len) 55 int present_fcs_len)
56 { 56 {
57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
59 59
60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
61 return 1; 61 return 1;
62 if (unlikely(skb->len < 16 + present_fcs_len)) 62 if (unlikely(skb->len < 16 + present_fcs_len))
63 return 1; 63 return 1;
64 if (ieee80211_is_ctl(hdr->frame_control) && 64 if (ieee80211_is_ctl(hdr->frame_control) &&
65 !ieee80211_is_pspoll(hdr->frame_control) && 65 !ieee80211_is_pspoll(hdr->frame_control) &&
66 !ieee80211_is_back_req(hdr->frame_control)) 66 !ieee80211_is_back_req(hdr->frame_control))
67 return 1; 67 return 1;
68 return 0; 68 return 0;
69 } 69 }
70 70
71 static int 71 static int
72 ieee80211_rx_radiotap_len(struct ieee80211_local *local, 72 ieee80211_rx_radiotap_len(struct ieee80211_local *local,
73 struct ieee80211_rx_status *status) 73 struct ieee80211_rx_status *status)
74 { 74 {
75 int len; 75 int len;
76 76
77 /* always present fields */ 77 /* always present fields */
78 len = sizeof(struct ieee80211_radiotap_header) + 9; 78 len = sizeof(struct ieee80211_radiotap_header) + 9;
79 79
80 if (status->flag & RX_FLAG_TSFT) 80 if (status->flag & RX_FLAG_TSFT)
81 len += 8; 81 len += 8;
82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
83 len += 1; 83 len += 1;
84 84
85 if (len & 1) /* padding for RX_FLAGS if necessary */ 85 if (len & 1) /* padding for RX_FLAGS if necessary */
86 len++; 86 len++;
87 87
88 return len; 88 return len;
89 } 89 }
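The length computed here is what ieee80211_rx_monitor() later reserves as headroom for the radiotap header. A stand-alone sketch of the same arithmetic, assuming the usual 8-byte radiotap header and hypothetical have_tsft/have_dbm_signal flags in place of the status/hw checks:

/* Illustrative sketch, not part of rx.c. */
#include <stdio.h>

#define RADIOTAP_HDR_LEN 8      /* assumed sizeof(struct ieee80211_radiotap_header) */

static int rx_radiotap_len(int have_tsft, int have_dbm_signal)
{
        int len = RADIOTAP_HDR_LEN + 9; /* flags, rate, channel, antenna, rx_flags */

        if (have_tsft)
                len += 8;               /* 64-bit TSFT */
        if (have_dbm_signal)
                len += 1;               /* dBm antenna signal */
        if (len & 1)                    /* pad so the 2-byte RX_FLAGS field stays aligned */
                len++;
        return len;
}

int main(void)
{
        printf("%d %d %d %d\n",
               rx_radiotap_len(0, 0),   /* 18 (17 rounded up) */
               rx_radiotap_len(1, 0),   /* 26 */
               rx_radiotap_len(0, 1),   /* 18 */
               rx_radiotap_len(1, 1));  /* 26 */
        return 0;
}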
90 90
91 /* 91 /*
92 * ieee80211_add_rx_radiotap_header - add radiotap header 92 * ieee80211_add_rx_radiotap_header - add radiotap header
93 * 93 *
94 * add a radiotap header containing all the fields which the hardware provided. 94 * add a radiotap header containing all the fields which the hardware provided.
95 */ 95 */
96 static void 96 static void
97 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 97 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
98 struct sk_buff *skb, 98 struct sk_buff *skb,
99 struct ieee80211_rate *rate, 99 struct ieee80211_rate *rate,
100 int rtap_len) 100 int rtap_len)
101 { 101 {
102 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 102 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
103 struct ieee80211_radiotap_header *rthdr; 103 struct ieee80211_radiotap_header *rthdr;
104 unsigned char *pos; 104 unsigned char *pos;
105 u16 rx_flags = 0; 105 u16 rx_flags = 0;
106 106
107 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); 107 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
108 memset(rthdr, 0, rtap_len); 108 memset(rthdr, 0, rtap_len);
109 109
110 /* radiotap header, set always present flags */ 110 /* radiotap header, set always present flags */
111 rthdr->it_present = 111 rthdr->it_present =
112 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 112 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
113 (1 << IEEE80211_RADIOTAP_CHANNEL) | 113 (1 << IEEE80211_RADIOTAP_CHANNEL) |
114 (1 << IEEE80211_RADIOTAP_ANTENNA) | 114 (1 << IEEE80211_RADIOTAP_ANTENNA) |
115 (1 << IEEE80211_RADIOTAP_RX_FLAGS)); 115 (1 << IEEE80211_RADIOTAP_RX_FLAGS));
116 rthdr->it_len = cpu_to_le16(rtap_len); 116 rthdr->it_len = cpu_to_le16(rtap_len);
117 117
118 pos = (unsigned char *)(rthdr+1); 118 pos = (unsigned char *)(rthdr+1);
119 119
120 /* the order of the following fields is important */ 120 /* the order of the following fields is important */
121 121
122 /* IEEE80211_RADIOTAP_TSFT */ 122 /* IEEE80211_RADIOTAP_TSFT */
123 if (status->flag & RX_FLAG_TSFT) { 123 if (status->flag & RX_FLAG_TSFT) {
124 put_unaligned_le64(status->mactime, pos); 124 put_unaligned_le64(status->mactime, pos);
125 rthdr->it_present |= 125 rthdr->it_present |=
126 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 126 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
127 pos += 8; 127 pos += 8;
128 } 128 }
129 129
130 /* IEEE80211_RADIOTAP_FLAGS */ 130 /* IEEE80211_RADIOTAP_FLAGS */
131 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 131 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
132 *pos |= IEEE80211_RADIOTAP_F_FCS; 132 *pos |= IEEE80211_RADIOTAP_F_FCS;
133 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 133 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
134 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 134 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
135 if (status->flag & RX_FLAG_SHORTPRE) 135 if (status->flag & RX_FLAG_SHORTPRE)
136 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 136 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
137 pos++; 137 pos++;
138 138
139 /* IEEE80211_RADIOTAP_RATE */ 139 /* IEEE80211_RADIOTAP_RATE */
140 if (status->flag & RX_FLAG_HT) { 140 if (status->flag & RX_FLAG_HT) {
141 /* 141 /*
142 * TODO: add following information into radiotap header once 142 * TODO: add following information into radiotap header once
143 * suitable fields are defined for it: 143 * suitable fields are defined for it:
144 * - MCS index (status->rate_idx) 144 * - MCS index (status->rate_idx)
145 * - HT40 (status->flag & RX_FLAG_40MHZ) 145 * - HT40 (status->flag & RX_FLAG_40MHZ)
146 * - short-GI (status->flag & RX_FLAG_SHORT_GI) 146 * - short-GI (status->flag & RX_FLAG_SHORT_GI)
147 */ 147 */
148 *pos = 0; 148 *pos = 0;
149 } else { 149 } else {
150 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 150 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
151 *pos = rate->bitrate / 5; 151 *pos = rate->bitrate / 5;
152 } 152 }
153 pos++; 153 pos++;
154 154
155 /* IEEE80211_RADIOTAP_CHANNEL */ 155 /* IEEE80211_RADIOTAP_CHANNEL */
156 put_unaligned_le16(status->freq, pos); 156 put_unaligned_le16(status->freq, pos);
157 pos += 2; 157 pos += 2;
158 if (status->band == IEEE80211_BAND_5GHZ) 158 if (status->band == IEEE80211_BAND_5GHZ)
159 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, 159 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
160 pos); 160 pos);
161 else if (status->flag & RX_FLAG_HT) 161 else if (status->flag & RX_FLAG_HT)
162 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, 162 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
163 pos); 163 pos);
164 else if (rate->flags & IEEE80211_RATE_ERP_G) 164 else if (rate->flags & IEEE80211_RATE_ERP_G)
165 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, 165 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
166 pos); 166 pos);
167 else 167 else
168 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, 168 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
169 pos); 169 pos);
170 pos += 2; 170 pos += 2;
171 171
172 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 172 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
173 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { 173 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
174 *pos = status->signal; 174 *pos = status->signal;
175 rthdr->it_present |= 175 rthdr->it_present |=
176 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 176 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
177 pos++; 177 pos++;
178 } 178 }
179 179
180 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 180 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
181 181
182 /* IEEE80211_RADIOTAP_ANTENNA */ 182 /* IEEE80211_RADIOTAP_ANTENNA */
183 *pos = status->antenna; 183 *pos = status->antenna;
184 pos++; 184 pos++;
185 185
186 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 186 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
187 187
188 /* IEEE80211_RADIOTAP_RX_FLAGS */ 188 /* IEEE80211_RADIOTAP_RX_FLAGS */
189 /* ensure 2 byte alignment for the 2 byte field as required */ 189 /* ensure 2 byte alignment for the 2 byte field as required */
190 if ((pos - (u8 *)rthdr) & 1) 190 if ((pos - (u8 *)rthdr) & 1)
191 pos++; 191 pos++;
192 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 192 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
193 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 193 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
194 put_unaligned_le16(rx_flags, pos); 194 put_unaligned_le16(rx_flags, pos);
195 pos += 2; 195 pos += 2;
196 } 196 }
197 197
198 /* 198 /*
199 * This function copies a received frame to all monitor interfaces and 199 * This function copies a received frame to all monitor interfaces and
200 * returns a cleaned-up SKB that no longer includes the FCS nor the 200 * returns a cleaned-up SKB that no longer includes the FCS nor the
201 * radiotap header the driver might have added. 201 * radiotap header the driver might have added.
202 */ 202 */
203 static struct sk_buff * 203 static struct sk_buff *
204 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 204 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
205 struct ieee80211_rate *rate) 205 struct ieee80211_rate *rate)
206 { 206 {
207 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 207 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
208 struct ieee80211_sub_if_data *sdata; 208 struct ieee80211_sub_if_data *sdata;
209 int needed_headroom = 0; 209 int needed_headroom = 0;
210 struct sk_buff *skb, *skb2; 210 struct sk_buff *skb, *skb2;
211 struct net_device *prev_dev = NULL; 211 struct net_device *prev_dev = NULL;
212 int present_fcs_len = 0; 212 int present_fcs_len = 0;
213 213
214 /* 214 /*
215 * First, we may need to make a copy of the skb because 215 * First, we may need to make a copy of the skb because
216 * (1) we need to modify it for radiotap (if not present), and 216 * (1) we need to modify it for radiotap (if not present), and
217 * (2) the other RX handlers will modify the skb we got. 217 * (2) the other RX handlers will modify the skb we got.
218 * 218 *
219 * We don't need to, of course, if we aren't going to return 219 * We don't need to, of course, if we aren't going to return
220 * the SKB because it has a bad FCS/PLCP checksum. 220 * the SKB because it has a bad FCS/PLCP checksum.
221 */ 221 */
222 222
223 /* room for the radiotap header based on driver features */ 223 /* room for the radiotap header based on driver features */
224 needed_headroom = ieee80211_rx_radiotap_len(local, status); 224 needed_headroom = ieee80211_rx_radiotap_len(local, status);
225 225
226 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 226 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
227 present_fcs_len = FCS_LEN; 227 present_fcs_len = FCS_LEN;
228 228
229 /* make sure hdr->frame_control is on the linear part */ 229 /* make sure hdr->frame_control is on the linear part */
230 if (!pskb_may_pull(origskb, 2)) { 230 if (!pskb_may_pull(origskb, 2)) {
231 dev_kfree_skb(origskb); 231 dev_kfree_skb(origskb);
232 return NULL; 232 return NULL;
233 } 233 }
234 234
235 if (!local->monitors) { 235 if (!local->monitors) {
236 if (should_drop_frame(origskb, present_fcs_len)) { 236 if (should_drop_frame(origskb, present_fcs_len)) {
237 dev_kfree_skb(origskb); 237 dev_kfree_skb(origskb);
238 return NULL; 238 return NULL;
239 } 239 }
240 240
241 return remove_monitor_info(local, origskb); 241 return remove_monitor_info(local, origskb);
242 } 242 }
243 243
244 if (should_drop_frame(origskb, present_fcs_len)) { 244 if (should_drop_frame(origskb, present_fcs_len)) {
245 /* only need to expand headroom if necessary */ 245 /* only need to expand headroom if necessary */
246 skb = origskb; 246 skb = origskb;
247 origskb = NULL; 247 origskb = NULL;
248 248
249 /* 249 /*
250 * This shouldn't trigger often because most devices have an 250 * This shouldn't trigger often because most devices have an
251 * RX header they pull before we get here, and that should 251 * RX header they pull before we get here, and that should
252 * be big enough for our radiotap information. We should 252 * be big enough for our radiotap information. We should
253 * probably export the length to drivers so that we can have 253 * probably export the length to drivers so that we can have
254 * them allocate enough headroom to start with. 254 * them allocate enough headroom to start with.
255 */ 255 */
256 if (skb_headroom(skb) < needed_headroom && 256 if (skb_headroom(skb) < needed_headroom &&
257 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 257 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
258 dev_kfree_skb(skb); 258 dev_kfree_skb(skb);
259 return NULL; 259 return NULL;
260 } 260 }
261 } else { 261 } else {
262 /* 262 /*
263 * Need to make a copy and possibly remove radiotap header 263 * Need to make a copy and possibly remove radiotap header
264 * and FCS from the original. 264 * and FCS from the original.
265 */ 265 */
266 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); 266 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
267 267
268 origskb = remove_monitor_info(local, origskb); 268 origskb = remove_monitor_info(local, origskb);
269 269
270 if (!skb) 270 if (!skb)
271 return origskb; 271 return origskb;
272 } 272 }
273 273
274 /* prepend radiotap information */ 274 /* prepend radiotap information */
275 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 275 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
276 276
277 skb_reset_mac_header(skb); 277 skb_reset_mac_header(skb);
278 skb->ip_summed = CHECKSUM_UNNECESSARY; 278 skb->ip_summed = CHECKSUM_UNNECESSARY;
279 skb->pkt_type = PACKET_OTHERHOST; 279 skb->pkt_type = PACKET_OTHERHOST;
280 skb->protocol = htons(ETH_P_802_2); 280 skb->protocol = htons(ETH_P_802_2);
281 281
282 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 282 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
283 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 283 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
284 continue; 284 continue;
285 285
286 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 286 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
287 continue; 287 continue;
288 288
289 if (!ieee80211_sdata_running(sdata)) 289 if (!ieee80211_sdata_running(sdata))
290 continue; 290 continue;
291 291
292 if (prev_dev) { 292 if (prev_dev) {
293 skb2 = skb_clone(skb, GFP_ATOMIC); 293 skb2 = skb_clone(skb, GFP_ATOMIC);
294 if (skb2) { 294 if (skb2) {
295 skb2->dev = prev_dev; 295 skb2->dev = prev_dev;
296 netif_receive_skb(skb2); 296 netif_receive_skb(skb2);
297 } 297 }
298 } 298 }
299 299
300 prev_dev = sdata->dev; 300 prev_dev = sdata->dev;
301 sdata->dev->stats.rx_packets++; 301 sdata->dev->stats.rx_packets++;
302 sdata->dev->stats.rx_bytes += skb->len; 302 sdata->dev->stats.rx_bytes += skb->len;
303 } 303 }
304 304
305 if (prev_dev) { 305 if (prev_dev) {
306 skb->dev = prev_dev; 306 skb->dev = prev_dev;
307 netif_receive_skb(skb); 307 netif_receive_skb(skb);
308 } else 308 } else
309 dev_kfree_skb(skb); 309 dev_kfree_skb(skb);
310 310
311 return origskb; 311 return origskb;
312 } 312 }
313 313
314 314
315 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 315 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
316 { 316 {
317 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 317 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
318 int tid; 318 int tid;
319 319
320 /* does the frame have a qos control field? */ 320 /* does the frame have a qos control field? */
321 if (ieee80211_is_data_qos(hdr->frame_control)) { 321 if (ieee80211_is_data_qos(hdr->frame_control)) {
322 u8 *qc = ieee80211_get_qos_ctl(hdr); 322 u8 *qc = ieee80211_get_qos_ctl(hdr);
323 /* frame has qos control */ 323 /* frame has qos control */
324 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 324 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
325 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) 325 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
326 rx->flags |= IEEE80211_RX_AMSDU; 326 rx->flags |= IEEE80211_RX_AMSDU;
327 else 327 else
328 rx->flags &= ~IEEE80211_RX_AMSDU; 328 rx->flags &= ~IEEE80211_RX_AMSDU;
329 } else { 329 } else {
330 /* 330 /*
331 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 331 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
332 * 332 *
333 * Sequence numbers for management frames, QoS data 333 * Sequence numbers for management frames, QoS data
334 * frames with a broadcast/multicast address in the 334 * frames with a broadcast/multicast address in the
335 * Address 1 field, and all non-QoS data frames sent 335 * Address 1 field, and all non-QoS data frames sent
336 * by QoS STAs are assigned using an additional single 336 * by QoS STAs are assigned using an additional single
337 * modulo-4096 counter, [...] 337 * modulo-4096 counter, [...]
338 * 338 *
339 * We also use that counter for non-QoS STAs. 339 * We also use that counter for non-QoS STAs.
340 */ 340 */
341 tid = NUM_RX_DATA_QUEUES - 1; 341 tid = NUM_RX_DATA_QUEUES - 1;
342 } 342 }
343 343
344 rx->queue = tid; 344 rx->queue = tid;
345 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 345 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
346 * For now, set skb->priority to 0 for other cases. */ 346 * For now, set skb->priority to 0 for other cases. */
347 rx->skb->priority = (tid > 7) ? 0 : tid; 347 rx->skb->priority = (tid > 7) ? 0 : tid;
348 } 348 }
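The mapping above takes the TID from the low four bits of the first QoS-control byte, notes the A-MSDU-present bit, and uses the TID as the 802.1d priority only when it is 7 or less. A stand-alone illustration, with the mask values written out as local constants assumed to match the kernel's definitions:

/* Illustrative sketch, not part of rx.c. */
#include <stdint.h>
#include <stdio.h>

#define QOS_TID_MASK            0x0f    /* assumed value of IEEE80211_QOS_CTL_TID_MASK */
#define QOS_AMSDU_PRESENT       0x80    /* assumed A-MSDU-present bit in the QoS ctl byte */

int main(void)
{
        uint8_t qc = 0x85;                      /* example QoS control byte */
        int tid = qc & QOS_TID_MASK;            /* 5 */
        int amsdu = !!(qc & QOS_AMSDU_PRESENT); /* 1: frame carries an A-MSDU */
        int priority = (tid > 7) ? 0 : tid;     /* 802.1d tag; 0 for TIDs 8-15 */

        printf("tid=%d amsdu=%d priority=%d\n", tid, amsdu, priority);
        return 0;
}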
349 349
350 /** 350 /**
351 * DOC: Packet alignment 351 * DOC: Packet alignment
352 * 352 *
353 * Drivers always need to pass packets that are aligned to two-byte boundaries 353 * Drivers always need to pass packets that are aligned to two-byte boundaries
354 * to the stack. 354 * to the stack.
355 * 355 *
356 * Additionally, drivers should, if possible, align the payload data in a way that 356 * Additionally, drivers should, if possible, align the payload data in a way that
357 * guarantees that the contained IP header is aligned to a four-byte 357 * guarantees that the contained IP header is aligned to a four-byte
358 * boundary. In the case of regular frames, this simply means aligning the 358 * boundary. In the case of regular frames, this simply means aligning the
359 * payload to a four-byte boundary (because either the IP header is directly 359 * payload to a four-byte boundary (because either the IP header is directly
360 * contained, or IV/RFC1042 headers that have a length divisible by four are 360 * contained, or IV/RFC1042 headers that have a length divisible by four are
361 * in front of it). If the payload data is not properly aligned and the 361 * in front of it). If the payload data is not properly aligned and the
362 * architecture doesn't support efficient unaligned operations, mac80211 362 * architecture doesn't support efficient unaligned operations, mac80211
363 * will align the data. 363 * will align the data.
364 * 364 *
365 * With A-MSDU frames, however, the payload data address must equal two modulo 365 * With A-MSDU frames, however, the payload data address must equal two modulo
366 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 366 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
367 * push the IP header further back to a multiple of four again. Thankfully, the 367 * push the IP header further back to a multiple of four again. Thankfully, the
368 * specs were sane enough this time around to require padding each A-MSDU 368 * specs were sane enough this time around to require padding each A-MSDU
369 * subframe to a length that is a multiple of four. 369 * subframe to a length that is a multiple of four.
370 * 370 *
371 * Padding like Atheros hardware adds in between the 802.11 header and 371 * Padding like Atheros hardware adds in between the 802.11 header and
372 * the payload is not supported; the driver is required to move the 802.11 372 * the payload is not supported; the driver is required to move the 802.11
373 * header to be directly in front of the payload in that case. 373 * header to be directly in front of the payload in that case.
374 */ 374 */
375 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 375 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
376 { 376 {
377 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 377 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
378 WARN_ONCE((unsigned long)rx->skb->data & 1, 378 WARN_ONCE((unsigned long)rx->skb->data & 1,
379 "unaligned packet at 0x%p\n", rx->skb->data); 379 "unaligned packet at 0x%p\n", rx->skb->data);
380 #endif 380 #endif
381 } 381 }
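The rule described in the "Packet alignment" block above boils down to a check on the payload address modulo four: zero for regular frames, two for A-MSDU frames, so the 14-byte 802.3 subframe headers land the IP header back on a four-byte boundary. A minimal stand-alone version of that check (payload_is_aligned is a hypothetical helper, not a mac80211 function):

/* Illustrative sketch, not part of rx.c. */
#include <stdint.h>
#include <stdio.h>

static int payload_is_aligned(uintptr_t payload_addr, int is_amsdu)
{
        unsigned int want = is_amsdu ? 2 : 0;   /* required value of addr % 4 */

        return (payload_addr & 3) == want;
}

int main(void)
{
        printf("%d %d\n",
               payload_is_aligned(0x1000, 0),   /* regular frame on a 4-byte boundary */
               payload_is_aligned(0x1002, 1));  /* A-MSDU payload two past a boundary */
        return 0;
}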
382 382
383 383
384 /* rx handlers */ 384 /* rx handlers */
385 385
386 static ieee80211_rx_result debug_noinline 386 static ieee80211_rx_result debug_noinline
387 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) 387 ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
388 { 388 {
389 struct ieee80211_local *local = rx->local; 389 struct ieee80211_local *local = rx->local;
390 struct sk_buff *skb = rx->skb; 390 struct sk_buff *skb = rx->skb;
391 391
392 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning))) 392 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning)))
393 return ieee80211_scan_rx(rx->sdata, skb); 393 return ieee80211_scan_rx(rx->sdata, skb);
394 394
395 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) && 395 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) &&
396 (rx->flags & IEEE80211_RX_IN_SCAN))) { 396 (rx->flags & IEEE80211_RX_IN_SCAN))) {
397 /* drop all the other packets during a software scan anyway */ 397 /* drop all the other packets during a software scan anyway */
398 if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED) 398 if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
399 dev_kfree_skb(skb); 399 dev_kfree_skb(skb);
400 return RX_QUEUED; 400 return RX_QUEUED;
401 } 401 }
402 402
403 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) { 403 if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) {
404 /* scanning finished during invoking of handlers */ 404 /* scanning finished during invoking of handlers */
405 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); 405 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
406 return RX_DROP_UNUSABLE; 406 return RX_DROP_UNUSABLE;
407 } 407 }
408 408
409 return RX_CONTINUE; 409 return RX_CONTINUE;
410 } 410 }
411 411
412 412
413 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) 413 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
414 { 414 {
415 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 415 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
416 416
417 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1)) 417 if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
418 return 0; 418 return 0;
419 419
420 return ieee80211_is_robust_mgmt_frame(hdr); 420 return ieee80211_is_robust_mgmt_frame(hdr);
421 } 421 }
422 422
423 423
424 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) 424 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
425 { 425 {
426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
427 427
428 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1)) 428 if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
429 return 0; 429 return 0;
430 430
431 return ieee80211_is_robust_mgmt_frame(hdr); 431 return ieee80211_is_robust_mgmt_frame(hdr);
432 } 432 }
433 433
434 434
435 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ 435 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
436 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) 436 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
437 { 437 {
438 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; 438 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
439 struct ieee80211_mmie *mmie; 439 struct ieee80211_mmie *mmie;
440 440
441 if (skb->len < 24 + sizeof(*mmie) || 441 if (skb->len < 24 + sizeof(*mmie) ||
442 !is_multicast_ether_addr(hdr->da)) 442 !is_multicast_ether_addr(hdr->da))
443 return -1; 443 return -1;
444 444
445 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr)) 445 if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
446 return -1; /* not a robust management frame */ 446 return -1; /* not a robust management frame */
447 447
448 mmie = (struct ieee80211_mmie *) 448 mmie = (struct ieee80211_mmie *)
449 (skb->data + skb->len - sizeof(*mmie)); 449 (skb->data + skb->len - sizeof(*mmie));
450 if (mmie->element_id != WLAN_EID_MMIE || 450 if (mmie->element_id != WLAN_EID_MMIE ||
451 mmie->length != sizeof(*mmie) - 2) 451 mmie->length != sizeof(*mmie) - 2)
452 return -1; 452 return -1;
453 453
454 return le16_to_cpu(mmie->key_id); 454 return le16_to_cpu(mmie->key_id);
455 } 455 }
456 456
457 457
458 static ieee80211_rx_result 458 static ieee80211_rx_result
459 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 459 ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
460 { 460 {
461 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 461 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
462 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); 462 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
463 char *dev_addr = rx->sdata->vif.addr; 463 char *dev_addr = rx->sdata->vif.addr;
464 464
465 if (ieee80211_is_data(hdr->frame_control)) { 465 if (ieee80211_is_data(hdr->frame_control)) {
466 if (is_multicast_ether_addr(hdr->addr1)) { 466 if (is_multicast_ether_addr(hdr->addr1)) {
467 if (ieee80211_has_tods(hdr->frame_control) || 467 if (ieee80211_has_tods(hdr->frame_control) ||
468 !ieee80211_has_fromds(hdr->frame_control)) 468 !ieee80211_has_fromds(hdr->frame_control))
469 return RX_DROP_MONITOR; 469 return RX_DROP_MONITOR;
470 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0) 470 if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
471 return RX_DROP_MONITOR; 471 return RX_DROP_MONITOR;
472 } else { 472 } else {
473 if (!ieee80211_has_a4(hdr->frame_control)) 473 if (!ieee80211_has_a4(hdr->frame_control))
474 return RX_DROP_MONITOR; 474 return RX_DROP_MONITOR;
475 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0) 475 if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
476 return RX_DROP_MONITOR; 476 return RX_DROP_MONITOR;
477 } 477 }
478 } 478 }
479 479
480 /* If there is not an established peer link and this is not a peer link 480 /* If there is not an established peer link and this is not a peer link
481 * establishment frame, beacon or probe, drop the frame. 481 * establishment frame, beacon or probe, drop the frame.
482 */ 482 */
483 483
484 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { 484 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
485 struct ieee80211_mgmt *mgmt; 485 struct ieee80211_mgmt *mgmt;
486 486
487 if (!ieee80211_is_mgmt(hdr->frame_control)) 487 if (!ieee80211_is_mgmt(hdr->frame_control))
488 return RX_DROP_MONITOR; 488 return RX_DROP_MONITOR;
489 489
490 if (ieee80211_is_action(hdr->frame_control)) { 490 if (ieee80211_is_action(hdr->frame_control)) {
491 mgmt = (struct ieee80211_mgmt *)hdr; 491 mgmt = (struct ieee80211_mgmt *)hdr;
492 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK) 492 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
493 return RX_DROP_MONITOR; 493 return RX_DROP_MONITOR;
494 return RX_CONTINUE; 494 return RX_CONTINUE;
495 } 495 }
496 496
497 if (ieee80211_is_probe_req(hdr->frame_control) || 497 if (ieee80211_is_probe_req(hdr->frame_control) ||
498 ieee80211_is_probe_resp(hdr->frame_control) || 498 ieee80211_is_probe_resp(hdr->frame_control) ||
499 ieee80211_is_beacon(hdr->frame_control)) 499 ieee80211_is_beacon(hdr->frame_control))
500 return RX_CONTINUE; 500 return RX_CONTINUE;
501 501
502 return RX_DROP_MONITOR; 502 return RX_DROP_MONITOR;
503 503
504 } 504 }
505 505
506 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) 506 #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
507 507
508 if (ieee80211_is_data(hdr->frame_control) && 508 if (ieee80211_is_data(hdr->frame_control) &&
509 is_multicast_ether_addr(hdr->addr1) && 509 is_multicast_ether_addr(hdr->addr1) &&
510 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata)) 510 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
511 return RX_DROP_MONITOR; 511 return RX_DROP_MONITOR;
512 #undef msh_h_get 512 #undef msh_h_get
513 513
514 return RX_CONTINUE; 514 return RX_CONTINUE;
515 } 515 }
516 516
517 #define SEQ_MODULO 0x1000 517 #define SEQ_MODULO 0x1000
518 #define SEQ_MASK 0xfff 518 #define SEQ_MASK 0xfff
519 519
520 static inline int seq_less(u16 sq1, u16 sq2) 520 static inline int seq_less(u16 sq1, u16 sq2)
521 { 521 {
522 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1); 522 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
523 } 523 }
524 524
525 static inline u16 seq_inc(u16 sq) 525 static inline u16 seq_inc(u16 sq)
526 { 526 {
527 return (sq + 1) & SEQ_MASK; 527 return (sq + 1) & SEQ_MASK;
528 } 528 }
529 529
530 static inline u16 seq_sub(u16 sq1, u16 sq2) 530 static inline u16 seq_sub(u16 sq1, u16 sq2)
531 { 531 {
532 return (sq1 - sq2) & SEQ_MASK; 532 return (sq1 - sq2) & SEQ_MASK;
533 } 533 }
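These helpers do circular arithmetic in the 12-bit sequence-number space: a frame counts as older when it lies more than half the space behind the comparison point. A stand-alone copy with a couple of wraparound cases worked out:

/* Illustrative sketch, not part of rx.c. */
#include <stdint.h>
#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

static int seq_less(uint16_t sq1, uint16_t sq2)
{
        /* sq1 is "older" than sq2 when the forward distance sq2 -> sq1 exceeds half the space */
        return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
        return (sq1 - sq2) & SEQ_MASK;
}

int main(void)
{
        printf("%d\n", seq_less(0xffe, 0x002));         /* 1: 0xffe is before 0x002 across the wrap */
        printf("%d\n", seq_less(0x002, 0xffe));         /* 0 */
        printf("%u\n", (unsigned)seq_sub(0x002, 0xffe)); /* 4: forward distance across the wrap */
        return 0;
}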
534 534
535 535
536 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, 536 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
537 struct tid_ampdu_rx *tid_agg_rx, 537 struct tid_ampdu_rx *tid_agg_rx,
538 int index, 538 int index,
539 struct sk_buff_head *frames) 539 struct sk_buff_head *frames)
540 { 540 {
541 struct ieee80211_supported_band *sband; 541 struct ieee80211_supported_band *sband;
542 struct ieee80211_rate *rate = NULL; 542 struct ieee80211_rate *rate = NULL;
543 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 543 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
544 struct ieee80211_rx_status *status; 544 struct ieee80211_rx_status *status;
545 545
546 if (!skb) 546 if (!skb)
547 goto no_frame; 547 goto no_frame;
548 548
549 status = IEEE80211_SKB_RXCB(skb); 549 status = IEEE80211_SKB_RXCB(skb);
550 550
551 /* release the reordered frames to stack */ 551 /* release the reordered frames to stack */
552 sband = hw->wiphy->bands[status->band]; 552 sband = hw->wiphy->bands[status->band];
553 if (!(status->flag & RX_FLAG_HT)) 553 if (!(status->flag & RX_FLAG_HT))
554 rate = &sband->bitrates[status->rate_idx]; 554 rate = &sband->bitrates[status->rate_idx];
555 tid_agg_rx->stored_mpdu_num--; 555 tid_agg_rx->stored_mpdu_num--;
556 tid_agg_rx->reorder_buf[index] = NULL; 556 tid_agg_rx->reorder_buf[index] = NULL;
557 __skb_queue_tail(frames, skb); 557 __skb_queue_tail(frames, skb);
558 558
559 no_frame: 559 no_frame:
560 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 560 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
561 } 561 }
562 562
563 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw, 563 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
564 struct tid_ampdu_rx *tid_agg_rx, 564 struct tid_ampdu_rx *tid_agg_rx,
565 u16 head_seq_num, 565 u16 head_seq_num,
566 struct sk_buff_head *frames) 566 struct sk_buff_head *frames)
567 { 567 {
568 int index; 568 int index;
569 569
570 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { 570 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
571 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 571 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
572 tid_agg_rx->buf_size; 572 tid_agg_rx->buf_size;
573 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames); 573 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
574 } 574 }
575 } 575 }
576 576
577 /* 577 /*
578 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 578 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
579 * the skb was added to the buffer longer than this time ago, the earlier 579 * the skb was added to the buffer longer than this time ago, the earlier
580 * frames that have not yet been received are assumed to be lost and the skb 580 * frames that have not yet been received are assumed to be lost and the skb
581 * can be released for processing. This may also release other skb's from the 581 * can be released for processing. This may also release other skb's from the
582 * reorder buffer if there are no additional gaps between the frames. 582 * reorder buffer if there are no additional gaps between the frames.
583 */ 583 */
584 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 584 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
585 585
586 /* 586 /*
587 * As this function belongs to the RX path it must be under 587 * As this function belongs to the RX path it must be under
588 * rcu_read_lock protection. It returns false if the frame 588 * rcu_read_lock protection. It returns false if the frame
589 * can be processed immediately, true if it was consumed. 589 * can be processed immediately, true if it was consumed.
590 */ 590 */
591 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 591 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
592 struct tid_ampdu_rx *tid_agg_rx, 592 struct tid_ampdu_rx *tid_agg_rx,
593 struct sk_buff *skb, 593 struct sk_buff *skb,
594 struct sk_buff_head *frames) 594 struct sk_buff_head *frames)
595 { 595 {
596 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 596 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
597 u16 sc = le16_to_cpu(hdr->seq_ctrl); 597 u16 sc = le16_to_cpu(hdr->seq_ctrl);
598 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 598 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
599 u16 head_seq_num, buf_size; 599 u16 head_seq_num, buf_size;
600 int index; 600 int index;
601 601
602 buf_size = tid_agg_rx->buf_size; 602 buf_size = tid_agg_rx->buf_size;
603 head_seq_num = tid_agg_rx->head_seq_num; 603 head_seq_num = tid_agg_rx->head_seq_num;
604 604
605 /* frame with out of date sequence number */ 605 /* frame with out of date sequence number */
606 if (seq_less(mpdu_seq_num, head_seq_num)) { 606 if (seq_less(mpdu_seq_num, head_seq_num)) {
607 dev_kfree_skb(skb); 607 dev_kfree_skb(skb);
608 return true; 608 return true;
609 } 609 }
610 610
611 /* 611 /*
612 * If the frame sequence number exceeds our buffering window 612 * If the frame sequence number exceeds our buffering window
613 * size, release some previous frames to make room for this one. 613 * size, release some previous frames to make room for this one.
614 */ 614 */
615 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) { 615 if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
616 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size)); 616 head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
617 /* release stored frames up to new head to stack */ 617 /* release stored frames up to new head to stack */
618 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num, 618 ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
619 frames); 619 frames);
620 } 620 }
621 621
622 /* Now the new frame is always in the range of the reordering buffer */ 622 /* Now the new frame is always in the range of the reordering buffer */
623 623
624 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size; 624 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
625 625
626 /* check if we already stored this frame */ 626 /* check if we already stored this frame */
627 if (tid_agg_rx->reorder_buf[index]) { 627 if (tid_agg_rx->reorder_buf[index]) {
628 dev_kfree_skb(skb); 628 dev_kfree_skb(skb);
629 return true; 629 return true;
630 } 630 }
631 631
632 /* 632 /*
633 * If the current MPDU is in the right order and nothing else 633 * If the current MPDU is in the right order and nothing else
634 * is stored we can process it directly, no need to buffer it. 634 * is stored we can process it directly, no need to buffer it.
635 */ 635 */
636 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 636 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
637 tid_agg_rx->stored_mpdu_num == 0) { 637 tid_agg_rx->stored_mpdu_num == 0) {
638 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 638 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
639 return false; 639 return false;
640 } 640 }
641 641
642 /* put the frame in the reordering buffer */ 642 /* put the frame in the reordering buffer */
643 tid_agg_rx->reorder_buf[index] = skb; 643 tid_agg_rx->reorder_buf[index] = skb;
644 tid_agg_rx->reorder_time[index] = jiffies; 644 tid_agg_rx->reorder_time[index] = jiffies;
645 tid_agg_rx->stored_mpdu_num++; 645 tid_agg_rx->stored_mpdu_num++;
646 /* release the buffer until next missing frame */ 646 /* release the buffer until next missing frame */
647 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 647 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
648 tid_agg_rx->buf_size; 648 tid_agg_rx->buf_size;
649 if (!tid_agg_rx->reorder_buf[index] && 649 if (!tid_agg_rx->reorder_buf[index] &&
650 tid_agg_rx->stored_mpdu_num > 1) { 650 tid_agg_rx->stored_mpdu_num > 1) {
651 /* 651 /*
652 * No buffers ready to be released, but check whether any 652 * No buffers ready to be released, but check whether any
653 * frames in the reorder buffer have timed out. 653 * frames in the reorder buffer have timed out.
654 */ 654 */
655 int j; 655 int j;
656 int skipped = 1; 656 int skipped = 1;
657 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 657 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
658 j = (j + 1) % tid_agg_rx->buf_size) { 658 j = (j + 1) % tid_agg_rx->buf_size) {
659 if (!tid_agg_rx->reorder_buf[j]) { 659 if (!tid_agg_rx->reorder_buf[j]) {
660 skipped++; 660 skipped++;
661 continue; 661 continue;
662 } 662 }
663 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] + 663 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
664 HT_RX_REORDER_BUF_TIMEOUT)) 664 HT_RX_REORDER_BUF_TIMEOUT))
665 break; 665 break;
666 666
667 #ifdef CONFIG_MAC80211_HT_DEBUG 667 #ifdef CONFIG_MAC80211_HT_DEBUG
668 if (net_ratelimit()) 668 if (net_ratelimit())
669 printk(KERN_DEBUG "%s: release an RX reorder " 669 printk(KERN_DEBUG "%s: release an RX reorder "
670 "frame due to timeout on earlier " 670 "frame due to timeout on earlier "
671 "frames\n", 671 "frames\n",
672 wiphy_name(hw->wiphy)); 672 wiphy_name(hw->wiphy));
673 #endif 673 #endif
674 ieee80211_release_reorder_frame(hw, tid_agg_rx, 674 ieee80211_release_reorder_frame(hw, tid_agg_rx,
675 j, frames); 675 j, frames);
676 676
677 /* 677 /*
678 * Increment the head seq# also for the skipped slots. 678 * Increment the head seq# also for the skipped slots.
679 */ 679 */
680 tid_agg_rx->head_seq_num = 680 tid_agg_rx->head_seq_num =
681 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK; 681 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
682 skipped = 0; 682 skipped = 0;
683 } 683 }
684 } else while (tid_agg_rx->reorder_buf[index]) { 684 } else while (tid_agg_rx->reorder_buf[index]) {
685 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames); 685 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
686 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 686 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
687 tid_agg_rx->buf_size; 687 tid_agg_rx->buf_size;
688 } 688 }
689 689
690 return true; 690 return true;
691 } 691 }
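The slot an MPDU occupies in the reorder buffer above is its circular distance from the session's starting sequence number (ssn), folded into the window with a modulo. A stand-alone illustration of that index computation with worked values (reorder_index is a hypothetical name):

/* Illustrative sketch, not part of rx.c. */
#include <stdio.h>

#define SEQ_MASK 0xfff

static unsigned int reorder_index(unsigned int mpdu_seq, unsigned int ssn,
                                  unsigned int buf_size)
{
        return ((mpdu_seq - ssn) & SEQ_MASK) % buf_size;        /* seq_sub() % buf_size */
}

int main(void)
{
        /* session negotiated with ssn = 100 and a 64-frame reorder window */
        printf("%u\n", reorder_index(103, 100, 64));    /* slot 3 */
        printf("%u\n", reorder_index(164, 100, 64));    /* 64 frames later: slot 0 again */
        return 0;
}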
692 692
693 /* 693 /*
694 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that 694 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that
695 * need no reordering, or are released from the buffer, are added to the frames queue. 695 * need no reordering, or are released from the buffer, are added to the frames queue.
696 */ 696 */
697 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, 697 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
698 struct sk_buff_head *frames) 698 struct sk_buff_head *frames)
699 { 699 {
700 struct sk_buff *skb = rx->skb; 700 struct sk_buff *skb = rx->skb;
701 struct ieee80211_local *local = rx->local; 701 struct ieee80211_local *local = rx->local;
702 struct ieee80211_hw *hw = &local->hw; 702 struct ieee80211_hw *hw = &local->hw;
703 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 703 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
704 struct sta_info *sta = rx->sta; 704 struct sta_info *sta = rx->sta;
705 struct tid_ampdu_rx *tid_agg_rx; 705 struct tid_ampdu_rx *tid_agg_rx;
706 u16 sc; 706 u16 sc;
707 int tid; 707 int tid;
708 708
709 if (!ieee80211_is_data_qos(hdr->frame_control)) 709 if (!ieee80211_is_data_qos(hdr->frame_control))
710 goto dont_reorder; 710 goto dont_reorder;
711 711
712 /* 712 /*
713 * filter the QoS data rx stream according to 713 * filter the QoS data rx stream according to
714 * STA/TID and check if this STA/TID is on aggregation 714 * STA/TID and check if this STA/TID is on aggregation
715 */ 715 */
716 716
717 if (!sta) 717 if (!sta)
718 goto dont_reorder; 718 goto dont_reorder;
719 719
720 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 720 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
721 721
722 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 722 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
723 if (!tid_agg_rx) 723 if (!tid_agg_rx)
724 goto dont_reorder; 724 goto dont_reorder;
725 725
726 /* qos null data frames are excluded */ 726 /* qos null data frames are excluded */
727 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 727 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
728 goto dont_reorder; 728 goto dont_reorder;
729 729
730 /* new, potentially un-ordered, ampdu frame - process it */ 730 /* new, potentially un-ordered, ampdu frame - process it */
731 731
732 /* reset session timer */ 732 /* reset session timer */
733 if (tid_agg_rx->timeout) 733 if (tid_agg_rx->timeout)
734 mod_timer(&tid_agg_rx->session_timer, 734 mod_timer(&tid_agg_rx->session_timer,
735 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 735 TU_TO_EXP_TIME(tid_agg_rx->timeout));
736 736
737 /* if this mpdu is fragmented - terminate rx aggregation session */ 737 /* if this mpdu is fragmented - terminate rx aggregation session */
738 sc = le16_to_cpu(hdr->seq_ctrl); 738 sc = le16_to_cpu(hdr->seq_ctrl);
739 if (sc & IEEE80211_SCTL_FRAG) { 739 if (sc & IEEE80211_SCTL_FRAG) {
740 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 740 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
741 skb_queue_tail(&rx->sdata->skb_queue, skb); 741 skb_queue_tail(&rx->sdata->skb_queue, skb);
742 ieee80211_queue_work(&local->hw, &rx->sdata->work); 742 ieee80211_queue_work(&local->hw, &rx->sdata->work);
743 return; 743 return;
744 } 744 }
745 745
746 /* 746 /*
747 * No locking needed -- we will only ever process one 747 * No locking needed -- we will only ever process one
748 * RX packet at a time, and thus own tid_agg_rx. All 748 * RX packet at a time, and thus own tid_agg_rx. All
749 * other code manipulating it needs to (and does) make 749 * other code manipulating it needs to (and does) make
750 * sure that we cannot get to it any more before doing 750 * sure that we cannot get to it any more before doing
751 * anything with it. 751 * anything with it.
752 */ 752 */
753 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) 753 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
754 return; 754 return;
755 755
756 dont_reorder: 756 dont_reorder:
757 __skb_queue_tail(frames, skb); 757 __skb_queue_tail(frames, skb);
758 } 758 }
759 759
760 static ieee80211_rx_result debug_noinline 760 static ieee80211_rx_result debug_noinline
761 ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 761 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
762 { 762 {
763 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 763 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
764 764
765 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ 765 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
766 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { 766 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
767 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 767 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
768 rx->sta->last_seq_ctrl[rx->queue] == 768 rx->sta->last_seq_ctrl[rx->queue] ==
769 hdr->seq_ctrl)) { 769 hdr->seq_ctrl)) {
770 if (rx->flags & IEEE80211_RX_RA_MATCH) { 770 if (rx->flags & IEEE80211_RX_RA_MATCH) {
771 rx->local->dot11FrameDuplicateCount++; 771 rx->local->dot11FrameDuplicateCount++;
772 rx->sta->num_duplicates++; 772 rx->sta->num_duplicates++;
773 } 773 }
774 return RX_DROP_MONITOR; 774 return RX_DROP_MONITOR;
775 } else 775 } else
776 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl; 776 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
777 } 777 }
778 778
779 if (unlikely(rx->skb->len < 16)) { 779 if (unlikely(rx->skb->len < 16)) {
780 I802_DEBUG_INC(rx->local->rx_handlers_drop_short); 780 I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
781 return RX_DROP_MONITOR; 781 return RX_DROP_MONITOR;
782 } 782 }
783 783
784 /* Drop disallowed frame classes based on STA auth/assoc state; 784 /* Drop disallowed frame classes based on STA auth/assoc state;
785 * IEEE 802.11, Chap 5.5. 785 * IEEE 802.11, Chap 5.5.
786 * 786 *
787 * mac80211 filters only based on association state, i.e. it drops 787 * mac80211 filters only based on association state, i.e. it drops
788 * Class 3 frames from not associated stations. hostapd sends 788 * Class 3 frames from not associated stations. hostapd sends
789 * deauth/disassoc frames when needed. In addition, hostapd is 789 * deauth/disassoc frames when needed. In addition, hostapd is
790 * responsible for filtering on both auth and assoc states. 790 * responsible for filtering on both auth and assoc states.
791 */ 791 */
792 792
793 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 793 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
794 return ieee80211_rx_mesh_check(rx); 794 return ieee80211_rx_mesh_check(rx);
795 795
796 if (unlikely((ieee80211_is_data(hdr->frame_control) || 796 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
797 ieee80211_is_pspoll(hdr->frame_control)) && 797 ieee80211_is_pspoll(hdr->frame_control)) &&
798 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 798 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
799 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { 799 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
800 if ((!ieee80211_has_fromds(hdr->frame_control) && 800 if ((!ieee80211_has_fromds(hdr->frame_control) &&
801 !ieee80211_has_tods(hdr->frame_control) && 801 !ieee80211_has_tods(hdr->frame_control) &&
802 ieee80211_is_data(hdr->frame_control)) || 802 ieee80211_is_data(hdr->frame_control)) ||
803 !(rx->flags & IEEE80211_RX_RA_MATCH)) { 803 !(rx->flags & IEEE80211_RX_RA_MATCH)) {
804 /* Drop IBSS frames and frames for other hosts 804 /* Drop IBSS frames and frames for other hosts
805 * silently. */ 805 * silently. */
806 return RX_DROP_MONITOR; 806 return RX_DROP_MONITOR;
807 } 807 }
808 808
809 return RX_DROP_MONITOR; 809 return RX_DROP_MONITOR;
810 } 810 }
811 811
812 return RX_CONTINUE; 812 return RX_CONTINUE;
813 } 813 }
814 814
815 815
816 static ieee80211_rx_result debug_noinline 816 static ieee80211_rx_result debug_noinline
817 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 817 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
818 { 818 {
819 struct sk_buff *skb = rx->skb; 819 struct sk_buff *skb = rx->skb;
820 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 820 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
822 int keyidx; 822 int keyidx;
823 int hdrlen; 823 int hdrlen;
824 ieee80211_rx_result result = RX_DROP_UNUSABLE; 824 ieee80211_rx_result result = RX_DROP_UNUSABLE;
825 struct ieee80211_key *stakey = NULL; 825 struct ieee80211_key *stakey = NULL;
826 int mmie_keyidx = -1; 826 int mmie_keyidx = -1;
827 __le16 fc; 827 __le16 fc;
828 828
829 /* 829 /*
830 * Key selection 101 830 * Key selection 101
831 * 831 *
832 * There are four types of keys: 832 * There are four types of keys:
833 * - GTK (group keys) 833 * - GTK (group keys)
834 * - IGTK (group keys for management frames) 834 * - IGTK (group keys for management frames)
835 * - PTK (pairwise keys) 835 * - PTK (pairwise keys)
836 * - STK (station-to-station pairwise keys) 836 * - STK (station-to-station pairwise keys)
837 * 837 *
838 * When selecting a key, we have to distinguish between multicast 838 * When selecting a key, we have to distinguish between multicast
839 * (including broadcast) and unicast frames, the latter can only 839 * (including broadcast) and unicast frames, the latter can only
840 * use PTKs and STKs while the former always use GTKs and IGTKs. 840 * use PTKs and STKs while the former always use GTKs and IGTKs.
841 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then 841 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
842 * unicast frames can also use key indices like GTKs. Hence, if we 842 * unicast frames can also use key indices like GTKs. Hence, if we
843 * don't have a PTK/STK we check the key index for a WEP key. 843 * don't have a PTK/STK we check the key index for a WEP key.
844 * 844 *
845 * Note that in a regular BSS, multicast frames are sent by the 845 * Note that in a regular BSS, multicast frames are sent by the
846 * AP only, associated stations unicast the frame to the AP first 846 * AP only, associated stations unicast the frame to the AP first
847 * which then multicasts it on their behalf. 847 * which then multicasts it on their behalf.
848 * 848 *
849 * There is also a slight problem in IBSS mode: GTKs are negotiated 849 * There is also a slight problem in IBSS mode: GTKs are negotiated
850 * with each station, that is something we don't currently handle. 850 * with each station, that is something we don't currently handle.
851 * The spec seems to expect that one negotiates the same key with 851 * The spec seems to expect that one negotiates the same key with
852 * every station but there's no such requirement; VLANs could be 852 * every station but there's no such requirement; VLANs could be
853 * possible. 853 * possible.
854 */ 854 */
855 855
856 /* 856 /*
857 * No point in finding a key and decrypting if the frame is neither 857 * No point in finding a key and decrypting if the frame is neither
858 * addressed to us nor a multicast frame. 858 * addressed to us nor a multicast frame.
859 */ 859 */
860 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 860 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
861 return RX_CONTINUE; 861 return RX_CONTINUE;
862 862
863 /* start without a key */ 863 /* start without a key */
864 rx->key = NULL; 864 rx->key = NULL;
865 865
866 if (rx->sta) 866 if (rx->sta)
867 stakey = rcu_dereference(rx->sta->key); 867 stakey = rcu_dereference(rx->sta->key);
868 868
869 fc = hdr->frame_control; 869 fc = hdr->frame_control;
870 870
871 if (!ieee80211_has_protected(fc)) 871 if (!ieee80211_has_protected(fc))
872 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 872 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
873 873
874 if (!is_multicast_ether_addr(hdr->addr1) && stakey) { 874 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
875 rx->key = stakey; 875 rx->key = stakey;
876 /* Skip decryption if the frame is not protected. */ 876 /* Skip decryption if the frame is not protected. */
877 if (!ieee80211_has_protected(fc)) 877 if (!ieee80211_has_protected(fc))
878 return RX_CONTINUE; 878 return RX_CONTINUE;
879 } else if (mmie_keyidx >= 0) { 879 } else if (mmie_keyidx >= 0) {
880 /* Broadcast/multicast robust management frame / BIP */ 880 /* Broadcast/multicast robust management frame / BIP */
881 if ((status->flag & RX_FLAG_DECRYPTED) && 881 if ((status->flag & RX_FLAG_DECRYPTED) &&
882 (status->flag & RX_FLAG_IV_STRIPPED)) 882 (status->flag & RX_FLAG_IV_STRIPPED))
883 return RX_CONTINUE; 883 return RX_CONTINUE;
884 884
885 if (mmie_keyidx < NUM_DEFAULT_KEYS || 885 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
886 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 886 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
887 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 887 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
888 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 888 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
889 } else if (!ieee80211_has_protected(fc)) { 889 } else if (!ieee80211_has_protected(fc)) {
890 /* 890 /*
891 * The frame was not protected, so skip decryption. However, we 891 * The frame was not protected, so skip decryption. However, we
892 * need to set rx->key if there is a key that could have been 892 * need to set rx->key if there is a key that could have been
893 * used so that the frame may be dropped if encryption would 893 * used so that the frame may be dropped if encryption would
894 * have been expected. 894 * have been expected.
895 */ 895 */
896 struct ieee80211_key *key = NULL; 896 struct ieee80211_key *key = NULL;
897 if (ieee80211_is_mgmt(fc) && 897 if (ieee80211_is_mgmt(fc) &&
898 is_multicast_ether_addr(hdr->addr1) && 898 is_multicast_ether_addr(hdr->addr1) &&
899 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 899 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
900 rx->key = key; 900 rx->key = key;
901 else if ((key = rcu_dereference(rx->sdata->default_key))) 901 else if ((key = rcu_dereference(rx->sdata->default_key)))
902 rx->key = key; 902 rx->key = key;
903 return RX_CONTINUE; 903 return RX_CONTINUE;
904 } else { 904 } else {
905 u8 keyid; 905 u8 keyid;
906 /* 906 /*
907 * The device doesn't give us the IV so we won't be 907 * The device doesn't give us the IV so we won't be
908 * able to look up the key. That's ok though, we 908 * able to look up the key. That's ok though, we
909 * don't need to decrypt the frame, we just won't 909 * don't need to decrypt the frame, we just won't
910 * be able to keep statistics accurate. 910 * be able to keep statistics accurate.
911 * Except for key threshold notifications, should 911 * Except for key threshold notifications, should
912 * we somehow allow the driver to tell us which key 912 * we somehow allow the driver to tell us which key
913 * the hardware used if this flag is set? 913 * the hardware used if this flag is set?
914 */ 914 */
915 if ((status->flag & RX_FLAG_DECRYPTED) && 915 if ((status->flag & RX_FLAG_DECRYPTED) &&
916 (status->flag & RX_FLAG_IV_STRIPPED)) 916 (status->flag & RX_FLAG_IV_STRIPPED))
917 return RX_CONTINUE; 917 return RX_CONTINUE;
918 918
919 hdrlen = ieee80211_hdrlen(fc); 919 hdrlen = ieee80211_hdrlen(fc);
920 920
921 if (rx->skb->len < 8 + hdrlen) 921 if (rx->skb->len < 8 + hdrlen)
922 return RX_DROP_UNUSABLE; /* TODO: count this? */ 922 return RX_DROP_UNUSABLE; /* TODO: count this? */
923 923
924 /* 924 /*
925 * no need to call ieee80211_wep_get_keyidx, 925 * no need to call ieee80211_wep_get_keyidx,
926 * it verifies a bunch of things we've done already 926 * it verifies a bunch of things we've done already
927 */ 927 */
928 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); 928 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
929 keyidx = keyid >> 6; 929 keyidx = keyid >> 6;
930 930
931 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 931 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
932 932
933 /* 933 /*
934 * RSNA-protected unicast frames should always be sent with 934 * RSNA-protected unicast frames should always be sent with
935 * pairwise or station-to-station keys, but for WEP we allow 935 * pairwise or station-to-station keys, but for WEP we allow
936 * using a key index as well. 936 * using a key index as well.
937 */ 937 */
938 if (rx->key && rx->key->conf.alg != ALG_WEP && 938 if (rx->key && rx->key->conf.alg != ALG_WEP &&
939 !is_multicast_ether_addr(hdr->addr1)) 939 !is_multicast_ether_addr(hdr->addr1))
940 rx->key = NULL; 940 rx->key = NULL;
941 } 941 }
942 942
943 if (rx->key) { 943 if (rx->key) {
944 rx->key->tx_rx_count++; 944 rx->key->tx_rx_count++;
945 /* TODO: add threshold stuff again */ 945 /* TODO: add threshold stuff again */
946 } else { 946 } else {
947 return RX_DROP_MONITOR; 947 return RX_DROP_MONITOR;
948 } 948 }
949 949
950 if (skb_linearize(rx->skb)) 950 if (skb_linearize(rx->skb))
951 return RX_DROP_UNUSABLE; 951 return RX_DROP_UNUSABLE;
952 /* the hdr variable is invalid now! */ 952 /* the hdr variable is invalid now! */
953 953
954 switch (rx->key->conf.alg) { 954 switch (rx->key->conf.alg) {
955 case ALG_WEP: 955 case ALG_WEP:
956 /* Check for weak IVs if possible */ 956 /* Check for weak IVs if possible */
957 if (rx->sta && ieee80211_is_data(fc) && 957 if (rx->sta && ieee80211_is_data(fc) &&
958 (!(status->flag & RX_FLAG_IV_STRIPPED) || 958 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
959 !(status->flag & RX_FLAG_DECRYPTED)) && 959 !(status->flag & RX_FLAG_DECRYPTED)) &&
960 ieee80211_wep_is_weak_iv(rx->skb, rx->key)) 960 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
961 rx->sta->wep_weak_iv_count++; 961 rx->sta->wep_weak_iv_count++;
962 962
963 result = ieee80211_crypto_wep_decrypt(rx); 963 result = ieee80211_crypto_wep_decrypt(rx);
964 break; 964 break;
965 case ALG_TKIP: 965 case ALG_TKIP:
966 result = ieee80211_crypto_tkip_decrypt(rx); 966 result = ieee80211_crypto_tkip_decrypt(rx);
967 break; 967 break;
968 case ALG_CCMP: 968 case ALG_CCMP:
969 result = ieee80211_crypto_ccmp_decrypt(rx); 969 result = ieee80211_crypto_ccmp_decrypt(rx);
970 break; 970 break;
971 case ALG_AES_CMAC: 971 case ALG_AES_CMAC:
972 result = ieee80211_crypto_aes_cmac_decrypt(rx); 972 result = ieee80211_crypto_aes_cmac_decrypt(rx);
973 break; 973 break;
974 } 974 }
975 975
976 /* either the frame has been decrypted or will be dropped */ 976 /* either the frame has been decrypted or will be dropped */
977 status->flag |= RX_FLAG_DECRYPTED; 977 status->flag |= RX_FLAG_DECRYPTED;
978 978
979 return result; 979 return result;
980 } 980 }
981 981
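The keyidx recovery above copies the Key ID octet that sits right after the 3-byte IV (offset hdrlen + 3) and takes its top two bits. A minimal standalone sketch of that extraction, assuming a plain buffer instead of an skb (not part of the committed file):

/* Illustrative userspace sketch only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

static int keyidx_from_frame(const uint8_t *frame, size_t len, size_t hdrlen)
{
	uint8_t keyid;

	if (len < hdrlen + 4)	/* need IV (3 bytes) + Key ID octet */
		return -1;
	keyid = frame[hdrlen + 3];
	return keyid >> 6;	/* index 0..3, used to pick sdata->keys[] */
}

int main(void)
{
	uint8_t frame[32] = { 0 };
	size_t hdrlen = 24;		/* typical three-address data frame */

	frame[hdrlen + 3] = 2 << 6;	/* pretend key index 2 was used */
	printf("keyidx = %d\n", keyidx_from_frame(frame, sizeof(frame), hdrlen));
	return 0;
}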
982 static ieee80211_rx_result debug_noinline 982 static ieee80211_rx_result debug_noinline
983 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) 983 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
984 { 984 {
985 struct ieee80211_local *local; 985 struct ieee80211_local *local;
986 struct ieee80211_hdr *hdr; 986 struct ieee80211_hdr *hdr;
987 struct sk_buff *skb; 987 struct sk_buff *skb;
988 988
989 local = rx->local; 989 local = rx->local;
990 skb = rx->skb; 990 skb = rx->skb;
991 hdr = (struct ieee80211_hdr *) skb->data; 991 hdr = (struct ieee80211_hdr *) skb->data;
992 992
993 if (!local->pspolling) 993 if (!local->pspolling)
994 return RX_CONTINUE; 994 return RX_CONTINUE;
995 995
996 if (!ieee80211_has_fromds(hdr->frame_control)) 996 if (!ieee80211_has_fromds(hdr->frame_control))
997 /* this is not from AP */ 997 /* this is not from AP */
998 return RX_CONTINUE; 998 return RX_CONTINUE;
999 999
1000 if (!ieee80211_is_data(hdr->frame_control)) 1000 if (!ieee80211_is_data(hdr->frame_control))
1001 return RX_CONTINUE; 1001 return RX_CONTINUE;
1002 1002
1003 if (!ieee80211_has_moredata(hdr->frame_control)) { 1003 if (!ieee80211_has_moredata(hdr->frame_control)) {
1004 /* AP has no more frames buffered for us */ 1004 /* AP has no more frames buffered for us */
1005 local->pspolling = false; 1005 local->pspolling = false;
1006 return RX_CONTINUE; 1006 return RX_CONTINUE;
1007 } 1007 }
1008 1008
1009 /* more data bit is set, let's request a new frame from the AP */ 1009 /* more data bit is set, let's request a new frame from the AP */
1010 ieee80211_send_pspoll(local, rx->sdata); 1010 ieee80211_send_pspoll(local, rx->sdata);
1011 1011
1012 return RX_CONTINUE; 1012 return RX_CONTINUE;
1013 } 1013 }
1014 1014
1015 static void ap_sta_ps_start(struct sta_info *sta) 1015 static void ap_sta_ps_start(struct sta_info *sta)
1016 { 1016 {
1017 struct ieee80211_sub_if_data *sdata = sta->sdata; 1017 struct ieee80211_sub_if_data *sdata = sta->sdata;
1018 struct ieee80211_local *local = sdata->local; 1018 struct ieee80211_local *local = sdata->local;
1019 1019
1020 atomic_inc(&sdata->bss->num_sta_ps); 1020 atomic_inc(&sdata->bss->num_sta_ps);
1021 set_sta_flags(sta, WLAN_STA_PS_STA); 1021 set_sta_flags(sta, WLAN_STA_PS_STA);
1022 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1022 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1023 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1023 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1024 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1024 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1025 sdata->name, sta->sta.addr, sta->sta.aid); 1025 sdata->name, sta->sta.addr, sta->sta.aid);
1026 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1026 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1027 } 1027 }
1028 1028
1029 static void ap_sta_ps_end(struct sta_info *sta) 1029 static void ap_sta_ps_end(struct sta_info *sta)
1030 { 1030 {
1031 struct ieee80211_sub_if_data *sdata = sta->sdata; 1031 struct ieee80211_sub_if_data *sdata = sta->sdata;
1032 1032
1033 atomic_dec(&sdata->bss->num_sta_ps); 1033 atomic_dec(&sdata->bss->num_sta_ps);
1034 1034
1035 clear_sta_flags(sta, WLAN_STA_PS_STA); 1035 clear_sta_flags(sta, WLAN_STA_PS_STA);
1036 1036
1037 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1037 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1038 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1038 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1039 sdata->name, sta->sta.addr, sta->sta.aid); 1039 sdata->name, sta->sta.addr, sta->sta.aid);
1040 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1040 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1041 1041
1042 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { 1042 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1043 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1043 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1044 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1044 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1045 sdata->name, sta->sta.addr, sta->sta.aid); 1045 sdata->name, sta->sta.addr, sta->sta.aid);
1046 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1046 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1047 return; 1047 return;
1048 } 1048 }
1049 1049
1050 ieee80211_sta_ps_deliver_wakeup(sta); 1050 ieee80211_sta_ps_deliver_wakeup(sta);
1051 } 1051 }
1052 1052
1053 static ieee80211_rx_result debug_noinline 1053 static ieee80211_rx_result debug_noinline
1054 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 1054 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1055 { 1055 {
1056 struct sta_info *sta = rx->sta; 1056 struct sta_info *sta = rx->sta;
1057 struct sk_buff *skb = rx->skb; 1057 struct sk_buff *skb = rx->skb;
1058 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1058 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1059 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1059 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1060 1060
1061 if (!sta) 1061 if (!sta)
1062 return RX_CONTINUE; 1062 return RX_CONTINUE;
1063 1063
1064 /* 1064 /*
1065 * Update last_rx only for IBSS packets which are for the current 1065 * Update last_rx only for IBSS packets which are for the current
1066 * BSSID to avoid keeping the current IBSS network alive in cases 1066 * BSSID to avoid keeping the current IBSS network alive in cases
1067 * where other STAs start using different BSSID. 1067 * where other STAs start using different BSSID.
1068 */ 1068 */
1069 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { 1069 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1070 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 1070 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1071 NL80211_IFTYPE_ADHOC); 1071 NL80211_IFTYPE_ADHOC);
1072 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) 1072 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
1073 sta->last_rx = jiffies; 1073 sta->last_rx = jiffies;
1074 } else if (!is_multicast_ether_addr(hdr->addr1)) { 1074 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1075 /* 1075 /*
1076 * Mesh beacons will update last_rx if they are found to 1076 * Mesh beacons will update last_rx if they are found to
1077 * match the current local configuration when processed. 1077 * match the current local configuration when processed.
1078 */ 1078 */
1079 sta->last_rx = jiffies; 1079 sta->last_rx = jiffies;
1080 } 1080 }
1081 1081
1082 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1082 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1083 return RX_CONTINUE; 1083 return RX_CONTINUE;
1084 1084
1085 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) 1085 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1086 ieee80211_sta_rx_notify(rx->sdata, hdr); 1086 ieee80211_sta_rx_notify(rx->sdata, hdr);
1087 1087
1088 sta->rx_fragments++; 1088 sta->rx_fragments++;
1089 sta->rx_bytes += rx->skb->len; 1089 sta->rx_bytes += rx->skb->len;
1090 sta->last_signal = status->signal; 1090 sta->last_signal = status->signal;
1091 1091
1092 /* 1092 /*
1093 * Change STA power saving mode only at the end of a frame 1093 * Change STA power saving mode only at the end of a frame
1094 * exchange sequence. 1094 * exchange sequence.
1095 */ 1095 */
1096 if (!ieee80211_has_morefrags(hdr->frame_control) && 1096 if (!ieee80211_has_morefrags(hdr->frame_control) &&
1097 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1097 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1098 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { 1098 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1099 if (test_sta_flags(sta, WLAN_STA_PS_STA)) { 1099 if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
1100 /* 1100 /*
1101 * Ignore doze->wake transitions that are 1101 * Ignore doze->wake transitions that are
1102 * indicated by non-data frames, the standard 1102 * indicated by non-data frames, the standard
1103 * is unclear here, but for example going to 1103 * is unclear here, but for example going to
1104 * PS mode and then scanning would cause a 1104 * PS mode and then scanning would cause a
1105 * doze->wake transition for the probe request, 1105 * doze->wake transition for the probe request,
1106 * and that is clearly undesirable. 1106 * and that is clearly undesirable.
1107 */ 1107 */
1108 if (ieee80211_is_data(hdr->frame_control) && 1108 if (ieee80211_is_data(hdr->frame_control) &&
1109 !ieee80211_has_pm(hdr->frame_control)) 1109 !ieee80211_has_pm(hdr->frame_control))
1110 ap_sta_ps_end(sta); 1110 ap_sta_ps_end(sta);
1111 } else { 1111 } else {
1112 if (ieee80211_has_pm(hdr->frame_control)) 1112 if (ieee80211_has_pm(hdr->frame_control))
1113 ap_sta_ps_start(sta); 1113 ap_sta_ps_start(sta);
1114 } 1114 }
1115 } 1115 }
1116 1116
1117 /* 1117 /*
1118 * Drop (qos-)data::nullfunc frames silently, since they 1118 * Drop (qos-)data::nullfunc frames silently, since they
1119 * are used only to control station power saving mode. 1119 * are used only to control station power saving mode.
1120 */ 1120 */
1121 if (ieee80211_is_nullfunc(hdr->frame_control) || 1121 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1122 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1122 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1123 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1123 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1124 1124
1125 /* 1125 /*
1126 * If we receive a 4-addr nullfunc frame from a STA 1126 * If we receive a 4-addr nullfunc frame from a STA
1127 * that was not moved to a 4-addr STA vlan yet, drop 1127 * that was not moved to a 4-addr STA vlan yet, drop
1128 * the frame to the monitor interface, to make sure 1128 * the frame to the monitor interface, to make sure
1129 * that hostapd sees it 1129 * that hostapd sees it
1130 */ 1130 */
1131 if (ieee80211_has_a4(hdr->frame_control) && 1131 if (ieee80211_has_a4(hdr->frame_control) &&
1132 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1132 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1133 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1133 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1134 !rx->sdata->u.vlan.sta))) 1134 !rx->sdata->u.vlan.sta)))
1135 return RX_DROP_MONITOR; 1135 return RX_DROP_MONITOR;
1136 /* 1136 /*
1137 * Update counter and free packet here to avoid 1137 * Update counter and free packet here to avoid
1138 * counting this as a dropped packet. 1138 * counting this as a dropped packet.
1139 */ 1139 */
1140 sta->rx_packets++; 1140 sta->rx_packets++;
1141 dev_kfree_skb(rx->skb); 1141 dev_kfree_skb(rx->skb);
1142 return RX_QUEUED; 1142 return RX_QUEUED;
1143 } 1143 }
1144 1144
1145 return RX_CONTINUE; 1145 return RX_CONTINUE;
1146 } /* ieee80211_rx_h_sta_process */ 1146 } /* ieee80211_rx_h_sta_process */
1147 1147
1148 static inline struct ieee80211_fragment_entry * 1148 static inline struct ieee80211_fragment_entry *
1149 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 1149 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1150 unsigned int frag, unsigned int seq, int rx_queue, 1150 unsigned int frag, unsigned int seq, int rx_queue,
1151 struct sk_buff **skb) 1151 struct sk_buff **skb)
1152 { 1152 {
1153 struct ieee80211_fragment_entry *entry; 1153 struct ieee80211_fragment_entry *entry;
1154 int idx; 1154 int idx;
1155 1155
1156 idx = sdata->fragment_next; 1156 idx = sdata->fragment_next;
1157 entry = &sdata->fragments[sdata->fragment_next++]; 1157 entry = &sdata->fragments[sdata->fragment_next++];
1158 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 1158 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1159 sdata->fragment_next = 0; 1159 sdata->fragment_next = 0;
1160 1160
1161 if (!skb_queue_empty(&entry->skb_list)) { 1161 if (!skb_queue_empty(&entry->skb_list)) {
1162 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1162 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1163 struct ieee80211_hdr *hdr = 1163 struct ieee80211_hdr *hdr =
1164 (struct ieee80211_hdr *) entry->skb_list.next->data; 1164 (struct ieee80211_hdr *) entry->skb_list.next->data;
1165 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 1165 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1166 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 1166 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1167 "addr1=%pM addr2=%pM\n", 1167 "addr1=%pM addr2=%pM\n",
1168 sdata->name, idx, 1168 sdata->name, idx,
1169 jiffies - entry->first_frag_time, entry->seq, 1169 jiffies - entry->first_frag_time, entry->seq,
1170 entry->last_frag, hdr->addr1, hdr->addr2); 1170 entry->last_frag, hdr->addr1, hdr->addr2);
1171 #endif 1171 #endif
1172 __skb_queue_purge(&entry->skb_list); 1172 __skb_queue_purge(&entry->skb_list);
1173 } 1173 }
1174 1174
1175 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 1175 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1176 *skb = NULL; 1176 *skb = NULL;
1177 entry->first_frag_time = jiffies; 1177 entry->first_frag_time = jiffies;
1178 entry->seq = seq; 1178 entry->seq = seq;
1179 entry->rx_queue = rx_queue; 1179 entry->rx_queue = rx_queue;
1180 entry->last_frag = frag; 1180 entry->last_frag = frag;
1181 entry->ccmp = 0; 1181 entry->ccmp = 0;
1182 entry->extra_len = 0; 1182 entry->extra_len = 0;
1183 1183
1184 return entry; 1184 return entry;
1185 } 1185 }
1186 1186
1187 static inline struct ieee80211_fragment_entry * 1187 static inline struct ieee80211_fragment_entry *
1188 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 1188 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1189 unsigned int frag, unsigned int seq, 1189 unsigned int frag, unsigned int seq,
1190 int rx_queue, struct ieee80211_hdr *hdr) 1190 int rx_queue, struct ieee80211_hdr *hdr)
1191 { 1191 {
1192 struct ieee80211_fragment_entry *entry; 1192 struct ieee80211_fragment_entry *entry;
1193 int i, idx; 1193 int i, idx;
1194 1194
1195 idx = sdata->fragment_next; 1195 idx = sdata->fragment_next;
1196 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 1196 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1197 struct ieee80211_hdr *f_hdr; 1197 struct ieee80211_hdr *f_hdr;
1198 1198
1199 idx--; 1199 idx--;
1200 if (idx < 0) 1200 if (idx < 0)
1201 idx = IEEE80211_FRAGMENT_MAX - 1; 1201 idx = IEEE80211_FRAGMENT_MAX - 1;
1202 1202
1203 entry = &sdata->fragments[idx]; 1203 entry = &sdata->fragments[idx];
1204 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 1204 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1205 entry->rx_queue != rx_queue || 1205 entry->rx_queue != rx_queue ||
1206 entry->last_frag + 1 != frag) 1206 entry->last_frag + 1 != frag)
1207 continue; 1207 continue;
1208 1208
1209 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; 1209 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1210 1210
1211 /* 1211 /*
1212 * Check ftype and addresses are equal, else check next fragment 1212 * Check ftype and addresses are equal, else check next fragment
1213 */ 1213 */
1214 if (((hdr->frame_control ^ f_hdr->frame_control) & 1214 if (((hdr->frame_control ^ f_hdr->frame_control) &
1215 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 1215 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1216 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 1216 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1217 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 1217 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1218 continue; 1218 continue;
1219 1219
1220 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 1220 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1221 __skb_queue_purge(&entry->skb_list); 1221 __skb_queue_purge(&entry->skb_list);
1222 continue; 1222 continue;
1223 } 1223 }
1224 return entry; 1224 return entry;
1225 } 1225 }
1226 1226
1227 return NULL; 1227 return NULL;
1228 } 1228 }
1229 1229
1230 static ieee80211_rx_result debug_noinline 1230 static ieee80211_rx_result debug_noinline
1231 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 1231 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1232 { 1232 {
1233 struct ieee80211_hdr *hdr; 1233 struct ieee80211_hdr *hdr;
1234 u16 sc; 1234 u16 sc;
1235 __le16 fc; 1235 __le16 fc;
1236 unsigned int frag, seq; 1236 unsigned int frag, seq;
1237 struct ieee80211_fragment_entry *entry; 1237 struct ieee80211_fragment_entry *entry;
1238 struct sk_buff *skb; 1238 struct sk_buff *skb;
1239 1239
1240 hdr = (struct ieee80211_hdr *)rx->skb->data; 1240 hdr = (struct ieee80211_hdr *)rx->skb->data;
1241 fc = hdr->frame_control; 1241 fc = hdr->frame_control;
1242 sc = le16_to_cpu(hdr->seq_ctrl); 1242 sc = le16_to_cpu(hdr->seq_ctrl);
1243 frag = sc & IEEE80211_SCTL_FRAG; 1243 frag = sc & IEEE80211_SCTL_FRAG;
1244 1244
1245 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || 1245 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1246 (rx->skb)->len < 24 || 1246 (rx->skb)->len < 24 ||
1247 is_multicast_ether_addr(hdr->addr1))) { 1247 is_multicast_ether_addr(hdr->addr1))) {
1248 /* not fragmented */ 1248 /* not fragmented */
1249 goto out; 1249 goto out;
1250 } 1250 }
1251 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 1251 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1252 1252
1253 if (skb_linearize(rx->skb)) 1253 if (skb_linearize(rx->skb))
1254 return RX_DROP_UNUSABLE; 1254 return RX_DROP_UNUSABLE;
1255 1255
1256 /* 1256 /*
1257 * skb_linearize() might change the skb->data and 1257 * skb_linearize() might change the skb->data and
1258 * previously cached variables (in this case, hdr) need to 1258 * previously cached variables (in this case, hdr) need to
1259 * be refreshed with the new data. 1259 * be refreshed with the new data.
1260 */ 1260 */
1261 hdr = (struct ieee80211_hdr *)rx->skb->data; 1261 hdr = (struct ieee80211_hdr *)rx->skb->data;
1262 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 1262 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1263 1263
1264 if (frag == 0) { 1264 if (frag == 0) {
1265 /* This is the first fragment of a new frame. */ 1265 /* This is the first fragment of a new frame. */
1266 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 1266 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1267 rx->queue, &(rx->skb)); 1267 rx->queue, &(rx->skb));
1268 if (rx->key && rx->key->conf.alg == ALG_CCMP && 1268 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1269 ieee80211_has_protected(fc)) { 1269 ieee80211_has_protected(fc)) {
1270 int queue = ieee80211_is_mgmt(fc) ? 1270 int queue = ieee80211_is_mgmt(fc) ?
1271 NUM_RX_DATA_QUEUES : rx->queue; 1271 NUM_RX_DATA_QUEUES : rx->queue;
1272 /* Store CCMP PN so that we can verify that the next 1272 /* Store CCMP PN so that we can verify that the next
1273 * fragment has a sequential PN value. */ 1273 * fragment has a sequential PN value. */
1274 entry->ccmp = 1; 1274 entry->ccmp = 1;
1275 memcpy(entry->last_pn, 1275 memcpy(entry->last_pn,
1276 rx->key->u.ccmp.rx_pn[queue], 1276 rx->key->u.ccmp.rx_pn[queue],
1277 CCMP_PN_LEN); 1277 CCMP_PN_LEN);
1278 } 1278 }
1279 return RX_QUEUED; 1279 return RX_QUEUED;
1280 } 1280 }
1281 1281
1282 /* This is a fragment for a frame that should already be pending in 1282 /* This is a fragment for a frame that should already be pending in
1283 * fragment cache. Add this fragment to the end of the pending entry. 1283 * fragment cache. Add this fragment to the end of the pending entry.
1284 */ 1284 */
1285 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr); 1285 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
1286 if (!entry) { 1286 if (!entry) {
1287 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1287 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1288 return RX_DROP_MONITOR; 1288 return RX_DROP_MONITOR;
1289 } 1289 }
1290 1290
1291 /* Verify that MPDUs within one MSDU have sequential PN values. 1291 /* Verify that MPDUs within one MSDU have sequential PN values.
1292 * (IEEE 802.11i, 8.3.3.4.5) */ 1292 * (IEEE 802.11i, 8.3.3.4.5) */
1293 if (entry->ccmp) { 1293 if (entry->ccmp) {
1294 int i; 1294 int i;
1295 u8 pn[CCMP_PN_LEN], *rpn; 1295 u8 pn[CCMP_PN_LEN], *rpn;
1296 int queue; 1296 int queue;
1297 if (!rx->key || rx->key->conf.alg != ALG_CCMP) 1297 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1298 return RX_DROP_UNUSABLE; 1298 return RX_DROP_UNUSABLE;
1299 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 1299 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1300 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 1300 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1301 pn[i]++; 1301 pn[i]++;
1302 if (pn[i]) 1302 if (pn[i])
1303 break; 1303 break;
1304 } 1304 }
1305 queue = ieee80211_is_mgmt(fc) ? 1305 queue = ieee80211_is_mgmt(fc) ?
1306 NUM_RX_DATA_QUEUES : rx->queue; 1306 NUM_RX_DATA_QUEUES : rx->queue;
1307 rpn = rx->key->u.ccmp.rx_pn[queue]; 1307 rpn = rx->key->u.ccmp.rx_pn[queue];
1308 if (memcmp(pn, rpn, CCMP_PN_LEN)) 1308 if (memcmp(pn, rpn, CCMP_PN_LEN))
1309 return RX_DROP_UNUSABLE; 1309 return RX_DROP_UNUSABLE;
1310 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 1310 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1311 } 1311 }
1312 1312
1313 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 1313 skb_pull(rx->skb, ieee80211_hdrlen(fc));
1314 __skb_queue_tail(&entry->skb_list, rx->skb); 1314 __skb_queue_tail(&entry->skb_list, rx->skb);
1315 entry->last_frag = frag; 1315 entry->last_frag = frag;
1316 entry->extra_len += rx->skb->len; 1316 entry->extra_len += rx->skb->len;
1317 if (ieee80211_has_morefrags(fc)) { 1317 if (ieee80211_has_morefrags(fc)) {
1318 rx->skb = NULL; 1318 rx->skb = NULL;
1319 return RX_QUEUED; 1319 return RX_QUEUED;
1320 } 1320 }
1321 1321
1322 rx->skb = __skb_dequeue(&entry->skb_list); 1322 rx->skb = __skb_dequeue(&entry->skb_list);
1323 if (skb_tailroom(rx->skb) < entry->extra_len) { 1323 if (skb_tailroom(rx->skb) < entry->extra_len) {
1324 I802_DEBUG_INC(rx->local->rx_expand_skb_head2); 1324 I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1325 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 1325 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1326 GFP_ATOMIC))) { 1326 GFP_ATOMIC))) {
1327 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1327 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1328 __skb_queue_purge(&entry->skb_list); 1328 __skb_queue_purge(&entry->skb_list);
1329 return RX_DROP_UNUSABLE; 1329 return RX_DROP_UNUSABLE;
1330 } 1330 }
1331 } 1331 }
1332 while ((skb = __skb_dequeue(&entry->skb_list))) { 1332 while ((skb = __skb_dequeue(&entry->skb_list))) {
1333 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 1333 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1334 dev_kfree_skb(skb); 1334 dev_kfree_skb(skb);
1335 } 1335 }
1336 1336
1337 /* Complete frame has been reassembled - process it now */ 1337 /* Complete frame has been reassembled - process it now */
1338 rx->flags |= IEEE80211_RX_FRAGMENTED; 1338 rx->flags |= IEEE80211_RX_FRAGMENTED;
1339 1339
1340 out: 1340 out:
1341 if (rx->sta) 1341 if (rx->sta)
1342 rx->sta->rx_packets++; 1342 rx->sta->rx_packets++;
1343 if (is_multicast_ether_addr(hdr->addr1)) 1343 if (is_multicast_ether_addr(hdr->addr1))
1344 rx->local->dot11MulticastReceivedFrameCount++; 1344 rx->local->dot11MulticastReceivedFrameCount++;
1345 else 1345 else
1346 ieee80211_led_rx(rx->local); 1346 ieee80211_led_rx(rx->local);
1347 return RX_CONTINUE; 1347 return RX_CONTINUE;
1348 } 1348 }
1349 1349
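The defragment handler above only accepts a CCMP-protected fragment when its packet number is exactly one greater than the previous fragment's, computed with a byte-wise increment and carry. A small self-contained sketch of that check, assuming the same 6-byte big-endian PN layout (not part of the committed file):

/* Illustrative userspace sketch only, not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CCMP_PN_LEN 6

static int pn_is_sequential(const uint8_t *last_pn, const uint8_t *rx_pn)
{
	uint8_t pn[CCMP_PN_LEN];
	int i;

	memcpy(pn, last_pn, CCMP_PN_LEN);
	for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
		pn[i]++;
		if (pn[i])	/* stop once a byte did not wrap to zero */
			break;
	}
	return memcmp(pn, rx_pn, CCMP_PN_LEN) == 0;
}

int main(void)
{
	uint8_t last[CCMP_PN_LEN] = { 0, 0, 0, 0, 0, 0xff };
	uint8_t next[CCMP_PN_LEN] = { 0, 0, 0, 0, 1, 0x00 };	/* 0xff + 1 carries */
	uint8_t bad[CCMP_PN_LEN]  = { 0, 0, 0, 0, 1, 0x01 };

	printf("sequential: %d, gap: %d\n",
	       pn_is_sequential(last, next), pn_is_sequential(last, bad));
	return 0;
}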
1350 static ieee80211_rx_result debug_noinline 1350 static ieee80211_rx_result debug_noinline
1351 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) 1351 ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1352 { 1352 {
1353 struct ieee80211_sub_if_data *sdata = rx->sdata; 1353 struct ieee80211_sub_if_data *sdata = rx->sdata;
1354 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; 1354 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
1355 1355
1356 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || 1356 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
1357 !(rx->flags & IEEE80211_RX_RA_MATCH))) 1357 !(rx->flags & IEEE80211_RX_RA_MATCH)))
1358 return RX_CONTINUE; 1358 return RX_CONTINUE;
1359 1359
1360 if ((sdata->vif.type != NL80211_IFTYPE_AP) && 1360 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
1361 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) 1361 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1362 return RX_DROP_UNUSABLE; 1362 return RX_DROP_UNUSABLE;
1363 1363
1364 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER)) 1364 if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
1365 ieee80211_sta_ps_deliver_poll_response(rx->sta); 1365 ieee80211_sta_ps_deliver_poll_response(rx->sta);
1366 else 1366 else
1367 set_sta_flags(rx->sta, WLAN_STA_PSPOLL); 1367 set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
1368 1368
1369 /* Free PS Poll skb here instead of returning RX_DROP that would 1369 /* Free PS Poll skb here instead of returning RX_DROP that would
1370 * count as a dropped frame. */ 1370 * count as a dropped frame. */
1371 dev_kfree_skb(rx->skb); 1371 dev_kfree_skb(rx->skb);
1372 1372
1373 return RX_QUEUED; 1373 return RX_QUEUED;
1374 } 1374 }
1375 1375
1376 static ieee80211_rx_result debug_noinline 1376 static ieee80211_rx_result debug_noinline
1377 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx) 1377 ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1378 { 1378 {
1379 u8 *data = rx->skb->data; 1379 u8 *data = rx->skb->data;
1380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; 1380 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1381 1381
1382 if (!ieee80211_is_data_qos(hdr->frame_control)) 1382 if (!ieee80211_is_data_qos(hdr->frame_control))
1383 return RX_CONTINUE; 1383 return RX_CONTINUE;
1384 1384
1385 /* remove the qos control field, update frame type and meta-data */ 1385 /* remove the qos control field, update frame type and meta-data */
1386 memmove(data + IEEE80211_QOS_CTL_LEN, data, 1386 memmove(data + IEEE80211_QOS_CTL_LEN, data,
1387 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); 1387 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1388 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); 1388 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1389 /* change frame type to non QOS */ 1389 /* change frame type to non QOS */
1390 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1390 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1391 1391
1392 return RX_CONTINUE; 1392 return RX_CONTINUE;
1393 } 1393 }
1394 1394
1395 static int 1395 static int
1396 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1396 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1397 { 1397 {
1398 if (unlikely(!rx->sta || 1398 if (unlikely(!rx->sta ||
1399 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED))) 1399 !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
1400 return -EACCES; 1400 return -EACCES;
1401 1401
1402 return 0; 1402 return 0;
1403 } 1403 }
1404 1404
1405 static int 1405 static int
1406 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 1406 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1407 { 1407 {
1408 struct sk_buff *skb = rx->skb; 1408 struct sk_buff *skb = rx->skb;
1409 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1409 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1410 1410
1411 /* 1411 /*
1412 * Pass through unencrypted frames if the hardware has 1412 * Pass through unencrypted frames if the hardware has
1413 * decrypted them already. 1413 * decrypted them already.
1414 */ 1414 */
1415 if (status->flag & RX_FLAG_DECRYPTED) 1415 if (status->flag & RX_FLAG_DECRYPTED)
1416 return 0; 1416 return 0;
1417 1417
1418 /* Drop unencrypted frames if key is set. */ 1418 /* Drop unencrypted frames if key is set. */
1419 if (unlikely(!ieee80211_has_protected(fc) && 1419 if (unlikely(!ieee80211_has_protected(fc) &&
1420 !ieee80211_is_nullfunc(fc) && 1420 !ieee80211_is_nullfunc(fc) &&
1421 ieee80211_is_data(fc) && 1421 ieee80211_is_data(fc) &&
1422 (rx->key || rx->sdata->drop_unencrypted))) 1422 (rx->key || rx->sdata->drop_unencrypted)))
1423 return -EACCES; 1423 return -EACCES;
1424 1424
1425 return 0; 1425 return 0;
1426 } 1426 }
1427 1427
1428 static int 1428 static int
1429 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 1429 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1430 { 1430 {
1431 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1431 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1432 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1432 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1433 __le16 fc = hdr->frame_control; 1433 __le16 fc = hdr->frame_control;
1434 1434
1435 /* 1435 /*
1436 * Pass through unencrypted frames if the hardware has 1436 * Pass through unencrypted frames if the hardware has
1437 * decrypted them already. 1437 * decrypted them already.
1438 */ 1438 */
1439 if (status->flag & RX_FLAG_DECRYPTED) 1439 if (status->flag & RX_FLAG_DECRYPTED)
1440 return 0; 1440 return 0;
1441 1441
1442 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { 1442 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1443 if (unlikely(!ieee80211_has_protected(fc) && 1443 if (unlikely(!ieee80211_has_protected(fc) &&
1444 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1444 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1445 rx->key)) 1445 rx->key))
1446 return -EACCES; 1446 return -EACCES;
1447 /* BIP does not use Protected field, so need to check MMIE */ 1447 /* BIP does not use Protected field, so need to check MMIE */
1448 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 1448 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1449 ieee80211_get_mmie_keyidx(rx->skb) < 0)) 1449 ieee80211_get_mmie_keyidx(rx->skb) < 0))
1450 return -EACCES; 1450 return -EACCES;
1451 /* 1451 /*
1452 * When using MFP, Action frames are not allowed prior to 1452 * When using MFP, Action frames are not allowed prior to
1453 * having configured keys. 1453 * having configured keys.
1454 */ 1454 */
1455 if (unlikely(ieee80211_is_action(fc) && !rx->key && 1455 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1456 ieee80211_is_robust_mgmt_frame( 1456 ieee80211_is_robust_mgmt_frame(
1457 (struct ieee80211_hdr *) rx->skb->data))) 1457 (struct ieee80211_hdr *) rx->skb->data)))
1458 return -EACCES; 1458 return -EACCES;
1459 } 1459 }
1460 1460
1461 return 0; 1461 return 0;
1462 } 1462 }
1463 1463
1464 static int 1464 static int
1465 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1465 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1466 { 1466 {
1467 struct ieee80211_sub_if_data *sdata = rx->sdata; 1467 struct ieee80211_sub_if_data *sdata = rx->sdata;
1468 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1468 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1469 1469
1470 if (ieee80211_has_a4(hdr->frame_control) && 1470 if (ieee80211_has_a4(hdr->frame_control) &&
1471 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 1471 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1472 return -1; 1472 return -1;
1473 1473
1474 if (is_multicast_ether_addr(hdr->addr1) && 1474 if (is_multicast_ether_addr(hdr->addr1) &&
1475 ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) || 1475 ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
1476 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) 1476 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1477 return -1; 1477 return -1;
1478 1478
1479 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 1479 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1480 } 1480 }
1481 1481
1482 /* 1482 /*
1483 * requires that rx->skb is a frame with ethernet header 1483 * requires that rx->skb is a frame with ethernet header
1484 */ 1484 */
1485 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 1485 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1486 { 1486 {
1487 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 1487 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1488 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 1488 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1489 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1489 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1490 1490
1491 /* 1491 /*
1492 * Allow EAPOL frames to us/the PAE group address regardless 1492 * Allow EAPOL frames to us/the PAE group address regardless
1493 * of whether the frame was encrypted or not. 1493 * of whether the frame was encrypted or not.
1494 */ 1494 */
1495 if (ehdr->h_proto == htons(ETH_P_PAE) && 1495 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1496 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || 1496 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1497 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1497 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1498 return true; 1498 return true;
1499 1499
1500 if (ieee80211_802_1x_port_control(rx) || 1500 if (ieee80211_802_1x_port_control(rx) ||
1501 ieee80211_drop_unencrypted(rx, fc)) 1501 ieee80211_drop_unencrypted(rx, fc))
1502 return false; 1502 return false;
1503 1503
1504 return true; 1504 return true;
1505 } 1505 }
1506 1506
1507 /* 1507 /*
1508 * requires that rx->skb is a frame with ethernet header 1508 * requires that rx->skb is a frame with ethernet header
1509 */ 1509 */
1510 static void 1510 static void
1511 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 1511 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1512 { 1512 {
1513 struct ieee80211_sub_if_data *sdata = rx->sdata; 1513 struct ieee80211_sub_if_data *sdata = rx->sdata;
1514 struct net_device *dev = sdata->dev; 1514 struct net_device *dev = sdata->dev;
1515 struct sk_buff *skb, *xmit_skb; 1515 struct sk_buff *skb, *xmit_skb;
1516 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1516 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1517 struct sta_info *dsta; 1517 struct sta_info *dsta;
1518 1518
1519 skb = rx->skb; 1519 skb = rx->skb;
1520 xmit_skb = NULL; 1520 xmit_skb = NULL;
1521 1521
1522 if ((sdata->vif.type == NL80211_IFTYPE_AP || 1522 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1523 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1523 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1524 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 1524 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1525 (rx->flags & IEEE80211_RX_RA_MATCH) && 1525 (rx->flags & IEEE80211_RX_RA_MATCH) &&
1526 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 1526 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1527 if (is_multicast_ether_addr(ehdr->h_dest)) { 1527 if (is_multicast_ether_addr(ehdr->h_dest)) {
1528 /* 1528 /*
1529 * send multicast frames both to higher layers in 1529 * send multicast frames both to higher layers in
1530 * local net stack and back to the wireless medium 1530 * local net stack and back to the wireless medium
1531 */ 1531 */
1532 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1532 xmit_skb = skb_copy(skb, GFP_ATOMIC);
1533 if (!xmit_skb && net_ratelimit()) 1533 if (!xmit_skb && net_ratelimit())
1534 printk(KERN_DEBUG "%s: failed to clone " 1534 printk(KERN_DEBUG "%s: failed to clone "
1535 "multicast frame\n", dev->name); 1535 "multicast frame\n", dev->name);
1536 } else { 1536 } else {
1537 dsta = sta_info_get(sdata, skb->data); 1537 dsta = sta_info_get(sdata, skb->data);
1538 if (dsta) { 1538 if (dsta) {
1539 /* 1539 /*
1540 * The destination station is associated to 1540 * The destination station is associated to
1541 * this AP (in this VLAN), so send the frame 1541 * this AP (in this VLAN), so send the frame
1542 * directly to it and do not pass it to local 1542 * directly to it and do not pass it to local
1543 * net stack. 1543 * net stack.
1544 */ 1544 */
1545 xmit_skb = skb; 1545 xmit_skb = skb;
1546 skb = NULL; 1546 skb = NULL;
1547 } 1547 }
1548 } 1548 }
1549 } 1549 }
1550 1550
1551 if (skb) { 1551 if (skb) {
1552 int align __maybe_unused; 1552 int align __maybe_unused;
1553 1553
1554 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1554 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1555 /* 1555 /*
1556 * 'align' will only take the values 0 or 2 here 1556 * 'align' will only take the values 0 or 2 here
1557 * since all frames are required to be aligned 1557 * since all frames are required to be aligned
1558 * to 2-byte boundaries when being passed to 1558 * to 2-byte boundaries when being passed to
1559 * mac80211. That also explains the __skb_push() 1559 * mac80211. That also explains the __skb_push()
1560 * below. 1560 * below.
1561 */ 1561 */
1562 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3; 1562 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1563 if (align) { 1563 if (align) {
1564 if (WARN_ON(skb_headroom(skb) < 3)) { 1564 if (WARN_ON(skb_headroom(skb) < 3)) {
1565 dev_kfree_skb(skb); 1565 dev_kfree_skb(skb);
1566 skb = NULL; 1566 skb = NULL;
1567 } else { 1567 } else {
1568 u8 *data = skb->data; 1568 u8 *data = skb->data;
1569 size_t len = skb_headlen(skb); 1569 size_t len = skb_headlen(skb);
1570 skb->data -= align; 1570 skb->data -= align;
1571 memmove(skb->data, data, len); 1571 memmove(skb->data, data, len);
1572 skb_set_tail_pointer(skb, len); 1572 skb_set_tail_pointer(skb, len);
1573 } 1573 }
1574 } 1574 }
1575 #endif 1575 #endif
1576 1576
1577 if (skb) { 1577 if (skb) {
1578 /* deliver to local stack */ 1578 /* deliver to local stack */
1579 skb->protocol = eth_type_trans(skb, dev); 1579 skb->protocol = eth_type_trans(skb, dev);
1580 memset(skb->cb, 0, sizeof(skb->cb)); 1580 memset(skb->cb, 0, sizeof(skb->cb));
1581 netif_receive_skb(skb); 1581 netif_receive_skb(skb);
1582 } 1582 }
1583 } 1583 }
1584 1584
1585 if (xmit_skb) { 1585 if (xmit_skb) {
1586 /* send to wireless media */ 1586 /* send to wireless media */
1587 xmit_skb->protocol = htons(ETH_P_802_3); 1587 xmit_skb->protocol = htons(ETH_P_802_3);
1588 skb_reset_network_header(xmit_skb); 1588 skb_reset_network_header(xmit_skb);
1589 skb_reset_mac_header(xmit_skb); 1589 skb_reset_mac_header(xmit_skb);
1590 dev_queue_xmit(xmit_skb); 1590 dev_queue_xmit(xmit_skb);
1591 } 1591 }
1592 } 1592 }
1593 1593
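The delivery path above shifts the frame by 0 or 2 bytes so that whatever follows the 14-byte Ethernet header lands on a 4-byte boundary on hosts without efficient unaligned access. A rough sketch of that same computation with a stand-in buffer instead of an skb (not part of the committed file):

/* Illustrative userspace sketch only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14

static unsigned int align_offset(const void *data)
{
	/* frames handed to mac80211 are at least 2-byte aligned, so this
	 * evaluates to 0 or 2, matching the comment in the handler above */
	return ((uintptr_t)data + ETH_HLEN) & 3;
}

int main(void)
{
	static uint32_t buf[16];	/* 4-byte aligned backing store */
	const uint8_t *p = (const uint8_t *)buf;

	printf("offset at +0: %u, at +2: %u\n",
	       align_offset(p), align_offset(p + 2));
	return 0;
}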
1594 static ieee80211_rx_result debug_noinline 1594 static ieee80211_rx_result debug_noinline
1595 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1595 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1596 { 1596 {
1597 struct net_device *dev = rx->sdata->dev; 1597 struct net_device *dev = rx->sdata->dev;
1598 struct sk_buff *skb = rx->skb; 1598 struct sk_buff *skb = rx->skb;
1599 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1599 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1600 __le16 fc = hdr->frame_control; 1600 __le16 fc = hdr->frame_control;
1601 struct sk_buff_head frame_list; 1601 struct sk_buff_head frame_list;
1602 1602
1603 if (unlikely(!ieee80211_is_data(fc))) 1603 if (unlikely(!ieee80211_is_data(fc)))
1604 return RX_CONTINUE; 1604 return RX_CONTINUE;
1605 1605
1606 if (unlikely(!ieee80211_is_data_present(fc))) 1606 if (unlikely(!ieee80211_is_data_present(fc)))
1607 return RX_DROP_MONITOR; 1607 return RX_DROP_MONITOR;
1608 1608
1609 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1609 if (!(rx->flags & IEEE80211_RX_AMSDU))
1610 return RX_CONTINUE; 1610 return RX_CONTINUE;
1611 1611
1612 if (ieee80211_has_a4(hdr->frame_control) && 1612 if (ieee80211_has_a4(hdr->frame_control) &&
1613 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1613 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1614 !rx->sdata->u.vlan.sta) 1614 !rx->sdata->u.vlan.sta)
1615 return RX_DROP_UNUSABLE; 1615 return RX_DROP_UNUSABLE;
1616 1616
1617 if (is_multicast_ether_addr(hdr->addr1) && 1617 if (is_multicast_ether_addr(hdr->addr1) &&
1618 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1618 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1619 rx->sdata->u.vlan.sta) || 1619 rx->sdata->u.vlan.sta) ||
1620 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 1620 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1621 rx->sdata->u.mgd.use_4addr))) 1621 rx->sdata->u.mgd.use_4addr)))
1622 return RX_DROP_UNUSABLE; 1622 return RX_DROP_UNUSABLE;
1623 1623
1624 skb->dev = dev; 1624 skb->dev = dev;
1625 __skb_queue_head_init(&frame_list); 1625 __skb_queue_head_init(&frame_list);
1626 1626
1627 if (skb_linearize(skb)) 1627 if (skb_linearize(skb))
1628 return RX_DROP_UNUSABLE; 1628 return RX_DROP_UNUSABLE;
1629 1629
1630 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 1630 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1631 rx->sdata->vif.type, 1631 rx->sdata->vif.type,
1632 rx->local->hw.extra_tx_headroom); 1632 rx->local->hw.extra_tx_headroom);
1633 1633
1634 while (!skb_queue_empty(&frame_list)) { 1634 while (!skb_queue_empty(&frame_list)) {
1635 rx->skb = __skb_dequeue(&frame_list); 1635 rx->skb = __skb_dequeue(&frame_list);
1636 1636
1637 if (!ieee80211_frame_allowed(rx, fc)) { 1637 if (!ieee80211_frame_allowed(rx, fc)) {
1638 dev_kfree_skb(rx->skb); 1638 dev_kfree_skb(rx->skb);
1639 continue; 1639 continue;
1640 } 1640 }
1641 dev->stats.rx_packets++; 1641 dev->stats.rx_packets++;
1642 dev->stats.rx_bytes += rx->skb->len; 1642 dev->stats.rx_bytes += rx->skb->len;
1643 1643
1644 ieee80211_deliver_skb(rx); 1644 ieee80211_deliver_skb(rx);
1645 } 1645 }
1646 1646
1647 return RX_QUEUED; 1647 return RX_QUEUED;
1648 } 1648 }
1649 1649
1650 #ifdef CONFIG_MAC80211_MESH 1650 #ifdef CONFIG_MAC80211_MESH
1651 static ieee80211_rx_result 1651 static ieee80211_rx_result
1652 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 1652 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1653 { 1653 {
1654 struct ieee80211_hdr *hdr; 1654 struct ieee80211_hdr *hdr;
1655 struct ieee80211s_hdr *mesh_hdr; 1655 struct ieee80211s_hdr *mesh_hdr;
1656 unsigned int hdrlen; 1656 unsigned int hdrlen;
1657 struct sk_buff *skb = rx->skb, *fwd_skb; 1657 struct sk_buff *skb = rx->skb, *fwd_skb;
1658 struct ieee80211_local *local = rx->local; 1658 struct ieee80211_local *local = rx->local;
1659 struct ieee80211_sub_if_data *sdata = rx->sdata; 1659 struct ieee80211_sub_if_data *sdata = rx->sdata;
1660 1660
1661 hdr = (struct ieee80211_hdr *) skb->data; 1661 hdr = (struct ieee80211_hdr *) skb->data;
1662 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1662 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1663 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1663 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1664 1664
1665 if (!ieee80211_is_data(hdr->frame_control)) 1665 if (!ieee80211_is_data(hdr->frame_control))
1666 return RX_CONTINUE; 1666 return RX_CONTINUE;
1667 1667
1668 if (!mesh_hdr->ttl) 1668 if (!mesh_hdr->ttl)
1669 /* illegal frame */ 1669 /* illegal frame */
1670 return RX_DROP_MONITOR; 1670 return RX_DROP_MONITOR;
1671 1671
1672 if (mesh_hdr->flags & MESH_FLAGS_AE) { 1672 if (mesh_hdr->flags & MESH_FLAGS_AE) {
1673 struct mesh_path *mppath; 1673 struct mesh_path *mppath;
1674 char *proxied_addr; 1674 char *proxied_addr;
1675 char *mpp_addr; 1675 char *mpp_addr;
1676 1676
1677 if (is_multicast_ether_addr(hdr->addr1)) { 1677 if (is_multicast_ether_addr(hdr->addr1)) {
1678 mpp_addr = hdr->addr3; 1678 mpp_addr = hdr->addr3;
1679 proxied_addr = mesh_hdr->eaddr1; 1679 proxied_addr = mesh_hdr->eaddr1;
1680 } else { 1680 } else {
1681 mpp_addr = hdr->addr4; 1681 mpp_addr = hdr->addr4;
1682 proxied_addr = mesh_hdr->eaddr2; 1682 proxied_addr = mesh_hdr->eaddr2;
1683 } 1683 }
1684 1684
1685 rcu_read_lock(); 1685 rcu_read_lock();
1686 mppath = mpp_path_lookup(proxied_addr, sdata); 1686 mppath = mpp_path_lookup(proxied_addr, sdata);
1687 if (!mppath) { 1687 if (!mppath) {
1688 mpp_path_add(proxied_addr, mpp_addr, sdata); 1688 mpp_path_add(proxied_addr, mpp_addr, sdata);
1689 } else { 1689 } else {
1690 spin_lock_bh(&mppath->state_lock); 1690 spin_lock_bh(&mppath->state_lock);
1691 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) 1691 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1692 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 1692 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1693 spin_unlock_bh(&mppath->state_lock); 1693 spin_unlock_bh(&mppath->state_lock);
1694 } 1694 }
1695 rcu_read_unlock(); 1695 rcu_read_unlock();
1696 } 1696 }
1697 1697
1698 /* Frame has reached destination. Don't forward */ 1698 /* Frame has reached destination. Don't forward */
1699 if (!is_multicast_ether_addr(hdr->addr1) && 1699 if (!is_multicast_ether_addr(hdr->addr1) &&
1700 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) 1700 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1701 return RX_CONTINUE; 1701 return RX_CONTINUE;
1702 1702
1703 mesh_hdr->ttl--; 1703 mesh_hdr->ttl--;
1704 1704
1705 if (rx->flags & IEEE80211_RX_RA_MATCH) { 1705 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1706 if (!mesh_hdr->ttl) 1706 if (!mesh_hdr->ttl)
1707 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, 1707 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1708 dropped_frames_ttl); 1708 dropped_frames_ttl);
1709 else { 1709 else {
1710 struct ieee80211_hdr *fwd_hdr; 1710 struct ieee80211_hdr *fwd_hdr;
1711 struct ieee80211_tx_info *info; 1711 struct ieee80211_tx_info *info;
1712 1712
1713 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1713 fwd_skb = skb_copy(skb, GFP_ATOMIC);
1714 1714
1715 if (!fwd_skb && net_ratelimit()) 1715 if (!fwd_skb && net_ratelimit())
1716 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1716 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1717 sdata->name); 1717 sdata->name);
1718 1718
1719 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1719 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1720 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 1720 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1721 info = IEEE80211_SKB_CB(fwd_skb); 1721 info = IEEE80211_SKB_CB(fwd_skb);
1722 memset(info, 0, sizeof(*info)); 1722 memset(info, 0, sizeof(*info));
1723 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1723 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1724 info->control.vif = &rx->sdata->vif; 1724 info->control.vif = &rx->sdata->vif;
1725 skb_set_queue_mapping(skb, 1725 skb_set_queue_mapping(skb,
1726 ieee80211_select_queue(rx->sdata, fwd_skb)); 1726 ieee80211_select_queue(rx->sdata, fwd_skb));
1727 ieee80211_set_qos_hdr(local, skb); 1727 ieee80211_set_qos_hdr(local, skb);
1728 if (is_multicast_ether_addr(fwd_hdr->addr1)) 1728 if (is_multicast_ether_addr(fwd_hdr->addr1))
1729 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1729 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1730 fwded_mcast); 1730 fwded_mcast);
1731 else { 1731 else {
1732 int err; 1732 int err;
1733 /* 1733 /*
1734 * Save TA to addr1 to send TA a path error if a 1734 * Save TA to addr1 to send TA a path error if a
1735 * suitable next hop is not found 1735 * suitable next hop is not found
1736 */ 1736 */
1737 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, 1737 memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
1738 ETH_ALEN); 1738 ETH_ALEN);
1739 err = mesh_nexthop_lookup(fwd_skb, sdata); 1739 err = mesh_nexthop_lookup(fwd_skb, sdata);
1740 /* Failed to immediately resolve next hop: 1740 /* Failed to immediately resolve next hop:
1741 * fwded frame was dropped or will be added 1741 * fwded frame was dropped or will be added
1742 * later to the pending skb queue. */ 1742 * later to the pending skb queue. */
1743 if (err) 1743 if (err)
1744 return RX_DROP_MONITOR; 1744 return RX_DROP_MONITOR;
1745 1745
1746 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1746 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1747 fwded_unicast); 1747 fwded_unicast);
1748 } 1748 }
1749 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1749 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1750 fwded_frames); 1750 fwded_frames);
1751 ieee80211_add_pending_skb(local, fwd_skb); 1751 ieee80211_add_pending_skb(local, fwd_skb);
1752 } 1752 }
1753 } 1753 }
1754 1754
1755 if (is_multicast_ether_addr(hdr->addr1) || 1755 if (is_multicast_ether_addr(hdr->addr1) ||
1756 sdata->dev->flags & IFF_PROMISC) 1756 sdata->dev->flags & IFF_PROMISC)
1757 return RX_CONTINUE; 1757 return RX_CONTINUE;
1758 else 1758 else
1759 return RX_DROP_MONITOR; 1759 return RX_DROP_MONITOR;
1760 } 1760 }
1761 #endif 1761 #endif
1762 1762
1763 static ieee80211_rx_result debug_noinline 1763 static ieee80211_rx_result debug_noinline
1764 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1764 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1765 { 1765 {
1766 struct ieee80211_sub_if_data *sdata = rx->sdata; 1766 struct ieee80211_sub_if_data *sdata = rx->sdata;
1767 struct ieee80211_local *local = rx->local; 1767 struct ieee80211_local *local = rx->local;
1768 struct net_device *dev = sdata->dev; 1768 struct net_device *dev = sdata->dev;
1769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1770 __le16 fc = hdr->frame_control; 1770 __le16 fc = hdr->frame_control;
1771 int err; 1771 int err;
1772 1772
1773 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 1773 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1774 return RX_CONTINUE; 1774 return RX_CONTINUE;
1775 1775
1776 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1776 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1777 return RX_DROP_MONITOR; 1777 return RX_DROP_MONITOR;
1778 1778
1779 /* 1779 /*
1780 * Allow the cooked monitor interface of an AP to see 4-addr frames so 1780 * Allow the cooked monitor interface of an AP to see 4-addr frames so
1781 * that a 4-addr station can be detected and moved into a separate VLAN 1781 * that a 4-addr station can be detected and moved into a separate VLAN
1782 */ 1782 */
1783 if (ieee80211_has_a4(hdr->frame_control) && 1783 if (ieee80211_has_a4(hdr->frame_control) &&
1784 sdata->vif.type == NL80211_IFTYPE_AP) 1784 sdata->vif.type == NL80211_IFTYPE_AP)
1785 return RX_DROP_MONITOR; 1785 return RX_DROP_MONITOR;
1786 1786
1787 err = __ieee80211_data_to_8023(rx); 1787 err = __ieee80211_data_to_8023(rx);
1788 if (unlikely(err)) 1788 if (unlikely(err))
1789 return RX_DROP_UNUSABLE; 1789 return RX_DROP_UNUSABLE;
1790 1790
1791 if (!ieee80211_frame_allowed(rx, fc)) 1791 if (!ieee80211_frame_allowed(rx, fc))
1792 return RX_DROP_MONITOR; 1792 return RX_DROP_MONITOR;
1793 1793
1794 rx->skb->dev = dev; 1794 rx->skb->dev = dev;
1795 1795
1796 dev->stats.rx_packets++; 1796 dev->stats.rx_packets++;
1797 dev->stats.rx_bytes += rx->skb->len; 1797 dev->stats.rx_bytes += rx->skb->len;
1798 1798
1799 if (ieee80211_is_data(hdr->frame_control) && 1799 if (ieee80211_is_data(hdr->frame_control) &&
1800 !is_multicast_ether_addr(hdr->addr1) && 1800 !is_multicast_ether_addr(hdr->addr1) &&
1801 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) { 1801 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
1802 mod_timer(&local->dynamic_ps_timer, jiffies + 1802 mod_timer(&local->dynamic_ps_timer, jiffies +
1803 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 1803 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1804 } 1804 }
1805 1805
1806 ieee80211_deliver_skb(rx); 1806 ieee80211_deliver_skb(rx);
1807 1807
1808 return RX_QUEUED; 1808 return RX_QUEUED;
1809 } 1809 }
1810 1810
1811 static ieee80211_rx_result debug_noinline 1811 static ieee80211_rx_result debug_noinline
1812 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 1812 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1813 { 1813 {
1814 struct ieee80211_local *local = rx->local; 1814 struct ieee80211_local *local = rx->local;
1815 struct ieee80211_hw *hw = &local->hw; 1815 struct ieee80211_hw *hw = &local->hw;
1816 struct sk_buff *skb = rx->skb; 1816 struct sk_buff *skb = rx->skb;
1817 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 1817 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
1818 struct tid_ampdu_rx *tid_agg_rx; 1818 struct tid_ampdu_rx *tid_agg_rx;
1819 u16 start_seq_num; 1819 u16 start_seq_num;
1820 u16 tid; 1820 u16 tid;
1821 1821
1822 if (likely(!ieee80211_is_ctl(bar->frame_control))) 1822 if (likely(!ieee80211_is_ctl(bar->frame_control)))
1823 return RX_CONTINUE; 1823 return RX_CONTINUE;
1824 1824
1825 if (ieee80211_is_back_req(bar->frame_control)) { 1825 if (ieee80211_is_back_req(bar->frame_control)) {
1826 struct { 1826 struct {
1827 __le16 control, start_seq_num; 1827 __le16 control, start_seq_num;
1828 } __packed bar_data; 1828 } __packed bar_data;
1829 1829
1830 if (!rx->sta) 1830 if (!rx->sta)
1831 return RX_DROP_MONITOR; 1831 return RX_DROP_MONITOR;
1832 1832
1833 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 1833 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
1834 &bar_data, sizeof(bar_data))) 1834 &bar_data, sizeof(bar_data)))
1835 return RX_DROP_MONITOR; 1835 return RX_DROP_MONITOR;
1836 1836
1837 tid = le16_to_cpu(bar_data.control) >> 12; 1837 tid = le16_to_cpu(bar_data.control) >> 12;
1838 1838
1839 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 1839 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
1840 if (!tid_agg_rx) 1840 if (!tid_agg_rx)
1841 return RX_DROP_MONITOR; 1841 return RX_DROP_MONITOR;
1842 1842
1843 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 1843 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
1844 1844
1845 /* reset session timer */ 1845 /* reset session timer */
1846 if (tid_agg_rx->timeout) 1846 if (tid_agg_rx->timeout)
1847 mod_timer(&tid_agg_rx->session_timer, 1847 mod_timer(&tid_agg_rx->session_timer,
1848 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 1848 TU_TO_EXP_TIME(tid_agg_rx->timeout));
1849 1849
1850 /* release stored frames up to start of BAR */ 1850 /* release stored frames up to start of BAR */
1851 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, 1851 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
1852 frames); 1852 frames);
1853 kfree_skb(skb); 1853 kfree_skb(skb);
1854 return RX_QUEUED; 1854 return RX_QUEUED;
1855 } 1855 }
1856 1856
1857 /* 1857 /*
1858 * After this point, we only want management frames, 1858 * After this point, we only want management frames,
1859 * so we can drop all remaining control frames to 1859 * so we can drop all remaining control frames to
1860 * cooked monitor interfaces. 1860 * cooked monitor interfaces.
1861 */ 1861 */
1862 return RX_DROP_MONITOR; 1862 return RX_DROP_MONITOR;
1863 } 1863 }
1864 1864
1865 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 1865 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1866 struct ieee80211_mgmt *mgmt, 1866 struct ieee80211_mgmt *mgmt,
1867 size_t len) 1867 size_t len)
1868 { 1868 {
1869 struct ieee80211_local *local = sdata->local; 1869 struct ieee80211_local *local = sdata->local;
1870 struct sk_buff *skb; 1870 struct sk_buff *skb;
1871 struct ieee80211_mgmt *resp; 1871 struct ieee80211_mgmt *resp;
1872 1872
1873 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { 1873 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1874 /* Not to own unicast address */ 1874 /* Not to own unicast address */
1875 return; 1875 return;
1876 } 1876 }
1877 1877
1878 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || 1878 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
1879 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { 1879 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
1880 /* Not from the current AP or not associated yet. */ 1880 /* Not from the current AP or not associated yet. */
1881 return; 1881 return;
1882 } 1882 }
1883 1883
1884 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 1884 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
1885 /* Too short SA Query request frame */ 1885 /* Too short SA Query request frame */
1886 return; 1886 return;
1887 } 1887 }
1888 1888
1889 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 1889 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
1890 if (skb == NULL) 1890 if (skb == NULL)
1891 return; 1891 return;
1892 1892
1893 skb_reserve(skb, local->hw.extra_tx_headroom); 1893 skb_reserve(skb, local->hw.extra_tx_headroom);
1894 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 1894 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1895 memset(resp, 0, 24); 1895 memset(resp, 0, 24);
1896 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1896 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1897 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 1897 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1898 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 1898 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1899 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1899 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1900 IEEE80211_STYPE_ACTION); 1900 IEEE80211_STYPE_ACTION);
1901 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 1901 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
1902 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 1902 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
1903 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 1903 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
1904 memcpy(resp->u.action.u.sa_query.trans_id, 1904 memcpy(resp->u.action.u.sa_query.trans_id,
1905 mgmt->u.action.u.sa_query.trans_id, 1905 mgmt->u.action.u.sa_query.trans_id,
1906 WLAN_SA_QUERY_TR_ID_LEN); 1906 WLAN_SA_QUERY_TR_ID_LEN);
1907 1907
1908 ieee80211_tx_skb(sdata, skb); 1908 ieee80211_tx_skb(sdata, skb);
1909 } 1909 }
1910 1910
1911 static ieee80211_rx_result debug_noinline 1911 static ieee80211_rx_result debug_noinline
1912 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 1912 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1913 { 1913 {
1914 struct ieee80211_local *local = rx->local; 1914 struct ieee80211_local *local = rx->local;
1915 struct ieee80211_sub_if_data *sdata = rx->sdata; 1915 struct ieee80211_sub_if_data *sdata = rx->sdata;
1916 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1916 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1917 struct sk_buff *nskb; 1917 struct sk_buff *nskb;
1918 struct ieee80211_rx_status *status; 1918 struct ieee80211_rx_status *status;
1919 int len = rx->skb->len; 1919 int len = rx->skb->len;
1920 1920
1921 if (!ieee80211_is_action(mgmt->frame_control)) 1921 if (!ieee80211_is_action(mgmt->frame_control))
1922 return RX_CONTINUE; 1922 return RX_CONTINUE;
1923 1923
1924 /* drop too small frames */ 1924 /* drop too small frames */
1925 if (len < IEEE80211_MIN_ACTION_SIZE) 1925 if (len < IEEE80211_MIN_ACTION_SIZE)
1926 return RX_DROP_UNUSABLE; 1926 return RX_DROP_UNUSABLE;
1927 1927
1928 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) 1928 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
1929 return RX_DROP_UNUSABLE; 1929 return RX_DROP_UNUSABLE;
1930 1930
1931 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1931 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1932 return RX_DROP_UNUSABLE; 1932 return RX_DROP_UNUSABLE;
1933 1933
1934 if (ieee80211_drop_unencrypted_mgmt(rx)) 1934 if (ieee80211_drop_unencrypted_mgmt(rx))
1935 return RX_DROP_UNUSABLE; 1935 return RX_DROP_UNUSABLE;
1936 1936
1937 switch (mgmt->u.action.category) { 1937 switch (mgmt->u.action.category) {
1938 case WLAN_CATEGORY_BACK: 1938 case WLAN_CATEGORY_BACK:
1939 /* 1939 /*
1940 * The aggregation code is not prepared to handle 1940 * The aggregation code is not prepared to handle
1941 * anything but STA/AP due to the BSSID handling; 1941 * anything but STA/AP due to the BSSID handling;
1942 * IBSS could work in the code but isn't supported 1942 * IBSS could work in the code but isn't supported
1943 * by drivers or the standard. 1943 * by drivers or the standard.
1944 */ 1944 */
1945 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1945 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1946 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1946 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1947 sdata->vif.type != NL80211_IFTYPE_AP) 1947 sdata->vif.type != NL80211_IFTYPE_AP)
1948 break; 1948 break;
1949 1949
1950 /* verify action_code is present */ 1950 /* verify action_code is present */
1951 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 1951 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1952 break; 1952 break;
1953 1953
1954 switch (mgmt->u.action.u.addba_req.action_code) { 1954 switch (mgmt->u.action.u.addba_req.action_code) {
1955 case WLAN_ACTION_ADDBA_REQ: 1955 case WLAN_ACTION_ADDBA_REQ:
1956 if (len < (IEEE80211_MIN_ACTION_SIZE + 1956 if (len < (IEEE80211_MIN_ACTION_SIZE +
1957 sizeof(mgmt->u.action.u.addba_req))) 1957 sizeof(mgmt->u.action.u.addba_req)))
1958 goto invalid; 1958 goto invalid;
1959 break; 1959 break;
1960 case WLAN_ACTION_ADDBA_RESP: 1960 case WLAN_ACTION_ADDBA_RESP:
1961 if (len < (IEEE80211_MIN_ACTION_SIZE + 1961 if (len < (IEEE80211_MIN_ACTION_SIZE +
1962 sizeof(mgmt->u.action.u.addba_resp))) 1962 sizeof(mgmt->u.action.u.addba_resp)))
1963 goto invalid; 1963 goto invalid;
1964 break; 1964 break;
1965 case WLAN_ACTION_DELBA: 1965 case WLAN_ACTION_DELBA:
1966 if (len < (IEEE80211_MIN_ACTION_SIZE + 1966 if (len < (IEEE80211_MIN_ACTION_SIZE +
1967 sizeof(mgmt->u.action.u.delba))) 1967 sizeof(mgmt->u.action.u.delba)))
1968 goto invalid; 1968 goto invalid;
1969 break; 1969 break;
1970 default: 1970 default:
1971 goto invalid; 1971 goto invalid;
1972 } 1972 }
1973 1973
1974 goto queue; 1974 goto queue;
1975 case WLAN_CATEGORY_SPECTRUM_MGMT: 1975 case WLAN_CATEGORY_SPECTRUM_MGMT:
1976 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 1976 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1977 break; 1977 break;
1978 1978
1979 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1979 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1980 break; 1980 break;
1981 1981
1982 /* verify action_code is present */ 1982 /* verify action_code is present */
1983 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 1983 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1984 break; 1984 break;
1985 1985
1986 switch (mgmt->u.action.u.measurement.action_code) { 1986 switch (mgmt->u.action.u.measurement.action_code) {
1987 case WLAN_ACTION_SPCT_MSR_REQ: 1987 case WLAN_ACTION_SPCT_MSR_REQ:
1988 if (len < (IEEE80211_MIN_ACTION_SIZE + 1988 if (len < (IEEE80211_MIN_ACTION_SIZE +
1989 sizeof(mgmt->u.action.u.measurement))) 1989 sizeof(mgmt->u.action.u.measurement)))
1990 break; 1990 break;
1991 ieee80211_process_measurement_req(sdata, mgmt, len); 1991 ieee80211_process_measurement_req(sdata, mgmt, len);
1992 goto handled; 1992 goto handled;
1993 case WLAN_ACTION_SPCT_CHL_SWITCH: 1993 case WLAN_ACTION_SPCT_CHL_SWITCH:
1994 if (len < (IEEE80211_MIN_ACTION_SIZE + 1994 if (len < (IEEE80211_MIN_ACTION_SIZE +
1995 sizeof(mgmt->u.action.u.chan_switch))) 1995 sizeof(mgmt->u.action.u.chan_switch)))
1996 break; 1996 break;
1997 1997
1998 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1998 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1999 break; 1999 break;
2000 2000
2001 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 2001 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2002 break; 2002 break;
2003 2003
2004 goto queue; 2004 goto queue;
2005 } 2005 }
2006 break; 2006 break;
2007 case WLAN_CATEGORY_SA_QUERY: 2007 case WLAN_CATEGORY_SA_QUERY:
2008 if (len < (IEEE80211_MIN_ACTION_SIZE + 2008 if (len < (IEEE80211_MIN_ACTION_SIZE +
2009 sizeof(mgmt->u.action.u.sa_query))) 2009 sizeof(mgmt->u.action.u.sa_query)))
2010 break; 2010 break;
2011 2011
2012 switch (mgmt->u.action.u.sa_query.action) { 2012 switch (mgmt->u.action.u.sa_query.action) {
2013 case WLAN_ACTION_SA_QUERY_REQUEST: 2013 case WLAN_ACTION_SA_QUERY_REQUEST:
2014 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2014 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2015 break; 2015 break;
2016 ieee80211_process_sa_query_req(sdata, mgmt, len); 2016 ieee80211_process_sa_query_req(sdata, mgmt, len);
2017 goto handled; 2017 goto handled;
2018 } 2018 }
2019 break; 2019 break;
2020 case WLAN_CATEGORY_MESH_PLINK: 2020 case WLAN_CATEGORY_MESH_PLINK:
2021 case WLAN_CATEGORY_MESH_PATH_SEL: 2021 case WLAN_CATEGORY_MESH_PATH_SEL:
2022 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2022 if (!ieee80211_vif_is_mesh(&sdata->vif))
2023 break; 2023 break;
2024 goto queue; 2024 goto queue;
2025 } 2025 }
2026 2026
2027 invalid: 2027 invalid:
2028 /* 2028 /*
2029 * For AP mode, hostapd is responsible for handling any action 2029 * For AP mode, hostapd is responsible for handling any action
2030 * frames that we didn't handle, including returning unknown 2030 * frames that we didn't handle, including returning unknown
2031 * ones. For all other modes we will return them to the sender, 2031 * ones. For all other modes we will return them to the sender,
2032 * setting the 0x80 bit in the action category, as required by 2032 * setting the 0x80 bit in the action category, as required by
2033 * 802.11-2007 7.3.1.11. 2033 * 802.11-2007 7.3.1.11.
2034 */ 2034 */
2035 if (sdata->vif.type == NL80211_IFTYPE_AP || 2035 if (sdata->vif.type == NL80211_IFTYPE_AP ||
2036 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2036 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2037 return RX_DROP_MONITOR; 2037 return RX_DROP_MONITOR;
2038 2038
2039 /* 2039 /*
2040 * Getting here means the kernel doesn't know how to handle 2040 * Getting here means the kernel doesn't know how to handle
2041 * it, but maybe userspace does ... include returned frames 2041 * it, but maybe userspace does ... include returned frames
2042 * so userspace can register for those to know whether ones 2042 * so userspace can register for those to know whether ones
2043 * it transmitted were processed or returned. 2043 * it transmitted were processed or returned.
2044 */ 2044 */
2045 status = IEEE80211_SKB_RXCB(rx->skb); 2045 status = IEEE80211_SKB_RXCB(rx->skb);
2046 2046
2047 if (cfg80211_rx_action(rx->sdata->dev, status->freq, 2047 if (cfg80211_rx_action(rx->sdata->dev, status->freq,
2048 rx->skb->data, rx->skb->len, 2048 rx->skb->data, rx->skb->len,
2049 GFP_ATOMIC)) 2049 GFP_ATOMIC))
2050 goto handled; 2050 goto handled;
2051 2051
2052 /* do not return rejected action frames */ 2052 /* do not return rejected action frames */
2053 if (mgmt->u.action.category & 0x80) 2053 if (mgmt->u.action.category & 0x80)
2054 return RX_DROP_UNUSABLE; 2054 return RX_DROP_UNUSABLE;
2055 2055
2056 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2056 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2057 GFP_ATOMIC); 2057 GFP_ATOMIC);
2058 if (nskb) { 2058 if (nskb) {
2059 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2059 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2060 2060
2061 nmgmt->u.action.category |= 0x80; 2061 nmgmt->u.action.category |= 0x80;
2062 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2062 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2063 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2063 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2064 2064
2065 memset(nskb->cb, 0, sizeof(nskb->cb)); 2065 memset(nskb->cb, 0, sizeof(nskb->cb));
2066 2066
2067 ieee80211_tx_skb(rx->sdata, nskb); 2067 ieee80211_tx_skb(rx->sdata, nskb);
2068 } 2068 }
2069 2069
2070 handled: 2070 handled:
2071 if (rx->sta) 2071 if (rx->sta)
2072 rx->sta->rx_packets++; 2072 rx->sta->rx_packets++;
2073 dev_kfree_skb(rx->skb); 2073 dev_kfree_skb(rx->skb);
2074 return RX_QUEUED; 2074 return RX_QUEUED;
2075 2075
2076 queue: 2076 queue:
2077 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2077 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2078 skb_queue_tail(&sdata->skb_queue, rx->skb); 2078 skb_queue_tail(&sdata->skb_queue, rx->skb);
2079 ieee80211_queue_work(&local->hw, &sdata->work); 2079 ieee80211_queue_work(&local->hw, &sdata->work);
2080 if (rx->sta) 2080 if (rx->sta)
2081 rx->sta->rx_packets++; 2081 rx->sta->rx_packets++;
2082 return RX_QUEUED; 2082 return RX_QUEUED;
2083 } 2083 }
2084 2084
2085 static ieee80211_rx_result debug_noinline 2085 static ieee80211_rx_result debug_noinline
2086 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2086 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2087 { 2087 {
2088 struct ieee80211_sub_if_data *sdata = rx->sdata; 2088 struct ieee80211_sub_if_data *sdata = rx->sdata;
2089 ieee80211_rx_result rxs; 2089 ieee80211_rx_result rxs;
2090 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2090 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2091 __le16 stype; 2091 __le16 stype;
2092 2092
2093 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 2093 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2094 return RX_DROP_MONITOR; 2094 return RX_DROP_MONITOR;
2095 2095
2096 if (rx->skb->len < 24) 2096 if (rx->skb->len < 24)
2097 return RX_DROP_MONITOR; 2097 return RX_DROP_MONITOR;
2098 2098
2099 if (ieee80211_drop_unencrypted_mgmt(rx)) 2099 if (ieee80211_drop_unencrypted_mgmt(rx))
2100 return RX_DROP_UNUSABLE; 2100 return RX_DROP_UNUSABLE;
2101 2101
2102 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); 2102 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2103 if (rxs != RX_CONTINUE) 2103 if (rxs != RX_CONTINUE)
2104 return rxs; 2104 return rxs;
2105 2105
2106 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2106 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2107 2107
2108 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2108 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2109 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2109 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2110 sdata->vif.type != NL80211_IFTYPE_STATION) 2110 sdata->vif.type != NL80211_IFTYPE_STATION)
2111 return RX_DROP_MONITOR; 2111 return RX_DROP_MONITOR;
2112 2112
2113 switch (stype) { 2113 switch (stype) {
2114 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2114 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2115 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2115 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2116 /* process for all: mesh, mlme, ibss */ 2116 /* process for all: mesh, mlme, ibss */
2117 break; 2117 break;
2118 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2118 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2119 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2119 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2120 /* process only for station */ 2120 /* process only for station */
2121 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2121 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2122 return RX_DROP_MONITOR; 2122 return RX_DROP_MONITOR;
2123 break; 2123 break;
2124 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2124 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2125 case cpu_to_le16(IEEE80211_STYPE_AUTH): 2125 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2126 /* process only for ibss */ 2126 /* process only for ibss */
2127 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2127 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2128 return RX_DROP_MONITOR; 2128 return RX_DROP_MONITOR;
2129 break; 2129 break;
2130 default: 2130 default:
2131 return RX_DROP_MONITOR; 2131 return RX_DROP_MONITOR;
2132 } 2132 }
2133 2133
2134 /* queue up frame and kick off work to process it */ 2134 /* queue up frame and kick off work to process it */
2135 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2135 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2136 skb_queue_tail(&sdata->skb_queue, rx->skb); 2136 skb_queue_tail(&sdata->skb_queue, rx->skb);
2137 ieee80211_queue_work(&rx->local->hw, &sdata->work); 2137 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2138 if (rx->sta) 2138 if (rx->sta)
2139 rx->sta->rx_packets++; 2139 rx->sta->rx_packets++;
2140 2140
2141 return RX_QUEUED; 2141 return RX_QUEUED;
2142 } 2142 }
2143 2143
2144 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, 2144 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2145 struct ieee80211_rx_data *rx) 2145 struct ieee80211_rx_data *rx)
2146 { 2146 {
2147 int keyidx; 2147 int keyidx;
2148 unsigned int hdrlen; 2148 unsigned int hdrlen;
2149 2149
2150 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2150 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2151 if (rx->skb->len >= hdrlen + 4) 2151 if (rx->skb->len >= hdrlen + 4)
2152 keyidx = rx->skb->data[hdrlen + 3] >> 6; 2152 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2153 else 2153 else
2154 keyidx = -1; 2154 keyidx = -1;
2155 2155
2156 if (!rx->sta) { 2156 if (!rx->sta) {
2157 /* 2157 /*
2158 * Some hardware seems to generate incorrect Michael MIC 2158 * Some hardware seems to generate incorrect Michael MIC
2159 * reports; ignore them to avoid triggering countermeasures. 2159 * reports; ignore them to avoid triggering countermeasures.
2160 */ 2160 */
2161 return; 2161 return;
2162 } 2162 }
2163 2163
2164 if (!ieee80211_has_protected(hdr->frame_control)) 2164 if (!ieee80211_has_protected(hdr->frame_control))
2165 return; 2165 return;
2166 2166
2167 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) { 2167 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2168 /* 2168 /*
2169 * APs with pairwise keys should never receive Michael MIC 2169 * APs with pairwise keys should never receive Michael MIC
2170 * errors for non-zero keyidx because these are reserved for 2170 * errors for non-zero keyidx because these are reserved for
2171 * group keys and only the AP is sending real multicast 2171 * group keys and only the AP is sending real multicast
2172 * frames in the BSS. 2172 * frames in the BSS.
2173 */ 2173 */
2174 return; 2174 return;
2175 } 2175 }
2176 2176
2177 if (!ieee80211_is_data(hdr->frame_control) && 2177 if (!ieee80211_is_data(hdr->frame_control) &&
2178 !ieee80211_is_auth(hdr->frame_control)) 2178 !ieee80211_is_auth(hdr->frame_control))
2179 return; 2179 return;
2180 2180
2181 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL, 2181 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2182 GFP_ATOMIC); 2182 GFP_ATOMIC);
2183 } 2183 }
2184 2184
2185 /* TODO: use IEEE80211_RX_FRAGMENTED */ 2185 /* TODO: use IEEE80211_RX_FRAGMENTED */
2186 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2186 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2187 struct ieee80211_rate *rate) 2187 struct ieee80211_rate *rate)
2188 { 2188 {
2189 struct ieee80211_sub_if_data *sdata; 2189 struct ieee80211_sub_if_data *sdata;
2190 struct ieee80211_local *local = rx->local; 2190 struct ieee80211_local *local = rx->local;
2191 struct ieee80211_rtap_hdr { 2191 struct ieee80211_rtap_hdr {
2192 struct ieee80211_radiotap_header hdr; 2192 struct ieee80211_radiotap_header hdr;
2193 u8 flags; 2193 u8 flags;
2194 u8 rate_or_pad; 2194 u8 rate_or_pad;
2195 __le16 chan_freq; 2195 __le16 chan_freq;
2196 __le16 chan_flags; 2196 __le16 chan_flags;
2197 } __packed *rthdr; 2197 } __packed *rthdr;
2198 struct sk_buff *skb = rx->skb, *skb2; 2198 struct sk_buff *skb = rx->skb, *skb2;
2199 struct net_device *prev_dev = NULL; 2199 struct net_device *prev_dev = NULL;
2200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2201 2201
2202 if (status->flag & RX_FLAG_INTERNAL_CMTR)
2203 goto out_free_skb;
2204
2205 if (skb_headroom(skb) < sizeof(*rthdr) && 2202 if (skb_headroom(skb) < sizeof(*rthdr) &&
2206 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2203 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2207 goto out_free_skb; 2204 goto out_free_skb;
2208 2205
2209 rthdr = (void *)skb_push(skb, sizeof(*rthdr)); 2206 rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2210 memset(rthdr, 0, sizeof(*rthdr)); 2207 memset(rthdr, 0, sizeof(*rthdr));
2211 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); 2208 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2212 rthdr->hdr.it_present = 2209 rthdr->hdr.it_present =
2213 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 2210 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2214 (1 << IEEE80211_RADIOTAP_CHANNEL)); 2211 (1 << IEEE80211_RADIOTAP_CHANNEL));
2215 2212
2216 if (rate) { 2213 if (rate) {
2217 rthdr->rate_or_pad = rate->bitrate / 5; 2214 rthdr->rate_or_pad = rate->bitrate / 5;
2218 rthdr->hdr.it_present |= 2215 rthdr->hdr.it_present |=
2219 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 2216 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2220 } 2217 }
2221 rthdr->chan_freq = cpu_to_le16(status->freq); 2218 rthdr->chan_freq = cpu_to_le16(status->freq);
2222 2219
2223 if (status->band == IEEE80211_BAND_5GHZ) 2220 if (status->band == IEEE80211_BAND_5GHZ)
2224 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | 2221 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2225 IEEE80211_CHAN_5GHZ); 2222 IEEE80211_CHAN_5GHZ);
2226 else 2223 else
2227 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN | 2224 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2228 IEEE80211_CHAN_2GHZ); 2225 IEEE80211_CHAN_2GHZ);
2229 2226
2230 skb_set_mac_header(skb, 0); 2227 skb_set_mac_header(skb, 0);
2231 skb->ip_summed = CHECKSUM_UNNECESSARY; 2228 skb->ip_summed = CHECKSUM_UNNECESSARY;
2232 skb->pkt_type = PACKET_OTHERHOST; 2229 skb->pkt_type = PACKET_OTHERHOST;
2233 skb->protocol = htons(ETH_P_802_2); 2230 skb->protocol = htons(ETH_P_802_2);
2234 2231
2235 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2232 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2236 if (!ieee80211_sdata_running(sdata)) 2233 if (!ieee80211_sdata_running(sdata))
2237 continue; 2234 continue;
2238 2235
2239 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2236 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2240 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 2237 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2241 continue; 2238 continue;
2242 2239
2243 if (prev_dev) { 2240 if (prev_dev) {
2244 skb2 = skb_clone(skb, GFP_ATOMIC); 2241 skb2 = skb_clone(skb, GFP_ATOMIC);
2245 if (skb2) { 2242 if (skb2) {
2246 skb2->dev = prev_dev; 2243 skb2->dev = prev_dev;
2247 netif_receive_skb(skb2); 2244 netif_receive_skb(skb2);
2248 } 2245 }
2249 } 2246 }
2250 2247
2251 prev_dev = sdata->dev; 2248 prev_dev = sdata->dev;
2252 sdata->dev->stats.rx_packets++; 2249 sdata->dev->stats.rx_packets++;
2253 sdata->dev->stats.rx_bytes += skb->len; 2250 sdata->dev->stats.rx_bytes += skb->len;
2254 } 2251 }
2255 2252
2256 if (prev_dev) { 2253 if (prev_dev) {
2257 skb->dev = prev_dev; 2254 skb->dev = prev_dev;
2258 netif_receive_skb(skb); 2255 netif_receive_skb(skb);
2259 skb = NULL; 2256 skb = NULL;
2260 } else 2257 } else
2261 goto out_free_skb; 2258 goto out_free_skb;
2262 2259
2263 status->flag |= RX_FLAG_INTERNAL_CMTR;
2264 return; 2260 return;
2265 2261
2266 out_free_skb: 2262 out_free_skb:
2267 dev_kfree_skb(skb); 2263 dev_kfree_skb(skb);
2268 } 2264 }
2269 2265
2270 2266
2271 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, 2267 static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2272 struct ieee80211_rx_data *rx, 2268 struct ieee80211_rx_data *rx,
2273 struct sk_buff *skb, 2269 struct sk_buff *skb,
2274 struct ieee80211_rate *rate) 2270 struct ieee80211_rate *rate)
2275 { 2271 {
2276 struct sk_buff_head reorder_release; 2272 struct sk_buff_head reorder_release;
2277 ieee80211_rx_result res = RX_DROP_MONITOR; 2273 ieee80211_rx_result res = RX_DROP_MONITOR;
2278 2274
2279 __skb_queue_head_init(&reorder_release); 2275 __skb_queue_head_init(&reorder_release);
2280 2276
2281 rx->skb = skb; 2277 rx->skb = skb;
2282 rx->sdata = sdata; 2278 rx->sdata = sdata;
2283 2279
2284 #define CALL_RXH(rxh) \ 2280 #define CALL_RXH(rxh) \
2285 do { \ 2281 do { \
2286 res = rxh(rx); \ 2282 res = rxh(rx); \
2287 if (res != RX_CONTINUE) \ 2283 if (res != RX_CONTINUE) \
2288 goto rxh_next; \ 2284 goto rxh_next; \
2289 } while (0); 2285 } while (0);
2290 2286
2291 /* 2287 /*
2292 * NB: the rxh_next label works even if we jump 2288 * NB: the rxh_next label works even if we jump
2293 * to it from here because then the list will 2289 * to it from here because then the list will
2294 * be empty, which is a trivial check 2290 * be empty, which is a trivial check
2295 */ 2291 */
2296 CALL_RXH(ieee80211_rx_h_passive_scan) 2292 CALL_RXH(ieee80211_rx_h_passive_scan)
2297 CALL_RXH(ieee80211_rx_h_check) 2293 CALL_RXH(ieee80211_rx_h_check)
2298 2294
2299 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 2295 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2300 2296
2301 while ((skb = __skb_dequeue(&reorder_release))) { 2297 while ((skb = __skb_dequeue(&reorder_release))) {
2302 /* 2298 /*
2303 * all the other fields are valid across frames 2299 * all the other fields are valid across frames
2304 * that belong to an aMPDU since they are on the 2300 * that belong to an aMPDU since they are on the
2305 * same TID from the same station 2301 * same TID from the same station
2306 */ 2302 */
2307 rx->skb = skb; 2303 rx->skb = skb;
2308 2304
2309 CALL_RXH(ieee80211_rx_h_decrypt) 2305 CALL_RXH(ieee80211_rx_h_decrypt)
2310 CALL_RXH(ieee80211_rx_h_check_more_data) 2306 CALL_RXH(ieee80211_rx_h_check_more_data)
2311 CALL_RXH(ieee80211_rx_h_sta_process) 2307 CALL_RXH(ieee80211_rx_h_sta_process)
2312 CALL_RXH(ieee80211_rx_h_defragment) 2308 CALL_RXH(ieee80211_rx_h_defragment)
2313 CALL_RXH(ieee80211_rx_h_ps_poll) 2309 CALL_RXH(ieee80211_rx_h_ps_poll)
2314 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 2310 CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2315 /* must be after MMIC verify so header is counted in MPDU mic */ 2311 /* must be after MMIC verify so header is counted in MPDU mic */
2316 CALL_RXH(ieee80211_rx_h_remove_qos_control) 2312 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2317 CALL_RXH(ieee80211_rx_h_amsdu) 2313 CALL_RXH(ieee80211_rx_h_amsdu)
2318 #ifdef CONFIG_MAC80211_MESH 2314 #ifdef CONFIG_MAC80211_MESH
2319 if (ieee80211_vif_is_mesh(&sdata->vif)) 2315 if (ieee80211_vif_is_mesh(&sdata->vif))
2320 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2316 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2321 #endif 2317 #endif
2322 CALL_RXH(ieee80211_rx_h_data) 2318 CALL_RXH(ieee80211_rx_h_data)
2323 2319
2324 /* special treatment -- needs the queue */ 2320 /* special treatment -- needs the queue */
2325 res = ieee80211_rx_h_ctrl(rx, &reorder_release); 2321 res = ieee80211_rx_h_ctrl(rx, &reorder_release);
2326 if (res != RX_CONTINUE) 2322 if (res != RX_CONTINUE)
2327 goto rxh_next; 2323 goto rxh_next;
2328 2324
2329 CALL_RXH(ieee80211_rx_h_action) 2325 CALL_RXH(ieee80211_rx_h_action)
2330 CALL_RXH(ieee80211_rx_h_mgmt) 2326 CALL_RXH(ieee80211_rx_h_mgmt)
2331 2327
2332 #undef CALL_RXH 2328 #undef CALL_RXH
2333 2329
2334 rxh_next: 2330 rxh_next:
2335 switch (res) { 2331 switch (res) {
2336 case RX_DROP_MONITOR: 2332 case RX_DROP_MONITOR:
2337 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 2333 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2338 if (rx->sta) 2334 if (rx->sta)
2339 rx->sta->rx_dropped++; 2335 rx->sta->rx_dropped++;
2340 /* fall through */ 2336 /* fall through */
2341 case RX_CONTINUE: 2337 case RX_CONTINUE:
2342 ieee80211_rx_cooked_monitor(rx, rate); 2338 ieee80211_rx_cooked_monitor(rx, rate);
2343 break; 2339 break;
2344 case RX_DROP_UNUSABLE: 2340 case RX_DROP_UNUSABLE:
2345 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 2341 I802_DEBUG_INC(sdata->local->rx_handlers_drop);
2346 if (rx->sta) 2342 if (rx->sta)
2347 rx->sta->rx_dropped++; 2343 rx->sta->rx_dropped++;
2348 dev_kfree_skb(rx->skb); 2344 dev_kfree_skb(rx->skb);
2349 break; 2345 break;
2350 case RX_QUEUED: 2346 case RX_QUEUED:
2351 I802_DEBUG_INC(sdata->local->rx_handlers_queued); 2347 I802_DEBUG_INC(sdata->local->rx_handlers_queued);
2352 break; 2348 break;
2353 } 2349 }
2354 } 2350 }
2355 } 2351 }
2356 2352
2357 /* main receive path */ 2353 /* main receive path */
2358 2354
2359 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, 2355 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2360 struct ieee80211_rx_data *rx, 2356 struct ieee80211_rx_data *rx,
2361 struct ieee80211_hdr *hdr) 2357 struct ieee80211_hdr *hdr)
2362 { 2358 {
2363 struct sk_buff *skb = rx->skb; 2359 struct sk_buff *skb = rx->skb;
2364 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2360 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2365 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 2361 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2366 int multicast = is_multicast_ether_addr(hdr->addr1); 2362 int multicast = is_multicast_ether_addr(hdr->addr1);
2367 2363
2368 switch (sdata->vif.type) { 2364 switch (sdata->vif.type) {
2369 case NL80211_IFTYPE_STATION: 2365 case NL80211_IFTYPE_STATION:
2370 if (!bssid && !sdata->u.mgd.use_4addr) 2366 if (!bssid && !sdata->u.mgd.use_4addr)
2371 return 0; 2367 return 0;
2372 if (!multicast && 2368 if (!multicast &&
2373 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { 2369 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2374 if (!(sdata->dev->flags & IFF_PROMISC)) 2370 if (!(sdata->dev->flags & IFF_PROMISC))
2375 return 0; 2371 return 0;
2376 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2372 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2377 } 2373 }
2378 break; 2374 break;
2379 case NL80211_IFTYPE_ADHOC: 2375 case NL80211_IFTYPE_ADHOC:
2380 if (!bssid) 2376 if (!bssid)
2381 return 0; 2377 return 0;
2382 if (ieee80211_is_beacon(hdr->frame_control)) { 2378 if (ieee80211_is_beacon(hdr->frame_control)) {
2383 return 1; 2379 return 1;
2384 } 2380 }
2385 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 2381 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2386 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 2382 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2387 return 0; 2383 return 0;
2388 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2384 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2389 } else if (!multicast && 2385 } else if (!multicast &&
2390 compare_ether_addr(sdata->vif.addr, 2386 compare_ether_addr(sdata->vif.addr,
2391 hdr->addr1) != 0) { 2387 hdr->addr1) != 0) {
2392 if (!(sdata->dev->flags & IFF_PROMISC)) 2388 if (!(sdata->dev->flags & IFF_PROMISC))
2393 return 0; 2389 return 0;
2394 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2390 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2395 } else if (!rx->sta) { 2391 } else if (!rx->sta) {
2396 int rate_idx; 2392 int rate_idx;
2397 if (status->flag & RX_FLAG_HT) 2393 if (status->flag & RX_FLAG_HT)
2398 rate_idx = 0; /* TODO: HT rates */ 2394 rate_idx = 0; /* TODO: HT rates */
2399 else 2395 else
2400 rate_idx = status->rate_idx; 2396 rate_idx = status->rate_idx;
2401 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, 2397 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2402 hdr->addr2, BIT(rate_idx), GFP_ATOMIC); 2398 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2403 } 2399 }
2404 break; 2400 break;
2405 case NL80211_IFTYPE_MESH_POINT: 2401 case NL80211_IFTYPE_MESH_POINT:
2406 if (!multicast && 2402 if (!multicast &&
2407 compare_ether_addr(sdata->vif.addr, 2403 compare_ether_addr(sdata->vif.addr,
2408 hdr->addr1) != 0) { 2404 hdr->addr1) != 0) {
2409 if (!(sdata->dev->flags & IFF_PROMISC)) 2405 if (!(sdata->dev->flags & IFF_PROMISC))
2410 return 0; 2406 return 0;
2411 2407
2412 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2408 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2413 } 2409 }
2414 break; 2410 break;
2415 case NL80211_IFTYPE_AP_VLAN: 2411 case NL80211_IFTYPE_AP_VLAN:
2416 case NL80211_IFTYPE_AP: 2412 case NL80211_IFTYPE_AP:
2417 if (!bssid) { 2413 if (!bssid) {
2418 if (compare_ether_addr(sdata->vif.addr, 2414 if (compare_ether_addr(sdata->vif.addr,
2419 hdr->addr1)) 2415 hdr->addr1))
2420 return 0; 2416 return 0;
2421 } else if (!ieee80211_bssid_match(bssid, 2417 } else if (!ieee80211_bssid_match(bssid,
2422 sdata->vif.addr)) { 2418 sdata->vif.addr)) {
2423 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 2419 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2424 return 0; 2420 return 0;
2425 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2421 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2426 } 2422 }
2427 break; 2423 break;
2428 case NL80211_IFTYPE_WDS: 2424 case NL80211_IFTYPE_WDS:
2429 if (bssid || !ieee80211_is_data(hdr->frame_control)) 2425 if (bssid || !ieee80211_is_data(hdr->frame_control))
2430 return 0; 2426 return 0;
2431 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) 2427 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2432 return 0; 2428 return 0;
2433 break; 2429 break;
2434 case NL80211_IFTYPE_MONITOR: 2430 case NL80211_IFTYPE_MONITOR:
2435 case NL80211_IFTYPE_UNSPECIFIED: 2431 case NL80211_IFTYPE_UNSPECIFIED:
2436 case __NL80211_IFTYPE_AFTER_LAST: 2432 case __NL80211_IFTYPE_AFTER_LAST:
2437 /* should never get here */ 2433 /* should never get here */
2438 WARN_ON(1); 2434 WARN_ON(1);
2439 break; 2435 break;
2440 } 2436 }
2441 2437
2442 return 1; 2438 return 1;
2443 } 2439 }
2444 2440
2445 /* 2441 /*
2446 * This is the actual Rx frames handler. As it belongs to the Rx path it must 2442 * This is the actual Rx frames handler. As it belongs to the Rx path it must
2447 * be called with rcu_read_lock protection. 2443 * be called with rcu_read_lock protection.
2448 */ 2444 */
2449 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 2445 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2450 struct sk_buff *skb, 2446 struct sk_buff *skb,
2451 struct ieee80211_rate *rate) 2447 struct ieee80211_rate *rate)
2452 { 2448 {
2453 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2454 struct ieee80211_local *local = hw_to_local(hw); 2450 struct ieee80211_local *local = hw_to_local(hw);
2455 struct ieee80211_sub_if_data *sdata; 2451 struct ieee80211_sub_if_data *sdata;
2456 struct ieee80211_hdr *hdr; 2452 struct ieee80211_hdr *hdr;
2457 __le16 fc; 2453 __le16 fc;
2458 struct ieee80211_rx_data rx; 2454 struct ieee80211_rx_data rx;
2459 int prepares; 2455 int prepares;
2460 struct ieee80211_sub_if_data *prev = NULL; 2456 struct ieee80211_sub_if_data *prev = NULL;
2461 struct sk_buff *skb_new; 2457 struct sk_buff *skb_new;
2462 struct sta_info *sta, *tmp; 2458 struct sta_info *sta, *tmp;
2463 bool found_sta = false; 2459 bool found_sta = false;
2464 int err = 0; 2460 int err = 0;
2465 2461
2466 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 2462 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2467 memset(&rx, 0, sizeof(rx)); 2463 memset(&rx, 0, sizeof(rx));
2468 rx.skb = skb; 2464 rx.skb = skb;
2469 rx.local = local; 2465 rx.local = local;
2470 2466
2471 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 2467 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2472 local->dot11ReceivedFragmentCount++; 2468 local->dot11ReceivedFragmentCount++;
2473 2469
2474 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2470 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2475 test_bit(SCAN_OFF_CHANNEL, &local->scanning))) 2471 test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
2476 rx.flags |= IEEE80211_RX_IN_SCAN; 2472 rx.flags |= IEEE80211_RX_IN_SCAN;
2477 2473
2478 if (ieee80211_is_mgmt(fc)) 2474 if (ieee80211_is_mgmt(fc))
2479 err = skb_linearize(skb); 2475 err = skb_linearize(skb);
2480 else 2476 else
2481 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 2477 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2482 2478
2483 if (err) { 2479 if (err) {
2484 dev_kfree_skb(skb); 2480 dev_kfree_skb(skb);
2485 return; 2481 return;
2486 } 2482 }
2487 2483
2488 hdr = (struct ieee80211_hdr *)skb->data; 2484 hdr = (struct ieee80211_hdr *)skb->data;
2489 ieee80211_parse_qos(&rx); 2485 ieee80211_parse_qos(&rx);
2490 ieee80211_verify_alignment(&rx); 2486 ieee80211_verify_alignment(&rx);
2491 2487
2492 if (ieee80211_is_data(fc)) { 2488 if (ieee80211_is_data(fc)) {
2493 for_each_sta_info(local, hdr->addr2, sta, tmp) { 2489 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2494 rx.sta = sta; 2490 rx.sta = sta;
2495 found_sta = true; 2491 found_sta = true;
2496 rx.sdata = sta->sdata; 2492 rx.sdata = sta->sdata;
2497 2493
2498 rx.flags |= IEEE80211_RX_RA_MATCH; 2494 rx.flags |= IEEE80211_RX_RA_MATCH;
2499 prepares = prepare_for_handlers(rx.sdata, &rx, hdr); 2495 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2500 if (prepares) { 2496 if (prepares) {
2501 if (status->flag & RX_FLAG_MMIC_ERROR) { 2497 if (status->flag & RX_FLAG_MMIC_ERROR) {
2502 if (rx.flags & IEEE80211_RX_RA_MATCH) 2498 if (rx.flags & IEEE80211_RX_RA_MATCH)
2503 ieee80211_rx_michael_mic_report(hdr, &rx); 2499 ieee80211_rx_michael_mic_report(hdr, &rx);
2504 } else 2500 } else
2505 prev = rx.sdata; 2501 prev = rx.sdata;
2506 } 2502 }
2507 } 2503 }
2508 } 2504 }
2509 if (!found_sta) { 2505 if (!found_sta) {
2510 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2506 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2511 if (!ieee80211_sdata_running(sdata)) 2507 if (!ieee80211_sdata_running(sdata))
2512 continue; 2508 continue;
2513 2509
2514 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2510 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2515 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2511 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2516 continue; 2512 continue;
2517 2513
2518 /* 2514 /*
2519 * frame is destined for this interface, but if it's 2515 * frame is destined for this interface, but if it's
2520 * not also for the previous one we handle that after 2516 * not also for the previous one we handle that after
2521 * the loop to avoid copying the SKB once too much 2517 * the loop to avoid copying the SKB once too much
2522 */ 2518 */
2523 2519
2524 if (!prev) { 2520 if (!prev) {
2525 prev = sdata; 2521 prev = sdata;
2526 continue; 2522 continue;
2527 } 2523 }
2528 2524
2529 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2525 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2530 2526
2531 rx.flags |= IEEE80211_RX_RA_MATCH; 2527 rx.flags |= IEEE80211_RX_RA_MATCH;
2532 prepares = prepare_for_handlers(prev, &rx, hdr); 2528 prepares = prepare_for_handlers(prev, &rx, hdr);
2533 2529
2534 if (!prepares) 2530 if (!prepares)
2535 goto next; 2531 goto next;
2536 2532
2537 if (status->flag & RX_FLAG_MMIC_ERROR) { 2533 if (status->flag & RX_FLAG_MMIC_ERROR) {
2538 rx.sdata = prev; 2534 rx.sdata = prev;
2539 if (rx.flags & IEEE80211_RX_RA_MATCH) 2535 if (rx.flags & IEEE80211_RX_RA_MATCH)
2540 ieee80211_rx_michael_mic_report(hdr, 2536 ieee80211_rx_michael_mic_report(hdr,
2541 &rx); 2537 &rx);
2542 goto next; 2538 goto next;
2543 } 2539 }
2544 2540
2545 /* 2541 /*
2546 * frame was destined for the previous interface 2542 * frame was destined for the previous interface
2547 * so invoke RX handlers for it 2543 * so invoke RX handlers for it
2548 */ 2544 */
2549 2545
2550 skb_new = skb_copy(skb, GFP_ATOMIC); 2546 skb_new = skb_copy(skb, GFP_ATOMIC);
2551 if (!skb_new) { 2547 if (!skb_new) {
2552 if (net_ratelimit()) 2548 if (net_ratelimit())
2553 printk(KERN_DEBUG "%s: failed to copy " 2549 printk(KERN_DEBUG "%s: failed to copy "
2554 "multicast frame for %s\n", 2550 "multicast frame for %s\n",
2555 wiphy_name(local->hw.wiphy), 2551 wiphy_name(local->hw.wiphy),
2556 prev->name); 2552 prev->name);
2557 goto next; 2553 goto next;
2558 } 2554 }
2559 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate); 2555 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2560 next: 2556 next:
2561 prev = sdata; 2557 prev = sdata;
2562 } 2558 }
2563 2559
2564 if (prev) { 2560 if (prev) {
2565 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2561 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2566 2562
2567 rx.flags |= IEEE80211_RX_RA_MATCH; 2563 rx.flags |= IEEE80211_RX_RA_MATCH;
2568 prepares = prepare_for_handlers(prev, &rx, hdr); 2564 prepares = prepare_for_handlers(prev, &rx, hdr);
2569 2565
2570 if (!prepares) 2566 if (!prepares)
2571 prev = NULL; 2567 prev = NULL;
2572 } 2568 }
2573 } 2569 }
2574 if (prev) 2570 if (prev)
2575 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); 2571 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
2576 else 2572 else
2577 dev_kfree_skb(skb); 2573 dev_kfree_skb(skb);
2578 } 2574 }
2579 2575
2580 /* 2576 /*
2581 * This is the receive path handler. It is called by a low level driver when an 2577 * This is the receive path handler. It is called by a low level driver when an
2582 * 802.11 MPDU is received from the hardware. 2578 * 802.11 MPDU is received from the hardware.
2583 */ 2579 */
2584 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 2580 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2585 { 2581 {
2586 struct ieee80211_local *local = hw_to_local(hw); 2582 struct ieee80211_local *local = hw_to_local(hw);
2587 struct ieee80211_rate *rate = NULL; 2583 struct ieee80211_rate *rate = NULL;
2588 struct ieee80211_supported_band *sband; 2584 struct ieee80211_supported_band *sband;
2589 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2585 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2590 2586
2591 WARN_ON_ONCE(softirq_count() == 0); 2587 WARN_ON_ONCE(softirq_count() == 0);
2592 2588
2593 if (WARN_ON(status->band < 0 || 2589 if (WARN_ON(status->band < 0 ||
2594 status->band >= IEEE80211_NUM_BANDS)) 2590 status->band >= IEEE80211_NUM_BANDS))
2595 goto drop; 2591 goto drop;
2596 2592
2597 sband = local->hw.wiphy->bands[status->band]; 2593 sband = local->hw.wiphy->bands[status->band];
2598 if (WARN_ON(!sband)) 2594 if (WARN_ON(!sband))
2599 goto drop; 2595 goto drop;
2600 2596
2601 /* 2597 /*
2602 * If we're suspending, it is possible although not too likely 2598 * If we're suspending, it is possible although not too likely
2603 * that we'd be receiving frames after having already partially 2599 * that we'd be receiving frames after having already partially
2604 * quiesced the stack. We can't process such frames then since 2600 * quiesced the stack. We can't process such frames then since
2605 * that might, for example, cause stations to be added or other 2601 * that might, for example, cause stations to be added or other
2606 * driver callbacks be invoked. 2602 * driver callbacks be invoked.
2607 */ 2603 */
2608 if (unlikely(local->quiescing || local->suspended)) 2604 if (unlikely(local->quiescing || local->suspended))
2609 goto drop; 2605 goto drop;
2610 2606
2611 /* 2607 /*
2612 * The same happens when we're not even started, 2608 * The same happens when we're not even started,
2613 * but that's worth a warning. 2609 * but that's worth a warning.
2614 */ 2610 */
2615 if (WARN_ON(!local->started)) 2611 if (WARN_ON(!local->started))
2616 goto drop; 2612 goto drop;
2617 2613
2618 if (status->flag & RX_FLAG_HT) { 2614 if (status->flag & RX_FLAG_HT) {
2619 /* 2615 /*
2620 * rate_idx is MCS index, which can be [0-76] as documented on: 2616 * rate_idx is MCS index, which can be [0-76] as documented on:
2621 * 2617 *
2622 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 2618 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2623 * 2619 *
2624 * Anything else would be some sort of driver or hardware error. 2620 * Anything else would be some sort of driver or hardware error.
2625 * The driver should catch hardware errors. 2621 * The driver should catch hardware errors.
2626 */ 2622 */
2627 if (WARN((status->rate_idx < 0 || 2623 if (WARN((status->rate_idx < 0 ||
2628 status->rate_idx > 76), 2624 status->rate_idx > 76),
2629 "Rate marked as an HT rate but passed " 2625 "Rate marked as an HT rate but passed "
2630 "status->rate_idx is not " 2626 "status->rate_idx is not "
2631 "an MCS index [0-76]: %d (0x%02x)\n", 2627 "an MCS index [0-76]: %d (0x%02x)\n",
2632 status->rate_idx, 2628 status->rate_idx,
2633 status->rate_idx)) 2629 status->rate_idx))
2634 goto drop; 2630 goto drop;
2635 } else { 2631 } else {
2636 if (WARN_ON(status->rate_idx < 0 || 2632 if (WARN_ON(status->rate_idx < 0 ||
2637 status->rate_idx >= sband->n_bitrates)) 2633 status->rate_idx >= sband->n_bitrates))
2638 goto drop; 2634 goto drop;
2639 rate = &sband->bitrates[status->rate_idx]; 2635 rate = &sband->bitrates[status->rate_idx];
2640 } 2636 }
2641 2637
2642 /* 2638 /*
2643 * key references and virtual interfaces are protected using RCU 2639 * key references and virtual interfaces are protected using RCU
2644 * and this requires that we are in a read-side RCU section during 2640 * and this requires that we are in a read-side RCU section during
2645 * receive processing 2641 * receive processing
2646 */ 2642 */
2647 rcu_read_lock(); 2643 rcu_read_lock();
2648 2644
2649 /* 2645 /*
2650 * Frames with failed FCS/PLCP checksum are not returned, 2646 * Frames with failed FCS/PLCP checksum are not returned,
2651 * all other frames are returned without radiotap header 2647 * all other frames are returned without radiotap header
2652 * if it was previously present. 2648 * if it was previously present.
2653 * Also, frames with less than 16 bytes are dropped. 2649 * Also, frames with less than 16 bytes are dropped.
2654 */ 2650 */
2655 skb = ieee80211_rx_monitor(local, skb, rate); 2651 skb = ieee80211_rx_monitor(local, skb, rate);
2656 if (!skb) { 2652 if (!skb) {
2657 rcu_read_unlock(); 2653 rcu_read_unlock();
2658 return; 2654 return;
2659 } 2655 }
2660 2656
2661 __ieee80211_rx_handle_packet(hw, skb, rate); 2657 __ieee80211_rx_handle_packet(hw, skb, rate);
2662 2658
2663 rcu_read_unlock(); 2659 rcu_read_unlock();
2664 2660
2665 return; 2661 return;
2666 drop: 2662 drop:
2667 kfree_skb(skb); 2663 kfree_skb(skb);
2668 } 2664 }
2669 EXPORT_SYMBOL(ieee80211_rx); 2665 EXPORT_SYMBOL(ieee80211_rx);
2670 2666
2671 /* This is a version of the rx handler that can be called from hard irq 2667 /* This is a version of the rx handler that can be called from hard irq
2672 * context. Post the skb on the queue and schedule the tasklet */ 2668 * context. Post the skb on the queue and schedule the tasklet */
2673 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 2669 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
2674 { 2670 {
2675 struct ieee80211_local *local = hw_to_local(hw); 2671 struct ieee80211_local *local = hw_to_local(hw);
2676 2672
2677 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 2673 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
2678 2674
2679 skb->pkt_type = IEEE80211_RX_MSG; 2675 skb->pkt_type = IEEE80211_RX_MSG;
2680 skb_queue_tail(&local->skb_queue, skb); 2676 skb_queue_tail(&local->skb_queue, skb);
2681 tasklet_schedule(&local->tasklet); 2677 tasklet_schedule(&local->tasklet);
2682 } 2678 }
2683 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 2679 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
2684 2680
net/phonet/pep.c
1 /* 1 /*
2 * File: pep.c 2 * File: pep.c
3 * 3 *
4 * Phonet pipe protocol end point socket 4 * Phonet pipe protocol end point socket
5 * 5 *
6 * Copyright (C) 2008 Nokia Corporation. 6 * Copyright (C) 2008 Nokia Corporation.
7 * 7 *
8 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com> 8 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation. 12 * version 2 as published by the Free Software Foundation.
13 * 13 *
14 * This program is distributed in the hope that it will be useful, but 14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details. 17 * General Public License for more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA 22 * 02110-1301 USA
23 */ 23 */
24 24
25 #include <linux/kernel.h> 25 #include <linux/kernel.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 #include <linux/socket.h> 27 #include <linux/socket.h>
28 #include <net/sock.h> 28 #include <net/sock.h>
29 #include <net/tcp_states.h> 29 #include <net/tcp_states.h>
30 #include <asm/ioctls.h> 30 #include <asm/ioctls.h>
31 31
32 #include <linux/phonet.h> 32 #include <linux/phonet.h>
33 #include <net/phonet/phonet.h> 33 #include <net/phonet/phonet.h>
34 #include <net/phonet/pep.h> 34 #include <net/phonet/pep.h>
35 #include <net/phonet/gprs.h> 35 #include <net/phonet/gprs.h>
36 36
37 /* sk_state values: 37 /* sk_state values:
38 * TCP_CLOSE sock not in use yet 38 * TCP_CLOSE sock not in use yet
39 * TCP_CLOSE_WAIT disconnected pipe 39 * TCP_CLOSE_WAIT disconnected pipe
40 * TCP_LISTEN listening pipe endpoint 40 * TCP_LISTEN listening pipe endpoint
41 * TCP_SYN_RECV connected pipe in disabled state 41 * TCP_SYN_RECV connected pipe in disabled state
42 * TCP_ESTABLISHED connected pipe in enabled state 42 * TCP_ESTABLISHED connected pipe in enabled state
43 * 43 *
44 * pep_sock locking: 44 * pep_sock locking:
45 * - sk_state, ackq, hlist: sock lock needed 45 * - sk_state, ackq, hlist: sock lock needed
46 * - listener: read only 46 * - listener: read only
47 * - pipe_handle: read only 47 * - pipe_handle: read only
48 */ 48 */
49 49
50 #define CREDITS_MAX 10 50 #define CREDITS_MAX 10
51 #define CREDITS_THR 7 51 #define CREDITS_THR 7
52 52
53 static const struct sockaddr_pn pipe_srv = { 53 static const struct sockaddr_pn pipe_srv = {
54 .spn_family = AF_PHONET, 54 .spn_family = AF_PHONET,
55 .spn_resource = 0xD9, /* pipe service */ 55 .spn_resource = 0xD9, /* pipe service */
56 }; 56 };
57 57
58 #define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */ 58 #define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
59 59
60 /* Get the next TLV sub-block. */ 60 /* Get the next TLV sub-block. */
61 static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen, 61 static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
62 void *buf) 62 void *buf)
63 { 63 {
64 void *data = NULL; 64 void *data = NULL;
65 struct { 65 struct {
66 u8 sb_type; 66 u8 sb_type;
67 u8 sb_len; 67 u8 sb_len;
68 } *ph, h; 68 } *ph, h;
69 int buflen = *plen; 69 int buflen = *plen;
70 70
71 ph = skb_header_pointer(skb, 0, 2, &h); 71 ph = skb_header_pointer(skb, 0, 2, &h);
72 if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len)) 72 if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
73 return NULL; 73 return NULL;
74 ph->sb_len -= 2; 74 ph->sb_len -= 2;
75 *ptype = ph->sb_type; 75 *ptype = ph->sb_type;
76 *plen = ph->sb_len; 76 *plen = ph->sb_len;
77 77
78 if (buflen > ph->sb_len) 78 if (buflen > ph->sb_len)
79 buflen = ph->sb_len; 79 buflen = ph->sb_len;
80 data = skb_header_pointer(skb, 2, buflen, buf); 80 data = skb_header_pointer(skb, 2, buflen, buf);
81 __skb_pull(skb, 2 + ph->sb_len); 81 __skb_pull(skb, 2 + ph->sb_len);
82 return data; 82 return data;
83 } 83 }
84 84
85 static int pep_reply(struct sock *sk, struct sk_buff *oskb, 85 static int pep_reply(struct sock *sk, struct sk_buff *oskb,
86 u8 code, const void *data, int len, gfp_t priority) 86 u8 code, const void *data, int len, gfp_t priority)
87 { 87 {
88 const struct pnpipehdr *oph = pnp_hdr(oskb); 88 const struct pnpipehdr *oph = pnp_hdr(oskb);
89 struct pnpipehdr *ph; 89 struct pnpipehdr *ph;
90 struct sk_buff *skb; 90 struct sk_buff *skb;
91 91
92 skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority); 92 skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
93 if (!skb) 93 if (!skb)
94 return -ENOMEM; 94 return -ENOMEM;
95 skb_set_owner_w(skb, sk); 95 skb_set_owner_w(skb, sk);
96 96
97 skb_reserve(skb, MAX_PNPIPE_HEADER); 97 skb_reserve(skb, MAX_PNPIPE_HEADER);
98 __skb_put(skb, len); 98 __skb_put(skb, len);
99 skb_copy_to_linear_data(skb, data, len); 99 skb_copy_to_linear_data(skb, data, len);
100 __skb_push(skb, sizeof(*ph)); 100 __skb_push(skb, sizeof(*ph));
101 skb_reset_transport_header(skb); 101 skb_reset_transport_header(skb);
102 ph = pnp_hdr(skb); 102 ph = pnp_hdr(skb);
103 ph->utid = oph->utid; 103 ph->utid = oph->utid;
104 ph->message_id = oph->message_id + 1; /* REQ -> RESP */ 104 ph->message_id = oph->message_id + 1; /* REQ -> RESP */
105 ph->pipe_handle = oph->pipe_handle; 105 ph->pipe_handle = oph->pipe_handle;
106 ph->error_code = code; 106 ph->error_code = code;
107 107
108 return pn_skb_send(sk, skb, &pipe_srv); 108 return pn_skb_send(sk, skb, &pipe_srv);
109 } 109 }
110 110
111 #define PAD 0x00 111 #define PAD 0x00
112 static int pep_accept_conn(struct sock *sk, struct sk_buff *skb) 112 static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
113 { 113 {
114 static const u8 data[20] = { 114 static const u8 data[20] = {
115 PAD, PAD, PAD, 2 /* sub-blocks */, 115 PAD, PAD, PAD, 2 /* sub-blocks */,
116 PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD, 116 PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
117 PN_MULTI_CREDIT_FLOW_CONTROL, 117 PN_MULTI_CREDIT_FLOW_CONTROL,
118 PN_ONE_CREDIT_FLOW_CONTROL, 118 PN_ONE_CREDIT_FLOW_CONTROL,
119 PN_LEGACY_FLOW_CONTROL, 119 PN_LEGACY_FLOW_CONTROL,
120 PAD, 120 PAD,
121 PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD, 121 PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
122 PN_MULTI_CREDIT_FLOW_CONTROL, 122 PN_MULTI_CREDIT_FLOW_CONTROL,
123 PN_ONE_CREDIT_FLOW_CONTROL, 123 PN_ONE_CREDIT_FLOW_CONTROL,
124 PN_LEGACY_FLOW_CONTROL, 124 PN_LEGACY_FLOW_CONTROL,
125 PAD, 125 PAD,
126 }; 126 };
127 127
128 might_sleep(); 128 might_sleep();
129 return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data), 129 return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
130 GFP_KERNEL); 130 GFP_KERNEL);
131 } 131 }
132 132
133 static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code) 133 static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code)
134 { 134 {
135 static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ }; 135 static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
136 WARN_ON(code == PN_PIPE_NO_ERROR); 136 WARN_ON(code == PN_PIPE_NO_ERROR);
137 return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC); 137 return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC);
138 } 138 }
139 139
140 /* Control requests are not sent by the pipe service and have a specific 140 /* Control requests are not sent by the pipe service and have a specific
141 * message format. */ 141 * message format. */
142 static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, 142 static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
143 gfp_t priority) 143 gfp_t priority)
144 { 144 {
145 const struct pnpipehdr *oph = pnp_hdr(oskb); 145 const struct pnpipehdr *oph = pnp_hdr(oskb);
146 struct sk_buff *skb; 146 struct sk_buff *skb;
147 struct pnpipehdr *ph; 147 struct pnpipehdr *ph;
148 struct sockaddr_pn dst; 148 struct sockaddr_pn dst;
149 149
150 skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority); 150 skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
151 if (!skb) 151 if (!skb)
152 return -ENOMEM; 152 return -ENOMEM;
153 skb_set_owner_w(skb, sk); 153 skb_set_owner_w(skb, sk);
154 154
155 skb_reserve(skb, MAX_PHONET_HEADER); 155 skb_reserve(skb, MAX_PHONET_HEADER);
156 ph = (struct pnpipehdr *)skb_put(skb, sizeof(*ph) + 4); 156 ph = (struct pnpipehdr *)skb_put(skb, sizeof(*ph) + 4);
157 157
158 ph->utid = oph->utid; 158 ph->utid = oph->utid;
159 ph->message_id = PNS_PEP_CTRL_RESP; 159 ph->message_id = PNS_PEP_CTRL_RESP;
160 ph->pipe_handle = oph->pipe_handle; 160 ph->pipe_handle = oph->pipe_handle;
161 ph->data[0] = oph->data[1]; /* CTRL id */ 161 ph->data[0] = oph->data[1]; /* CTRL id */
162 ph->data[1] = oph->data[0]; /* PEP type */ 162 ph->data[1] = oph->data[0]; /* PEP type */
163 ph->data[2] = code; /* error code, at an unusual offset */ 163 ph->data[2] = code; /* error code, at an unusual offset */
164 ph->data[3] = PAD; 164 ph->data[3] = PAD;
165 ph->data[4] = PAD; 165 ph->data[4] = PAD;
166 166
167 pn_skb_get_src_sockaddr(oskb, &dst); 167 pn_skb_get_src_sockaddr(oskb, &dst);
168 return pn_skb_send(sk, skb, &dst); 168 return pn_skb_send(sk, skb, &dst);
169 } 169 }
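
The error reply above echoes the request: it keeps the requester's utid and pipe handle, swaps the CTRL id and PEP type back into data[0]/data[1], and places the error code at data[2]; the destination is recovered from the request's source address. A short sketch of that layout, as read from the code rather than from the protocol documents:

    /* PNS_PEP_CTRL_RESP as built by pep_ctrlreq_error():
     *   ph->utid        = oph->utid          (request transaction id)
     *   ph->pipe_handle = oph->pipe_handle
     *   ph->data[0]     = oph->data[1]       -- CTRL id
     *   ph->data[1]     = oph->data[0]       -- PEP type
     *   ph->data[2]     = code, data[3..4] = padding
     * The reply is addressed via pn_skb_get_src_sockaddr() + pn_skb_send().
     */
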
170 170
171 static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority) 171 static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
172 { 172 {
173 struct pep_sock *pn = pep_sk(sk); 173 struct pep_sock *pn = pep_sk(sk);
174 struct pnpipehdr *ph; 174 struct pnpipehdr *ph;
175 struct sk_buff *skb; 175 struct sk_buff *skb;
176 176
177 skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority); 177 skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
178 if (!skb) 178 if (!skb)
179 return -ENOMEM; 179 return -ENOMEM;
180 skb_set_owner_w(skb, sk); 180 skb_set_owner_w(skb, sk);
181 181
182 skb_reserve(skb, MAX_PNPIPE_HEADER + 4); 182 skb_reserve(skb, MAX_PNPIPE_HEADER + 4);
183 __skb_push(skb, sizeof(*ph) + 4); 183 __skb_push(skb, sizeof(*ph) + 4);
184 skb_reset_transport_header(skb); 184 skb_reset_transport_header(skb);
185 ph = pnp_hdr(skb); 185 ph = pnp_hdr(skb);
186 ph->utid = 0; 186 ph->utid = 0;
187 ph->message_id = PNS_PEP_STATUS_IND; 187 ph->message_id = PNS_PEP_STATUS_IND;
188 ph->pipe_handle = pn->pipe_handle; 188 ph->pipe_handle = pn->pipe_handle;
189 ph->pep_type = PN_PEP_TYPE_COMMON; 189 ph->pep_type = PN_PEP_TYPE_COMMON;
190 ph->data[1] = type; 190 ph->data[1] = type;
191 ph->data[2] = PAD; 191 ph->data[2] = PAD;
192 ph->data[3] = PAD; 192 ph->data[3] = PAD;
193 ph->data[4] = status; 193 ph->data[4] = status;
194 194
195 return pn_skb_send(sk, skb, &pipe_srv); 195 return pn_skb_send(sk, skb, &pipe_srv);
196 } 196 }
197 197
198 /* Send our RX flow control information to the sender. 198 /* Send our RX flow control information to the sender.
199 * Socket must be locked. */ 199 * Socket must be locked. */
200 static void pipe_grant_credits(struct sock *sk) 200 static void pipe_grant_credits(struct sock *sk)
201 { 201 {
202 struct pep_sock *pn = pep_sk(sk); 202 struct pep_sock *pn = pep_sk(sk);
203 203
204 BUG_ON(sk->sk_state != TCP_ESTABLISHED); 204 BUG_ON(sk->sk_state != TCP_ESTABLISHED);
205 205
206 switch (pn->rx_fc) { 206 switch (pn->rx_fc) {
207 case PN_LEGACY_FLOW_CONTROL: /* TODO */ 207 case PN_LEGACY_FLOW_CONTROL: /* TODO */
208 break; 208 break;
209 case PN_ONE_CREDIT_FLOW_CONTROL: 209 case PN_ONE_CREDIT_FLOW_CONTROL:
210 pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL, 210 pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
211 PEP_IND_READY, GFP_ATOMIC); 211 PEP_IND_READY, GFP_ATOMIC);
212 pn->rx_credits = 1; 212 pn->rx_credits = 1;
213 break; 213 break;
214 case PN_MULTI_CREDIT_FLOW_CONTROL: 214 case PN_MULTI_CREDIT_FLOW_CONTROL:
215 if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX) 215 if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
216 break; 216 break;
217 if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS, 217 if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
218 CREDITS_MAX - pn->rx_credits, 218 CREDITS_MAX - pn->rx_credits,
219 GFP_ATOMIC) == 0) 219 GFP_ATOMIC) == 0)
220 pn->rx_credits = CREDITS_MAX; 220 pn->rx_credits = CREDITS_MAX;
221 break; 221 break;
222 } 222 }
223 } 223 }
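
pipe_grant_credits() is the RX half of flow control: legacy mode sends nothing yet (TODO), one-credit mode re-arms a single credit with a PEP_IND_READY status, and multi-credit mode only tops the peer up once at least CREDITS_THR credits have been consumed, so a status message is not sent per packet. A worked example of the multi-credit branch with purely illustrative values (CREDITS_MAX and CREDITS_THR are defined elsewhere in this file):

    /* Hypothetical CREDITS_MAX = 8, CREDITS_THR = 3:
     *   rx_credits = 6  ->  6 + 3 > 8, skip the grant for now
     *   rx_credits = 4  ->  grant 8 - 4 = 4 credits; if the STATUS_IND
     *                       is sent successfully, rx_credits becomes 8
     */
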
224 224
225 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) 225 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
226 { 226 {
227 struct pep_sock *pn = pep_sk(sk); 227 struct pep_sock *pn = pep_sk(sk);
228 struct pnpipehdr *hdr = pnp_hdr(skb); 228 struct pnpipehdr *hdr;
229 int wake = 0; 229 int wake = 0;
230 230
231 if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) 231 if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
232 return -EINVAL; 232 return -EINVAL;
233 233
234 hdr = pnp_hdr(skb);
234 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 235 if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
235 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", 236 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
236 (unsigned)hdr->data[0]); 237 (unsigned)hdr->data[0]);
237 return -EOPNOTSUPP; 238 return -EOPNOTSUPP;
238 } 239 }
239 240
240 switch (hdr->data[1]) { 241 switch (hdr->data[1]) {
241 case PN_PEP_IND_FLOW_CONTROL: 242 case PN_PEP_IND_FLOW_CONTROL:
242 switch (pn->tx_fc) { 243 switch (pn->tx_fc) {
243 case PN_LEGACY_FLOW_CONTROL: 244 case PN_LEGACY_FLOW_CONTROL:
244 switch (hdr->data[4]) { 245 switch (hdr->data[4]) {
245 case PEP_IND_BUSY: 246 case PEP_IND_BUSY:
246 atomic_set(&pn->tx_credits, 0); 247 atomic_set(&pn->tx_credits, 0);
247 break; 248 break;
248 case PEP_IND_READY: 249 case PEP_IND_READY:
249 atomic_set(&pn->tx_credits, wake = 1); 250 atomic_set(&pn->tx_credits, wake = 1);
250 break; 251 break;
251 } 252 }
252 break; 253 break;
253 case PN_ONE_CREDIT_FLOW_CONTROL: 254 case PN_ONE_CREDIT_FLOW_CONTROL:
254 if (hdr->data[4] == PEP_IND_READY) 255 if (hdr->data[4] == PEP_IND_READY)
255 atomic_set(&pn->tx_credits, wake = 1); 256 atomic_set(&pn->tx_credits, wake = 1);
256 break; 257 break;
257 } 258 }
258 break; 259 break;
259 260
260 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: 261 case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
261 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) 262 if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
262 break; 263 break;
263 atomic_add(wake = hdr->data[4], &pn->tx_credits); 264 atomic_add(wake = hdr->data[4], &pn->tx_credits);
264 break; 265 break;
265 266
266 default: 267 default:
267 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n", 268 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
268 (unsigned)hdr->data[1]); 269 (unsigned)hdr->data[1]);
269 return -EOPNOTSUPP; 270 return -EOPNOTSUPP;
270 } 271 }
271 if (wake) 272 if (wake)
272 sk->sk_write_space(sk); 273 sk->sk_write_space(sk);
273 return 0; 274 return 0;
274 } 275 }
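
The hunk above is the "Phonet: Correct header retrieval after pskb_may_pull" fix from this merge: pskb_may_pull() may need to pull data into the linear area and can reallocate skb->head, so a pnp_hdr() pointer computed before the call may be left dangling. The fix defers the pnp_hdr() call until the pull has succeeded. A minimal sketch of the general pattern (illustrative only, not part of this commit; the function name is hypothetical):

    static int example_parse(struct sk_buff *skb)
    {
            struct pnpipehdr *hdr;

            if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
                    return -EINVAL;
            hdr = pnp_hdr(skb);     /* only taken after skb->head can no longer move */
            return hdr->data[1];
    }
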
275 276
276 static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) 277 static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
277 { 278 {
278 struct pep_sock *pn = pep_sk(sk); 279 struct pep_sock *pn = pep_sk(sk);
279 struct pnpipehdr *hdr = pnp_hdr(skb); 280 struct pnpipehdr *hdr = pnp_hdr(skb);
280 u8 n_sb = hdr->data[0]; 281 u8 n_sb = hdr->data[0];
281 282
282 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; 283 pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
283 __skb_pull(skb, sizeof(*hdr)); 284 __skb_pull(skb, sizeof(*hdr));
284 while (n_sb > 0) { 285 while (n_sb > 0) {
285 u8 type, buf[2], len = sizeof(buf); 286 u8 type, buf[2], len = sizeof(buf);
286 u8 *data = pep_get_sb(skb, &type, &len, buf); 287 u8 *data = pep_get_sb(skb, &type, &len, buf);
287 288
288 if (data == NULL) 289 if (data == NULL)
289 return -EINVAL; 290 return -EINVAL;
290 switch (type) { 291 switch (type) {
291 case PN_PIPE_SB_NEGOTIATED_FC: 292 case PN_PIPE_SB_NEGOTIATED_FC:
292 if (len < 2 || (data[0] | data[1]) > 3) 293 if (len < 2 || (data[0] | data[1]) > 3)
293 break; 294 break;
294 pn->tx_fc = data[0] & 3; 295 pn->tx_fc = data[0] & 3;
295 pn->rx_fc = data[1] & 3; 296 pn->rx_fc = data[1] & 3;
296 break; 297 break;
297 } 298 }
298 n_sb--; 299 n_sb--;
299 } 300 }
300 return 0; 301 return 0;
301 } 302 }
302 303
303 /* Queue an skb to a connected sock. 304 /* Queue an skb to a connected sock.
304 * Socket lock must be held. */ 305 * Socket lock must be held. */
305 static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) 306 static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
306 { 307 {
307 struct pep_sock *pn = pep_sk(sk); 308 struct pep_sock *pn = pep_sk(sk);
308 struct pnpipehdr *hdr = pnp_hdr(skb); 309 struct pnpipehdr *hdr = pnp_hdr(skb);
309 struct sk_buff_head *queue; 310 struct sk_buff_head *queue;
310 int err = 0; 311 int err = 0;
311 312
312 BUG_ON(sk->sk_state == TCP_CLOSE_WAIT); 313 BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
313 314
314 switch (hdr->message_id) { 315 switch (hdr->message_id) {
315 case PNS_PEP_CONNECT_REQ: 316 case PNS_PEP_CONNECT_REQ:
316 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE); 317 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
317 break; 318 break;
318 319
319 case PNS_PEP_DISCONNECT_REQ: 320 case PNS_PEP_DISCONNECT_REQ:
320 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); 321 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
321 sk->sk_state = TCP_CLOSE_WAIT; 322 sk->sk_state = TCP_CLOSE_WAIT;
322 if (!sock_flag(sk, SOCK_DEAD)) 323 if (!sock_flag(sk, SOCK_DEAD))
323 sk->sk_state_change(sk); 324 sk->sk_state_change(sk);
324 break; 325 break;
325 326
326 case PNS_PEP_ENABLE_REQ: 327 case PNS_PEP_ENABLE_REQ:
327 /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */ 328 /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
328 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); 329 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
329 break; 330 break;
330 331
331 case PNS_PEP_RESET_REQ: 332 case PNS_PEP_RESET_REQ:
332 switch (hdr->state_after_reset) { 333 switch (hdr->state_after_reset) {
333 case PN_PIPE_DISABLE: 334 case PN_PIPE_DISABLE:
334 pn->init_enable = 0; 335 pn->init_enable = 0;
335 break; 336 break;
336 case PN_PIPE_ENABLE: 337 case PN_PIPE_ENABLE:
337 pn->init_enable = 1; 338 pn->init_enable = 1;
338 break; 339 break;
339 default: /* not allowed to send an error here!? */ 340 default: /* not allowed to send an error here!? */
340 err = -EINVAL; 341 err = -EINVAL;
341 goto out; 342 goto out;
342 } 343 }
343 /* fall through */ 344 /* fall through */
344 case PNS_PEP_DISABLE_REQ: 345 case PNS_PEP_DISABLE_REQ:
345 atomic_set(&pn->tx_credits, 0); 346 atomic_set(&pn->tx_credits, 0);
346 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); 347 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
347 break; 348 break;
348 349
349 case PNS_PEP_CTRL_REQ: 350 case PNS_PEP_CTRL_REQ:
350 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { 351 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
351 atomic_inc(&sk->sk_drops); 352 atomic_inc(&sk->sk_drops);
352 break; 353 break;
353 } 354 }
354 __skb_pull(skb, 4); 355 __skb_pull(skb, 4);
355 queue = &pn->ctrlreq_queue; 356 queue = &pn->ctrlreq_queue;
356 goto queue; 357 goto queue;
357 358
358 case PNS_PIPE_ALIGNED_DATA: 359 case PNS_PIPE_ALIGNED_DATA:
359 __skb_pull(skb, 1); 360 __skb_pull(skb, 1);
360 /* fall through */ 361 /* fall through */
361 case PNS_PIPE_DATA: 362 case PNS_PIPE_DATA:
362 __skb_pull(skb, 3); /* Pipe data header */ 363 __skb_pull(skb, 3); /* Pipe data header */
363 if (!pn_flow_safe(pn->rx_fc)) { 364 if (!pn_flow_safe(pn->rx_fc)) {
364 err = sock_queue_rcv_skb(sk, skb); 365 err = sock_queue_rcv_skb(sk, skb);
365 if (!err) 366 if (!err)
366 return 0; 367 return 0;
367 break; 368 break;
368 } 369 }
369 370
370 if (pn->rx_credits == 0) { 371 if (pn->rx_credits == 0) {
371 atomic_inc(&sk->sk_drops); 372 atomic_inc(&sk->sk_drops);
372 err = -ENOBUFS; 373 err = -ENOBUFS;
373 break; 374 break;
374 } 375 }
375 pn->rx_credits--; 376 pn->rx_credits--;
376 queue = &sk->sk_receive_queue; 377 queue = &sk->sk_receive_queue;
377 goto queue; 378 goto queue;
378 379
379 case PNS_PEP_STATUS_IND: 380 case PNS_PEP_STATUS_IND:
380 pipe_rcv_status(sk, skb); 381 pipe_rcv_status(sk, skb);
381 break; 382 break;
382 383
383 case PNS_PIPE_REDIRECTED_IND: 384 case PNS_PIPE_REDIRECTED_IND:
384 err = pipe_rcv_created(sk, skb); 385 err = pipe_rcv_created(sk, skb);
385 break; 386 break;
386 387
387 case PNS_PIPE_CREATED_IND: 388 case PNS_PIPE_CREATED_IND:
388 err = pipe_rcv_created(sk, skb); 389 err = pipe_rcv_created(sk, skb);
389 if (err) 390 if (err)
390 break; 391 break;
391 /* fall through */ 392 /* fall through */
392 case PNS_PIPE_RESET_IND: 393 case PNS_PIPE_RESET_IND:
393 if (!pn->init_enable) 394 if (!pn->init_enable)
394 break; 395 break;
395 /* fall through */ 396 /* fall through */
396 case PNS_PIPE_ENABLED_IND: 397 case PNS_PIPE_ENABLED_IND:
397 if (!pn_flow_safe(pn->tx_fc)) { 398 if (!pn_flow_safe(pn->tx_fc)) {
398 atomic_set(&pn->tx_credits, 1); 399 atomic_set(&pn->tx_credits, 1);
399 sk->sk_write_space(sk); 400 sk->sk_write_space(sk);
400 } 401 }
401 if (sk->sk_state == TCP_ESTABLISHED) 402 if (sk->sk_state == TCP_ESTABLISHED)
402 break; /* Nothing to do */ 403 break; /* Nothing to do */
403 sk->sk_state = TCP_ESTABLISHED; 404 sk->sk_state = TCP_ESTABLISHED;
404 pipe_grant_credits(sk); 405 pipe_grant_credits(sk);
405 break; 406 break;
406 407
407 case PNS_PIPE_DISABLED_IND: 408 case PNS_PIPE_DISABLED_IND:
408 sk->sk_state = TCP_SYN_RECV; 409 sk->sk_state = TCP_SYN_RECV;
409 pn->rx_credits = 0; 410 pn->rx_credits = 0;
410 break; 411 break;
411 412
412 default: 413 default:
413 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n", 414 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n",
414 hdr->message_id); 415 hdr->message_id);
415 err = -EINVAL; 416 err = -EINVAL;
416 } 417 }
417 out: 418 out:
418 kfree_skb(skb); 419 kfree_skb(skb);
419 return err; 420 return err;
420 421
421 queue: 422 queue:
422 skb->dev = NULL; 423 skb->dev = NULL;
423 skb_set_owner_r(skb, sk); 424 skb_set_owner_r(skb, sk);
424 err = skb->len; 425 err = skb->len;
425 skb_queue_tail(queue, skb); 426 skb_queue_tail(queue, skb);
426 if (!sock_flag(sk, SOCK_DEAD)) 427 if (!sock_flag(sk, SOCK_DEAD))
427 sk->sk_data_ready(sk, err); 428 sk->sk_data_ready(sk, err);
428 return 0; 429 return 0;
429 } 430 }
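
For data packets, the receive path above is where RX credits are consumed: with a credit-based rx_fc, each PNS_PIPE_DATA costs one rx credit and is dropped with -ENOBUFS once the budget is exhausted, and the budget is only refilled when the application drains the queue. A sketch of the pairing, as read from this file:

    /*   pipe_do_rcv():              pn->rx_credits--  per queued data skb
     *   pep_read()/pep_recvmsg():   call pipe_grant_credits() while the
     *                               socket is ESTABLISHED, topping the
     *                               credits back up and notifying the peer
     */
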
430 431
431 /* Destroy connected sock. */ 432 /* Destroy connected sock. */
432 static void pipe_destruct(struct sock *sk) 433 static void pipe_destruct(struct sock *sk)
433 { 434 {
434 struct pep_sock *pn = pep_sk(sk); 435 struct pep_sock *pn = pep_sk(sk);
435 436
436 skb_queue_purge(&sk->sk_receive_queue); 437 skb_queue_purge(&sk->sk_receive_queue);
437 skb_queue_purge(&pn->ctrlreq_queue); 438 skb_queue_purge(&pn->ctrlreq_queue);
438 } 439 }
439 440
440 static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb) 441 static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
441 { 442 {
442 struct sock *newsk; 443 struct sock *newsk;
443 struct pep_sock *newpn, *pn = pep_sk(sk); 444 struct pep_sock *newpn, *pn = pep_sk(sk);
444 struct pnpipehdr *hdr; 445 struct pnpipehdr *hdr;
445 struct sockaddr_pn dst; 446 struct sockaddr_pn dst;
446 u16 peer_type; 447 u16 peer_type;
447 u8 pipe_handle, enabled, n_sb; 448 u8 pipe_handle, enabled, n_sb;
448 u8 aligned = 0; 449 u8 aligned = 0;
449 450
450 if (!pskb_pull(skb, sizeof(*hdr) + 4)) 451 if (!pskb_pull(skb, sizeof(*hdr) + 4))
451 return -EINVAL; 452 return -EINVAL;
452 453
453 hdr = pnp_hdr(skb); 454 hdr = pnp_hdr(skb);
454 pipe_handle = hdr->pipe_handle; 455 pipe_handle = hdr->pipe_handle;
455 switch (hdr->state_after_connect) { 456 switch (hdr->state_after_connect) {
456 case PN_PIPE_DISABLE: 457 case PN_PIPE_DISABLE:
457 enabled = 0; 458 enabled = 0;
458 break; 459 break;
459 case PN_PIPE_ENABLE: 460 case PN_PIPE_ENABLE:
460 enabled = 1; 461 enabled = 1;
461 break; 462 break;
462 default: 463 default:
463 pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM); 464 pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
464 return -EINVAL; 465 return -EINVAL;
465 } 466 }
466 peer_type = hdr->other_pep_type << 8; 467 peer_type = hdr->other_pep_type << 8;
467 468
468 if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) { 469 if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
469 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE); 470 pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
470 return -ENOBUFS; 471 return -ENOBUFS;
471 } 472 }
472 473
473 /* Parse sub-blocks (options) */ 474 /* Parse sub-blocks (options) */
474 n_sb = hdr->data[4]; 475 n_sb = hdr->data[4];
475 while (n_sb > 0) { 476 while (n_sb > 0) {
476 u8 type, buf[1], len = sizeof(buf); 477 u8 type, buf[1], len = sizeof(buf);
477 const u8 *data = pep_get_sb(skb, &type, &len, buf); 478 const u8 *data = pep_get_sb(skb, &type, &len, buf);
478 479
479 if (data == NULL) 480 if (data == NULL)
480 return -EINVAL; 481 return -EINVAL;
481 switch (type) { 482 switch (type) {
482 case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE: 483 case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
483 if (len < 1) 484 if (len < 1)
484 return -EINVAL; 485 return -EINVAL;
485 peer_type = (peer_type & 0xff00) | data[0]; 486 peer_type = (peer_type & 0xff00) | data[0];
486 break; 487 break;
487 case PN_PIPE_SB_ALIGNED_DATA: 488 case PN_PIPE_SB_ALIGNED_DATA:
488 aligned = data[0] != 0; 489 aligned = data[0] != 0;
489 break; 490 break;
490 } 491 }
491 n_sb--; 492 n_sb--;
492 } 493 }
493 494
494 skb = skb_clone(skb, GFP_ATOMIC); 495 skb = skb_clone(skb, GFP_ATOMIC);
495 if (!skb) 496 if (!skb)
496 return -ENOMEM; 497 return -ENOMEM;
497 498
498 /* Create a new to-be-accepted sock */ 499 /* Create a new to-be-accepted sock */
499 newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot); 500 newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
500 if (!newsk) { 501 if (!newsk) {
501 kfree_skb(skb); 502 kfree_skb(skb);
502 return -ENOMEM; 503 return -ENOMEM;
503 } 504 }
504 sock_init_data(NULL, newsk); 505 sock_init_data(NULL, newsk);
505 newsk->sk_state = TCP_SYN_RECV; 506 newsk->sk_state = TCP_SYN_RECV;
506 newsk->sk_backlog_rcv = pipe_do_rcv; 507 newsk->sk_backlog_rcv = pipe_do_rcv;
507 newsk->sk_protocol = sk->sk_protocol; 508 newsk->sk_protocol = sk->sk_protocol;
508 newsk->sk_destruct = pipe_destruct; 509 newsk->sk_destruct = pipe_destruct;
509 510
510 newpn = pep_sk(newsk); 511 newpn = pep_sk(newsk);
511 pn_skb_get_dst_sockaddr(skb, &dst); 512 pn_skb_get_dst_sockaddr(skb, &dst);
512 newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst); 513 newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
513 newpn->pn_sk.resource = pn->pn_sk.resource; 514 newpn->pn_sk.resource = pn->pn_sk.resource;
514 skb_queue_head_init(&newpn->ctrlreq_queue); 515 skb_queue_head_init(&newpn->ctrlreq_queue);
515 newpn->pipe_handle = pipe_handle; 516 newpn->pipe_handle = pipe_handle;
516 atomic_set(&newpn->tx_credits, 0); 517 atomic_set(&newpn->tx_credits, 0);
517 newpn->peer_type = peer_type; 518 newpn->peer_type = peer_type;
518 newpn->rx_credits = 0; 519 newpn->rx_credits = 0;
519 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; 520 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
520 newpn->init_enable = enabled; 521 newpn->init_enable = enabled;
521 newpn->aligned = aligned; 522 newpn->aligned = aligned;
522 523
523 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); 524 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
524 skb_queue_head(&newsk->sk_receive_queue, skb); 525 skb_queue_head(&newsk->sk_receive_queue, skb);
525 if (!sock_flag(sk, SOCK_DEAD)) 526 if (!sock_flag(sk, SOCK_DEAD))
526 sk->sk_data_ready(sk, 0); 527 sk->sk_data_ready(sk, 0);
527 528
528 sk_acceptq_added(sk); 529 sk_acceptq_added(sk);
529 sk_add_node(newsk, &pn->ackq); 530 sk_add_node(newsk, &pn->ackq);
530 return 0; 531 return 0;
531 } 532 }
532 533
533 /* Listening sock must be locked */ 534 /* Listening sock must be locked */
534 static struct sock *pep_find_pipe(const struct hlist_head *hlist, 535 static struct sock *pep_find_pipe(const struct hlist_head *hlist,
535 const struct sockaddr_pn *dst, 536 const struct sockaddr_pn *dst,
536 u8 pipe_handle) 537 u8 pipe_handle)
537 { 538 {
538 struct hlist_node *node; 539 struct hlist_node *node;
539 struct sock *sknode; 540 struct sock *sknode;
540 u16 dobj = pn_sockaddr_get_object(dst); 541 u16 dobj = pn_sockaddr_get_object(dst);
541 542
542 sk_for_each(sknode, node, hlist) { 543 sk_for_each(sknode, node, hlist) {
543 struct pep_sock *pnnode = pep_sk(sknode); 544 struct pep_sock *pnnode = pep_sk(sknode);
544 545
545 /* Ports match, but addresses might not: */ 546 /* Ports match, but addresses might not: */
546 if (pnnode->pn_sk.sobject != dobj) 547 if (pnnode->pn_sk.sobject != dobj)
547 continue; 548 continue;
548 if (pnnode->pipe_handle != pipe_handle) 549 if (pnnode->pipe_handle != pipe_handle)
549 continue; 550 continue;
550 if (sknode->sk_state == TCP_CLOSE_WAIT) 551 if (sknode->sk_state == TCP_CLOSE_WAIT)
551 continue; 552 continue;
552 553
553 sock_hold(sknode); 554 sock_hold(sknode);
554 return sknode; 555 return sknode;
555 } 556 }
556 return NULL; 557 return NULL;
557 } 558 }
558 559
559 /* 560 /*
560 * Deliver an skb to a listening sock. 561 * Deliver an skb to a listening sock.
561 * Socket lock must be held. 562 * Socket lock must be held.
562 * We then queue the skb to the right connected sock (if any). 563 * We then queue the skb to the right connected sock (if any).
563 */ 564 */
564 static int pep_do_rcv(struct sock *sk, struct sk_buff *skb) 565 static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
565 { 566 {
566 struct pep_sock *pn = pep_sk(sk); 567 struct pep_sock *pn = pep_sk(sk);
567 struct sock *sknode; 568 struct sock *sknode;
568 struct pnpipehdr *hdr; 569 struct pnpipehdr *hdr;
569 struct sockaddr_pn dst; 570 struct sockaddr_pn dst;
570 int err = NET_RX_SUCCESS; 571 int err = NET_RX_SUCCESS;
571 u8 pipe_handle; 572 u8 pipe_handle;
572 573
573 if (!pskb_may_pull(skb, sizeof(*hdr))) 574 if (!pskb_may_pull(skb, sizeof(*hdr)))
574 goto drop; 575 goto drop;
575 576
576 hdr = pnp_hdr(skb); 577 hdr = pnp_hdr(skb);
577 pipe_handle = hdr->pipe_handle; 578 pipe_handle = hdr->pipe_handle;
578 if (pipe_handle == PN_PIPE_INVALID_HANDLE) 579 if (pipe_handle == PN_PIPE_INVALID_HANDLE)
579 goto drop; 580 goto drop;
580 581
581 pn_skb_get_dst_sockaddr(skb, &dst); 582 pn_skb_get_dst_sockaddr(skb, &dst);
582 583
583 /* Look for an existing pipe handle */ 584 /* Look for an existing pipe handle */
584 sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle); 585 sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
585 if (sknode) 586 if (sknode)
586 return sk_receive_skb(sknode, skb, 1); 587 return sk_receive_skb(sknode, skb, 1);
587 588
588 /* Look for a pipe handle pending accept */ 589 /* Look for a pipe handle pending accept */
589 sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle); 590 sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle);
590 if (sknode) { 591 if (sknode) {
591 sock_put(sknode); 592 sock_put(sknode);
592 if (net_ratelimit()) 593 if (net_ratelimit())
593 printk(KERN_WARNING"Phonet unconnected PEP ignored"); 594 printk(KERN_WARNING"Phonet unconnected PEP ignored");
594 err = NET_RX_DROP; 595 err = NET_RX_DROP;
595 goto drop; 596 goto drop;
596 } 597 }
597 598
598 switch (hdr->message_id) { 599 switch (hdr->message_id) {
599 case PNS_PEP_CONNECT_REQ: 600 case PNS_PEP_CONNECT_REQ:
600 err = pep_connreq_rcv(sk, skb); 601 err = pep_connreq_rcv(sk, skb);
601 break; 602 break;
602 603
603 case PNS_PEP_DISCONNECT_REQ: 604 case PNS_PEP_DISCONNECT_REQ:
604 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); 605 pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
605 break; 606 break;
606 607
607 case PNS_PEP_CTRL_REQ: 608 case PNS_PEP_CTRL_REQ:
608 pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC); 609 pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
609 break; 610 break;
610 611
611 case PNS_PEP_RESET_REQ: 612 case PNS_PEP_RESET_REQ:
612 case PNS_PEP_ENABLE_REQ: 613 case PNS_PEP_ENABLE_REQ:
613 case PNS_PEP_DISABLE_REQ: 614 case PNS_PEP_DISABLE_REQ:
614 /* invalid handle is not even allowed here! */ 615 /* invalid handle is not even allowed here! */
615 default: 616 default:
616 err = NET_RX_DROP; 617 err = NET_RX_DROP;
617 } 618 }
618 drop: 619 drop:
619 kfree_skb(skb); 620 kfree_skb(skb);
620 return err; 621 return err;
621 } 622 }
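
pep_do_rcv() runs on the listening socket and dispatches strictly by pipe handle: an established pipe gets the skb via sk_receive_skb(), a pipe still sitting on the accept queue triggers a rate-limited warning and a drop, and only then are connection-less requests handled on the listener itself. A sketch of that order, as read from the code above:

    /* 1. pep_find_pipe(&pn->hlist, ...)  -> sk_receive_skb(sknode, skb, 1)
     * 2. pep_find_pipe(&pn->ackq, ...)   -> warn, NET_RX_DROP
     * 3. listener: CONNECT_REQ / DISCONNECT_REQ / CTRL_REQ error reply;
     *    anything else is dropped
     */
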
622 623
623 /* associated socket ceases to exist */ 624 /* associated socket ceases to exist */
624 static void pep_sock_close(struct sock *sk, long timeout) 625 static void pep_sock_close(struct sock *sk, long timeout)
625 { 626 {
626 struct pep_sock *pn = pep_sk(sk); 627 struct pep_sock *pn = pep_sk(sk);
627 int ifindex = 0; 628 int ifindex = 0;
628 629
629 sock_hold(sk); /* keep a reference after sk_common_release() */ 630 sock_hold(sk); /* keep a reference after sk_common_release() */
630 sk_common_release(sk); 631 sk_common_release(sk);
631 632
632 lock_sock(sk); 633 lock_sock(sk);
633 if (sk->sk_state == TCP_LISTEN) { 634 if (sk->sk_state == TCP_LISTEN) {
634 /* Destroy the listen queue */ 635 /* Destroy the listen queue */
635 struct sock *sknode; 636 struct sock *sknode;
636 struct hlist_node *p, *n; 637 struct hlist_node *p, *n;
637 638
638 sk_for_each_safe(sknode, p, n, &pn->ackq) 639 sk_for_each_safe(sknode, p, n, &pn->ackq)
639 sk_del_node_init(sknode); 640 sk_del_node_init(sknode);
640 sk->sk_state = TCP_CLOSE; 641 sk->sk_state = TCP_CLOSE;
641 } 642 }
642 ifindex = pn->ifindex; 643 ifindex = pn->ifindex;
643 pn->ifindex = 0; 644 pn->ifindex = 0;
644 release_sock(sk); 645 release_sock(sk);
645 646
646 if (ifindex) 647 if (ifindex)
647 gprs_detach(sk); 648 gprs_detach(sk);
648 sock_put(sk); 649 sock_put(sk);
649 } 650 }
650 651
651 static int pep_wait_connreq(struct sock *sk, int noblock) 652 static int pep_wait_connreq(struct sock *sk, int noblock)
652 { 653 {
653 struct task_struct *tsk = current; 654 struct task_struct *tsk = current;
654 struct pep_sock *pn = pep_sk(sk); 655 struct pep_sock *pn = pep_sk(sk);
655 long timeo = sock_rcvtimeo(sk, noblock); 656 long timeo = sock_rcvtimeo(sk, noblock);
656 657
657 for (;;) { 658 for (;;) {
658 DEFINE_WAIT(wait); 659 DEFINE_WAIT(wait);
659 660
660 if (sk->sk_state != TCP_LISTEN) 661 if (sk->sk_state != TCP_LISTEN)
661 return -EINVAL; 662 return -EINVAL;
662 if (!hlist_empty(&pn->ackq)) 663 if (!hlist_empty(&pn->ackq))
663 break; 664 break;
664 if (!timeo) 665 if (!timeo)
665 return -EWOULDBLOCK; 666 return -EWOULDBLOCK;
666 if (signal_pending(tsk)) 667 if (signal_pending(tsk))
667 return sock_intr_errno(timeo); 668 return sock_intr_errno(timeo);
668 669
669 prepare_to_wait_exclusive(sk_sleep(sk), &wait, 670 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
670 TASK_INTERRUPTIBLE); 671 TASK_INTERRUPTIBLE);
671 release_sock(sk); 672 release_sock(sk);
672 timeo = schedule_timeout(timeo); 673 timeo = schedule_timeout(timeo);
673 lock_sock(sk); 674 lock_sock(sk);
674 finish_wait(sk_sleep(sk), &wait); 675 finish_wait(sk_sleep(sk), &wait);
675 } 676 }
676 677
677 return 0; 678 return 0;
678 } 679 }
679 680
680 static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) 681 static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
681 { 682 {
682 struct pep_sock *pn = pep_sk(sk); 683 struct pep_sock *pn = pep_sk(sk);
683 struct sock *newsk = NULL; 684 struct sock *newsk = NULL;
684 struct sk_buff *oskb; 685 struct sk_buff *oskb;
685 int err; 686 int err;
686 687
687 lock_sock(sk); 688 lock_sock(sk);
688 err = pep_wait_connreq(sk, flags & O_NONBLOCK); 689 err = pep_wait_connreq(sk, flags & O_NONBLOCK);
689 if (err) 690 if (err)
690 goto out; 691 goto out;
691 692
692 newsk = __sk_head(&pn->ackq); 693 newsk = __sk_head(&pn->ackq);
693 694
694 oskb = skb_dequeue(&newsk->sk_receive_queue); 695 oskb = skb_dequeue(&newsk->sk_receive_queue);
695 err = pep_accept_conn(newsk, oskb); 696 err = pep_accept_conn(newsk, oskb);
696 if (err) { 697 if (err) {
697 skb_queue_head(&newsk->sk_receive_queue, oskb); 698 skb_queue_head(&newsk->sk_receive_queue, oskb);
698 newsk = NULL; 699 newsk = NULL;
699 goto out; 700 goto out;
700 } 701 }
701 kfree_skb(oskb); 702 kfree_skb(oskb);
702 703
703 sock_hold(sk); 704 sock_hold(sk);
704 pep_sk(newsk)->listener = sk; 705 pep_sk(newsk)->listener = sk;
705 706
706 sock_hold(newsk); 707 sock_hold(newsk);
707 sk_del_node_init(newsk); 708 sk_del_node_init(newsk);
708 sk_acceptq_removed(sk); 709 sk_acceptq_removed(sk);
709 sk_add_node(newsk, &pn->hlist); 710 sk_add_node(newsk, &pn->hlist);
710 __sock_put(newsk); 711 __sock_put(newsk);
711 712
712 out: 713 out:
713 release_sock(sk); 714 release_sock(sk);
714 *errp = err; 715 *errp = err;
715 return newsk; 716 return newsk;
716 } 717 }
717 718
718 static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) 719 static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
719 { 720 {
720 struct pep_sock *pn = pep_sk(sk); 721 struct pep_sock *pn = pep_sk(sk);
721 int answ; 722 int answ;
722 723
723 switch (cmd) { 724 switch (cmd) {
724 case SIOCINQ: 725 case SIOCINQ:
725 if (sk->sk_state == TCP_LISTEN) 726 if (sk->sk_state == TCP_LISTEN)
726 return -EINVAL; 727 return -EINVAL;
727 728
728 lock_sock(sk); 729 lock_sock(sk);
729 if (sock_flag(sk, SOCK_URGINLINE) && 730 if (sock_flag(sk, SOCK_URGINLINE) &&
730 !skb_queue_empty(&pn->ctrlreq_queue)) 731 !skb_queue_empty(&pn->ctrlreq_queue))
731 answ = skb_peek(&pn->ctrlreq_queue)->len; 732 answ = skb_peek(&pn->ctrlreq_queue)->len;
732 else if (!skb_queue_empty(&sk->sk_receive_queue)) 733 else if (!skb_queue_empty(&sk->sk_receive_queue))
733 answ = skb_peek(&sk->sk_receive_queue)->len; 734 answ = skb_peek(&sk->sk_receive_queue)->len;
734 else 735 else
735 answ = 0; 736 answ = 0;
736 release_sock(sk); 737 release_sock(sk);
737 return put_user(answ, (int __user *)arg); 738 return put_user(answ, (int __user *)arg);
738 } 739 }
739 740
740 return -ENOIOCTLCMD; 741 return -ENOIOCTLCMD;
741 } 742 }
742 743
743 static int pep_init(struct sock *sk) 744 static int pep_init(struct sock *sk)
744 { 745 {
745 struct pep_sock *pn = pep_sk(sk); 746 struct pep_sock *pn = pep_sk(sk);
746 747
747 INIT_HLIST_HEAD(&pn->ackq); 748 INIT_HLIST_HEAD(&pn->ackq);
748 INIT_HLIST_HEAD(&pn->hlist); 749 INIT_HLIST_HEAD(&pn->hlist);
749 skb_queue_head_init(&pn->ctrlreq_queue); 750 skb_queue_head_init(&pn->ctrlreq_queue);
750 pn->pipe_handle = PN_PIPE_INVALID_HANDLE; 751 pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
751 return 0; 752 return 0;
752 } 753 }
753 754
754 static int pep_setsockopt(struct sock *sk, int level, int optname, 755 static int pep_setsockopt(struct sock *sk, int level, int optname,
755 char __user *optval, unsigned int optlen) 756 char __user *optval, unsigned int optlen)
756 { 757 {
757 struct pep_sock *pn = pep_sk(sk); 758 struct pep_sock *pn = pep_sk(sk);
758 int val = 0, err = 0; 759 int val = 0, err = 0;
759 760
760 if (level != SOL_PNPIPE) 761 if (level != SOL_PNPIPE)
761 return -ENOPROTOOPT; 762 return -ENOPROTOOPT;
762 if (optlen >= sizeof(int)) { 763 if (optlen >= sizeof(int)) {
763 if (get_user(val, (int __user *) optval)) 764 if (get_user(val, (int __user *) optval))
764 return -EFAULT; 765 return -EFAULT;
765 } 766 }
766 767
767 lock_sock(sk); 768 lock_sock(sk);
768 switch (optname) { 769 switch (optname) {
769 case PNPIPE_ENCAP: 770 case PNPIPE_ENCAP:
770 if (val && val != PNPIPE_ENCAP_IP) { 771 if (val && val != PNPIPE_ENCAP_IP) {
771 err = -EINVAL; 772 err = -EINVAL;
772 break; 773 break;
773 } 774 }
774 if (!pn->ifindex == !val) 775 if (!pn->ifindex == !val)
775 break; /* Nothing to do! */ 776 break; /* Nothing to do! */
776 if (!capable(CAP_NET_ADMIN)) { 777 if (!capable(CAP_NET_ADMIN)) {
777 err = -EPERM; 778 err = -EPERM;
778 break; 779 break;
779 } 780 }
780 if (val) { 781 if (val) {
781 release_sock(sk); 782 release_sock(sk);
782 err = gprs_attach(sk); 783 err = gprs_attach(sk);
783 if (err > 0) { 784 if (err > 0) {
784 pn->ifindex = err; 785 pn->ifindex = err;
785 err = 0; 786 err = 0;
786 } 787 }
787 } else { 788 } else {
788 pn->ifindex = 0; 789 pn->ifindex = 0;
789 release_sock(sk); 790 release_sock(sk);
790 gprs_detach(sk); 791 gprs_detach(sk);
791 err = 0; 792 err = 0;
792 } 793 }
793 goto out_norel; 794 goto out_norel;
794 default: 795 default:
795 err = -ENOPROTOOPT; 796 err = -ENOPROTOOPT;
796 } 797 }
797 release_sock(sk); 798 release_sock(sk);
798 799
799 out_norel: 800 out_norel:
800 return err; 801 return err;
801 } 802 }
802 803
803 static int pep_getsockopt(struct sock *sk, int level, int optname, 804 static int pep_getsockopt(struct sock *sk, int level, int optname,
804 char __user *optval, int __user *optlen) 805 char __user *optval, int __user *optlen)
805 { 806 {
806 struct pep_sock *pn = pep_sk(sk); 807 struct pep_sock *pn = pep_sk(sk);
807 int len, val; 808 int len, val;
808 809
809 if (level != SOL_PNPIPE) 810 if (level != SOL_PNPIPE)
810 return -ENOPROTOOPT; 811 return -ENOPROTOOPT;
811 if (get_user(len, optlen)) 812 if (get_user(len, optlen))
812 return -EFAULT; 813 return -EFAULT;
813 814
814 switch (optname) { 815 switch (optname) {
815 case PNPIPE_ENCAP: 816 case PNPIPE_ENCAP:
816 val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE; 817 val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
817 break; 818 break;
818 case PNPIPE_IFINDEX: 819 case PNPIPE_IFINDEX:
819 val = pn->ifindex; 820 val = pn->ifindex;
820 break; 821 break;
821 default: 822 default:
822 return -ENOPROTOOPT; 823 return -ENOPROTOOPT;
823 } 824 }
824 825
825 len = min_t(unsigned int, sizeof(int), len); 826 len = min_t(unsigned int, sizeof(int), len);
826 if (put_user(len, optlen)) 827 if (put_user(len, optlen))
827 return -EFAULT; 828 return -EFAULT;
828 if (put_user(val, (int __user *) optval)) 829 if (put_user(val, (int __user *) optval))
829 return -EFAULT; 830 return -EFAULT;
830 return 0; 831 return 0;
831 } 832 }
832 833
833 static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) 834 static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
834 { 835 {
835 struct pep_sock *pn = pep_sk(sk); 836 struct pep_sock *pn = pep_sk(sk);
836 struct pnpipehdr *ph; 837 struct pnpipehdr *ph;
837 838
838 if (pn_flow_safe(pn->tx_fc) && 839 if (pn_flow_safe(pn->tx_fc) &&
839 !atomic_add_unless(&pn->tx_credits, -1, 0)) { 840 !atomic_add_unless(&pn->tx_credits, -1, 0)) {
840 kfree_skb(skb); 841 kfree_skb(skb);
841 return -ENOBUFS; 842 return -ENOBUFS;
842 } 843 }
843 844
844 skb_push(skb, 3 + pn->aligned); 845 skb_push(skb, 3 + pn->aligned);
845 skb_reset_transport_header(skb); 846 skb_reset_transport_header(skb);
846 ph = pnp_hdr(skb); 847 ph = pnp_hdr(skb);
847 ph->utid = 0; 848 ph->utid = 0;
848 if (pn->aligned) { 849 if (pn->aligned) {
849 ph->message_id = PNS_PIPE_ALIGNED_DATA; 850 ph->message_id = PNS_PIPE_ALIGNED_DATA;
850 ph->data[0] = 0; /* padding */ 851 ph->data[0] = 0; /* padding */
851 } else 852 } else
852 ph->message_id = PNS_PIPE_DATA; 853 ph->message_id = PNS_PIPE_DATA;
853 ph->pipe_handle = pn->pipe_handle; 854 ph->pipe_handle = pn->pipe_handle;
854 855
855 return pn_skb_send(sk, skb, &pipe_srv); 856 return pn_skb_send(sk, skb, &pipe_srv);
856 } 857 }
857 858
858 static int pep_sendmsg(struct kiocb *iocb, struct sock *sk, 859 static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
859 struct msghdr *msg, size_t len) 860 struct msghdr *msg, size_t len)
860 { 861 {
861 struct pep_sock *pn = pep_sk(sk); 862 struct pep_sock *pn = pep_sk(sk);
862 struct sk_buff *skb; 863 struct sk_buff *skb;
863 long timeo; 864 long timeo;
864 int flags = msg->msg_flags; 865 int flags = msg->msg_flags;
865 int err, done; 866 int err, done;
866 867
867 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL| 868 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
868 MSG_CMSG_COMPAT)) || 869 MSG_CMSG_COMPAT)) ||
869 !(msg->msg_flags & MSG_EOR)) 870 !(msg->msg_flags & MSG_EOR))
870 return -EOPNOTSUPP; 871 return -EOPNOTSUPP;
871 872
872 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, 873 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
873 flags & MSG_DONTWAIT, &err); 874 flags & MSG_DONTWAIT, &err);
874 if (!skb) 875 if (!skb)
875 return -ENOBUFS; 876 return -ENOBUFS;
876 877
877 skb_reserve(skb, MAX_PHONET_HEADER + 3); 878 skb_reserve(skb, MAX_PHONET_HEADER + 3);
878 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 879 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
879 if (err < 0) 880 if (err < 0)
880 goto outfree; 881 goto outfree;
881 882
882 lock_sock(sk); 883 lock_sock(sk);
883 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 884 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
884 if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) { 885 if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
885 err = -ENOTCONN; 886 err = -ENOTCONN;
886 goto out; 887 goto out;
887 } 888 }
888 if (sk->sk_state != TCP_ESTABLISHED) { 889 if (sk->sk_state != TCP_ESTABLISHED) {
889 /* Wait until the pipe gets to enabled state */ 890 /* Wait until the pipe gets to enabled state */
890 disabled: 891 disabled:
891 err = sk_stream_wait_connect(sk, &timeo); 892 err = sk_stream_wait_connect(sk, &timeo);
892 if (err) 893 if (err)
893 goto out; 894 goto out;
894 895
895 if (sk->sk_state == TCP_CLOSE_WAIT) { 896 if (sk->sk_state == TCP_CLOSE_WAIT) {
896 err = -ECONNRESET; 897 err = -ECONNRESET;
897 goto out; 898 goto out;
898 } 899 }
899 } 900 }
900 BUG_ON(sk->sk_state != TCP_ESTABLISHED); 901 BUG_ON(sk->sk_state != TCP_ESTABLISHED);
901 902
902 /* Wait until flow control allows TX */ 903 /* Wait until flow control allows TX */
903 done = atomic_read(&pn->tx_credits); 904 done = atomic_read(&pn->tx_credits);
904 while (!done) { 905 while (!done) {
905 DEFINE_WAIT(wait); 906 DEFINE_WAIT(wait);
906 907
907 if (!timeo) { 908 if (!timeo) {
908 err = -EAGAIN; 909 err = -EAGAIN;
909 goto out; 910 goto out;
910 } 911 }
911 if (signal_pending(current)) { 912 if (signal_pending(current)) {
912 err = sock_intr_errno(timeo); 913 err = sock_intr_errno(timeo);
913 goto out; 914 goto out;
914 } 915 }
915 916
916 prepare_to_wait(sk_sleep(sk), &wait, 917 prepare_to_wait(sk_sleep(sk), &wait,
917 TASK_INTERRUPTIBLE); 918 TASK_INTERRUPTIBLE);
918 done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits)); 919 done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
919 finish_wait(sk_sleep(sk), &wait); 920 finish_wait(sk_sleep(sk), &wait);
920 921
921 if (sk->sk_state != TCP_ESTABLISHED) 922 if (sk->sk_state != TCP_ESTABLISHED)
922 goto disabled; 923 goto disabled;
923 } 924 }
924 925
925 err = pipe_skb_send(sk, skb); 926 err = pipe_skb_send(sk, skb);
926 if (err >= 0) 927 if (err >= 0)
927 err = len; /* success! */ 928 err = len; /* success! */
928 skb = NULL; 929 skb = NULL;
929 out: 930 out:
930 release_sock(sk); 931 release_sock(sk);
931 outfree: 932 outfree:
932 kfree_skb(skb); 933 kfree_skb(skb);
933 return err; 934 return err;
934 } 935 }
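
On the TX side, flow control is enforced in pipe_skb_send() and waited on in pep_sendmsg(): a data packet may only leave if a credit can be taken atomically, and the sender sleeps until the peer's PNS_PEP_STATUS_IND (handled in pipe_rcv_status() above) refills tx_credits and wakes the write path. A sketch of the pairing, as read from this file:

    /*   pipe_skb_send():   atomic_add_unless(&pn->tx_credits, -1, 0)
     *                      must succeed, otherwise -ENOBUFS
     *   pep_sendmsg():     sk_wait_event(sk, &timeo,
     *                                    atomic_read(&pn->tx_credits))
     *   pipe_rcv_status(): peer grants credits -> sk->sk_write_space(sk)
     */
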
935 936
936 int pep_writeable(struct sock *sk) 937 int pep_writeable(struct sock *sk)
937 { 938 {
938 struct pep_sock *pn = pep_sk(sk); 939 struct pep_sock *pn = pep_sk(sk);
939 940
940 return atomic_read(&pn->tx_credits); 941 return atomic_read(&pn->tx_credits);
941 } 942 }
942 943
943 int pep_write(struct sock *sk, struct sk_buff *skb) 944 int pep_write(struct sock *sk, struct sk_buff *skb)
944 { 945 {
945 struct sk_buff *rskb, *fs; 946 struct sk_buff *rskb, *fs;
946 int flen = 0; 947 int flen = 0;
947 948
948 if (pep_sk(sk)->aligned) 949 if (pep_sk(sk)->aligned)
949 return pipe_skb_send(sk, skb); 950 return pipe_skb_send(sk, skb);
950 951
951 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC); 952 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
952 if (!rskb) { 953 if (!rskb) {
953 kfree_skb(skb); 954 kfree_skb(skb);
954 return -ENOMEM; 955 return -ENOMEM;
955 } 956 }
956 skb_shinfo(rskb)->frag_list = skb; 957 skb_shinfo(rskb)->frag_list = skb;
957 rskb->len += skb->len; 958 rskb->len += skb->len;
958 rskb->data_len += rskb->len; 959 rskb->data_len += rskb->len;
959 rskb->truesize += rskb->len; 960 rskb->truesize += rskb->len;
960 961
961 /* Avoid nested fragments */ 962 /* Avoid nested fragments */
962 skb_walk_frags(skb, fs) 963 skb_walk_frags(skb, fs)
963 flen += fs->len; 964 flen += fs->len;
964 skb->next = skb_shinfo(skb)->frag_list; 965 skb->next = skb_shinfo(skb)->frag_list;
965 skb_frag_list_init(skb); 966 skb_frag_list_init(skb);
966 skb->len -= flen; 967 skb->len -= flen;
967 skb->data_len -= flen; 968 skb->data_len -= flen;
968 skb->truesize -= flen; 969 skb->truesize -= flen;
969 970
970 skb_reserve(rskb, MAX_PHONET_HEADER + 3); 971 skb_reserve(rskb, MAX_PHONET_HEADER + 3);
971 return pipe_skb_send(sk, rskb); 972 return pipe_skb_send(sk, rskb);
972 } 973 }
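
pep_write() (exported for the GPRS encapsulation elsewhere in Phonet) wraps an unaligned payload in a fresh header skb and, as the "Avoid nested fragments" comment says, splices the payload's own fragment list up one level so the result never contains a frag_list inside a frag_list. Roughly, as a hedged reading of the pointer surgery above:

    /* before:  rskb --frag_list--> skb --frag_list--> f1 --> f2
     * after:   rskb --frag_list--> skb --> f1 --> f2   (chained via ->next)
     */
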
973 974
974 struct sk_buff *pep_read(struct sock *sk) 975 struct sk_buff *pep_read(struct sock *sk)
975 { 976 {
976 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); 977 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
977 978
978 if (sk->sk_state == TCP_ESTABLISHED) 979 if (sk->sk_state == TCP_ESTABLISHED)
979 pipe_grant_credits(sk); 980 pipe_grant_credits(sk);
980 return skb; 981 return skb;
981 } 982 }
982 983
983 static int pep_recvmsg(struct kiocb *iocb, struct sock *sk, 984 static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
984 struct msghdr *msg, size_t len, int noblock, 985 struct msghdr *msg, size_t len, int noblock,
985 int flags, int *addr_len) 986 int flags, int *addr_len)
986 { 987 {
987 struct sk_buff *skb; 988 struct sk_buff *skb;
988 int err; 989 int err;
989 990
990 if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL| 991 if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
991 MSG_NOSIGNAL|MSG_CMSG_COMPAT)) 992 MSG_NOSIGNAL|MSG_CMSG_COMPAT))
992 return -EOPNOTSUPP; 993 return -EOPNOTSUPP;
993 994
994 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE))) 995 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
995 return -ENOTCONN; 996 return -ENOTCONN;
996 997
997 if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) { 998 if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
998 /* Dequeue and acknowledge control request */ 999 /* Dequeue and acknowledge control request */
999 struct pep_sock *pn = pep_sk(sk); 1000 struct pep_sock *pn = pep_sk(sk);
1000 1001
1001 if (flags & MSG_PEEK) 1002 if (flags & MSG_PEEK)
1002 return -EOPNOTSUPP; 1003 return -EOPNOTSUPP;
1003 skb = skb_dequeue(&pn->ctrlreq_queue); 1004 skb = skb_dequeue(&pn->ctrlreq_queue);
1004 if (skb) { 1005 if (skb) {
1005 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, 1006 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
1006 GFP_KERNEL); 1007 GFP_KERNEL);
1007 msg->msg_flags |= MSG_OOB; 1008 msg->msg_flags |= MSG_OOB;
1008 goto copy; 1009 goto copy;
1009 } 1010 }
1010 if (flags & MSG_OOB) 1011 if (flags & MSG_OOB)
1011 return -EINVAL; 1012 return -EINVAL;
1012 } 1013 }
1013 1014
1014 skb = skb_recv_datagram(sk, flags, noblock, &err); 1015 skb = skb_recv_datagram(sk, flags, noblock, &err);
1015 lock_sock(sk); 1016 lock_sock(sk);
1016 if (skb == NULL) { 1017 if (skb == NULL) {
1017 if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT) 1018 if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
1018 err = -ECONNRESET; 1019 err = -ECONNRESET;
1019 release_sock(sk); 1020 release_sock(sk);
1020 return err; 1021 return err;
1021 } 1022 }
1022 1023
1023 if (sk->sk_state == TCP_ESTABLISHED) 1024 if (sk->sk_state == TCP_ESTABLISHED)
1024 pipe_grant_credits(sk); 1025 pipe_grant_credits(sk);
1025 release_sock(sk); 1026 release_sock(sk);
1026 copy: 1027 copy:
1027 msg->msg_flags |= MSG_EOR; 1028 msg->msg_flags |= MSG_EOR;
1028 if (skb->len > len) 1029 if (skb->len > len)
1029 msg->msg_flags |= MSG_TRUNC; 1030 msg->msg_flags |= MSG_TRUNC;
1030 else 1031 else
1031 len = skb->len; 1032 len = skb->len;
1032 1033
1033 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); 1034 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
1034 if (!err) 1035 if (!err)
1035 err = (flags & MSG_TRUNC) ? skb->len : len; 1036 err = (flags & MSG_TRUNC) ? skb->len : len;
1036 1037
1037 skb_free_datagram(sk, skb); 1038 skb_free_datagram(sk, skb);
1038 return err; 1039 return err;
1039 } 1040 }
1040 1041
1041 static void pep_sock_unhash(struct sock *sk) 1042 static void pep_sock_unhash(struct sock *sk)
1042 { 1043 {
1043 struct pep_sock *pn = pep_sk(sk); 1044 struct pep_sock *pn = pep_sk(sk);
1044 struct sock *skparent = NULL; 1045 struct sock *skparent = NULL;
1045 1046
1046 lock_sock(sk); 1047 lock_sock(sk);
1047 if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { 1048 if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
1048 skparent = pn->listener; 1049 skparent = pn->listener;
1049 release_sock(sk); 1050 release_sock(sk);
1050 1051
1051 pn = pep_sk(skparent); 1052 pn = pep_sk(skparent);
1052 lock_sock(skparent); 1053 lock_sock(skparent);
1053 sk_del_node_init(sk); 1054 sk_del_node_init(sk);
1054 sk = skparent; 1055 sk = skparent;
1055 } 1056 }
1056 /* Unhash a listening sock only when it is closed 1057 /* Unhash a listening sock only when it is closed
1057 * and all of its active connected pipes are closed. */ 1058 * and all of its active connected pipes are closed. */
1058 if (hlist_empty(&pn->hlist)) 1059 if (hlist_empty(&pn->hlist))
1059 pn_sock_unhash(&pn->pn_sk.sk); 1060 pn_sock_unhash(&pn->pn_sk.sk);
1060 release_sock(sk); 1061 release_sock(sk);
1061 1062
1062 if (skparent) 1063 if (skparent)
1063 sock_put(skparent); 1064 sock_put(skparent);
1064 } 1065 }
1065 1066
1066 static struct proto pep_proto = { 1067 static struct proto pep_proto = {
1067 .close = pep_sock_close, 1068 .close = pep_sock_close,
1068 .accept = pep_sock_accept, 1069 .accept = pep_sock_accept,
1069 .ioctl = pep_ioctl, 1070 .ioctl = pep_ioctl,
1070 .init = pep_init, 1071 .init = pep_init,
1071 .setsockopt = pep_setsockopt, 1072 .setsockopt = pep_setsockopt,
1072 .getsockopt = pep_getsockopt, 1073 .getsockopt = pep_getsockopt,
1073 .sendmsg = pep_sendmsg, 1074 .sendmsg = pep_sendmsg,
1074 .recvmsg = pep_recvmsg, 1075 .recvmsg = pep_recvmsg,
1075 .backlog_rcv = pep_do_rcv, 1076 .backlog_rcv = pep_do_rcv,
1076 .hash = pn_sock_hash, 1077 .hash = pn_sock_hash,
1077 .unhash = pep_sock_unhash, 1078 .unhash = pep_sock_unhash,
1078 .get_port = pn_sock_get_port, 1079 .get_port = pn_sock_get_port,
1079 .obj_size = sizeof(struct pep_sock), 1080 .obj_size = sizeof(struct pep_sock),
1080 .owner = THIS_MODULE, 1081 .owner = THIS_MODULE,
1081 .name = "PNPIPE", 1082 .name = "PNPIPE",
1082 }; 1083 };
1083 1084
1084 static struct phonet_protocol pep_pn_proto = { 1085 static struct phonet_protocol pep_pn_proto = {
1085 .ops = &phonet_stream_ops, 1086 .ops = &phonet_stream_ops,
1086 .prot = &pep_proto, 1087 .prot = &pep_proto,
1087 .sock_type = SOCK_SEQPACKET, 1088 .sock_type = SOCK_SEQPACKET,
1088 }; 1089 };
1089 1090
1090 static int __init pep_register(void) 1091 static int __init pep_register(void)
1091 { 1092 {
1092 return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto); 1093 return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
1093 } 1094 }
1094 1095
1095 static void __exit pep_unregister(void) 1096 static void __exit pep_unregister(void)
1096 { 1097 {
1097 phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto); 1098 phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
1098 } 1099 }
1099 1100
1100 module_init(pep_register); 1101 module_init(pep_register);
1101 module_exit(pep_unregister); 1102 module_exit(pep_unregister);
1102 MODULE_AUTHOR("Remi Denis-Courmont, Nokia"); 1103 MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
1103 MODULE_DESCRIPTION("Phonet pipe protocol"); 1104 MODULE_DESCRIPTION("Phonet pipe protocol");
1104 MODULE_LICENSE("GPL"); 1105 MODULE_LICENSE("GPL");
1105 MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE); 1106 MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);
1106 1107