Commit 5f04d5068a90602b93a7953e9a47c496705c6976

Authored by Eric W. Biederman
Committed by David S. Miller
Parent: 2205a6ea93

net: Fix more stale on-stack list_head objects.

From: Eric W. Biederman <ebiederm@xmission.com>

In the beginning, with batching, unreg_list was a list that was used only
once in the lifetime of a network device (I think).  Now we have calls
using the unreg_list that can happen multiple times in the life of a
network device, like dev_deactivate and dev_close.  In addition,
unregister_netdevice_queue does a list_move, because for devices like
veth pairs it is possible that unregister_netdevice_queue will be called
multiple times.

So I think the change below, fixing the dev_deactivate case that Eric D.
missed, will fix this problem.  Now to go test that.

Signed-off-by: David S. Miller <davem@davemloft.net>
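
For context, the bug class being fixed is worth spelling out: an on-stack
LIST_HEAD is spliced together with a list_head embedded in a longer-lived
object, and the function returns without unlinking it, so the embedded node
keeps pointing into a dead stack frame; the next caller that walks or moves
that node then scribbles over whatever now occupies the stack.  Below is a
minimal sketch of the dev_deactivate case -- a simplified illustration in
the spirit of net/sched/sch_generic.c from this era, not the verbatim diff:

	/* Sketch: 'single' lives on this stack frame, and dev->unreg_list
	 * is linked into it for the duration of the call.  Without the
	 * final list_del(), dev->unreg_list would still point here after
	 * we return -- a stale on-stack list_head. */
	void dev_deactivate(struct net_device *dev)
	{
		LIST_HEAD(single);		/* on-stack list head */

		list_add(&dev->unreg_list, &single);
		dev_deactivate_many(&single);
		list_del(&single);		/* unlink the stack head again */
	}

A later dev_close() or unregister that reuses dev->unreg_list would
otherwise follow the stale pointers, which is exactly the corruption mode
the message above describes.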

2 changed files, 2 additions(+), 0 deletions(-)

net/mac80211/iface.c
/*
 * Interface handling (except master interface)
 *
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "debugfs_netdev.h"
#include "mesh.h"
#include "led.h"
#include "driver-ops.h"
#include "wme.h"
#include "rate.h"

/**
 * DOC: Interface list locking
 *
 * The interface list in each struct ieee80211_local is protected
 * three-fold:
 *
 * (1) modifications may only be done under the RTNL
 * (2) modifications and readers are protected against each other by
 *     the iflist_mtx.
 * (3) modifications are done in an RCU manner so atomic readers
 *     can traverse the list in RCU-safe blocks.
 *
 * As a consequence, reads (traversals) of the list can be protected
 * by either the RTNL, the iflist_mtx or RCU.
 */


static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
	int meshhdrlen;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;

	/* FIX: what would be proper limits for MTU?
	 * This interface uses 802.3 frames. */
	if (new_mtu < 256 ||
	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
		return -EINVAL;
	}

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
	dev->mtu = new_mtu;
	return 0;
}

static int ieee80211_change_mac(struct net_device *dev, void *addr)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sockaddr *sa = addr;
	int ret;

	if (ieee80211_sdata_running(sdata))
		return -EBUSY;

	ret = eth_mac_addr(dev, sa);

	if (ret == 0)
		memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);

	return ret;
}

static inline int identical_mac_addr_allowed(int type1, int type2)
{
	return type1 == NL80211_IFTYPE_MONITOR ||
		type2 == NL80211_IFTYPE_MONITOR ||
		(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
		(type1 == NL80211_IFTYPE_WDS &&
			(type2 == NL80211_IFTYPE_WDS ||
			 type2 == NL80211_IFTYPE_AP)) ||
		(type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) ||
		(type1 == NL80211_IFTYPE_AP_VLAN &&
			(type2 == NL80211_IFTYPE_AP ||
			 type2 == NL80211_IFTYPE_AP_VLAN));
}

static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
					    enum nl80211_iftype iftype)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_sub_if_data *nsdata;
	struct net_device *dev = sdata->dev;

	ASSERT_RTNL();

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(nsdata, &local->interfaces, list) {
		struct net_device *ndev = nsdata->dev;

		if (ndev != dev && ieee80211_sdata_running(nsdata)) {
			/*
			 * Allow only a single IBSS interface to be up at any
			 * time. This is restricted because beacon distribution
			 * cannot work properly if both are in the same IBSS.
			 *
			 * To remove this restriction we'd have to disallow them
			 * from setting the same SSID on different IBSS interfaces
			 * belonging to the same hardware. Then, however, we're
			 * faced with having to adopt two different TSF timers...
			 */
			if (iftype == NL80211_IFTYPE_ADHOC &&
			    nsdata->vif.type == NL80211_IFTYPE_ADHOC)
				return -EBUSY;

			/*
			 * The remaining checks are only performed for interfaces
			 * with the same MAC address.
			 */
			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
				continue;

			/*
			 * check whether it may have the same address
			 */
			if (!identical_mac_addr_allowed(iftype,
							nsdata->vif.type))
				return -ENOTUNIQ;

			/*
			 * can only add VLANs to enabled APs
			 */
			if (iftype == NL80211_IFTYPE_AP_VLAN &&
			    nsdata->vif.type == NL80211_IFTYPE_AP)
				sdata->bss = &nsdata->u.ap;
		}
	}

	return 0;
}

void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
				    const int offset)
{
	struct ieee80211_local *local = sdata->local;
	u32 flags = sdata->u.mntr_flags;

#define ADJUST(_f, _s)	do {				\
	if (flags & MONITOR_FLAG_##_f)			\
		local->fif_##_s += offset;		\
	} while (0)

	ADJUST(FCSFAIL, fcsfail);
	ADJUST(PLCPFAIL, plcpfail);
	ADJUST(CONTROL, control);
	ADJUST(CONTROL, pspoll);
	ADJUST(OTHER_BSS, other_bss);

#undef ADJUST
}

/*
 * NOTE: Be very careful when changing this function, it must NOT return
 * an error on interface type changes that have been pre-checked, so most
 * checks should be in ieee80211_check_concurrent_iface.
 */
static int ieee80211_do_open(struct net_device *dev, bool coming_up)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	u32 changed = 0;
	int res;
	u32 hw_reconf_flags = 0;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_WDS:
		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
			return -ENOLINK;
		break;
	case NL80211_IFTYPE_AP_VLAN:
		if (!sdata->bss)
			return -ENOLINK;
		list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
		break;
	case NL80211_IFTYPE_AP:
		sdata->bss = &sdata->u.ap;
		break;
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_MONITOR:
	case NL80211_IFTYPE_ADHOC:
		/* no special treatment */
		break;
	case NL80211_IFTYPE_UNSPECIFIED:
	case NUM_NL80211_IFTYPES:
	case NL80211_IFTYPE_P2P_CLIENT:
	case NL80211_IFTYPE_P2P_GO:
		/* cannot happen */
		WARN_ON(1);
		break;
	}

	if (local->open_count == 0) {
		res = drv_start(local);
		if (res)
			goto err_del_bss;
		if (local->ops->napi_poll)
			napi_enable(&local->napi);
		/* we're brought up, everything changes */
		hw_reconf_flags = ~0;
		ieee80211_led_radio(local, true);
		ieee80211_mod_tpt_led_trig(local,
					   IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
	}

	/*
	 * Copy the hopefully now-present MAC address to
	 * this interface, if it has the special null one.
	 */
	if (is_zero_ether_addr(dev->dev_addr)) {
		memcpy(dev->dev_addr,
		       local->hw.wiphy->perm_addr,
		       ETH_ALEN);
		memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

		if (!is_valid_ether_addr(dev->dev_addr)) {
			if (!local->open_count)
				drv_stop(local);
			return -EADDRNOTAVAIL;
		}
	}

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		/* no need to tell driver */
		break;
	case NL80211_IFTYPE_MONITOR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs++;
			break;
		}

		/* must be before the call to ieee80211_configure_filter */
		local->monitors++;
		if (local->monitors == 1) {
			local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
			hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
		}

		ieee80211_adjust_monitor_flags(sdata, 1);
		ieee80211_configure_filter(local);

		netif_carrier_on(dev);
		break;
	default:
		if (coming_up) {
			res = drv_add_interface(local, &sdata->vif);
			if (res)
				goto err_stop;
		}

		if (sdata->vif.type == NL80211_IFTYPE_AP) {
			local->fif_pspoll++;
			local->fif_probe_req++;

			ieee80211_configure_filter(local);
		} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
			local->fif_probe_req++;
		}

		changed |= ieee80211_reset_erp_info(sdata);
		ieee80211_bss_info_change_notify(sdata, changed);

		if (sdata->vif.type == NL80211_IFTYPE_STATION)
			netif_carrier_off(dev);
		else
			netif_carrier_on(dev);
	}

	set_bit(SDATA_STATE_RUNNING, &sdata->state);

	if (sdata->vif.type == NL80211_IFTYPE_WDS) {
		/* Create STA entry for the WDS peer */
		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
				     GFP_KERNEL);
		if (!sta) {
			res = -ENOMEM;
			goto err_del_interface;
		}

		/* no locking required since STA is not live yet */
		sta->flags |= WLAN_STA_AUTHORIZED;

		res = sta_info_insert(sta);
		if (res) {
			/* STA has been freed */
			goto err_del_interface;
		}

		rate_control_rate_init(sta);
	}

	/*
	 * set_multicast_list will be invoked by the networking core
	 * which will check whether any increments here were done in
	 * error and sync them down to the hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_inc(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_inc(&local->iff_promiscs);

	mutex_lock(&local->mtx);
	hw_reconf_flags |= __ieee80211_recalc_idle(local);
	mutex_unlock(&local->mtx);

	if (coming_up)
		local->open_count++;

	if (hw_reconf_flags) {
		ieee80211_hw_config(local, hw_reconf_flags);
		/*
		 * set default queue parameters so drivers don't
		 * need to initialise the hardware if the hardware
		 * doesn't start up with sane defaults
		 */
		ieee80211_set_wmm_default(sdata);
	}

	ieee80211_recalc_ps(local, -1);

	netif_tx_start_all_queues(dev);

	return 0;
 err_del_interface:
	drv_remove_interface(local, &sdata->vif);
 err_stop:
	if (!local->open_count)
		drv_stop(local);
 err_del_bss:
	sdata->bss = NULL;
	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		list_del(&sdata->u.vlan.list);
	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
	return res;
}

static int ieee80211_open(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	int err;

	/* fail early if user set an invalid address */
	if (!is_zero_ether_addr(dev->dev_addr) &&
	    !is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
	if (err)
		return err;

	return ieee80211_do_open(dev, true);
}

static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
			      bool going_down)
{
	struct ieee80211_local *local = sdata->local;
	unsigned long flags;
	struct sk_buff *skb, *tmp;
	u32 hw_reconf_flags = 0;
	int i;

	if (local->scan_sdata == sdata)
		ieee80211_scan_cancel(local);

	clear_bit(SDATA_STATE_RUNNING, &sdata->state);

	/*
	 * Stop TX on this interface first.
	 */
	netif_tx_stop_all_queues(sdata->dev);

	/*
	 * Purge work for this interface.
	 */
	ieee80211_work_purge(sdata);

	/*
	 * Remove all stations associated with this interface.
	 *
	 * This must be done before calling ops->remove_interface()
	 * because otherwise we can later invoke ops->sta_notify()
	 * whenever the STAs are removed, and that invalidates driver
	 * assumptions about always getting a vif pointer that is valid
	 * (because if we remove a STA after ops->remove_interface()
	 * the driver will have removed the vif info already!)
	 *
	 * This is relevant only in AP, WDS and mesh modes, since in
	 * all other modes we've already removed all stations when
	 * disconnecting etc.
	 */
	sta_info_flush(local, sdata);

	/*
	 * Don't count this interface for promisc/allmulti while it
	 * is down. dev_mc_unsync() will invoke set_multicast_list
	 * on the master interface which will sync these down to the
	 * hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_dec(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_dec(&local->iff_promiscs);

	if (sdata->vif.type == NL80211_IFTYPE_AP) {
		local->fif_pspoll--;
		local->fif_probe_req--;
	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		local->fif_probe_req--;
	}

	netif_addr_lock_bh(sdata->dev);
	spin_lock_bh(&local->filter_lock);
	__hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
			 sdata->dev->addr_len);
	spin_unlock_bh(&local->filter_lock);
	netif_addr_unlock_bh(sdata->dev);

	ieee80211_configure_filter(local);

	del_timer_sync(&local->dynamic_ps_timer);
	cancel_work_sync(&local->dynamic_ps_enable_work);

	/* APs need special treatment */
	if (sdata->vif.type == NL80211_IFTYPE_AP) {
		struct ieee80211_sub_if_data *vlan, *tmpsdata;
		struct beacon_data *old_beacon = sdata->u.ap.beacon;

		/* sdata_running will return false, so this will disable */
		ieee80211_bss_info_change_notify(sdata,
						 BSS_CHANGED_BEACON_ENABLED);

		/* remove beacon */
		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
		synchronize_rcu();
		kfree(old_beacon);

		/* free all potentially still buffered bcast frames */
		while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
			local->total_ps_buffered--;
			dev_kfree_skb(skb);
		}

		/* down all dependent devices, that is VLANs */
		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
					 u.vlan.list)
			dev_close(vlan->dev);
		WARN_ON(!list_empty(&sdata->u.ap.vlans));
	}

	if (going_down)
		local->open_count--;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		list_del(&sdata->u.vlan.list);
		/* no need to tell driver */
		break;
	case NL80211_IFTYPE_MONITOR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs--;
			break;
		}

		local->monitors--;
		if (local->monitors == 0) {
			local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
			hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
		}

		ieee80211_adjust_monitor_flags(sdata, -1);
		ieee80211_configure_filter(local);
		break;
	default:
		flush_work(&sdata->work);
		/*
		 * When we get here, the interface is marked down.
		 * Call synchronize_rcu() to wait for the RX path
		 * should it be using the interface and enqueuing
		 * frames at this very time on another CPU.
		 */
		synchronize_rcu();
		skb_queue_purge(&sdata->skb_queue);

		/*
		 * Disable beaconing here for mesh only, AP and IBSS
		 * are already taken care of.
		 */
		if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
			ieee80211_bss_info_change_notify(sdata,
				BSS_CHANGED_BEACON_ENABLED);

		/*
		 * Free all remaining keys, there shouldn't be any,
		 * except maybe group keys in AP more or WDS?
		 */
		ieee80211_free_keys(sdata);

		if (going_down)
			drv_remove_interface(local, &sdata->vif);
	}

	sdata->bss = NULL;

	mutex_lock(&local->mtx);
	hw_reconf_flags |= __ieee80211_recalc_idle(local);
	mutex_unlock(&local->mtx);

	ieee80211_recalc_ps(local, -1);

	if (local->open_count == 0) {
		if (local->ops->napi_poll)
			napi_disable(&local->napi);
		ieee80211_clear_tx_pending(local);
		ieee80211_stop_device(local);

		/* no reconfiguring after stop! */
		hw_reconf_flags = 0;
	}

	/* do after stop to avoid reconfiguring when we stop anyway */
	if (hw_reconf_flags)
		ieee80211_hw_config(local, hw_reconf_flags);

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
		skb_queue_walk_safe(&local->pending[i], skb, tmp) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
			if (info->control.vif == &sdata->vif) {
				__skb_unlink(skb, &local->pending[i]);
				dev_kfree_skb_irq(skb);
			}
		}
	}
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

static int ieee80211_stop(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	ieee80211_do_stop(sdata, true);

	return 0;
}

static void ieee80211_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	int allmulti, promisc, sdata_allmulti, sdata_promisc;

	allmulti = !!(dev->flags & IFF_ALLMULTI);
	promisc = !!(dev->flags & IFF_PROMISC);
	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);

	if (allmulti != sdata_allmulti) {
		if (dev->flags & IFF_ALLMULTI)
			atomic_inc(&local->iff_allmultis);
		else
			atomic_dec(&local->iff_allmultis);
		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
	}

	if (promisc != sdata_promisc) {
		if (dev->flags & IFF_PROMISC)
			atomic_inc(&local->iff_promiscs);
		else
			atomic_dec(&local->iff_promiscs);
		sdata->flags ^= IEEE80211_SDATA_PROMISC;
	}
	spin_lock_bh(&local->filter_lock);
	__hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
	spin_unlock_bh(&local->filter_lock);
	ieee80211_queue_work(&local->hw, &local->reconfig_filter);
}

/*
 * Called when the netdev is removed or, by the code below, before
 * the interface type changes.
 */
static void ieee80211_teardown_sdata(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	int flushed;
	int i;

	/* free extra data */
	ieee80211_free_keys(sdata);

	ieee80211_debugfs_remove_netdev(sdata);

	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
		__skb_queue_purge(&sdata->fragments[i].skb_list);
	sdata->fragment_next = 0;

	if (ieee80211_vif_is_mesh(&sdata->vif))
		mesh_rmc_free(sdata);

	flushed = sta_info_flush(local, sdata);
	WARN_ON(flushed);
}

static u16 ieee80211_netdev_select_queue(struct net_device *dev,
					 struct sk_buff *skb)
{
	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}

static const struct net_device_ops ieee80211_dataif_ops = {
	.ndo_open		= ieee80211_open,
	.ndo_stop		= ieee80211_stop,
	.ndo_uninit		= ieee80211_teardown_sdata,
	.ndo_start_xmit		= ieee80211_subif_start_xmit,
	.ndo_set_multicast_list	= ieee80211_set_multicast_list,
	.ndo_change_mtu		= ieee80211_change_mtu,
	.ndo_set_mac_address	= ieee80211_change_mac,
	.ndo_select_queue	= ieee80211_netdev_select_queue,
};

static u16 ieee80211_monitor_select_queue(struct net_device *dev,
					  struct sk_buff *skb)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr *hdr;
	struct ieee80211_radiotap_header *rtap = (void *)skb->data;
	u8 *p;

	if (local->hw.queues < 4)
		return 0;

	if (skb->len < 4 ||
	    skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
		return 0; /* doesn't matter, frame will be dropped */

	hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));

	if (!ieee80211_is_data(hdr->frame_control)) {
		skb->priority = 7;
		return ieee802_1d_to_ac[skb->priority];
	}
	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0;
		return ieee802_1d_to_ac[skb->priority];
	}

	p = ieee80211_get_qos_ctl(hdr);
	skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;

	return ieee80211_downgrade_queue(local, skb);
}

static const struct net_device_ops ieee80211_monitorif_ops = {
	.ndo_open		= ieee80211_open,
	.ndo_stop		= ieee80211_stop,
	.ndo_uninit		= ieee80211_teardown_sdata,
	.ndo_start_xmit		= ieee80211_monitor_start_xmit,
	.ndo_set_multicast_list	= ieee80211_set_multicast_list,
	.ndo_change_mtu		= ieee80211_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_select_queue	= ieee80211_monitor_select_queue,
};

static void ieee80211_if_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &ieee80211_dataif_ops;
	dev->destructor = free_netdev;
}

static void ieee80211_iface_work(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata =
		container_of(work, struct ieee80211_sub_if_data, work);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct sta_info *sta;
	struct ieee80211_ra_tid *ra_tid;

	if (!ieee80211_sdata_running(sdata))
		return;

	if (local->scanning)
		return;

	/*
	 * ieee80211_queue_work() should have picked up most cases,
	 * here we'll pick the rest.
	 */
	if (WARN(local->suspended,
		 "interface work scheduled while going to suspend\n"))
		return;

	/* first process frames */
	while ((skb = skb_dequeue(&sdata->skb_queue))) {
		struct ieee80211_mgmt *mgmt = (void *)skb->data;

		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
			ra_tid = (void *)&skb->cb;
			ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
						 ra_tid->tid);
		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
			ra_tid = (void *)&skb->cb;
			ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
						ra_tid->tid);
		} else if (ieee80211_is_action(mgmt->frame_control) &&
			   mgmt->u.action.category == WLAN_CATEGORY_BACK) {
			int len = skb->len;

			mutex_lock(&local->sta_mtx);
			sta = sta_info_get_bss(sdata, mgmt->sa);
			if (sta) {
				switch (mgmt->u.action.u.addba_req.action_code) {
				case WLAN_ACTION_ADDBA_REQ:
					ieee80211_process_addba_request(
							local, sta, mgmt, len);
					break;
				case WLAN_ACTION_ADDBA_RESP:
					ieee80211_process_addba_resp(local, sta,
								     mgmt, len);
					break;
				case WLAN_ACTION_DELBA:
					ieee80211_process_delba(sdata, sta,
								mgmt, len);
					break;
				default:
					WARN_ON(1);
					break;
				}
			}
			mutex_unlock(&local->sta_mtx);
		} else if (ieee80211_is_data_qos(mgmt->frame_control)) {
			struct ieee80211_hdr *hdr = (void *)mgmt;
			/*
			 * So the frame isn't mgmt, but frame_control
			 * is at the right place anyway, of course, so
			 * the if statement is correct.
			 *
			 * Warn if we have other data frame types here,
			 * they must not get here.
			 */
			WARN_ON(hdr->frame_control &
				cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
			WARN_ON(!(hdr->seq_ctrl &
				  cpu_to_le16(IEEE80211_SCTL_FRAG)));
			/*
			 * This was a fragment of a frame, received while
			 * a block-ack session was active. That cannot be
			 * right, so terminate the session.
			 */
			mutex_lock(&local->sta_mtx);
			sta = sta_info_get_bss(sdata, mgmt->sa);
			if (sta) {
				u16 tid = *ieee80211_get_qos_ctl(hdr) &
						IEEE80211_QOS_CTL_TID_MASK;

				__ieee80211_stop_rx_ba_session(
					sta, tid, WLAN_BACK_RECIPIENT,
					WLAN_REASON_QSTA_REQUIRE_SETUP,
					true);
			}
			mutex_unlock(&local->sta_mtx);
		} else switch (sdata->vif.type) {
		case NL80211_IFTYPE_STATION:
			ieee80211_sta_rx_queued_mgmt(sdata, skb);
			break;
		case NL80211_IFTYPE_ADHOC:
			ieee80211_ibss_rx_queued_mgmt(sdata, skb);
			break;
		case NL80211_IFTYPE_MESH_POINT:
			if (!ieee80211_vif_is_mesh(&sdata->vif))
				break;
			ieee80211_mesh_rx_queued_mgmt(sdata, skb);
			break;
		default:
			WARN(1, "frame for unexpected interface type");
			break;
		}

		kfree_skb(skb);
	}

	/* then other type-dependent work */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		ieee80211_sta_work(sdata);
		break;
	case NL80211_IFTYPE_ADHOC:
		ieee80211_ibss_work(sdata);
		break;
	case NL80211_IFTYPE_MESH_POINT:
		if (!ieee80211_vif_is_mesh(&sdata->vif))
			break;
		ieee80211_mesh_work(sdata);
		break;
	default:
		break;
	}
}


/*
 * Helper function to initialise an interface to a specific type.
 */
static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
				  enum nl80211_iftype type)
{
	/* clear type-dependent union */
	memset(&sdata->u, 0, sizeof(sdata->u));

	/* and set some type-dependent values */
	sdata->vif.type = type;
	sdata->vif.p2p = false;
	sdata->dev->netdev_ops = &ieee80211_dataif_ops;
	sdata->wdev.iftype = type;

	sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
	sdata->control_port_no_encrypt = false;

	/* only monitor differs */
	sdata->dev->type = ARPHRD_ETHER;

	skb_queue_head_init(&sdata->skb_queue);
	INIT_WORK(&sdata->work, ieee80211_iface_work);

	switch (type) {
	case NL80211_IFTYPE_P2P_GO:
		type = NL80211_IFTYPE_AP;
		sdata->vif.type = type;
		sdata->vif.p2p = true;
		/* fall through */
	case NL80211_IFTYPE_AP:
		skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
		INIT_LIST_HEAD(&sdata->u.ap.vlans);
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
		type = NL80211_IFTYPE_STATION;
		sdata->vif.type = type;
		sdata->vif.p2p = true;
		/* fall through */
	case NL80211_IFTYPE_STATION:
		ieee80211_sta_setup_sdata(sdata);
		break;
	case NL80211_IFTYPE_ADHOC:
		ieee80211_ibss_setup_sdata(sdata);
		break;
	case NL80211_IFTYPE_MESH_POINT:
		if (ieee80211_vif_is_mesh(&sdata->vif))
			ieee80211_mesh_init_sdata(sdata);
		break;
	case NL80211_IFTYPE_MONITOR:
		sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
		sdata->dev->netdev_ops = &ieee80211_monitorif_ops;
		sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
				      MONITOR_FLAG_OTHER_BSS;
		break;
	case NL80211_IFTYPE_WDS:
	case NL80211_IFTYPE_AP_VLAN:
		break;
	case NL80211_IFTYPE_UNSPECIFIED:
	case NUM_NL80211_IFTYPES:
		BUG();
		break;
	}

	ieee80211_debugfs_add_netdev(sdata);
}

static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
					   enum nl80211_iftype type)
{
	struct ieee80211_local *local = sdata->local;
	int ret, err;
	enum nl80211_iftype internal_type = type;
	bool p2p = false;

	ASSERT_RTNL();

	if (!local->ops->change_interface)
		return -EBUSY;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Could maybe also all others here?
		 * Just not sure how that interacts
		 * with the RX/config path e.g. for
		 * mesh.
		 */
		break;
	default:
		return -EBUSY;
	}

	switch (type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Could probably support everything
		 * but WDS here (WDS do_open can fail
		 * under memory pressure, which this
		 * code isn't prepared to handle).
		 */
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
		p2p = true;
		internal_type = NL80211_IFTYPE_STATION;
		break;
	case NL80211_IFTYPE_P2P_GO:
		p2p = true;
		internal_type = NL80211_IFTYPE_AP;
		break;
	default:
		return -EBUSY;
	}

	ret = ieee80211_check_concurrent_iface(sdata, internal_type);
	if (ret)
		return ret;

	ieee80211_do_stop(sdata, false);

	ieee80211_teardown_sdata(sdata->dev);

	ret = drv_change_interface(local, sdata, internal_type, p2p);
	if (ret)
		type = sdata->vif.type;

	ieee80211_setup_sdata(sdata, type);

	err = ieee80211_do_open(sdata->dev, false);
	WARN(err, "type change: do_open returned %d", err);

	return ret;
}

int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
			     enum nl80211_iftype type)
{
	int ret;

	ASSERT_RTNL();

	if (type == ieee80211_vif_type_p2p(&sdata->vif))
		return 0;

	/* Setting ad-hoc mode on non-IBSS channel is not supported. */
	if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS &&
	    type == NL80211_IFTYPE_ADHOC)
		return -EOPNOTSUPP;

	if (ieee80211_sdata_running(sdata)) {
		ret = ieee80211_runtime_change_iftype(sdata, type);
		if (ret)
			return ret;
	} else {
		/* Purge and reset type-dependent state. */
		ieee80211_teardown_sdata(sdata->dev);
		ieee80211_setup_sdata(sdata, type);
	}

	/* reset some values that shouldn't be kept across type changes */
	sdata->vif.bss_conf.basic_rates =
		ieee80211_mandatory_rates(sdata->local,
			sdata->local->hw.conf.channel->band);
	sdata->drop_unencrypted = 0;
	if (type == NL80211_IFTYPE_STATION)
		sdata->u.mgd.use_4addr = false;

	return 0;
}

static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
				       struct net_device *dev,
				       enum nl80211_iftype type)
{
	struct ieee80211_sub_if_data *sdata;
	u64 mask, start, addr, val, inc;
	u8 *m;
	u8 tmp_addr[ETH_ALEN];
	int i;

	/* default ... something at least */
	memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);

	if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
	    local->hw.wiphy->n_addresses <= 1)
		return;


	mutex_lock(&local->iflist_mtx);

	switch (type) {
	case NL80211_IFTYPE_MONITOR:
		/* doesn't matter */
		break;
	case NL80211_IFTYPE_WDS:
	case NL80211_IFTYPE_AP_VLAN:
		/* match up with an AP interface */
		list_for_each_entry(sdata, &local->interfaces, list) {
			if (sdata->vif.type != NL80211_IFTYPE_AP)
				continue;
			memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
			break;
		}
		/* keep default if no AP interface present */
		break;
	default:
		/* assign a new address if possible -- try n_addresses first */
		for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
1041 bool used = false; 1041 bool used = false;
1042 1042
1043 list_for_each_entry(sdata, &local->interfaces, list) { 1043 list_for_each_entry(sdata, &local->interfaces, list) {
1044 if (memcmp(local->hw.wiphy->addresses[i].addr, 1044 if (memcmp(local->hw.wiphy->addresses[i].addr,
1045 sdata->vif.addr, ETH_ALEN) == 0) { 1045 sdata->vif.addr, ETH_ALEN) == 0) {
1046 used = true; 1046 used = true;
1047 break; 1047 break;
1048 } 1048 }
1049 } 1049 }
1050 1050
1051 if (!used) { 1051 if (!used) {
1052 memcpy(dev->perm_addr, 1052 memcpy(dev->perm_addr,
1053 local->hw.wiphy->addresses[i].addr, 1053 local->hw.wiphy->addresses[i].addr,
1054 ETH_ALEN); 1054 ETH_ALEN);
1055 break; 1055 break;
1056 } 1056 }
1057 } 1057 }
1058 1058
1059 /* try mask if available */ 1059 /* try mask if available */
1060 if (is_zero_ether_addr(local->hw.wiphy->addr_mask)) 1060 if (is_zero_ether_addr(local->hw.wiphy->addr_mask))
1061 break; 1061 break;
1062 1062
1063 m = local->hw.wiphy->addr_mask; 1063 m = local->hw.wiphy->addr_mask;
1064 mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | 1064 mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
1065 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | 1065 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
1066 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); 1066 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
1067 1067
1068 if (__ffs64(mask) + hweight64(mask) != fls64(mask)) { 1068 if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
1069 /* not a contiguous mask ... not handled now! */ 1069 /* not a contiguous mask ... not handled now! */
1070 printk(KERN_DEBUG "not contiguous\n"); 1070 printk(KERN_DEBUG "not contiguous\n");
1071 break; 1071 break;
1072 } 1072 }
1073 1073
1074 m = local->hw.wiphy->perm_addr; 1074 m = local->hw.wiphy->perm_addr;
1075 start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) | 1075 start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
1076 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | 1076 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
1077 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); 1077 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
1078 1078
1079 inc = 1ULL<<__ffs64(mask); 1079 inc = 1ULL<<__ffs64(mask);
1080 val = (start & mask); 1080 val = (start & mask);
1081 addr = (start & ~mask) | (val & mask); 1081 addr = (start & ~mask) | (val & mask);
1082 do { 1082 do {
1083 bool used = false; 1083 bool used = false;
1084 1084
1085 tmp_addr[5] = addr >> 0*8; 1085 tmp_addr[5] = addr >> 0*8;
1086 tmp_addr[4] = addr >> 1*8; 1086 tmp_addr[4] = addr >> 1*8;
1087 tmp_addr[3] = addr >> 2*8; 1087 tmp_addr[3] = addr >> 2*8;
1088 tmp_addr[2] = addr >> 3*8; 1088 tmp_addr[2] = addr >> 3*8;
1089 tmp_addr[1] = addr >> 4*8; 1089 tmp_addr[1] = addr >> 4*8;
1090 tmp_addr[0] = addr >> 5*8; 1090 tmp_addr[0] = addr >> 5*8;
1091 1091
1092 val += inc; 1092 val += inc;
1093 1093
1094 list_for_each_entry(sdata, &local->interfaces, list) { 1094 list_for_each_entry(sdata, &local->interfaces, list) {
1095 if (memcmp(tmp_addr, sdata->vif.addr, 1095 if (memcmp(tmp_addr, sdata->vif.addr,
1096 ETH_ALEN) == 0) { 1096 ETH_ALEN) == 0) {
1097 used = true; 1097 used = true;
1098 break; 1098 break;
1099 } 1099 }
1100 } 1100 }
1101 1101
1102 if (!used) { 1102 if (!used) {
1103 memcpy(dev->perm_addr, tmp_addr, ETH_ALEN); 1103 memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
1104 break; 1104 break;
1105 } 1105 }
1106 addr = (start & ~mask) | (val & mask); 1106 addr = (start & ~mask) | (val & mask);
1107 } while (addr != start); 1107 } while (addr != start);
1108 1108
1109 break; 1109 break;
1110 } 1110 }
1111 1111
1112 mutex_unlock(&local->iflist_mtx); 1112 mutex_unlock(&local->iflist_mtx);
1113 } 1113 }
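
The contiguous-mask test in the function above leans on a bit-twiddling identity: when the set bits of a mask are consecutive, the index of the lowest set bit plus the population count equals the (1-based) index of the highest set bit, so __ffs64(mask) + hweight64(mask) == fls64(mask). A minimal standalone sketch of the same check, using compiler builtins in place of the kernel helpers (the function name and sample masks are illustrative only, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same test as __ffs64() + hweight64() != fls64(), written with
	 * GCC/Clang builtins.  mask must be non-zero -- the caller above
	 * already bailed out on an all-zero addr_mask. */
	static bool mask_is_contiguous(uint64_t mask)
	{
		unsigned int lowest  = __builtin_ctzll(mask);       /* __ffs64()   */
		unsigned int weight  = __builtin_popcountll(mask);  /* hweight64() */
		unsigned int highest = 64 - __builtin_clzll(mask);  /* fls64()     */

		return lowest + weight == highest;
	}

	int main(void)
	{
		printf("%d\n", mask_is_contiguous(0x0f0)); /* bits 4..7: 4+4 == 8 -> 1 */
		printf("%d\n", mask_is_contiguous(0x0a0)); /* bits 5,7:  5+2 != 8 -> 0 */
		return 0;
	}

The same lowest-bit position also drives the walk in the loop above: inc = 1ULL << __ffs64(mask) steps the candidate by exactly one unit of the masked field, so only bits under the mask ever change while searching for an unused address.
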
1114 1114
1115 int ieee80211_if_add(struct ieee80211_local *local, const char *name, 1115 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1116 struct net_device **new_dev, enum nl80211_iftype type, 1116 struct net_device **new_dev, enum nl80211_iftype type,
1117 struct vif_params *params) 1117 struct vif_params *params)
1118 { 1118 {
1119 struct net_device *ndev; 1119 struct net_device *ndev;
1120 struct ieee80211_sub_if_data *sdata = NULL; 1120 struct ieee80211_sub_if_data *sdata = NULL;
1121 int ret, i; 1121 int ret, i;
1122 1122
1123 ASSERT_RTNL(); 1123 ASSERT_RTNL();
1124 1124
1125 ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size, 1125 ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
1126 name, ieee80211_if_setup, local->hw.queues); 1126 name, ieee80211_if_setup, local->hw.queues);
1127 if (!ndev) 1127 if (!ndev)
1128 return -ENOMEM; 1128 return -ENOMEM;
1129 dev_net_set(ndev, wiphy_net(local->hw.wiphy)); 1129 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
1130 1130
1131 ndev->needed_headroom = local->tx_headroom + 1131 ndev->needed_headroom = local->tx_headroom +
1132 4*6 /* four MAC addresses */ 1132 4*6 /* four MAC addresses */
1133 + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ 1133 + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
1134 + 6 /* mesh */ 1134 + 6 /* mesh */
1135 + 8 /* rfc1042/bridge tunnel */ 1135 + 8 /* rfc1042/bridge tunnel */
1136 - ETH_HLEN /* ethernet hard_header_len */ 1136 - ETH_HLEN /* ethernet hard_header_len */
1137 + IEEE80211_ENCRYPT_HEADROOM; 1137 + IEEE80211_ENCRYPT_HEADROOM;
1138 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; 1138 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
1139 1139
1140 ret = dev_alloc_name(ndev, ndev->name); 1140 ret = dev_alloc_name(ndev, ndev->name);
1141 if (ret < 0) 1141 if (ret < 0)
1142 goto fail; 1142 goto fail;
1143 1143
1144 ieee80211_assign_perm_addr(local, ndev, type); 1144 ieee80211_assign_perm_addr(local, ndev, type);
1145 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); 1145 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
1146 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); 1146 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
1147 1147
1148 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ 1148 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
1149 sdata = netdev_priv(ndev); 1149 sdata = netdev_priv(ndev);
1150 ndev->ieee80211_ptr = &sdata->wdev; 1150 ndev->ieee80211_ptr = &sdata->wdev;
1151 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); 1151 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
1152 memcpy(sdata->name, ndev->name, IFNAMSIZ); 1152 memcpy(sdata->name, ndev->name, IFNAMSIZ);
1153 1153
1154 /* initialise type-independent data */ 1154 /* initialise type-independent data */
1155 sdata->wdev.wiphy = local->hw.wiphy; 1155 sdata->wdev.wiphy = local->hw.wiphy;
1156 sdata->local = local; 1156 sdata->local = local;
1157 sdata->dev = ndev; 1157 sdata->dev = ndev;
1158 #ifdef CONFIG_INET 1158 #ifdef CONFIG_INET
1159 sdata->arp_filter_state = true; 1159 sdata->arp_filter_state = true;
1160 #endif 1160 #endif
1161 1161
1162 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) 1162 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
1163 skb_queue_head_init(&sdata->fragments[i].skb_list); 1163 skb_queue_head_init(&sdata->fragments[i].skb_list);
1164 1164
1165 INIT_LIST_HEAD(&sdata->key_list); 1165 INIT_LIST_HEAD(&sdata->key_list);
1166 1166
1167 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 1167 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1168 struct ieee80211_supported_band *sband; 1168 struct ieee80211_supported_band *sband;
1169 sband = local->hw.wiphy->bands[i]; 1169 sband = local->hw.wiphy->bands[i];
1170 sdata->rc_rateidx_mask[i] = 1170 sdata->rc_rateidx_mask[i] =
1171 sband ? (1 << sband->n_bitrates) - 1 : 0; 1171 sband ? (1 << sband->n_bitrates) - 1 : 0;
1172 } 1172 }
1173 1173
1174 /* setup type-dependent data */ 1174 /* setup type-dependent data */
1175 ieee80211_setup_sdata(sdata, type); 1175 ieee80211_setup_sdata(sdata, type);
1176 1176
1177 if (params) { 1177 if (params) {
1178 ndev->ieee80211_ptr->use_4addr = params->use_4addr; 1178 ndev->ieee80211_ptr->use_4addr = params->use_4addr;
1179 if (type == NL80211_IFTYPE_STATION) 1179 if (type == NL80211_IFTYPE_STATION)
1180 sdata->u.mgd.use_4addr = params->use_4addr; 1180 sdata->u.mgd.use_4addr = params->use_4addr;
1181 } 1181 }
1182 1182
1183 ret = register_netdevice(ndev); 1183 ret = register_netdevice(ndev);
1184 if (ret) 1184 if (ret)
1185 goto fail; 1185 goto fail;
1186 1186
1187 mutex_lock(&local->iflist_mtx); 1187 mutex_lock(&local->iflist_mtx);
1188 list_add_tail_rcu(&sdata->list, &local->interfaces); 1188 list_add_tail_rcu(&sdata->list, &local->interfaces);
1189 mutex_unlock(&local->iflist_mtx); 1189 mutex_unlock(&local->iflist_mtx);
1190 1190
1191 if (new_dev) 1191 if (new_dev)
1192 *new_dev = ndev; 1192 *new_dev = ndev;
1193 1193
1194 return 0; 1194 return 0;
1195 1195
1196 fail: 1196 fail:
1197 free_netdev(ndev); 1197 free_netdev(ndev);
1198 return ret; 1198 return ret;
1199 } 1199 }
1200 1200
1201 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) 1201 void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
1202 { 1202 {
1203 ASSERT_RTNL(); 1203 ASSERT_RTNL();
1204 1204
1205 mutex_lock(&sdata->local->iflist_mtx); 1205 mutex_lock(&sdata->local->iflist_mtx);
1206 list_del_rcu(&sdata->list); 1206 list_del_rcu(&sdata->list);
1207 mutex_unlock(&sdata->local->iflist_mtx); 1207 mutex_unlock(&sdata->local->iflist_mtx);
1208 1208
1209 synchronize_rcu(); 1209 synchronize_rcu();
1210 unregister_netdevice(sdata->dev); 1210 unregister_netdevice(sdata->dev);
1211 } 1211 }
1212 1212
1213 /* 1213 /*
1214 * Remove all interfaces, may only be called at hardware unregistration 1214 * Remove all interfaces, may only be called at hardware unregistration
1215 * time because it doesn't do RCU-safe list removals. 1215 * time because it doesn't do RCU-safe list removals.
1216 */ 1216 */
1217 void ieee80211_remove_interfaces(struct ieee80211_local *local) 1217 void ieee80211_remove_interfaces(struct ieee80211_local *local)
1218 { 1218 {
1219 struct ieee80211_sub_if_data *sdata, *tmp; 1219 struct ieee80211_sub_if_data *sdata, *tmp;
1220 LIST_HEAD(unreg_list); 1220 LIST_HEAD(unreg_list);
1221 1221
1222 ASSERT_RTNL(); 1222 ASSERT_RTNL();
1223 1223
1224 mutex_lock(&local->iflist_mtx); 1224 mutex_lock(&local->iflist_mtx);
1225 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1225 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
1226 list_del(&sdata->list); 1226 list_del(&sdata->list);
1227 1227
1228 unregister_netdevice_queue(sdata->dev, &unreg_list); 1228 unregister_netdevice_queue(sdata->dev, &unreg_list);
1229 } 1229 }
1230 mutex_unlock(&local->iflist_mtx); 1230 mutex_unlock(&local->iflist_mtx);
1231 unregister_netdevice_many(&unreg_list); 1231 unregister_netdevice_many(&unreg_list);
1232 list_del(&unreg_list);
1232 } 1233 }
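
The list_del() added at the end of ieee80211_remove_interfaces() is the iface.c half of this patch: unregister_netdevice_many() consumes the devices but leaves them chained through the caller's on-stack unreg_list head, so without the unlink their list pointers would keep referencing a dead stack frame. A minimal sketch of the failure mode with stand-in list primitives (illustrative only, not the kernel's <linux/list.h>):

	#include <stdio.h>

	/* Stand-ins for the kernel list primitives, just enough to show
	 * the stale on-stack head problem. */
	struct list_head { struct list_head *next, *prev; };
	#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

	static void list_add_tail(struct list_head *entry, struct list_head *head)
	{
		entry->prev = head->prev;
		entry->next = head;
		head->prev->next = entry;
		head->prev = entry;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = entry->prev = entry;  /* re-init, as list_del_init() */
	}

	struct fake_dev { struct list_head unreg_list; };

	/* Like unregister_netdevice_many() here: processes the devices but
	 * does not unlink them from the caller's head. */
	static void unregister_many(struct list_head *head) { (void)head; }

	int main(void)
	{
		struct fake_dev d = { .unreg_list = { &d.unreg_list, &d.unreg_list } };

		{
			LIST_HEAD(unreg_list);              /* on-stack head */

			list_add_tail(&d.unreg_list, &unreg_list);
			unregister_many(&unreg_list);
			list_del(&unreg_list);              /* the fix: detach the head */
		}	/* unreg_list's stack slot dies here */

		/* d.unreg_list no longer points into the dead frame: */
		printf("%d\n", d.unreg_list.next == &d.unreg_list);
		return 0;
	}

Without that list_del(), any later list operation on d.unreg_list would chase pointers into whatever now occupies that stack slot.
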
1233 1234
1234 static u32 ieee80211_idle_off(struct ieee80211_local *local, 1235 static u32 ieee80211_idle_off(struct ieee80211_local *local,
1235 const char *reason) 1236 const char *reason)
1236 { 1237 {
1237 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) 1238 if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
1238 return 0; 1239 return 0;
1239 1240
1240 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1241 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1241 wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason); 1242 wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
1242 #endif 1243 #endif
1243 1244
1244 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; 1245 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
1245 return IEEE80211_CONF_CHANGE_IDLE; 1246 return IEEE80211_CONF_CHANGE_IDLE;
1246 } 1247 }
1247 1248
1248 static u32 ieee80211_idle_on(struct ieee80211_local *local) 1249 static u32 ieee80211_idle_on(struct ieee80211_local *local)
1249 { 1250 {
1250 if (local->hw.conf.flags & IEEE80211_CONF_IDLE) 1251 if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
1251 return 0; 1252 return 0;
1252 1253
1253 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1254 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1254 wiphy_debug(local->hw.wiphy, "device now idle\n"); 1255 wiphy_debug(local->hw.wiphy, "device now idle\n");
1255 #endif 1256 #endif
1256 1257
1257 drv_flush(local, false); 1258 drv_flush(local, false);
1258 1259
1259 local->hw.conf.flags |= IEEE80211_CONF_IDLE; 1260 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
1260 return IEEE80211_CONF_CHANGE_IDLE; 1261 return IEEE80211_CONF_CHANGE_IDLE;
1261 } 1262 }
1262 1263
1263 u32 __ieee80211_recalc_idle(struct ieee80211_local *local) 1264 u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1264 { 1265 {
1265 struct ieee80211_sub_if_data *sdata; 1266 struct ieee80211_sub_if_data *sdata;
1266 int count = 0; 1267 int count = 0;
1267 bool working = false, scanning = false, hw_roc = false; 1268 bool working = false, scanning = false, hw_roc = false;
1268 struct ieee80211_work *wk; 1269 struct ieee80211_work *wk;
1269 unsigned int led_trig_start = 0, led_trig_stop = 0; 1270 unsigned int led_trig_start = 0, led_trig_stop = 0;
1270 1271
1271 #ifdef CONFIG_PROVE_LOCKING 1272 #ifdef CONFIG_PROVE_LOCKING
1272 WARN_ON(debug_locks && !lockdep_rtnl_is_held() && 1273 WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
1273 !lockdep_is_held(&local->iflist_mtx)); 1274 !lockdep_is_held(&local->iflist_mtx));
1274 #endif 1275 #endif
1275 lockdep_assert_held(&local->mtx); 1276 lockdep_assert_held(&local->mtx);
1276 1277
1277 list_for_each_entry(sdata, &local->interfaces, list) { 1278 list_for_each_entry(sdata, &local->interfaces, list) {
1278 if (!ieee80211_sdata_running(sdata)) { 1279 if (!ieee80211_sdata_running(sdata)) {
1279 sdata->vif.bss_conf.idle = true; 1280 sdata->vif.bss_conf.idle = true;
1280 continue; 1281 continue;
1281 } 1282 }
1282 1283
1283 sdata->old_idle = sdata->vif.bss_conf.idle; 1284 sdata->old_idle = sdata->vif.bss_conf.idle;
1284 1285
1285 /* do not count disabled managed interfaces */ 1286 /* do not count disabled managed interfaces */
1286 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1287 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1287 !sdata->u.mgd.associated) { 1288 !sdata->u.mgd.associated) {
1288 sdata->vif.bss_conf.idle = true; 1289 sdata->vif.bss_conf.idle = true;
1289 continue; 1290 continue;
1290 } 1291 }
1291 /* do not count unused IBSS interfaces */ 1292 /* do not count unused IBSS interfaces */
1292 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 1293 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
1293 !sdata->u.ibss.ssid_len) { 1294 !sdata->u.ibss.ssid_len) {
1294 sdata->vif.bss_conf.idle = true; 1295 sdata->vif.bss_conf.idle = true;
1295 continue; 1296 continue;
1296 } 1297 }
1297 /* count everything else */ 1298 /* count everything else */
1298 count++; 1299 count++;
1299 } 1300 }
1300 1301
1301 list_for_each_entry(wk, &local->work_list, list) { 1302 list_for_each_entry(wk, &local->work_list, list) {
1302 working = true; 1303 working = true;
1303 wk->sdata->vif.bss_conf.idle = false; 1304 wk->sdata->vif.bss_conf.idle = false;
1304 } 1305 }
1305 1306
1306 if (local->scan_sdata) { 1307 if (local->scan_sdata) {
1307 scanning = true; 1308 scanning = true;
1308 local->scan_sdata->vif.bss_conf.idle = false; 1309 local->scan_sdata->vif.bss_conf.idle = false;
1309 } 1310 }
1310 1311
1311 if (local->hw_roc_channel) 1312 if (local->hw_roc_channel)
1312 hw_roc = true; 1313 hw_roc = true;
1313 1314
1314 list_for_each_entry(sdata, &local->interfaces, list) { 1315 list_for_each_entry(sdata, &local->interfaces, list) {
1315 if (sdata->old_idle == sdata->vif.bss_conf.idle) 1316 if (sdata->old_idle == sdata->vif.bss_conf.idle)
1316 continue; 1317 continue;
1317 if (!ieee80211_sdata_running(sdata)) 1318 if (!ieee80211_sdata_running(sdata))
1318 continue; 1319 continue;
1319 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); 1320 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
1320 } 1321 }
1321 1322
1322 if (working || scanning || hw_roc) 1323 if (working || scanning || hw_roc)
1323 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; 1324 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
1324 else 1325 else
1325 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; 1326 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
1326 1327
1327 if (count) 1328 if (count)
1328 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; 1329 led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
1329 else 1330 else
1330 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; 1331 led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
1331 1332
1332 ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); 1333 ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
1333 1334
1334 if (hw_roc) 1335 if (hw_roc)
1335 return ieee80211_idle_off(local, "hw remain-on-channel"); 1336 return ieee80211_idle_off(local, "hw remain-on-channel");
1336 if (working) 1337 if (working)
1337 return ieee80211_idle_off(local, "working"); 1338 return ieee80211_idle_off(local, "working");
1338 if (scanning) 1339 if (scanning)
1339 return ieee80211_idle_off(local, "scanning"); 1340 return ieee80211_idle_off(local, "scanning");
1340 if (!count) 1341 if (!count)
1341 return ieee80211_idle_on(local); 1342 return ieee80211_idle_on(local);
1342 else 1343 else
1343 return ieee80211_idle_off(local, "in use"); 1344 return ieee80211_idle_off(local, "in use");
1344 1345
1345 return 0; 1346 return 0;
1346 } 1347 }
1347 1348
1348 void ieee80211_recalc_idle(struct ieee80211_local *local) 1349 void ieee80211_recalc_idle(struct ieee80211_local *local)
1349 { 1350 {
1350 u32 chg; 1351 u32 chg;
1351 1352
1352 mutex_lock(&local->iflist_mtx); 1353 mutex_lock(&local->iflist_mtx);
1353 chg = __ieee80211_recalc_idle(local); 1354 chg = __ieee80211_recalc_idle(local);
1354 mutex_unlock(&local->iflist_mtx); 1355 mutex_unlock(&local->iflist_mtx);
1355 if (chg) 1356 if (chg)
1356 ieee80211_hw_config(local, chg); 1357 ieee80211_hw_config(local, chg);
1357 } 1358 }
1358 1359
1359 static int netdev_notify(struct notifier_block *nb, 1360 static int netdev_notify(struct notifier_block *nb,
1360 unsigned long state, 1361 unsigned long state,
1361 void *ndev) 1362 void *ndev)
1362 { 1363 {
1363 struct net_device *dev = ndev; 1364 struct net_device *dev = ndev;
1364 struct ieee80211_sub_if_data *sdata; 1365 struct ieee80211_sub_if_data *sdata;
1365 1366
1366 if (state != NETDEV_CHANGENAME) 1367 if (state != NETDEV_CHANGENAME)
1367 return 0; 1368 return 0;
1368 1369
1369 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) 1370 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
1370 return 0; 1371 return 0;
1371 1372
1372 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) 1373 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
1373 return 0; 1374 return 0;
1374 1375
1375 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1376 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1376 1377
1377 memcpy(sdata->name, dev->name, IFNAMSIZ); 1378 memcpy(sdata->name, dev->name, IFNAMSIZ);
1378 1379
1379 ieee80211_debugfs_rename_netdev(sdata); 1380 ieee80211_debugfs_rename_netdev(sdata);
1380 return 0; 1381 return 0;
1381 } 1382 }
1382 1383
1383 static struct notifier_block mac80211_netdev_notifier = { 1384 static struct notifier_block mac80211_netdev_notifier = {
1384 .notifier_call = netdev_notify, 1385 .notifier_call = netdev_notify,
1385 }; 1386 };
1386 1387
1387 int ieee80211_iface_init(void) 1388 int ieee80211_iface_init(void)
1388 { 1389 {
1389 return register_netdevice_notifier(&mac80211_netdev_notifier); 1390 return register_netdevice_notifier(&mac80211_netdev_notifier);
1390 } 1391 }
1391 1392
1392 void ieee80211_iface_exit(void) 1393 void ieee80211_iface_exit(void)
1393 { 1394 {
1394 unregister_netdevice_notifier(&mac80211_netdev_notifier); 1395 unregister_netdevice_notifier(&mac80211_netdev_notifier);
1395 } 1396 }
1396 1397
net/sched/sch_generic.c
1 /* 1 /*
2 * net/sched/sch_generic.c Generic packet scheduler routines. 2 * net/sched/sch_generic.c Generic packet scheduler routines.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * Jamal Hadi Salim, <hadi@cyberus.ca> 990601 10 * Jamal Hadi Salim, <hadi@cyberus.ca> 990601
11 * - Ingress support 11 * - Ingress support
12 */ 12 */
13 13
14 #include <linux/bitops.h> 14 #include <linux/bitops.h>
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/types.h> 16 #include <linux/types.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/string.h> 19 #include <linux/string.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/netdevice.h> 21 #include <linux/netdevice.h>
22 #include <linux/skbuff.h> 22 #include <linux/skbuff.h>
23 #include <linux/rtnetlink.h> 23 #include <linux/rtnetlink.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/rcupdate.h> 25 #include <linux/rcupdate.h>
26 #include <linux/list.h> 26 #include <linux/list.h>
27 #include <linux/slab.h> 27 #include <linux/slab.h>
28 #include <net/pkt_sched.h> 28 #include <net/pkt_sched.h>
29 #include <net/dst.h> 29 #include <net/dst.h>
30 30
31 /* Main transmission queue. */ 31 /* Main transmission queue. */
32 32
33 /* Modifications to data participating in scheduling must be protected with 33 /* Modifications to data participating in scheduling must be protected with
34 * qdisc_lock(qdisc) spinlock. 34 * qdisc_lock(qdisc) spinlock.
35 * 35 *
36 * The idea is the following: 36 * The idea is the following:
37 * - enqueue, dequeue are serialized via qdisc root lock 37 * - enqueue, dequeue are serialized via qdisc root lock
38 * - ingress filtering is also serialized via qdisc root lock 38 * - ingress filtering is also serialized via qdisc root lock
39 * - updates to tree and tree walking are only done under the rtnl mutex. 39 * - updates to tree and tree walking are only done under the rtnl mutex.
40 */ 40 */
41 41
42 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) 42 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
43 { 43 {
44 skb_dst_force(skb); 44 skb_dst_force(skb);
45 q->gso_skb = skb; 45 q->gso_skb = skb;
46 q->qstats.requeues++; 46 q->qstats.requeues++;
47 q->q.qlen++; /* it's still part of the queue */ 47 q->q.qlen++; /* it's still part of the queue */
48 __netif_schedule(q); 48 __netif_schedule(q);
49 49
50 return 0; 50 return 0;
51 } 51 }
52 52
53 static inline struct sk_buff *dequeue_skb(struct Qdisc *q) 53 static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
54 { 54 {
55 struct sk_buff *skb = q->gso_skb; 55 struct sk_buff *skb = q->gso_skb;
56 56
57 if (unlikely(skb)) { 57 if (unlikely(skb)) {
58 struct net_device *dev = qdisc_dev(q); 58 struct net_device *dev = qdisc_dev(q);
59 struct netdev_queue *txq; 59 struct netdev_queue *txq;
60 60
61 /* check the reason for requeuing without taking the tx lock first */ 61 /* check the reason for requeuing without taking the tx lock first */
62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
63 if (!netif_tx_queue_frozen_or_stopped(txq)) { 63 if (!netif_tx_queue_frozen_or_stopped(txq)) {
64 q->gso_skb = NULL; 64 q->gso_skb = NULL;
65 q->q.qlen--; 65 q->q.qlen--;
66 } else 66 } else
67 skb = NULL; 67 skb = NULL;
68 } else { 68 } else {
69 skb = q->dequeue(q); 69 skb = q->dequeue(q);
70 } 70 }
71 71
72 return skb; 72 return skb;
73 } 73 }
74 74
75 static inline int handle_dev_cpu_collision(struct sk_buff *skb, 75 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
76 struct netdev_queue *dev_queue, 76 struct netdev_queue *dev_queue,
77 struct Qdisc *q) 77 struct Qdisc *q)
78 { 78 {
79 int ret; 79 int ret;
80 80
81 if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) { 81 if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
82 /* 82 /*
83 * Same CPU holding the lock. It may be a transient 83 * Same CPU holding the lock. It may be a transient
84 * configuration error, when hard_start_xmit() recurses. We 84 * configuration error, when hard_start_xmit() recurses. We
85 * detect it by checking xmit owner and drop the packet when 85 * detect it by checking xmit owner and drop the packet when
86 * deadloop is detected. Return OK to try the next skb. 86 * deadloop is detected. Return OK to try the next skb.
87 */ 87 */
88 kfree_skb(skb); 88 kfree_skb(skb);
89 if (net_ratelimit()) 89 if (net_ratelimit())
90 printk(KERN_WARNING "Dead loop on netdevice %s, " 90 printk(KERN_WARNING "Dead loop on netdevice %s, "
91 "fix it urgently!\n", dev_queue->dev->name); 91 "fix it urgently!\n", dev_queue->dev->name);
92 ret = qdisc_qlen(q); 92 ret = qdisc_qlen(q);
93 } else { 93 } else {
94 /* 94 /*
95 * Another cpu is holding lock, requeue & delay xmits for 95 * Another cpu is holding lock, requeue & delay xmits for
96 * some time. 96 * some time.
97 */ 97 */
98 __this_cpu_inc(softnet_data.cpu_collision); 98 __this_cpu_inc(softnet_data.cpu_collision);
99 ret = dev_requeue_skb(skb, q); 99 ret = dev_requeue_skb(skb, q);
100 } 100 }
101 101
102 return ret; 102 return ret;
103 } 103 }
104 104
105 /* 105 /*
106 * Transmit one skb, and handle the return status as required. Holding the 106 * Transmit one skb, and handle the return status as required. Holding the
107 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this 107 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
108 * function. 108 * function.
109 * 109 *
110 * Returns to the caller: 110 * Returns to the caller:
111 * 0 - queue is empty or throttled. 111 * 0 - queue is empty or throttled.
112 * >0 - queue is not empty. 112 * >0 - queue is not empty.
113 */ 113 */
114 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, 114 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
115 struct net_device *dev, struct netdev_queue *txq, 115 struct net_device *dev, struct netdev_queue *txq,
116 spinlock_t *root_lock) 116 spinlock_t *root_lock)
117 { 117 {
118 int ret = NETDEV_TX_BUSY; 118 int ret = NETDEV_TX_BUSY;
119 119
120 /* And release qdisc */ 120 /* And release qdisc */
121 spin_unlock(root_lock); 121 spin_unlock(root_lock);
122 122
123 HARD_TX_LOCK(dev, txq, smp_processor_id()); 123 HARD_TX_LOCK(dev, txq, smp_processor_id());
124 if (!netif_tx_queue_frozen_or_stopped(txq)) 124 if (!netif_tx_queue_frozen_or_stopped(txq))
125 ret = dev_hard_start_xmit(skb, dev, txq); 125 ret = dev_hard_start_xmit(skb, dev, txq);
126 126
127 HARD_TX_UNLOCK(dev, txq); 127 HARD_TX_UNLOCK(dev, txq);
128 128
129 spin_lock(root_lock); 129 spin_lock(root_lock);
130 130
131 if (dev_xmit_complete(ret)) { 131 if (dev_xmit_complete(ret)) {
132 /* Driver sent out skb successfully or skb was consumed */ 132 /* Driver sent out skb successfully or skb was consumed */
133 ret = qdisc_qlen(q); 133 ret = qdisc_qlen(q);
134 } else if (ret == NETDEV_TX_LOCKED) { 134 } else if (ret == NETDEV_TX_LOCKED) {
135 /* Driver's try-lock failed */ 135 /* Driver's try-lock failed */
136 ret = handle_dev_cpu_collision(skb, txq, q); 136 ret = handle_dev_cpu_collision(skb, txq, q);
137 } else { 137 } else {
138 /* Driver returned NETDEV_TX_BUSY - requeue skb */ 138 /* Driver returned NETDEV_TX_BUSY - requeue skb */
139 if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit())) 139 if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
140 printk(KERN_WARNING "BUG %s code %d qlen %d\n", 140 printk(KERN_WARNING "BUG %s code %d qlen %d\n",
141 dev->name, ret, q->q.qlen); 141 dev->name, ret, q->q.qlen);
142 142
143 ret = dev_requeue_skb(skb, q); 143 ret = dev_requeue_skb(skb, q);
144 } 144 }
145 145
146 if (ret && netif_tx_queue_frozen_or_stopped(txq)) 146 if (ret && netif_tx_queue_frozen_or_stopped(txq))
147 ret = 0; 147 ret = 0;
148 148
149 return ret; 149 return ret;
150 } 150 }
151 151
152 /* 152 /*
153 * NOTE: Called under qdisc_lock(q) with locally disabled BH. 153 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
154 * 154 *
155 * __QDISC_STATE_RUNNING guarantees only one CPU can process 155 * __QDISC_STATE_RUNNING guarantees only one CPU can process
156 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for 156 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
157 * this queue. 157 * this queue.
158 * 158 *
159 * netif_tx_lock serializes accesses to device driver. 159 * netif_tx_lock serializes accesses to device driver.
160 * 160 *
161 * qdisc_lock(q) and netif_tx_lock are mutually exclusive, 161 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
162 * if one is grabbed, another must be free. 162 * if one is grabbed, another must be free.
163 * 163 *
164 * Note that this procedure can be called by a watchdog timer 164 * Note that this procedure can be called by a watchdog timer
165 * 165 *
166 * Returns to the caller: 166 * Returns to the caller:
167 * 0 - queue is empty or throttled. 167 * 0 - queue is empty or throttled.
168 * >0 - queue is not empty. 168 * >0 - queue is not empty.
169 * 169 *
170 */ 170 */
171 static inline int qdisc_restart(struct Qdisc *q) 171 static inline int qdisc_restart(struct Qdisc *q)
172 { 172 {
173 struct netdev_queue *txq; 173 struct netdev_queue *txq;
174 struct net_device *dev; 174 struct net_device *dev;
175 spinlock_t *root_lock; 175 spinlock_t *root_lock;
176 struct sk_buff *skb; 176 struct sk_buff *skb;
177 177
178 /* Dequeue packet */ 178 /* Dequeue packet */
179 skb = dequeue_skb(q); 179 skb = dequeue_skb(q);
180 if (unlikely(!skb)) 180 if (unlikely(!skb))
181 return 0; 181 return 0;
182 WARN_ON_ONCE(skb_dst_is_noref(skb)); 182 WARN_ON_ONCE(skb_dst_is_noref(skb));
183 root_lock = qdisc_lock(q); 183 root_lock = qdisc_lock(q);
184 dev = qdisc_dev(q); 184 dev = qdisc_dev(q);
185 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 185 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
186 186
187 return sch_direct_xmit(skb, q, dev, txq, root_lock); 187 return sch_direct_xmit(skb, q, dev, txq, root_lock);
188 } 188 }
189 189
190 void __qdisc_run(struct Qdisc *q) 190 void __qdisc_run(struct Qdisc *q)
191 { 191 {
192 unsigned long start_time = jiffies; 192 unsigned long start_time = jiffies;
193 193
194 while (qdisc_restart(q)) { 194 while (qdisc_restart(q)) {
195 /* 195 /*
196 * Postpone processing if 196 * Postpone processing if
197 * 1. another process needs the CPU; 197 * 1. another process needs the CPU;
198 * 2. we've been doing it for too long. 198 * 2. we've been doing it for too long.
199 */ 199 */
200 if (need_resched() || jiffies != start_time) { 200 if (need_resched() || jiffies != start_time) {
201 __netif_schedule(q); 201 __netif_schedule(q);
202 break; 202 break;
203 } 203 }
204 } 204 }
205 205
206 qdisc_run_end(q); 206 qdisc_run_end(q);
207 } 207 }
208 208
209 unsigned long dev_trans_start(struct net_device *dev) 209 unsigned long dev_trans_start(struct net_device *dev)
210 { 210 {
211 unsigned long val, res = dev->trans_start; 211 unsigned long val, res = dev->trans_start;
212 unsigned int i; 212 unsigned int i;
213 213
214 for (i = 0; i < dev->num_tx_queues; i++) { 214 for (i = 0; i < dev->num_tx_queues; i++) {
215 val = netdev_get_tx_queue(dev, i)->trans_start; 215 val = netdev_get_tx_queue(dev, i)->trans_start;
216 if (val && time_after(val, res)) 216 if (val && time_after(val, res))
217 res = val; 217 res = val;
218 } 218 }
219 dev->trans_start = res; 219 dev->trans_start = res;
220 return res; 220 return res;
221 } 221 }
222 EXPORT_SYMBOL(dev_trans_start); 222 EXPORT_SYMBOL(dev_trans_start);
223 223
224 static void dev_watchdog(unsigned long arg) 224 static void dev_watchdog(unsigned long arg)
225 { 225 {
226 struct net_device *dev = (struct net_device *)arg; 226 struct net_device *dev = (struct net_device *)arg;
227 227
228 netif_tx_lock(dev); 228 netif_tx_lock(dev);
229 if (!qdisc_tx_is_noop(dev)) { 229 if (!qdisc_tx_is_noop(dev)) {
230 if (netif_device_present(dev) && 230 if (netif_device_present(dev) &&
231 netif_running(dev) && 231 netif_running(dev) &&
232 netif_carrier_ok(dev)) { 232 netif_carrier_ok(dev)) {
233 int some_queue_timedout = 0; 233 int some_queue_timedout = 0;
234 unsigned int i; 234 unsigned int i;
235 unsigned long trans_start; 235 unsigned long trans_start;
236 236
237 for (i = 0; i < dev->num_tx_queues; i++) { 237 for (i = 0; i < dev->num_tx_queues; i++) {
238 struct netdev_queue *txq; 238 struct netdev_queue *txq;
239 239
240 txq = netdev_get_tx_queue(dev, i); 240 txq = netdev_get_tx_queue(dev, i);
241 /* 241 /*
242 * old device drivers set dev->trans_start 242 * old device drivers set dev->trans_start
243 */ 243 */
244 trans_start = txq->trans_start ? : dev->trans_start; 244 trans_start = txq->trans_start ? : dev->trans_start;
245 if (netif_tx_queue_stopped(txq) && 245 if (netif_tx_queue_stopped(txq) &&
246 time_after(jiffies, (trans_start + 246 time_after(jiffies, (trans_start +
247 dev->watchdog_timeo))) { 247 dev->watchdog_timeo))) {
248 some_queue_timedout = 1; 248 some_queue_timedout = 1;
249 break; 249 break;
250 } 250 }
251 } 251 }
252 252
253 if (some_queue_timedout) { 253 if (some_queue_timedout) {
254 char drivername[64]; 254 char drivername[64];
255 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", 255 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
256 dev->name, netdev_drivername(dev, drivername, 64), i); 256 dev->name, netdev_drivername(dev, drivername, 64), i);
257 dev->netdev_ops->ndo_tx_timeout(dev); 257 dev->netdev_ops->ndo_tx_timeout(dev);
258 } 258 }
259 if (!mod_timer(&dev->watchdog_timer, 259 if (!mod_timer(&dev->watchdog_timer,
260 round_jiffies(jiffies + 260 round_jiffies(jiffies +
261 dev->watchdog_timeo))) 261 dev->watchdog_timeo)))
262 dev_hold(dev); 262 dev_hold(dev);
263 } 263 }
264 } 264 }
265 netif_tx_unlock(dev); 265 netif_tx_unlock(dev);
266 266
267 dev_put(dev); 267 dev_put(dev);
268 } 268 }
269 269
270 void __netdev_watchdog_up(struct net_device *dev) 270 void __netdev_watchdog_up(struct net_device *dev)
271 { 271 {
272 if (dev->netdev_ops->ndo_tx_timeout) { 272 if (dev->netdev_ops->ndo_tx_timeout) {
273 if (dev->watchdog_timeo <= 0) 273 if (dev->watchdog_timeo <= 0)
274 dev->watchdog_timeo = 5*HZ; 274 dev->watchdog_timeo = 5*HZ;
275 if (!mod_timer(&dev->watchdog_timer, 275 if (!mod_timer(&dev->watchdog_timer,
276 round_jiffies(jiffies + dev->watchdog_timeo))) 276 round_jiffies(jiffies + dev->watchdog_timeo)))
277 dev_hold(dev); 277 dev_hold(dev);
278 } 278 }
279 } 279 }
280 280
281 static void dev_watchdog_up(struct net_device *dev) 281 static void dev_watchdog_up(struct net_device *dev)
282 { 282 {
283 __netdev_watchdog_up(dev); 283 __netdev_watchdog_up(dev);
284 } 284 }
285 285
286 static void dev_watchdog_down(struct net_device *dev) 286 static void dev_watchdog_down(struct net_device *dev)
287 { 287 {
288 netif_tx_lock_bh(dev); 288 netif_tx_lock_bh(dev);
289 if (del_timer(&dev->watchdog_timer)) 289 if (del_timer(&dev->watchdog_timer))
290 dev_put(dev); 290 dev_put(dev);
291 netif_tx_unlock_bh(dev); 291 netif_tx_unlock_bh(dev);
292 } 292 }
293 293
294 /** 294 /**
295 * netif_carrier_on - set carrier 295 * netif_carrier_on - set carrier
296 * @dev: network device 296 * @dev: network device
297 * 297 *
298 * Device has detected acquisition of carrier. 298 * Device has detected acquisition of carrier.
299 */ 299 */
300 void netif_carrier_on(struct net_device *dev) 300 void netif_carrier_on(struct net_device *dev)
301 { 301 {
302 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 302 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
303 if (dev->reg_state == NETREG_UNINITIALIZED) 303 if (dev->reg_state == NETREG_UNINITIALIZED)
304 return; 304 return;
305 linkwatch_fire_event(dev); 305 linkwatch_fire_event(dev);
306 if (netif_running(dev)) 306 if (netif_running(dev))
307 __netdev_watchdog_up(dev); 307 __netdev_watchdog_up(dev);
308 } 308 }
309 } 309 }
310 EXPORT_SYMBOL(netif_carrier_on); 310 EXPORT_SYMBOL(netif_carrier_on);
311 311
312 /** 312 /**
313 * netif_carrier_off - clear carrier 313 * netif_carrier_off - clear carrier
314 * @dev: network device 314 * @dev: network device
315 * 315 *
316 * Device has detected loss of carrier. 316 * Device has detected loss of carrier.
317 */ 317 */
318 void netif_carrier_off(struct net_device *dev) 318 void netif_carrier_off(struct net_device *dev)
319 { 319 {
320 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { 320 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
321 if (dev->reg_state == NETREG_UNINITIALIZED) 321 if (dev->reg_state == NETREG_UNINITIALIZED)
322 return; 322 return;
323 linkwatch_fire_event(dev); 323 linkwatch_fire_event(dev);
324 } 324 }
325 } 325 }
326 EXPORT_SYMBOL(netif_carrier_off); 326 EXPORT_SYMBOL(netif_carrier_off);
327 327
328 /** 328 /**
329 * netif_notify_peers - notify network peers about existence of @dev 329 * netif_notify_peers - notify network peers about existence of @dev
330 * @dev: network device 330 * @dev: network device
331 * 331 *
332 * Generate traffic such that interested network peers are aware of 332 * Generate traffic such that interested network peers are aware of
333 * @dev, such as by generating a gratuitous ARP. This may be used when 333 * @dev, such as by generating a gratuitous ARP. This may be used when
334 * a device wants to inform the rest of the network about some sort of 334 * a device wants to inform the rest of the network about some sort of
335 * reconfiguration such as a failover event or virtual machine 335 * reconfiguration such as a failover event or virtual machine
336 * migration. 336 * migration.
337 */ 337 */
338 void netif_notify_peers(struct net_device *dev) 338 void netif_notify_peers(struct net_device *dev)
339 { 339 {
340 rtnl_lock(); 340 rtnl_lock();
341 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); 341 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
342 rtnl_unlock(); 342 rtnl_unlock();
343 } 343 }
344 EXPORT_SYMBOL(netif_notify_peers); 344 EXPORT_SYMBOL(netif_notify_peers);
345 345
346 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces 346 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
347 under all circumstances. It is difficult to invent anything faster or 347 under all circumstances. It is difficult to invent anything faster or
348 cheaper. 348 cheaper.
349 */ 349 */
350 350
351 static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc) 351 static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
352 { 352 {
353 kfree_skb(skb); 353 kfree_skb(skb);
354 return NET_XMIT_CN; 354 return NET_XMIT_CN;
355 } 355 }
356 356
357 static struct sk_buff *noop_dequeue(struct Qdisc * qdisc) 357 static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
358 { 358 {
359 return NULL; 359 return NULL;
360 } 360 }
361 361
362 struct Qdisc_ops noop_qdisc_ops __read_mostly = { 362 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
363 .id = "noop", 363 .id = "noop",
364 .priv_size = 0, 364 .priv_size = 0,
365 .enqueue = noop_enqueue, 365 .enqueue = noop_enqueue,
366 .dequeue = noop_dequeue, 366 .dequeue = noop_dequeue,
367 .peek = noop_dequeue, 367 .peek = noop_dequeue,
368 .owner = THIS_MODULE, 368 .owner = THIS_MODULE,
369 }; 369 };
370 370
371 static struct netdev_queue noop_netdev_queue = { 371 static struct netdev_queue noop_netdev_queue = {
372 .qdisc = &noop_qdisc, 372 .qdisc = &noop_qdisc,
373 .qdisc_sleeping = &noop_qdisc, 373 .qdisc_sleeping = &noop_qdisc,
374 }; 374 };
375 375
376 struct Qdisc noop_qdisc = { 376 struct Qdisc noop_qdisc = {
377 .enqueue = noop_enqueue, 377 .enqueue = noop_enqueue,
378 .dequeue = noop_dequeue, 378 .dequeue = noop_dequeue,
379 .flags = TCQ_F_BUILTIN, 379 .flags = TCQ_F_BUILTIN,
380 .ops = &noop_qdisc_ops, 380 .ops = &noop_qdisc_ops,
381 .list = LIST_HEAD_INIT(noop_qdisc.list), 381 .list = LIST_HEAD_INIT(noop_qdisc.list),
382 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 382 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
383 .dev_queue = &noop_netdev_queue, 383 .dev_queue = &noop_netdev_queue,
384 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock), 384 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
385 }; 385 };
386 EXPORT_SYMBOL(noop_qdisc); 386 EXPORT_SYMBOL(noop_qdisc);
387 387
388 static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = { 388 static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
389 .id = "noqueue", 389 .id = "noqueue",
390 .priv_size = 0, 390 .priv_size = 0,
391 .enqueue = noop_enqueue, 391 .enqueue = noop_enqueue,
392 .dequeue = noop_dequeue, 392 .dequeue = noop_dequeue,
393 .peek = noop_dequeue, 393 .peek = noop_dequeue,
394 .owner = THIS_MODULE, 394 .owner = THIS_MODULE,
395 }; 395 };
396 396
397 static struct Qdisc noqueue_qdisc; 397 static struct Qdisc noqueue_qdisc;
398 static struct netdev_queue noqueue_netdev_queue = { 398 static struct netdev_queue noqueue_netdev_queue = {
399 .qdisc = &noqueue_qdisc, 399 .qdisc = &noqueue_qdisc,
400 .qdisc_sleeping = &noqueue_qdisc, 400 .qdisc_sleeping = &noqueue_qdisc,
401 }; 401 };
402 402
403 static struct Qdisc noqueue_qdisc = { 403 static struct Qdisc noqueue_qdisc = {
404 .enqueue = NULL, 404 .enqueue = NULL,
405 .dequeue = noop_dequeue, 405 .dequeue = noop_dequeue,
406 .flags = TCQ_F_BUILTIN, 406 .flags = TCQ_F_BUILTIN,
407 .ops = &noqueue_qdisc_ops, 407 .ops = &noqueue_qdisc_ops,
408 .list = LIST_HEAD_INIT(noqueue_qdisc.list), 408 .list = LIST_HEAD_INIT(noqueue_qdisc.list),
409 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), 409 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
410 .dev_queue = &noqueue_netdev_queue, 410 .dev_queue = &noqueue_netdev_queue,
411 .busylock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock), 411 .busylock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
412 }; 412 };
413 413
414 414
415 static const u8 prio2band[TC_PRIO_MAX+1] = 415 static const u8 prio2band[TC_PRIO_MAX+1] =
416 { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 }; 416 { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
417 417
418 /* 3-band FIFO queue: old style, but should be a bit faster than 418 /* 3-band FIFO queue: old style, but should be a bit faster than
419 generic prio+fifo combination. 419 generic prio+fifo combination.
420 */ 420 */
421 421
422 #define PFIFO_FAST_BANDS 3 422 #define PFIFO_FAST_BANDS 3
423 423
424 /* 424 /*
425 * Private data for a pfifo_fast scheduler containing: 425 * Private data for a pfifo_fast scheduler containing:
426 * - queues for the three bands 426 * - queues for the three bands
427 * - bitmap indicating which of the bands contain skbs 427 * - bitmap indicating which of the bands contain skbs
428 */ 428 */
429 struct pfifo_fast_priv { 429 struct pfifo_fast_priv {
430 u32 bitmap; 430 u32 bitmap;
431 struct sk_buff_head q[PFIFO_FAST_BANDS]; 431 struct sk_buff_head q[PFIFO_FAST_BANDS];
432 }; 432 };
433 433
434 /* 434 /*
435 * Convert a bitmap to the first band number where an skb is queued, where: 435 * Convert a bitmap to the first band number where an skb is queued, where:
436 * bitmap=0 means there are no skbs on any band. 436 * bitmap=0 means there are no skbs on any band.
437 * bitmap=1 means there is an skb on band 0. 437 * bitmap=1 means there is an skb on band 0.
438 * bitmap=7 means there are skbs on all 3 bands, etc. 438 * bitmap=7 means there are skbs on all 3 bands, etc.
439 */ 439 */
440 static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0}; 440 static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
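
A quick sanity check of the table: the bitmap carries one bit per band (bit 0 = band 0, the highest priority), and bitmap2band[] is simply "index of the lowest set bit, or -1 when no band is busy". A tiny sketch confirming the equivalence (the loop is illustrative, not kernel code):

	#include <stdio.h>

	static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

	int main(void)
	{
		/* For every non-empty bitmap, the table entry matches the
		 * index of the lowest set bit, i.e. the highest-priority
		 * band that has queued skbs. */
		for (int b = 1; b < 8; b++)
			printf("bitmap=%d -> band %d (ctz=%d)\n",
			       b, bitmap2band[b], __builtin_ctz(b));
		return 0;
	}
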
441 441
442 static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, 442 static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
443 int band) 443 int band)
444 { 444 {
445 return priv->q + band; 445 return priv->q + band;
446 } 446 }
447 447
448 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) 448 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
449 { 449 {
450 if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { 450 if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
451 int band = prio2band[skb->priority & TC_PRIO_MAX]; 451 int band = prio2band[skb->priority & TC_PRIO_MAX];
452 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 452 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
453 struct sk_buff_head *list = band2list(priv, band); 453 struct sk_buff_head *list = band2list(priv, band);
454 454
455 priv->bitmap |= (1 << band); 455 priv->bitmap |= (1 << band);
456 qdisc->q.qlen++; 456 qdisc->q.qlen++;
457 return __qdisc_enqueue_tail(skb, qdisc, list); 457 return __qdisc_enqueue_tail(skb, qdisc, list);
458 } 458 }
459 459
460 return qdisc_drop(skb, qdisc); 460 return qdisc_drop(skb, qdisc);
461 } 461 }
462 462
463 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) 463 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
464 { 464 {
465 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 465 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
466 int band = bitmap2band[priv->bitmap]; 466 int band = bitmap2band[priv->bitmap];
467 467
468 if (likely(band >= 0)) { 468 if (likely(band >= 0)) {
469 struct sk_buff_head *list = band2list(priv, band); 469 struct sk_buff_head *list = band2list(priv, band);
470 struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list); 470 struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
471 471
472 qdisc->q.qlen--; 472 qdisc->q.qlen--;
473 if (skb_queue_empty(list)) 473 if (skb_queue_empty(list))
474 priv->bitmap &= ~(1 << band); 474 priv->bitmap &= ~(1 << band);
475 475
476 return skb; 476 return skb;
477 } 477 }
478 478
479 return NULL; 479 return NULL;
480 } 480 }
481 481
482 static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) 482 static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
483 { 483 {
484 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 484 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
485 int band = bitmap2band[priv->bitmap]; 485 int band = bitmap2band[priv->bitmap];
486 486
487 if (band >= 0) { 487 if (band >= 0) {
488 struct sk_buff_head *list = band2list(priv, band); 488 struct sk_buff_head *list = band2list(priv, band);
489 489
490 return skb_peek(list); 490 return skb_peek(list);
491 } 491 }
492 492
493 return NULL; 493 return NULL;
494 } 494 }
495 495
496 static void pfifo_fast_reset(struct Qdisc* qdisc) 496 static void pfifo_fast_reset(struct Qdisc* qdisc)
497 { 497 {
498 int prio; 498 int prio;
499 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 499 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
500 500
501 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) 501 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
502 __qdisc_reset_queue(qdisc, band2list(priv, prio)); 502 __qdisc_reset_queue(qdisc, band2list(priv, prio));
503 503
504 priv->bitmap = 0; 504 priv->bitmap = 0;
505 qdisc->qstats.backlog = 0; 505 qdisc->qstats.backlog = 0;
506 qdisc->q.qlen = 0; 506 qdisc->q.qlen = 0;
507 } 507 }
508 508
509 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) 509 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
510 { 510 {
511 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; 511 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
512 512
513 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); 513 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
514 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 514 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
515 return skb->len; 515 return skb->len;
516 516
517 nla_put_failure: 517 nla_put_failure:
518 return -1; 518 return -1;
519 } 519 }
520 520
521 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) 521 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
522 { 522 {
523 int prio; 523 int prio;
524 struct pfifo_fast_priv *priv = qdisc_priv(qdisc); 524 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
525 525
526 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) 526 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
527 skb_queue_head_init(band2list(priv, prio)); 527 skb_queue_head_init(band2list(priv, prio));
528 528
529 return 0; 529 return 0;
530 } 530 }
531 531
532 struct Qdisc_ops pfifo_fast_ops __read_mostly = { 532 struct Qdisc_ops pfifo_fast_ops __read_mostly = {
533 .id = "pfifo_fast", 533 .id = "pfifo_fast",
534 .priv_size = sizeof(struct pfifo_fast_priv), 534 .priv_size = sizeof(struct pfifo_fast_priv),
535 .enqueue = pfifo_fast_enqueue, 535 .enqueue = pfifo_fast_enqueue,
536 .dequeue = pfifo_fast_dequeue, 536 .dequeue = pfifo_fast_dequeue,
537 .peek = pfifo_fast_peek, 537 .peek = pfifo_fast_peek,
538 .init = pfifo_fast_init, 538 .init = pfifo_fast_init,
539 .reset = pfifo_fast_reset, 539 .reset = pfifo_fast_reset,
540 .dump = pfifo_fast_dump, 540 .dump = pfifo_fast_dump,
541 .owner = THIS_MODULE, 541 .owner = THIS_MODULE,
542 }; 542 };
543 543
544 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 544 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
545 struct Qdisc_ops *ops) 545 struct Qdisc_ops *ops)
546 { 546 {
547 void *p; 547 void *p;
548 struct Qdisc *sch; 548 struct Qdisc *sch;
549 unsigned int size; 549 unsigned int size;
550 int err = -ENOBUFS; 550 int err = -ENOBUFS;
551 551
552 /* ensure that the Qdisc and the private data are 64-byte aligned */ 552 /* ensure that the Qdisc and the private data are 64-byte aligned */
553 size = QDISC_ALIGN(sizeof(*sch)); 553 size = QDISC_ALIGN(sizeof(*sch));
554 size += ops->priv_size + (QDISC_ALIGNTO - 1); 554 size += ops->priv_size + (QDISC_ALIGNTO - 1);
555 555
556 p = kzalloc_node(size, GFP_KERNEL, 556 p = kzalloc_node(size, GFP_KERNEL,
557 netdev_queue_numa_node_read(dev_queue)); 557 netdev_queue_numa_node_read(dev_queue));
558 558
559 if (!p) 559 if (!p)
560 goto errout; 560 goto errout;
561 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 561 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
562 sch->padded = (char *) sch - (char *) p; 562 sch->padded = (char *) sch - (char *) p;
563 563
564 INIT_LIST_HEAD(&sch->list); 564 INIT_LIST_HEAD(&sch->list);
565 skb_queue_head_init(&sch->q); 565 skb_queue_head_init(&sch->q);
566 spin_lock_init(&sch->busylock); 566 spin_lock_init(&sch->busylock);
567 sch->ops = ops; 567 sch->ops = ops;
568 sch->enqueue = ops->enqueue; 568 sch->enqueue = ops->enqueue;
569 sch->dequeue = ops->dequeue; 569 sch->dequeue = ops->dequeue;
570 sch->dev_queue = dev_queue; 570 sch->dev_queue = dev_queue;
571 dev_hold(qdisc_dev(sch)); 571 dev_hold(qdisc_dev(sch));
572 atomic_set(&sch->refcnt, 1); 572 atomic_set(&sch->refcnt, 1);
573 573
574 return sch; 574 return sch;
575 errout: 575 errout:
576 return ERR_PTR(err); 576 return ERR_PTR(err);
577 } 577 }
578 578
579 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, 579 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
580 struct Qdisc_ops *ops, unsigned int parentid) 580 struct Qdisc_ops *ops, unsigned int parentid)
581 { 581 {
582 struct Qdisc *sch; 582 struct Qdisc *sch;
583 583
584 sch = qdisc_alloc(dev_queue, ops); 584 sch = qdisc_alloc(dev_queue, ops);
585 if (IS_ERR(sch)) 585 if (IS_ERR(sch))
586 goto errout; 586 goto errout;
587 sch->parent = parentid; 587 sch->parent = parentid;
588 588
589 if (!ops->init || ops->init(sch, NULL) == 0) 589 if (!ops->init || ops->init(sch, NULL) == 0)
590 return sch; 590 return sch;
591 591
592 qdisc_destroy(sch); 592 qdisc_destroy(sch);
593 errout: 593 errout:
594 return NULL; 594 return NULL;
595 } 595 }
596 EXPORT_SYMBOL(qdisc_create_dflt); 596 EXPORT_SYMBOL(qdisc_create_dflt);
597 597
598 /* Under qdisc_lock(qdisc) and BH! */ 598 /* Under qdisc_lock(qdisc) and BH! */
599 599
600 void qdisc_reset(struct Qdisc *qdisc) 600 void qdisc_reset(struct Qdisc *qdisc)
601 { 601 {
602 const struct Qdisc_ops *ops = qdisc->ops; 602 const struct Qdisc_ops *ops = qdisc->ops;
603 603
604 if (ops->reset) 604 if (ops->reset)
605 ops->reset(qdisc); 605 ops->reset(qdisc);
606 606
607 if (qdisc->gso_skb) { 607 if (qdisc->gso_skb) {
608 kfree_skb(qdisc->gso_skb); 608 kfree_skb(qdisc->gso_skb);
609 qdisc->gso_skb = NULL; 609 qdisc->gso_skb = NULL;
610 qdisc->q.qlen = 0; 610 qdisc->q.qlen = 0;
611 } 611 }
612 } 612 }
613 EXPORT_SYMBOL(qdisc_reset); 613 EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(qdisc->stab);
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait a RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);
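
qdisc_destroy() is effectively a "put" operation: builtin qdiscs (noop_qdisc, noqueue_qdisc) are never torn down, a non-final reference just decrements refcnt, and only the last dropper runs the teardown, with the final kfree() deferred by one RCU grace period because the rate estimator timer may still touch the qdisc. A sketch of the resulting share/release discipline (the atomic_inc() matches what attach_default_qdiscs() does below):

	/* take an extra reference when publishing a second pointer */
	atomic_inc(&qdisc->refcnt);

	/* ... later, each owner releases independently ... */
	qdisc_destroy(qdisc);	/* refcnt 2 -> 1: nothing beyond the decrement */
	qdisc_destroy(qdisc);	/* refcnt 1 -> 0: full teardown + call_rcu() */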

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
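
Two things are easy to miss here. First, the live dev_queue->qdisc pointer is parked on noop_qdisc (which drops packets) rather than on the new qdisc; traffic only flows again once dev_activate() swaps the sleeping qdisc in. Second, the old root is returned, not freed — releasing it is the caller's job. A hedged sketch of the usual graft-then-release sequence, roughly what the qdisc_graft() path in net/sched/sch_api.c does ('new' is this sketch's variable):

	struct Qdisc *old;

	old = dev_graft_qdisc(dev_queue, new);
	qdisc_destroy(old);		/* drop our reference on the old root */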

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}

		/* Can by-pass the queue discipline for default qdisc */
		qdisc->flags |= TCQ_F_CAN_BYPASS;
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	   create default one i.e. pfifo_fast for devices,
	   which need queueing and noqueue_qdisc for
	   virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}
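
Activation is deliberately two-phase: defaults are attached only if the device still carries the boot-time noop root (so an administrator-installed qdisc survives an ifdown/ifup cycle), and the per-queue transition plus watchdog arming is skipped entirely while carrier is down. A hedged sketch of where this sits in the device-open path, condensed from __dev_open() in net/core/dev.c of the same era (error handling omitted):

	ret = ops->ndo_open(dev);	/* driver bring-up */
	if (!ret) {
		dev->flags |= IFF_UP;
		dev_activate(dev);	/* make sleeping qdiscs live, arm watchdog */
	}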

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}
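
"Busy" here means the qdisc is either executing on some CPU right now (qdisc_is_running()) or sits on a per-CPU output queue awaiting the TX softirq (__QDISC_STATE_SCHED); sampling both under the root lock gives a stable snapshot. This pairs with the run protocol on the transmit side, which in simplified form looks like (a sketch, not the verbatim kernel code):

	if (qdisc_run_begin(q)) {	/* claim exclusive execution */
		/* dequeue/transmit loop; may set __QDISC_STATE_SCHED
		 * and reschedule itself instead of finishing */
		qdisc_run_end(q);	/* clears the running mark */
	}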

void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, unreg_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, unreg_list)
		while (some_qdisc_is_busy(dev))
			yield();
}
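
The batch is threaded through each device's embedded unreg_list node, which is what lets a single synchronize_rcu() and one busy-wait pass cover every device in the list. The flip side of embedding the nodes in struct net_device is that they remain linked to the caller's list head after this function returns; if that head is on the stack, the caller must unlink before the frame dies, exactly as the fixed dev_deactivate() below does. A hedged multi-device sketch (dev_a/dev_b are hypothetical, RTNL assumed held):

	LIST_HEAD(batch);			/* on-stack head */

	list_add(&dev_a->unreg_list, &batch);
	list_add(&dev_b->unreg_list, &batch);
	dev_deactivate_many(&batch);
	list_del(&batch);	/* don't leave unreg_list nodes aimed at a dead frame */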

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	dev_deactivate_many(&single);
+	list_del(&single);
}
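
The added line above is the entire fix in this file. LIST_HEAD(single) lives in dev_deactivate()'s stack frame, and list_add() wires dev->unreg_list.next and .prev to point at it; dev_deactivate_many() walks the list but never unlinks anything. Without the list_del(), those pointers keep referencing the dead frame after return, and since this path can run repeatedly over a device's lifetime, the next operation that walks or splices dev->unreg_list would chase pointers into reused stack memory. Unlinking the head also leaves dev->unreg_list self-linked — a valid empty node, ready for the next user. A kernel-style reduction of the hazard (a sketch assuming <linux/list.h>; bad_use() is hypothetical):

	static void bad_use(struct net_device *dev)
	{
		LIST_HEAD(single);		/* stack storage */

		list_add(&dev->unreg_list, &single);
		dev_deactivate_many(&single);
		/* missing list_del(&single): dev->unreg_list still points
		 * here; once this frame is gone, a later
		 * list_move(&dev->unreg_list, ...) dereferences garbage */
	}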

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
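
Taken together, the entry points in this file bracket a device's qdisc lifetime; the order matters because dev_shutdown() assumes the TX path has already been quiesced and warns if the watchdog timer is somehow still pending. A rough, hedged sketch of how net/core/dev.c drives them (register/open/close/unregister; simplified, error handling omitted):

	dev_init_scheduler(dev);	/* register_netdevice(): park everything on noop_qdisc */
	dev_activate(dev);		/* __dev_open(): attach defaults, go live */
	dev_deactivate(dev);		/* __dev_close(): swap noop back in, wait for runners */
	dev_shutdown(dev);		/* rollback_registered(): destroy the qdiscs */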