Commit 5ce2d488fe039ddd86a638496cf704df86c74eeb

Authored by David S. Miller
Parent: bb949fbd18

pkt_sched: Remove 'dev' member of struct Qdisc.

The device can be obtained via the netdev_queue, so create a helper
routine, qdisc_dev(), to make the transformations nicer looking.

As a result, qdisc_alloc() no longer needs a net_device pointer argument.

Signed-off-by: David S. Miller <davem@davemloft.net>
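
The conversion is mechanical: each "sch->dev" / "qd->dev" / "tp->q->dev"
reference becomes a call to the helper, which chases the pointer already
stored in the qdisc's netdev_queue. A minimal sketch of the pattern
(example_restart() is a hypothetical call site modeled on the cbq hunk
below, not code from the patch):

#include <linux/netdevice.h>	/* struct net_device, netif_schedule() */
#include <net/sch_generic.h>	/* struct Qdisc, qdisc_dev() after this commit */

/* Hypothetical caller showing the rewrite applied throughout the patch:
 * the cached sch->dev pointer is gone, and the device is reached as
 * sch->dev_queue->dev via qdisc_dev(). */
static void example_restart(struct Qdisc *sch)
{
	/* before this commit: struct net_device *dev = sch->dev; */
	struct net_device *dev = qdisc_dev(sch);

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(dev);
}

One ordering detail worth noting in the sch_generic.c hunk: qdisc_alloc()
assigns sch->dev_queue before calling dev_hold(qdisc_dev(sch)), so the
helper is already valid at that point.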

17 changed files with 93 additions and 92 deletions

include/net/sch_generic.h
... ... @@ -38,7 +38,6 @@
38 38 atomic_t refcnt;
39 39 struct sk_buff_head q;
40 40 struct netdev_queue *dev_queue;
41   - struct net_device *dev;
42 41 struct list_head list;
43 42  
44 43 struct gnet_stats_basic bstats;
45 44  
... ... @@ -156,14 +155,18 @@
156 155 struct tcf_proto_ops *ops;
157 156 };
158 157  
  158 +static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
  159 +{
  160 + return qdisc->dev_queue->dev;
  161 +}
159 162  
160 163 extern void qdisc_lock_tree(struct net_device *dev);
161 164 extern void qdisc_unlock_tree(struct net_device *dev);
162 165  
163   -#define sch_tree_lock(q) qdisc_lock_tree((q)->dev)
164   -#define sch_tree_unlock(q) qdisc_unlock_tree((q)->dev)
165   -#define tcf_tree_lock(tp) qdisc_lock_tree((tp)->q->dev)
166   -#define tcf_tree_unlock(tp) qdisc_unlock_tree((tp)->q->dev)
  166 +#define sch_tree_lock(q) qdisc_lock_tree(qdisc_dev(q))
  167 +#define sch_tree_unlock(q) qdisc_unlock_tree(qdisc_dev(q))
  168 +#define tcf_tree_lock(tp) qdisc_lock_tree(qdisc_dev((tp)->q))
  169 +#define tcf_tree_unlock(tp) qdisc_unlock_tree(qdisc_dev((tp)->q))
167 170  
168 171 extern struct Qdisc noop_qdisc;
169 172 extern struct Qdisc_ops noop_qdisc_ops;
... ... @@ -217,8 +220,7 @@
217 220 extern void qdisc_reset(struct Qdisc *qdisc);
218 221 extern void qdisc_destroy(struct Qdisc *qdisc);
219 222 extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
220   -extern struct Qdisc *qdisc_alloc(struct net_device *dev,
221   - struct netdev_queue *dev_queue,
  223 +extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
222 224 struct Qdisc_ops *ops);
223 225 extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
224 226 struct netdev_queue *dev_queue,
net/mac80211/wme.c
... ... @@ -103,7 +103,7 @@
103 103 * negative return value indicates to drop the frame */
104 104 static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
105 105 {
106   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  106 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
107 107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
108 108  
109 109 if (!ieee80211_is_data(hdr->frame_control)) {
... ... @@ -140,7 +140,7 @@
140 140  
141 141 static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
142 142 {
143   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  143 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
144 144 struct ieee80211_hw *hw = &local->hw;
145 145 struct ieee80211_sched_data *q = qdisc_priv(qd);
146 146 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
... ... @@ -249,7 +249,7 @@
249 249 static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
250 250 {
251 251 struct ieee80211_sched_data *q = qdisc_priv(qd);
252   - struct net_device *dev = qd->dev;
  252 + struct net_device *dev = qdisc_dev(qd);
253 253 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
254 254 struct ieee80211_hw *hw = &local->hw;
255 255 struct sk_buff *skb;
... ... @@ -286,7 +286,7 @@
286 286 static void wme_qdiscop_reset(struct Qdisc* qd)
287 287 {
288 288 struct ieee80211_sched_data *q = qdisc_priv(qd);
289   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  289 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
290 290 struct ieee80211_hw *hw = &local->hw;
291 291 int queue;
292 292  
... ... @@ -303,7 +303,7 @@
303 303 static void wme_qdiscop_destroy(struct Qdisc* qd)
304 304 {
305 305 struct ieee80211_sched_data *q = qdisc_priv(qd);
306   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  306 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
307 307 struct ieee80211_hw *hw = &local->hw;
308 308 int queue;
309 309  
... ... @@ -328,7 +328,7 @@
328 328 static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
329 329 {
330 330 struct ieee80211_sched_data *q = qdisc_priv(qd);
331   - struct net_device *dev = qd->dev;
  331 + struct net_device *dev = qdisc_dev(qd);
332 332 struct ieee80211_local *local;
333 333 struct ieee80211_hw *hw;
334 334 int err = 0, i;
... ... @@ -359,7 +359,7 @@
359 359 /* create child queues */
360 360 for (i = 0; i < QD_NUM(hw); i++) {
361 361 skb_queue_head_init(&q->requeued[i]);
362   - q->queues[i] = qdisc_create_dflt(qd->dev, qd->dev_queue,
  362 + q->queues[i] = qdisc_create_dflt(qdisc_dev(qd), qd->dev_queue,
363 363 &pfifo_qdisc_ops,
364 364 qd->handle);
365 365 if (!q->queues[i]) {
... ... @@ -386,7 +386,7 @@
386 386 struct Qdisc *new, struct Qdisc **old)
387 387 {
388 388 struct ieee80211_sched_data *q = qdisc_priv(qd);
389   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  389 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
390 390 struct ieee80211_hw *hw = &local->hw;
391 391 unsigned long queue = arg - 1;
392 392  
... ... @@ -410,7 +410,7 @@
410 410 wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
411 411 {
412 412 struct ieee80211_sched_data *q = qdisc_priv(qd);
413   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  413 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
414 414 struct ieee80211_hw *hw = &local->hw;
415 415 unsigned long queue = arg - 1;
416 416  
... ... @@ -423,7 +423,7 @@
423 423  
424 424 static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
425 425 {
426   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  426 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
427 427 struct ieee80211_hw *hw = &local->hw;
428 428 unsigned long queue = TC_H_MIN(classid);
429 429  
... ... @@ -450,7 +450,7 @@
450 450 struct nlattr **tca, unsigned long *arg)
451 451 {
452 452 unsigned long cl = *arg;
453   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  453 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
454 454 struct ieee80211_hw *hw = &local->hw;
455 455  
456 456 if (cl - 1 > QD_NUM(hw))
... ... @@ -467,7 +467,7 @@
467 467 * when we add WMM-SA support - TSPECs may be deleted here */
468 468 static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
469 469 {
470   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  470 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
471 471 struct ieee80211_hw *hw = &local->hw;
472 472  
473 473 if (cl - 1 > QD_NUM(hw))
... ... @@ -480,7 +480,7 @@
480 480 struct sk_buff *skb, struct tcmsg *tcm)
481 481 {
482 482 struct ieee80211_sched_data *q = qdisc_priv(qd);
483   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  483 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
484 484 struct ieee80211_hw *hw = &local->hw;
485 485  
486 486 if (cl - 1 > QD_NUM(hw))
... ... @@ -494,7 +494,7 @@
494 494  
495 495 static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
496 496 {
497   - struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
  497 + struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
498 498 struct ieee80211_hw *hw = &local->hw;
499 499 int queue;
500 500  
net/sched/cls_api.c
... ... @@ -334,7 +334,7 @@
334 334 tcm->tcm_family = AF_UNSPEC;
335 335 tcm->tcm__pad1 = 0;
336 336 tcm->tcm__pad1 = 0;
337   - tcm->tcm_ifindex = tp->q->dev->ifindex;
  337 + tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
338 338 tcm->tcm_parent = tp->classid;
339 339 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
340 340 NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
net/sched/cls_route.c
... ... @@ -302,7 +302,7 @@
302 302 *fp = f->next;
303 303 tcf_tree_unlock(tp);
304 304  
305   - route4_reset_fastmap(tp->q->dev, head, f->id);
  305 + route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
306 306 route4_delete_filter(tp, f);
307 307  
308 308 /* Strip tree */
... ... @@ -500,7 +500,7 @@
500 500 }
501 501 tcf_tree_unlock(tp);
502 502  
503   - route4_reset_fastmap(tp->q->dev, head, f->id);
  503 + route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
504 504 *arg = (unsigned long)f;
505 505 return 0;
506 506  
net/sched/sch_api.c
... ... @@ -281,7 +281,7 @@
281 281 {
282 282 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
283 283 timer);
284   - struct net_device *dev = wd->qdisc->dev;
  284 + struct net_device *dev = qdisc_dev(wd->qdisc);
285 285  
286 286 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
287 287 smp_wmb();
... ... @@ -493,7 +493,7 @@
493 493 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
494 494 return;
495 495  
496   - sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
  496 + sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
497 497 if (sch == NULL) {
498 498 WARN_ON(parentid != TC_H_ROOT);
499 499 return;
... ... @@ -593,7 +593,7 @@
593 593 if (ops == NULL)
594 594 goto err_out;
595 595  
596   - sch = qdisc_alloc(dev, dev_queue, ops);
  596 + sch = qdisc_alloc(dev_queue, ops);
597 597 if (IS_ERR(sch)) {
598 598 err = PTR_ERR(sch);
599 599 goto err_out2;
... ... @@ -940,7 +940,7 @@
940 940 tcm->tcm_family = AF_UNSPEC;
941 941 tcm->tcm__pad1 = 0;
942 942 tcm->tcm__pad2 = 0;
943   - tcm->tcm_ifindex = q->dev->ifindex;
  943 + tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
944 944 tcm->tcm_parent = clid;
945 945 tcm->tcm_handle = q->handle;
946 946 tcm->tcm_info = atomic_read(&q->refcnt);
... ... @@ -1186,7 +1186,7 @@
1186 1186 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1187 1187 tcm = NLMSG_DATA(nlh);
1188 1188 tcm->tcm_family = AF_UNSPEC;
1189   - tcm->tcm_ifindex = q->dev->ifindex;
  1189 + tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1190 1190 tcm->tcm_parent = q->handle;
1191 1191 tcm->tcm_handle = q->handle;
1192 1192 tcm->tcm_info = 0;
net/sched/sch_atm.c
... ... @@ -296,7 +296,7 @@
296 296 goto err_out;
297 297 }
298 298 flow->filter_list = NULL;
299   - flow->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  299 + flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
300 300 &pfifo_qdisc_ops, classid);
301 301 if (!flow->q)
302 302 flow->q = &noop_qdisc;
... ... @@ -556,7 +556,7 @@
556 556  
557 557 pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
558 558 p->flows = &p->link;
559   - p->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  559 + p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
560 560 &pfifo_qdisc_ops, sch->handle);
561 561 if (!p->link.q)
562 562 p->link.q = &noop_qdisc;
net/sched/sch_cbq.c
... ... @@ -650,7 +650,7 @@
650 650 }
651 651  
652 652 sch->flags &= ~TCQ_F_THROTTLED;
653   - netif_schedule(sch->dev);
  653 + netif_schedule(qdisc_dev(sch));
654 654 return HRTIMER_NORESTART;
655 655 }
656 656  
657 657  
... ... @@ -1077,9 +1077,9 @@
1077 1077 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
1078 1078 q->quanta[prio];
1079 1079 }
1080   - if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
  1080 + if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
1081 1081 printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
1082   - cl->quantum = cl->qdisc->dev->mtu/2 + 1;
  1082 + cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
1083 1083 }
1084 1084 }
1085 1085 }
... ... @@ -1401,7 +1401,7 @@
1401 1401 q->link.sibling = &q->link;
1402 1402 q->link.common.classid = sch->handle;
1403 1403 q->link.qdisc = sch;
1404   - if (!(q->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1404 + if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1405 1405 &pfifo_qdisc_ops,
1406 1406 sch->handle)))
1407 1407 q->link.q = &noop_qdisc;
... ... @@ -1411,7 +1411,7 @@
1411 1411 q->link.cpriority = TC_CBQ_MAXPRIO-1;
1412 1412 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
1413 1413 q->link.overlimit = cbq_ovl_classic;
1414   - q->link.allot = psched_mtu(sch->dev);
  1414 + q->link.allot = psched_mtu(qdisc_dev(sch));
1415 1415 q->link.quantum = q->link.allot;
1416 1416 q->link.weight = q->link.R_tab->rate.rate;
1417 1417  
... ... @@ -1646,7 +1646,7 @@
1646 1646  
1647 1647 if (cl) {
1648 1648 if (new == NULL) {
1649   - new = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1649 + new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1650 1650 &pfifo_qdisc_ops,
1651 1651 cl->common.classid);
1652 1652 if (new == NULL)
1653 1653  
... ... @@ -1746,10 +1746,10 @@
1746 1746 #ifdef CONFIG_NET_CLS_ACT
1747 1747 struct cbq_sched_data *q = qdisc_priv(sch);
1748 1748  
1749   - spin_lock_bh(&sch->dev->queue_lock);
  1749 + spin_lock_bh(&qdisc_dev(sch)->queue_lock);
1750 1750 if (q->rx_class == cl)
1751 1751 q->rx_class = NULL;
1752   - spin_unlock_bh(&sch->dev->queue_lock);
  1752 + spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
1753 1753 #endif
1754 1754  
1755 1755 cbq_destroy_class(sch, cl);
... ... @@ -1828,7 +1828,7 @@
1828 1828  
1829 1829 if (tca[TCA_RATE])
1830 1830 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1831   - &sch->dev->queue_lock,
  1831 + &qdisc_dev(sch)->queue_lock,
1832 1832 tca[TCA_RATE]);
1833 1833 return 0;
1834 1834 }
... ... @@ -1879,7 +1879,7 @@
1879 1879 cl->R_tab = rtab;
1880 1880 rtab = NULL;
1881 1881 cl->refcnt = 1;
1882   - if (!(cl->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1882 + if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1883 1883 &pfifo_qdisc_ops, classid)))
1884 1884 cl->q = &noop_qdisc;
1885 1885 cl->common.classid = classid;
... ... @@ -1919,7 +1919,7 @@
1919 1919  
1920 1920 if (tca[TCA_RATE])
1921 1921 gen_new_estimator(&cl->bstats, &cl->rate_est,
1922   - &sch->dev->queue_lock, tca[TCA_RATE]);
  1922 + &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
1923 1923  
1924 1924 *arg = (unsigned long)cl;
1925 1925 return 0;
net/sched/sch_dsmark.c
... ... @@ -60,7 +60,7 @@
60 60 sch, p, new, old);
61 61  
62 62 if (new == NULL) {
63   - new = qdisc_create_dflt(sch->dev, sch->dev_queue,
  63 + new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
64 64 &pfifo_qdisc_ops,
65 65 sch->handle);
66 66 if (new == NULL)
... ... @@ -391,7 +391,7 @@
391 391 p->default_index = default_index;
392 392 p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
393 393  
394   - p->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  394 + p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
395 395 &pfifo_qdisc_ops, sch->handle);
396 396 if (p->q == NULL)
397 397 p->q = &noop_qdisc;
net/sched/sch_fifo.c
... ... @@ -48,10 +48,10 @@
48 48 struct fifo_sched_data *q = qdisc_priv(sch);
49 49  
50 50 if (opt == NULL) {
51   - u32 limit = sch->dev->tx_queue_len ? : 1;
  51 + u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
52 52  
53 53 if (sch->ops == &bfifo_qdisc_ops)
54   - limit *= sch->dev->mtu;
  54 + limit *= qdisc_dev(sch)->mtu;
55 55  
56 56 q->limit = limit;
57 57 } else {
... ... @@ -137,7 +137,7 @@
137 137 struct Qdisc *q;
138 138 int err = -ENOMEM;
139 139  
140   - q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  140 + q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
141 141 ops, TC_H_MAKE(sch->handle, 1));
142 142 if (q) {
143 143 err = fifo_set_limit(q, limit);
net/sched/sch_generic.c
... ... @@ -364,7 +364,7 @@
364 364 {
365 365 struct sk_buff_head *list = prio2list(skb, qdisc);
366 366  
367   - if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
  367 + if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
368 368 qdisc->q.qlen++;
369 369 return __qdisc_enqueue_tail(skb, qdisc, list);
370 370 }
... ... @@ -440,8 +440,7 @@
440 440 .owner = THIS_MODULE,
441 441 };
442 442  
443   -struct Qdisc *qdisc_alloc(struct net_device *dev,
444   - struct netdev_queue *dev_queue,
  443 +struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
445 444 struct Qdisc_ops *ops)
446 445 {
447 446 void *p;
... ... @@ -465,8 +464,7 @@
465 464 sch->enqueue = ops->enqueue;
466 465 sch->dequeue = ops->dequeue;
467 466 sch->dev_queue = dev_queue;
468   - sch->dev = dev;
469   - dev_hold(dev);
  467 + dev_hold(qdisc_dev(sch));
470 468 atomic_set(&sch->refcnt, 1);
471 469  
472 470 return sch;
... ... @@ -481,7 +479,7 @@
481 479 {
482 480 struct Qdisc *sch;
483 481  
484   - sch = qdisc_alloc(dev, dev_queue, ops);
  482 + sch = qdisc_alloc(dev_queue, ops);
485 483 if (IS_ERR(sch))
486 484 goto errout;
487 485 sch->stats_lock = &dev->queue_lock;
... ... @@ -534,7 +532,7 @@
534 532 ops->destroy(qdisc);
535 533  
536 534 module_put(ops->owner);
537   - dev_put(qdisc->dev);
  535 + dev_put(qdisc_dev(qdisc));
538 536 call_rcu(&qdisc->q_rcu, __qdisc_destroy);
539 537 }
540 538 EXPORT_SYMBOL(qdisc_destroy);
net/sched/sch_gred.c
... ... @@ -164,7 +164,7 @@
164 164 * if no default DP has been configured. This
165 165 * allows for DP flows to be left untouched.
166 166 */
167   - if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
  167 + if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
168 168 return qdisc_enqueue_tail(skb, sch);
169 169 else
170 170 goto drop;
net/sched/sch_hfsc.c
... ... @@ -1045,7 +1045,7 @@
1045 1045  
1046 1046 if (tca[TCA_RATE])
1047 1047 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1048   - &sch->dev->queue_lock,
  1048 + &qdisc_dev(sch)->queue_lock,
1049 1049 tca[TCA_RATE]);
1050 1050 return 0;
1051 1051 }
... ... @@ -1083,7 +1083,7 @@
1083 1083 cl->refcnt = 1;
1084 1084 cl->sched = q;
1085 1085 cl->cl_parent = parent;
1086   - cl->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1086 + cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1087 1087 &pfifo_qdisc_ops, classid);
1088 1088 if (cl->qdisc == NULL)
1089 1089 cl->qdisc = &noop_qdisc;
... ... @@ -1104,7 +1104,7 @@
1104 1104  
1105 1105 if (tca[TCA_RATE])
1106 1106 gen_new_estimator(&cl->bstats, &cl->rate_est,
1107   - &sch->dev->queue_lock, tca[TCA_RATE]);
  1107 + &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
1108 1108 *arg = (unsigned long)cl;
1109 1109 return 0;
1110 1110 }
... ... @@ -1202,7 +1202,7 @@
1202 1202 if (cl->level > 0)
1203 1203 return -EINVAL;
1204 1204 if (new == NULL) {
1205   - new = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1205 + new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1206 1206 &pfifo_qdisc_ops,
1207 1207 cl->cl_common.classid);
1208 1208 if (new == NULL)
... ... @@ -1445,7 +1445,7 @@
1445 1445 q->root.cl_common.classid = sch->handle;
1446 1446 q->root.refcnt = 1;
1447 1447 q->root.sched = q;
1448   - q->root.qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1448 + q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1449 1449 &pfifo_qdisc_ops,
1450 1450 sch->handle);
1451 1451 if (q->root.qdisc == NULL)
net/sched/sch_htb.c
... ... @@ -1026,7 +1026,7 @@
1026 1026 qdisc_watchdog_init(&q->watchdog, sch);
1027 1027 skb_queue_head_init(&q->direct_queue);
1028 1028  
1029   - q->direct_qlen = sch->dev->tx_queue_len;
  1029 + q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1030 1030 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
1031 1031 q->direct_qlen = 2;
1032 1032  
... ... @@ -1043,7 +1043,7 @@
1043 1043 struct nlattr *nest;
1044 1044 struct tc_htb_glob gopt;
1045 1045  
1046   - spin_lock_bh(&sch->dev->queue_lock);
  1046 + spin_lock_bh(&qdisc_dev(sch)->queue_lock);
1047 1047  
1048 1048 gopt.direct_pkts = q->direct_pkts;
1049 1049 gopt.version = HTB_VER;
1050 1050  
... ... @@ -1057,11 +1057,11 @@
1057 1057 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1058 1058 nla_nest_end(skb, nest);
1059 1059  
1060   - spin_unlock_bh(&sch->dev->queue_lock);
  1060 + spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
1061 1061 return skb->len;
1062 1062  
1063 1063 nla_put_failure:
1064   - spin_unlock_bh(&sch->dev->queue_lock);
  1064 + spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
1065 1065 nla_nest_cancel(skb, nest);
1066 1066 return -1;
1067 1067 }
... ... @@ -1073,7 +1073,7 @@
1073 1073 struct nlattr *nest;
1074 1074 struct tc_htb_opt opt;
1075 1075  
1076   - spin_lock_bh(&sch->dev->queue_lock);
  1076 + spin_lock_bh(&qdisc_dev(sch)->queue_lock);
1077 1077 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1078 1078 tcm->tcm_handle = cl->common.classid;
1079 1079 if (!cl->level && cl->un.leaf.q)
1080 1080  
... ... @@ -1095,11 +1095,11 @@
1095 1095 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1096 1096  
1097 1097 nla_nest_end(skb, nest);
1098   - spin_unlock_bh(&sch->dev->queue_lock);
  1098 + spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
1099 1099 return skb->len;
1100 1100  
1101 1101 nla_put_failure:
1102   - spin_unlock_bh(&sch->dev->queue_lock);
  1102 + spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
1103 1103 nla_nest_cancel(skb, nest);
1104 1104 return -1;
1105 1105 }
... ... @@ -1129,7 +1129,7 @@
1129 1129  
1130 1130 if (cl && !cl->level) {
1131 1131 if (new == NULL &&
1132   - (new = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1132 + (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1133 1133 &pfifo_qdisc_ops,
1134 1134 cl->common.classid))
1135 1135 == NULL)
... ... @@ -1257,7 +1257,7 @@
1257 1257 return -EBUSY;
1258 1258  
1259 1259 if (!cl->level && htb_parent_last_child(cl)) {
1260   - new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1260 + new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1261 1261 &pfifo_qdisc_ops,
1262 1262 cl->parent->common.classid);
1263 1263 last_child = 1;
... ... @@ -1365,7 +1365,7 @@
1365 1365 goto failure;
1366 1366  
1367 1367 gen_new_estimator(&cl->bstats, &cl->rate_est,
1368   - &sch->dev->queue_lock,
  1368 + &qdisc_dev(sch)->queue_lock,
1369 1369 tca[TCA_RATE] ? : &est.nla);
1370 1370 cl->refcnt = 1;
1371 1371 cl->children = 0;
... ... @@ -1378,7 +1378,7 @@
1378 1378 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1379 1379 so that can't be used inside of sch_tree_lock
1380 1380 -- thanks to Karlis Peisenieks */
1381   - new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
  1381 + new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1382 1382 &pfifo_qdisc_ops, classid);
1383 1383 sch_tree_lock(sch);
1384 1384 if (parent && !parent->level) {
... ... @@ -1420,7 +1420,7 @@
1420 1420 } else {
1421 1421 if (tca[TCA_RATE])
1422 1422 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1423   - &sch->dev->queue_lock,
  1423 + &qdisc_dev(sch)->queue_lock,
1424 1424 tca[TCA_RATE]);
1425 1425 sch_tree_lock(sch);
1426 1426 }
net/sched/sch_netem.c
... ... @@ -180,7 +180,7 @@
180 180 * skb will be queued.
181 181 */
182 182 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
183   - struct Qdisc *rootq = sch->dev->qdisc;
  183 + struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
184 184 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
185 185 q->duplicate = 0;
186 186  
187 187  
... ... @@ -333,9 +333,9 @@
333 333 for (i = 0; i < n; i++)
334 334 d->table[i] = data[i];
335 335  
336   - spin_lock_bh(&sch->dev->queue_lock);
  336 + spin_lock_bh(&qdisc_dev(sch)->queue_lock);
337 337 d = xchg(&q->delay_dist, d);
338   - spin_unlock_bh(&sch->dev->queue_lock);
  338 + spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
339 339  
340 340 kfree(d);
341 341 return 0;
... ... @@ -495,7 +495,7 @@
495 495  
496 496 q->limit = ctl->limit;
497 497 } else
498   - q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
  498 + q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
499 499  
500 500 q->oldest = PSCHED_PASTPERFECT;
501 501 return 0;
... ... @@ -536,7 +536,7 @@
536 536  
537 537 qdisc_watchdog_init(&q->watchdog, sch);
538 538  
539   - q->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
  539 + q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
540 540 &tfifo_qdisc_ops,
541 541 TC_H_MAKE(sch->handle, 1));
542 542 if (!q->qdisc) {
net/sched/sch_prio.c
... ... @@ -136,7 +136,8 @@
136 136 * pulling an skb. This way we avoid excessive requeues
137 137 * for slower queues.
138 138 */
139   - if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
  139 + if (!__netif_subqueue_stopped(qdisc_dev(sch),
  140 + (q->mq ? prio : 0))) {
140 141 qdisc = q->queues[prio];
141 142 skb = qdisc->dequeue(qdisc);
142 143 if (skb) {
... ... @@ -165,8 +166,8 @@
165 166 * for slower queues. If the queue is stopped, try the
166 167 * next queue.
167 168 */
168   - if (!__netif_subqueue_stopped(sch->dev,
169   - (q->mq ? q->curband : 0))) {
  169 + if (!__netif_subqueue_stopped(qdisc_dev(sch),
  170 + (q->mq ? q->curband : 0))) {
170 171 qdisc = q->queues[q->curband];
171 172 skb = qdisc->dequeue(qdisc);
172 173 if (skb) {
173 174  
... ... @@ -249,10 +250,10 @@
249 250 if (q->mq) {
250 251 if (sch->parent != TC_H_ROOT)
251 252 return -EINVAL;
252   - if (netif_is_multiqueue(sch->dev)) {
  253 + if (netif_is_multiqueue(qdisc_dev(sch))) {
253 254 if (q->bands == 0)
254   - q->bands = sch->dev->egress_subqueue_count;
255   - else if (q->bands != sch->dev->egress_subqueue_count)
  255 + q->bands = qdisc_dev(sch)->egress_subqueue_count;
  256 + else if (q->bands != qdisc_dev(sch)->egress_subqueue_count)
256 257 return -EINVAL;
257 258 } else
258 259 return -EOPNOTSUPP;
... ... @@ -281,7 +282,7 @@
281 282 for (i=0; i<q->bands; i++) {
282 283 if (q->queues[i] == &noop_qdisc) {
283 284 struct Qdisc *child;
284   - child = qdisc_create_dflt(sch->dev, sch->dev_queue,
  285 + child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
285 286 &pfifo_qdisc_ops,
286 287 TC_H_MAKE(sch->handle, i + 1));
287 288 if (child) {
net/sched/sch_sfq.c
... ... @@ -461,7 +461,7 @@
461 461 return -EINVAL;
462 462  
463 463 sch_tree_lock(sch);
464   - q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
  464 + q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
465 465 q->perturb_period = ctl->perturb_period * HZ;
466 466 if (ctl->limit)
467 467 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
... ... @@ -502,7 +502,7 @@
502 502 q->max_depth = 0;
503 503 q->tail = SFQ_DEPTH;
504 504 if (opt == NULL) {
505   - q->quantum = psched_mtu(sch->dev);
  505 + q->quantum = psched_mtu(qdisc_dev(sch));
506 506 q->perturb_period = 0;
507 507 q->perturbation = net_random();
508 508 } else {
net/sched/sch_teql.c
... ... @@ -78,7 +78,7 @@
78 78 static int
79 79 teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
80 80 {
81   - struct net_device *dev = sch->dev;
  81 + struct net_device *dev = qdisc_dev(sch);
82 82 struct teql_sched_data *q = qdisc_priv(sch);
83 83  
84 84 if (q->q.qlen < dev->tx_queue_len) {
... ... @@ -111,7 +111,7 @@
111 111  
112 112 skb = __skb_dequeue(&dat->q);
113 113 if (skb == NULL) {
114   - struct net_device *m = dat->m->dev->qdisc->dev;
  114 + struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
115 115 if (m) {
116 116 dat->m->slaves = sch;
117 117 netif_wake_queue(m);
... ... @@ -170,7 +170,7 @@
170 170  
171 171 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
172 172 {
173   - struct net_device *dev = sch->dev;
  173 + struct net_device *dev = qdisc_dev(sch);
174 174 struct teql_master *m = (struct teql_master*)sch->ops;
175 175 struct teql_sched_data *q = qdisc_priv(sch);
176 176  
... ... @@ -282,7 +282,7 @@
282 282 goto drop;
283 283  
284 284 do {
285   - struct net_device *slave = q->dev;
  285 + struct net_device *slave = qdisc_dev(q);
286 286  
287 287 if (slave->qdisc_sleeping != q)
288 288 continue;
... ... @@ -352,7 +352,7 @@
352 352  
353 353 q = m->slaves;
354 354 do {
355   - struct net_device *slave = q->dev;
  355 + struct net_device *slave = qdisc_dev(q);
356 356  
357 357 if (slave == NULL)
358 358 return -EUNATCH;
... ... @@ -403,7 +403,7 @@
403 403 q = m->slaves;
404 404 if (q) {
405 405 do {
406   - if (new_mtu > q->dev->mtu)
  406 + if (new_mtu > qdisc_dev(q)->mtu)
407 407 return -EINVAL;
408 408 } while ((q=NEXT_SLAVE(q)) != m->slaves);
409 409 }