Blame view
net/sched/sch_cbq.c
47.5 KB
1da177e4c Linux-2.6.12-rc2 |
1 2 3 4 5 6 7 8 9 10 11 |
/* * net/sched/sch_cbq.c Class-Based Queueing discipline. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * */ |
1da177e4c Linux-2.6.12-rc2 |
12 |
#include <linux/module.h> |
5a0e3ad6a include cleanup: ... |
13 |
#include <linux/slab.h> |
1da177e4c Linux-2.6.12-rc2 |
14 15 |
#include <linux/types.h> #include <linux/kernel.h> |
1da177e4c Linux-2.6.12-rc2 |
16 |
#include <linux/string.h> |
1da177e4c Linux-2.6.12-rc2 |
17 |
#include <linux/errno.h> |
1da177e4c Linux-2.6.12-rc2 |
18 |
#include <linux/skbuff.h> |
0ba480538 [NET_SCHED]: Remo... |
19 |
#include <net/netlink.h> |
1da177e4c Linux-2.6.12-rc2 |
20 21 22 23 24 25 26 |
#include <net/pkt_sched.h> /* Class-Based Queueing (CBQ) algorithm. ======================================= Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource |
10297b993 [NET] SCHED: Fix ... |
27 |
Management Models for Packet Networks", |
1da177e4c Linux-2.6.12-rc2 |
28 |
IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995 |
10297b993 [NET] SCHED: Fix ... |
29 |
[2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995 |
1da177e4c Linux-2.6.12-rc2 |
30 |
|
10297b993 [NET] SCHED: Fix ... |
31 |
[3] Sally Floyd, "Notes on Class-Based Queueing: Setting |
1da177e4c Linux-2.6.12-rc2 |
32 33 34 35 36 37 38 39 40 41 42 43 44 |
Parameters", 1996 [4] Sally Floyd and Michael Speer, "Experimental Results for Class-Based Queueing", 1998, not published. ----------------------------------------------------------------------- Algorithm skeleton was taken from NS simulator cbq.cc. If someone wants to check this code against the LBL version, he should take into account that ONLY the skeleton was borrowed, the implementation is different. Particularly: --- The WRR algorithm is different. Our version looks more |
10297b993 [NET] SCHED: Fix ... |
45 46 47 48 49 50 |
reasonable (I hope) and works when quanta are allowed to be less than MTU, which is always the case when real time classes have small rates. Note, that the statement of [3] is incomplete, delay may actually be estimated even if class per-round allotment is less than MTU. Namely, if per-round allotment is W*r_i, and r_1+...+r_k = r < 1 |
1da177e4c Linux-2.6.12-rc2 |
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 |
delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B In the worst case we have IntServ estimate with D = W*r+k*MTU and C = MTU*r. The proof (if correct at all) is trivial. --- It seems that cbq-2.0 is not very accurate. At least, I cannot interpret some places, which look like wrong translations from NS. Anyone is advised to find these differences and explain to me, why I am wrong 8). --- Linux has no EOI event, so that we cannot estimate true class idle time. Workaround is to consider the next dequeue event as sign that previous packet is finished. This is wrong because of internal device queueing, but on a permanently loaded link it is true. Moreover, combined with clock integrator, this scheme looks very close to an ideal solution. */ struct cbq_sched_data; |
cc7ec456f net_sched: cleanups |
71 |
struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;	/* TC_CBQ_OVL_* overlimit strategy selector */
#ifdef CONFIG_NET_CLS_ACT
	unsigned char		police;		/* police action for this class */
#endif

	u32			defmap;		/* bitmap of priorities this split node defaults */

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;	/* extra sleep time added when going overlimit */
	long			minidle;	/* lower clamp for avgidle (negative) */
	u32			avpkt;		/* average packet size used for parameter setup */
	struct qdisc_rate_table	*R_tab;		/* rate table: len -> transmission time (L2T) */

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);	/* cbq_ovl_* hook */
	psched_tdiff_t		penalty;	/* penalty time for delay/lowprio strategies */

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;	/* nonzero while class is sleeping after overlimit */
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;	/* time when class goes underlimit again;
						   PSCHED_PASTPERFECT when already under */
	long			avgidle;	/* scaled EWMA of idle time (see cbq_update) */
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;	/* absolute time until which class is penalized */
	struct gnet_stats_basic_packed bstats;	/* bytes/packets counters */
	struct gnet_stats_queue qstats;		/* queue length/drops counters */
	struct gnet_stats_rate_est rate_est;	/* rate estimator state */
	struct tc_cbq_xstats	xstats;		/* CBQ-specific stats (borrows, overactions, ...) */

	struct tcf_proto	*filter_list;	/* classifiers attached to this class */

	int			refcnt;
	int			filters;	/* number of filters referencing this class */

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];	/* per-priority default classes
								   used by split-node classification */
};
cc7ec456f net_sched: cleanups |
138 |
struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];	/* class count per priority band */
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];	/* sum of quanta per priority band */

	struct cbq_class	link;			/* root (link) class */

	unsigned int		activemask;		/* bitmap of bands with backlogged classes */
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;		/* class chosen on enqueue path (for reshape_fail) */
#endif
	struct cbq_class	*tx_class;		/* class that sent the last packet */
	struct cbq_class	*tx_borrowed;		/* class it borrowed bandwidth from */
	int			tx_len;			/* length of the last transmitted packet */
	psched_time_t		now;			/* Cached timestamp */
	psched_time_t		now_rt;			/* Cached real time */
	unsigned int		pmask;			/* bitmap of bands with penalized classes */

	struct hrtimer		delay_timer;		/* fires cbq_undelay for penalized classes */
	struct qdisc_watchdog	watchdog;		/* Watchdog timer,
							   started when CBQ has
							   backlog, but cannot
							   transmit just now */
	psched_tdiff_t		wd_expires;		/* relative time to (re)arm the watchdog */
	int			toplevel;		/* top-level borrowing cutoff (Floyd's toplevel) */
	u32			hgenerator;		/* generator for automatic class ids */
};
cc7ec456f net_sched: cleanups |
166 |
#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) |
1da177e4c Linux-2.6.12-rc2 |
167 |
|
cc7ec456f net_sched: cleanups |
168 |
static inline struct cbq_class * |
1da177e4c Linux-2.6.12-rc2 |
169 170 |
cbq_class_lookup(struct cbq_sched_data *q, u32 classid) { |
d77fea2eb net-sched: sch_cb... |
171 |
struct Qdisc_class_common *clc; |
1da177e4c Linux-2.6.12-rc2 |
172 |
|
d77fea2eb net-sched: sch_cb... |
173 174 175 176 |
clc = qdisc_class_find(&q->clhash, classid); if (clc == NULL) return NULL; return container_of(clc, struct cbq_class, common); |
1da177e4c Linux-2.6.12-rc2 |
177 |
} |
c3bc7cff8 [NET_SCHED]: Kill... |
178 |
#ifdef CONFIG_NET_CLS_ACT
/* Walk up the tree-parent chain and return the first ancestor's
 * best-effort default class that differs from @this; NULL if none.
 */
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *parent = this->tparent;

	while (parent != NULL) {
		struct cbq_class *cand = parent->defaults[TC_PRIO_BESTEFFORT];

		if (cand && cand != this)
			return cand;
		parent = parent->tparent;
	}
	return NULL;
}
#endif
/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 *
 * Returns the selected leaf class, or NULL (with *qerr set) when the
 * packet was shot/stolen by an action.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list ||
		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			/* Classifier returned a bare classid: resolve it, or
			 * fall back to the split node's per-priority defaults.
			 */
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			/* Result must be a descendant of the current head. */
			if (cl == NULL || cl->level >= head->level)
				goto fallback;
		}

#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen packets also return NULL */
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 *	   apply agency specific classifier.
		 *	   Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
cc7ec456f net_sched: cleanups |
281 282 283 |
* A packet has just been enqueued on the empty class. * cbq_activate_class adds it to the tail of active class list * of its priority band. |
1da177e4c Linux-2.6.12-rc2 |
284 |
*/ |
cc7ec456f net_sched: cleanups |
285 |
static inline void cbq_activate_class(struct cbq_class *cl) |
1da177e4c Linux-2.6.12-rc2 |
286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 |
{ struct cbq_sched_data *q = qdisc_priv(cl->qdisc); int prio = cl->cpriority; struct cbq_class *cl_tail; cl_tail = q->active[prio]; q->active[prio] = cl; if (cl_tail != NULL) { cl->next_alive = cl_tail->next_alive; cl_tail->next_alive = cl; } else { cl->next_alive = cl; q->activemask |= (1<<prio); } } /* |
/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */
static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			/* If we removed the tail, repair the tail pointer;
			 * if the ring is now empty, clear the band bit.
			 */
			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

/* Lower q->toplevel (the borrowing cutoff) to the level of the first
 * un-throttled ancestor of @cl whose undertime has already passed,
 * using a freshly computed virtual "now".
 */
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
		psched_time_t now;
		psched_tdiff_t incr;

		now = psched_get_time();
		incr = now - q->now_rt;
		now = q->now + incr;

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

/* Enqueue entry point: classify the skb, hand it to the leaf class's
 * inner qdisc, and activate the class if it just gained backlog.
 * Returns a NET_XMIT_* code; drop stats are charged on failure.
 */
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;	/* remembered for cbq_reshape_fail() */
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		/* next_alive == NULL means the class is not on the active
		 * ring yet (see cbq_activate_class/cbq_deactivate_class).
		 */
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}
1da177e4c Linux-2.6.12-rc2 |
388 389 390 391 392 393 394 |
/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */

static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a crap in this
		 * place, apparently they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	/* Ask the watchdog to wake us no later than this class's sleep end. */
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}

/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
 * they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	/* Find the highest borrowable ancestor not above q->toplevel;
	 * fall back to the class itself when none qualifies.
	 */
	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	/* Do not arm timers while the root qdisc is being deactivated. */
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(cl->qdisc)->state))
		return;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			/* Park the class in the special MAXPRIO band and
			 * (re)arm delay_timer to fire at the earliest expiry.
			 */
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			expires = ktime_set(0, 0);
			expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(
					hrtimer_get_expires(&q->delay_timer),
					expires)) > 0)
				hrtimer_set_expires(&q->delay_timer, expires);
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}

/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */

static void cbq_ovl_lowprio(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	cl->penalized = q->now + cl->penalty;

	if (cl->cpriority != cl->priority2) {
		cl->cpriority = cl->priority2;
		q->pmask |= (1<<cl->cpriority);
		cl->xstats.overactions++;
	}
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DROP: penalize class by dropping */

static void cbq_ovl_drop(struct cbq_class *cl)
{
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
			cl->qdisc->q.qlen--;
	cl->xstats.overactions++;
	cbq_ovl_classic(cl);
}
1a13cb63d [NET_SCHED]: sch_... |
532 533 |
/* Scan the penalized ring of @prio: reactivate every class whose penalty
 * has expired (restoring its real priority), and return how long until
 * the next un-expired penalty ends (0 if the band emptied or nothing
 * remains penalized).
 */
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			/* Penalty over: unlink from this ring and re-insert
			 * into the class's own priority band.
			 */
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;	/* track earliest future expiry */
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}
1a13cb63d [NET_SCHED]: sch_... |
565 |
/* hrtimer callback: walk every band flagged in q->pmask, reactivate
 * classes whose penalty expired, re-arm the timer for the earliest
 * remaining expiry, then kick the device to resume dequeueing.
 */
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);		/* lowest set bit = next band */
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			/* Band still has penalized classes: keep its bit and
			 * remember the smallest time-to-expiry.
			 */
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
	}

	qdisc_unthrottled(sch);
	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}
c3bc7cff8 [NET_SCHED]: Kill... |
603 |
#ifdef CONFIG_NET_CLS_ACT
/* Called by an inner qdisc that refused a packet: try to reclassify the
 * skb into an ancestor's default class and enqueue there. Returns 0 if
 * the packet was consumed (queued or counted as dropped), -1 otherwise.
 */
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;	/* class picked during enqueue */

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		int ret;

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		ret = qdisc_enqueue(skb, cl->q);
		if (ret == NET_XMIT_SUCCESS) {
			sch->q.qlen++;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
#endif
10297b993 [NET] SCHED: Fix ... |
635 |
/*
 * It is mission critical procedure.
 *
 * We "regenerate" toplevel cutoff, if transmitting class
 * has backlog and it is not regulated. It is not part of
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			/* Raise the cutoff to the first lender that is
			 * already underlimit (undertime == PASTPERFECT).
			 */
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

/* Charge the just-transmitted packet (q->tx_class / q->tx_len) to the
 * class and all of its link-sharing ancestors: update byte/packet stats,
 * the scaled avgidle EWMA and the undertime estimate for each.
 */
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;

	q->tx_class = NULL;

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = q->now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			/* Huge gap (stale cl->last): reset to maxidle. */
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = q->now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		cl->last = q->now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
cc7ec456f net_sched: cleanups |
745 |
/* Decide whether @cl may send now. Returns @cl itself when underlimit,
 * the ancestor it may borrow from otherwise, or NULL when no borrowing
 * is possible (in which case the class's overlimit action fires).
 */
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	/* The root class is never limited. */
	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* It is very suspicious place. Now overlimit
		 * action is generated for not bounded classes
		 * only if link is completely congested.
		 * Though it is in agree with ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no another solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
cc7ec456f net_sched: cleanups |
782 |
/* Weighted round-robin dequeue over the circular active list of one
 * priority band. Returns a packet, or NULL when every class in the band
 * is empty, penalized or overlimit. Empty/penalized classes are unlinked
 * in place (cf. cbq_deactivate_class).
 */
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
cc7ec456f net_sched: cleanups |
883 |
static inline struct sk_buff * |
1da177e4c Linux-2.6.12-rc2 |
884 885 886 887 |
cbq_dequeue_1(struct Qdisc *sch) { struct cbq_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; |
cc7ec456f net_sched: cleanups |
888 |
unsigned int activemask; |
1da177e4c Linux-2.6.12-rc2 |
889 |
|
cc7ec456f net_sched: cleanups |
890 |
activemask = q->activemask & 0xFF; |
1da177e4c Linux-2.6.12-rc2 |
891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 |
while (activemask) { int prio = ffz(~activemask); activemask &= ~(1<<prio); skb = cbq_dequeue_prio(sch, prio); if (skb) return skb; } return NULL; } static struct sk_buff * cbq_dequeue(struct Qdisc *sch) { struct sk_buff *skb; struct cbq_sched_data *q = qdisc_priv(sch); psched_time_t now; psched_tdiff_t incr; |
3bebcda28 [NET_SCHED]: turn... |
908 |
now = psched_get_time(); |
8edc0c31d [NET_SCHED]: kill... |
909 |
incr = now - q->now_rt; |
1da177e4c Linux-2.6.12-rc2 |
910 911 912 913 |
if (q->tx_class) { psched_tdiff_t incr2; /* Time integrator. We calculate EOS time |
cc7ec456f net_sched: cleanups |
914 915 916 917 918 |
* by adding expected packet transmission time. * If real time is greater, we warp artificial clock, * so that: * * cbq_time = max(real_time, work); |
1da177e4c Linux-2.6.12-rc2 |
919 920 |
*/ incr2 = L2T(&q->link, q->tx_len); |
7c59e25f3 [NET_SCHED]: kill... |
921 |
q->now += incr2; |
1da177e4c Linux-2.6.12-rc2 |
922 923 924 925 |
cbq_update(q); if ((incr -= incr2) < 0) incr = 0; } |
7c59e25f3 [NET_SCHED]: kill... |
926 |
q->now += incr; |
1da177e4c Linux-2.6.12-rc2 |
927 928 929 930 931 932 933 |
q->now_rt = now; for (;;) { q->wd_expires = 0; skb = cbq_dequeue_1(sch); if (skb) { |
9190b3b32 net_sched: accura... |
934 |
qdisc_bstats_update(sch, skb); |
1da177e4c Linux-2.6.12-rc2 |
935 |
sch->q.qlen--; |
fd245a4ad net_sched: move T... |
936 |
qdisc_unthrottled(sch); |
1da177e4c Linux-2.6.12-rc2 |
937 938 939 940 |
return skb; } /* All the classes are overlimit. |
cc7ec456f net_sched: cleanups |
941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 |
* * It is possible, if: * * 1. Scheduler is empty. * 2. Toplevel cutoff inhibited borrowing. * 3. Root class is overlimit. * * Reset 2d and 3d conditions and retry. * * Note, that NS and cbq-2.0 are buggy, peeking * an arbitrary class is appropriate for ancestor-only * sharing, but not for toplevel algorithm. * * Our version is better, but slower, because it requires * two passes, but it is unavoidable with top-level sharing. */ |
1da177e4c Linux-2.6.12-rc2 |
957 958 |
if (q->toplevel == TC_CBQ_MAXLEVEL && |
a084980dc [NET_SCHED]: kill... |
959 |
q->link.undertime == PSCHED_PASTPERFECT) |
1da177e4c Linux-2.6.12-rc2 |
960 961 962 |
break; q->toplevel = TC_CBQ_MAXLEVEL; |
a084980dc [NET_SCHED]: kill... |
963 |
q->link.undertime = PSCHED_PASTPERFECT; |
1da177e4c Linux-2.6.12-rc2 |
964 965 966 |
} /* No packets in scheduler or nobody wants to give them to us :-( |
cc7ec456f net_sched: cleanups |
967 968 |
* Sigh... start watchdog timer in the last case. */ |
1da177e4c Linux-2.6.12-rc2 |
969 970 971 |
if (sch->q.qlen) { sch->qstats.overlimits++; |
88a993540 [NET_SCHED]: sch_... |
972 973 |
if (q->wd_expires) qdisc_watchdog_schedule(&q->watchdog, |
bb239acf5 [NET_SCHED]: sch_... |
974 |
now + q->wd_expires); |
1da177e4c Linux-2.6.12-rc2 |
975 976 977 978 979 980 981 982 983 984 985 986 987 988 |
} return NULL; } /* CBQ class maintanance routines */ static void cbq_adjust_levels(struct cbq_class *this) { if (this == NULL) return; do { int level = 0; struct cbq_class *cl; |
cc7ec456f net_sched: cleanups |
989 990 |
cl = this->children; if (cl) { |
1da177e4c Linux-2.6.12-rc2 |
991 992 993 994 995 |
do { if (cl->level > level) level = cl->level; } while ((cl = cl->sibling) != this->children); } |
cc7ec456f net_sched: cleanups |
996 |
this->level = level + 1; |
1da177e4c Linux-2.6.12-rc2 |
997 998 999 1000 1001 1002 |
} while ((this = this->tparent) != NULL); } static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio) { struct cbq_class *cl; |
d77fea2eb net-sched: sch_cb... |
1003 1004 |
struct hlist_node *n; unsigned int h; |
1da177e4c Linux-2.6.12-rc2 |
1005 1006 1007 |
if (q->quanta[prio] == 0) return; |
d77fea2eb net-sched: sch_cb... |
1008 1009 |
for (h = 0; h < q->clhash.hashsize; h++) { hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { |
1da177e4c Linux-2.6.12-rc2 |
1010 |
/* BUGGGG... Beware! This expression suffer of |
cc7ec456f net_sched: cleanups |
1011 |
* arithmetic overflows! |
1da177e4c Linux-2.6.12-rc2 |
1012 1013 1014 1015 1016 |
*/ if (cl->priority == prio) { cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ q->quanta[prio]; } |
5ce2d488f pkt_sched: Remove... |
1017 |
if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) { |
cc7ec456f net_sched: cleanups |
1018 1019 1020 |
pr_warning("CBQ: class %08x has bad quantum==%ld, repaired. ", cl->common.classid, cl->quantum); |
5ce2d488f pkt_sched: Remove... |
1021 |
cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; |
1da177e4c Linux-2.6.12-rc2 |
1022 1023 1024 1025 1026 1027 1028 1029 1030 |
} } } } static void cbq_sync_defmap(struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(cl->qdisc); struct cbq_class *split = cl->split; |
cc7ec456f net_sched: cleanups |
1031 |
unsigned int h; |
1da177e4c Linux-2.6.12-rc2 |
1032 1033 1034 1035 |
int i; if (split == NULL) return; |
cc7ec456f net_sched: cleanups |
1036 1037 |
for (i = 0; i <= TC_PRIO_MAX; i++) { if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) |
1da177e4c Linux-2.6.12-rc2 |
1038 1039 |
split->defaults[i] = NULL; } |
cc7ec456f net_sched: cleanups |
1040 |
for (i = 0; i <= TC_PRIO_MAX; i++) { |
1da177e4c Linux-2.6.12-rc2 |
1041 1042 1043 1044 |
int level = split->level; if (split->defaults[i]) continue; |
d77fea2eb net-sched: sch_cb... |
1045 1046 |
for (h = 0; h < q->clhash.hashsize; h++) { struct hlist_node *n; |
1da177e4c Linux-2.6.12-rc2 |
1047 |
struct cbq_class *c; |
d77fea2eb net-sched: sch_cb... |
1048 1049 |
hlist_for_each_entry(c, n, &q->clhash.hash[h], common.hnode) { |
1da177e4c Linux-2.6.12-rc2 |
1050 |
if (c->split == split && c->level < level && |
cc7ec456f net_sched: cleanups |
1051 |
c->defmap & (1<<i)) { |
1da177e4c Linux-2.6.12-rc2 |
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 |
split->defaults[i] = c; level = c->level; } } } } } static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask) { struct cbq_class *split = NULL; if (splitid == 0) { |
cc7ec456f net_sched: cleanups |
1065 1066 |
split = cl->split; if (!split) |
1da177e4c Linux-2.6.12-rc2 |
1067 |
return; |
d77fea2eb net-sched: sch_cb... |
1068 |
splitid = split->common.classid; |
1da177e4c Linux-2.6.12-rc2 |
1069 |
} |
d77fea2eb net-sched: sch_cb... |
1070 |
if (split == NULL || split->common.classid != splitid) { |
1da177e4c Linux-2.6.12-rc2 |
1071 |
for (split = cl->tparent; split; split = split->tparent) |
d77fea2eb net-sched: sch_cb... |
1072 |
if (split->common.classid == splitid) |
1da177e4c Linux-2.6.12-rc2 |
1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 |
break; } if (split == NULL) return; if (cl->split != split) { cl->defmap = 0; cbq_sync_defmap(cl); cl->split = split; |
cc7ec456f net_sched: cleanups |
1083 |
cl->defmap = def & mask; |
1da177e4c Linux-2.6.12-rc2 |
1084 |
} else |
cc7ec456f net_sched: cleanups |
1085 |
cl->defmap = (cl->defmap & ~mask) | (def & mask); |
1da177e4c Linux-2.6.12-rc2 |
1086 1087 1088 1089 1090 1091 1092 1093 |
cbq_sync_defmap(cl); } static void cbq_unlink_class(struct cbq_class *this) { struct cbq_class *cl, **clp; struct cbq_sched_data *q = qdisc_priv(this->qdisc); |
d77fea2eb net-sched: sch_cb... |
1094 |
qdisc_class_hash_remove(&q->clhash, &this->common); |
1da177e4c Linux-2.6.12-rc2 |
1095 1096 |
if (this->tparent) { |
cc7ec456f net_sched: cleanups |
1097 |
clp = &this->sibling; |
1da177e4c Linux-2.6.12-rc2 |
1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 |
cl = *clp; do { if (cl == this) { *clp = cl->sibling; break; } clp = &cl->sibling; } while ((cl = *clp) != this->sibling); if (this->tparent->children == this) { this->tparent->children = this->sibling; if (this->sibling == this) this->tparent->children = NULL; } } else { |
547b792ca net: convert BUG_... |
1113 |
WARN_ON(this->sibling != this); |
1da177e4c Linux-2.6.12-rc2 |
1114 1115 1116 1117 1118 1119 |
} } static void cbq_link_class(struct cbq_class *this) { struct cbq_sched_data *q = qdisc_priv(this->qdisc); |
1da177e4c Linux-2.6.12-rc2 |
1120 1121 1122 |
struct cbq_class *parent = this->tparent; this->sibling = this; |
d77fea2eb net-sched: sch_cb... |
1123 |
qdisc_class_hash_insert(&q->clhash, &this->common); |
1da177e4c Linux-2.6.12-rc2 |
1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 |
if (parent == NULL) return; if (parent->children == NULL) { parent->children = this; } else { this->sibling = parent->children->sibling; parent->children->sibling = this; } } |
cc7ec456f net_sched: cleanups |
1135 |
static unsigned int cbq_drop(struct Qdisc *sch) |
1da177e4c Linux-2.6.12-rc2 |
1136 1137 1138 1139 1140 1141 1142 |
{ struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl, *cl_head; int prio; unsigned int len; for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) { |
cc7ec456f net_sched: cleanups |
1143 1144 |
cl_head = q->active[prio]; if (!cl_head) |
1da177e4c Linux-2.6.12-rc2 |
1145 1146 1147 1148 1149 1150 |
continue; cl = cl_head; do { if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) { sch->q.qlen--; |
a37ef2e32 [NET_SCHED] sch_c... |
1151 1152 |
if (!cl->q->q.qlen) cbq_deactivate_class(cl); |
1da177e4c Linux-2.6.12-rc2 |
1153 1154 1155 1156 1157 1158 1159 1160 |
return len; } } while ((cl = cl->next_alive) != cl_head); } return 0; } static void |
cc7ec456f net_sched: cleanups |
1161 |
cbq_reset(struct Qdisc *sch) |
1da177e4c Linux-2.6.12-rc2 |
1162 1163 1164 |
{ struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl; |
d77fea2eb net-sched: sch_cb... |
1165 |
struct hlist_node *n; |
1da177e4c Linux-2.6.12-rc2 |
1166 |
int prio; |
cc7ec456f net_sched: cleanups |
1167 |
unsigned int h; |
1da177e4c Linux-2.6.12-rc2 |
1168 1169 1170 1171 1172 |
q->activemask = 0; q->pmask = 0; q->tx_class = NULL; q->tx_borrowed = NULL; |
88a993540 [NET_SCHED]: sch_... |
1173 |
qdisc_watchdog_cancel(&q->watchdog); |
2fbd3da38 pkt_sched: Revert... |
1174 |
hrtimer_cancel(&q->delay_timer); |
1da177e4c Linux-2.6.12-rc2 |
1175 |
q->toplevel = TC_CBQ_MAXLEVEL; |
3bebcda28 [NET_SCHED]: turn... |
1176 |
q->now = psched_get_time(); |
1da177e4c Linux-2.6.12-rc2 |
1177 1178 1179 1180 |
q->now_rt = q->now; for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++) q->active[prio] = NULL; |
d77fea2eb net-sched: sch_cb... |
1181 1182 |
for (h = 0; h < q->clhash.hashsize; h++) { hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { |
1da177e4c Linux-2.6.12-rc2 |
1183 1184 1185 |
qdisc_reset(cl->q); cl->next_alive = NULL; |
a084980dc [NET_SCHED]: kill... |
1186 |
cl->undertime = PSCHED_PASTPERFECT; |
1da177e4c Linux-2.6.12-rc2 |
1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 |
cl->avgidle = cl->maxidle; cl->deficit = cl->quantum; cl->cpriority = cl->priority; } } sch->q.qlen = 0; } static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) { |
cc7ec456f net_sched: cleanups |
1198 1199 1200 |
if (lss->change & TCF_CBQ_LSS_FLAGS) { cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; |
1da177e4c Linux-2.6.12-rc2 |
1201 |
} |
cc7ec456f net_sched: cleanups |
1202 |
if (lss->change & TCF_CBQ_LSS_EWMA) |
1da177e4c Linux-2.6.12-rc2 |
1203 |
cl->ewma_log = lss->ewma_log; |
cc7ec456f net_sched: cleanups |
1204 |
if (lss->change & TCF_CBQ_LSS_AVPKT) |
1da177e4c Linux-2.6.12-rc2 |
1205 |
cl->avpkt = lss->avpkt; |
cc7ec456f net_sched: cleanups |
1206 |
if (lss->change & TCF_CBQ_LSS_MINIDLE) |
1da177e4c Linux-2.6.12-rc2 |
1207 |
cl->minidle = -(long)lss->minidle; |
cc7ec456f net_sched: cleanups |
1208 |
if (lss->change & TCF_CBQ_LSS_MAXIDLE) { |
1da177e4c Linux-2.6.12-rc2 |
1209 1210 1211 |
cl->maxidle = lss->maxidle; cl->avgidle = lss->maxidle; } |
cc7ec456f net_sched: cleanups |
1212 |
if (lss->change & TCF_CBQ_LSS_OFFTIME) |
1da177e4c Linux-2.6.12-rc2 |
1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 |
cl->offtime = lss->offtime; return 0; } static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl) { q->nclasses[cl->priority]--; q->quanta[cl->priority] -= cl->weight; cbq_normalize_quanta(q, cl->priority); } static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl) { q->nclasses[cl->priority]++; q->quanta[cl->priority] += cl->weight; cbq_normalize_quanta(q, cl->priority); } static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr) { struct cbq_sched_data *q = qdisc_priv(cl->qdisc); if (wrr->allot) cl->allot = wrr->allot; if (wrr->weight) cl->weight = wrr->weight; if (wrr->priority) { |
cc7ec456f net_sched: cleanups |
1240 |
cl->priority = wrr->priority - 1; |
1da177e4c Linux-2.6.12-rc2 |
1241 1242 |
cl->cpriority = cl->priority; if (cl->priority >= cl->priority2) |
cc7ec456f net_sched: cleanups |
1243 |
cl->priority2 = TC_CBQ_MAXPRIO - 1; |
1da177e4c Linux-2.6.12-rc2 |
1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 |
} cbq_addprio(q, cl); return 0; } static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl) { switch (ovl->strategy) { case TC_CBQ_OVL_CLASSIC: cl->overlimit = cbq_ovl_classic; break; case TC_CBQ_OVL_DELAY: cl->overlimit = cbq_ovl_delay; break; case TC_CBQ_OVL_LOWPRIO: |
cc7ec456f net_sched: cleanups |
1260 1261 |
if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO || ovl->priority2 - 1 <= cl->priority) |
1da177e4c Linux-2.6.12-rc2 |
1262 |
return -EINVAL; |
cc7ec456f net_sched: cleanups |
1263 |
cl->priority2 = ovl->priority2 - 1; |
1da177e4c Linux-2.6.12-rc2 |
1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 |
cl->overlimit = cbq_ovl_lowprio; break; case TC_CBQ_OVL_DROP: cl->overlimit = cbq_ovl_drop; break; case TC_CBQ_OVL_RCLASSIC: cl->overlimit = cbq_ovl_rclassic; break; default: return -EINVAL; } |
1a13cb63d [NET_SCHED]: sch_... |
1275 |
cl->penalty = ovl->penalty; |
1da177e4c Linux-2.6.12-rc2 |
1276 1277 |
return 0; } |
c3bc7cff8 [NET_SCHED]: Kill... |
1278 |
#ifdef CONFIG_NET_CLS_ACT |
1da177e4c Linux-2.6.12-rc2 |
1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 |
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p) { cl->police = p->police; if (cl->q->handle) { if (p->police == TC_POLICE_RECLASSIFY) cl->q->reshape_fail = cbq_reshape_fail; else cl->q->reshape_fail = NULL; } return 0; } #endif static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt) { cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange); return 0; } |
27a3421e4 [NET_SCHED]: Use ... |
1298 1299 1300 1301 1302 1303 1304 1305 1306 |
static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = { [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) }, [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) }, [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) }, [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) }, [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) }, [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) }, }; |
1e90474c3 [NET_SCHED]: Conv... |
1307 |
static int cbq_init(struct Qdisc *sch, struct nlattr *opt) |
1da177e4c Linux-2.6.12-rc2 |
1308 1309 |
{ struct cbq_sched_data *q = qdisc_priv(sch); |
1e90474c3 [NET_SCHED]: Conv... |
1310 |
struct nlattr *tb[TCA_CBQ_MAX + 1]; |
1da177e4c Linux-2.6.12-rc2 |
1311 |
struct tc_ratespec *r; |
cee63723b [NET_SCHED]: Prop... |
1312 |
int err; |
27a3421e4 [NET_SCHED]: Use ... |
1313 |
err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); |
cee63723b [NET_SCHED]: Prop... |
1314 1315 |
if (err < 0) return err; |
1da177e4c Linux-2.6.12-rc2 |
1316 |
|
27a3421e4 [NET_SCHED]: Use ... |
1317 |
if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL) |
1da177e4c Linux-2.6.12-rc2 |
1318 |
return -EINVAL; |
1e90474c3 [NET_SCHED]: Conv... |
1319 |
r = nla_data(tb[TCA_CBQ_RATE]); |
1da177e4c Linux-2.6.12-rc2 |
1320 |
|
1e90474c3 [NET_SCHED]: Conv... |
1321 |
if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) |
1da177e4c Linux-2.6.12-rc2 |
1322 |
return -EINVAL; |
d77fea2eb net-sched: sch_cb... |
1323 1324 1325 |
err = qdisc_class_hash_init(&q->clhash); if (err < 0) goto put_rtab; |
1da177e4c Linux-2.6.12-rc2 |
1326 1327 |
q->link.refcnt = 1; q->link.sibling = &q->link; |
d77fea2eb net-sched: sch_cb... |
1328 |
q->link.common.classid = sch->handle; |
1da177e4c Linux-2.6.12-rc2 |
1329 |
q->link.qdisc = sch; |
3511c9132 net_sched: remove... |
1330 1331 1332 |
q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle); if (!q->link.q) |
1da177e4c Linux-2.6.12-rc2 |
1333 |
q->link.q = &noop_qdisc; |
cc7ec456f net_sched: cleanups |
1334 1335 1336 |
q->link.priority = TC_CBQ_MAXPRIO - 1; q->link.priority2 = TC_CBQ_MAXPRIO - 1; q->link.cpriority = TC_CBQ_MAXPRIO - 1; |
1da177e4c Linux-2.6.12-rc2 |
1337 1338 |
q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; q->link.overlimit = cbq_ovl_classic; |
5ce2d488f pkt_sched: Remove... |
1339 |
q->link.allot = psched_mtu(qdisc_dev(sch)); |
1da177e4c Linux-2.6.12-rc2 |
1340 1341 1342 1343 1344 1345 |
q->link.quantum = q->link.allot; q->link.weight = q->link.R_tab->rate.rate; q->link.ewma_log = TC_CBQ_DEF_EWMA; q->link.avpkt = q->link.allot/2; q->link.minidle = -0x7FFFFFFF; |
1da177e4c Linux-2.6.12-rc2 |
1346 |
|
88a993540 [NET_SCHED]: sch_... |
1347 |
qdisc_watchdog_init(&q->watchdog, sch); |
2fbd3da38 pkt_sched: Revert... |
1348 |
hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
1da177e4c Linux-2.6.12-rc2 |
1349 1350 |
q->delay_timer.function = cbq_undelay; q->toplevel = TC_CBQ_MAXLEVEL; |
3bebcda28 [NET_SCHED]: turn... |
1351 |
q->now = psched_get_time(); |
1da177e4c Linux-2.6.12-rc2 |
1352 1353 1354 |
q->now_rt = q->now; cbq_link_class(&q->link); |
1e90474c3 [NET_SCHED]: Conv... |
1355 1356 |
if (tb[TCA_CBQ_LSSOPT]) cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); |
1da177e4c Linux-2.6.12-rc2 |
1357 1358 1359 |
cbq_addprio(q, &q->link); return 0; |
d77fea2eb net-sched: sch_cb... |
1360 1361 1362 1363 |
put_rtab: qdisc_put_rtab(q->link.R_tab); return err; |
1da177e4c Linux-2.6.12-rc2 |
1364 |
} |
cc7ec456f net_sched: cleanups |
1365 |
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) |
1da177e4c Linux-2.6.12-rc2 |
1366 |
{ |
27a884dc3 [SK_BUFF]: Conver... |
1367 |
unsigned char *b = skb_tail_pointer(skb); |
1da177e4c Linux-2.6.12-rc2 |
1368 |
|
1e90474c3 [NET_SCHED]: Conv... |
1369 |
NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); |
1da177e4c Linux-2.6.12-rc2 |
1370 |
return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1371 |
nla_put_failure: |
dc5fc579b [NETLINK]: Use nl... |
1372 |
nlmsg_trim(skb, b); |
1da177e4c Linux-2.6.12-rc2 |
1373 1374 |
return -1; } |
cc7ec456f net_sched: cleanups |
1375 |
static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) |
1da177e4c Linux-2.6.12-rc2 |
1376 |
{ |
27a884dc3 [SK_BUFF]: Conver... |
1377 |
unsigned char *b = skb_tail_pointer(skb); |
1da177e4c Linux-2.6.12-rc2 |
1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 |
struct tc_cbq_lssopt opt; opt.flags = 0; if (cl->borrow == NULL) opt.flags |= TCF_CBQ_LSS_BOUNDED; if (cl->share == NULL) opt.flags |= TCF_CBQ_LSS_ISOLATED; opt.ewma_log = cl->ewma_log; opt.level = cl->level; opt.avpkt = cl->avpkt; opt.maxidle = cl->maxidle; opt.minidle = (u32)(-cl->minidle); opt.offtime = cl->offtime; opt.change = ~0; |
1e90474c3 [NET_SCHED]: Conv... |
1392 |
NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); |
1da177e4c Linux-2.6.12-rc2 |
1393 |
return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1394 |
nla_put_failure: |
dc5fc579b [NETLINK]: Use nl... |
1395 |
nlmsg_trim(skb, b); |
1da177e4c Linux-2.6.12-rc2 |
1396 1397 |
return -1; } |
cc7ec456f net_sched: cleanups |
1398 |
static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) |
1da177e4c Linux-2.6.12-rc2 |
1399 |
{ |
27a884dc3 [SK_BUFF]: Conver... |
1400 |
unsigned char *b = skb_tail_pointer(skb); |
1da177e4c Linux-2.6.12-rc2 |
1401 1402 1403 1404 |
struct tc_cbq_wrropt opt; opt.flags = 0; opt.allot = cl->allot; |
cc7ec456f net_sched: cleanups |
1405 1406 |
opt.priority = cl->priority + 1; opt.cpriority = cl->cpriority + 1; |
1da177e4c Linux-2.6.12-rc2 |
1407 |
opt.weight = cl->weight; |
1e90474c3 [NET_SCHED]: Conv... |
1408 |
NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); |
1da177e4c Linux-2.6.12-rc2 |
1409 |
return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1410 |
nla_put_failure: |
dc5fc579b [NETLINK]: Use nl... |
1411 |
nlmsg_trim(skb, b); |
1da177e4c Linux-2.6.12-rc2 |
1412 1413 |
return -1; } |
cc7ec456f net_sched: cleanups |
1414 |
static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) |
1da177e4c Linux-2.6.12-rc2 |
1415 |
{ |
27a884dc3 [SK_BUFF]: Conver... |
1416 |
unsigned char *b = skb_tail_pointer(skb); |
1da177e4c Linux-2.6.12-rc2 |
1417 1418 1419 |
struct tc_cbq_ovl opt; opt.strategy = cl->ovl_strategy; |
cc7ec456f net_sched: cleanups |
1420 |
opt.priority2 = cl->priority2 + 1; |
8a47077a0 [NETLINK]: Missin... |
1421 |
opt.pad = 0; |
1a13cb63d [NET_SCHED]: sch_... |
1422 |
opt.penalty = cl->penalty; |
1e90474c3 [NET_SCHED]: Conv... |
1423 |
NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); |
1da177e4c Linux-2.6.12-rc2 |
1424 |
return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1425 |
nla_put_failure: |
dc5fc579b [NETLINK]: Use nl... |
1426 |
nlmsg_trim(skb, b); |
1da177e4c Linux-2.6.12-rc2 |
1427 1428 |
return -1; } |
cc7ec456f net_sched: cleanups |
1429 |
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) |
1da177e4c Linux-2.6.12-rc2 |
1430 |
{ |
27a884dc3 [SK_BUFF]: Conver... |
1431 |
unsigned char *b = skb_tail_pointer(skb); |
1da177e4c Linux-2.6.12-rc2 |
1432 1433 1434 |
struct tc_cbq_fopt opt; if (cl->split || cl->defmap) { |
d77fea2eb net-sched: sch_cb... |
1435 |
opt.split = cl->split ? cl->split->common.classid : 0; |
1da177e4c Linux-2.6.12-rc2 |
1436 1437 |
opt.defmap = cl->defmap; opt.defchange = ~0; |
1e90474c3 [NET_SCHED]: Conv... |
1438 |
NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); |
1da177e4c Linux-2.6.12-rc2 |
1439 1440 |
} return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1441 |
nla_put_failure: |
dc5fc579b [NETLINK]: Use nl... |
1442 |
nlmsg_trim(skb, b); |
1da177e4c Linux-2.6.12-rc2 |
1443 1444 |
return -1; } |
c3bc7cff8 [NET_SCHED]: Kill... |
1445 |
#ifdef CONFIG_NET_CLS_ACT |
cc7ec456f net_sched: cleanups |
1446 |
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) |
1da177e4c Linux-2.6.12-rc2 |
1447 |
{ |
27a884dc3 [SK_BUFF]: Conver... |
1448 |
unsigned char *b = skb_tail_pointer(skb); |
1da177e4c Linux-2.6.12-rc2 |
1449 1450 1451 1452 |
struct tc_cbq_police opt; if (cl->police) { opt.police = cl->police; |
9ef1d4c7c [NETLINK]: Missin... |
1453 1454 |
opt.__res1 = 0; opt.__res2 = 0; |
1e90474c3 [NET_SCHED]: Conv... |
1455 |
NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); |
1da177e4c Linux-2.6.12-rc2 |
1456 1457 |
} return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1458 |
nla_put_failure: |
dc5fc579b [NETLINK]: Use nl... |
1459 |
nlmsg_trim(skb, b); |
1da177e4c Linux-2.6.12-rc2 |
1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 |
return -1; } #endif static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl) { if (cbq_dump_lss(skb, cl) < 0 || cbq_dump_rate(skb, cl) < 0 || cbq_dump_wrr(skb, cl) < 0 || cbq_dump_ovl(skb, cl) < 0 || |
c3bc7cff8 [NET_SCHED]: Kill... |
1470 |
#ifdef CONFIG_NET_CLS_ACT |
1da177e4c Linux-2.6.12-rc2 |
1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 |
cbq_dump_police(skb, cl) < 0 || #endif cbq_dump_fopt(skb, cl) < 0) return -1; return 0; } static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct cbq_sched_data *q = qdisc_priv(sch); |
4b3550ef5 [NET_SCHED]: Use ... |
1481 |
struct nlattr *nest; |
1da177e4c Linux-2.6.12-rc2 |
1482 |
|
4b3550ef5 [NET_SCHED]: Use ... |
1483 1484 1485 |
nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; |
1da177e4c Linux-2.6.12-rc2 |
1486 |
if (cbq_dump_attr(skb, &q->link) < 0) |
1e90474c3 [NET_SCHED]: Conv... |
1487 |
goto nla_put_failure; |
4b3550ef5 [NET_SCHED]: Use ... |
1488 |
nla_nest_end(skb, nest); |
1da177e4c Linux-2.6.12-rc2 |
1489 |
return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1490 |
nla_put_failure: |
4b3550ef5 [NET_SCHED]: Use ... |
1491 |
nla_nest_cancel(skb, nest); |
1da177e4c Linux-2.6.12-rc2 |
1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 |
return -1; } static int cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct cbq_sched_data *q = qdisc_priv(sch); q->link.xstats.avgidle = q->link.avgidle; return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats)); } static int cbq_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) { |
cc7ec456f net_sched: cleanups |
1508 |
struct cbq_class *cl = (struct cbq_class *)arg; |
4b3550ef5 [NET_SCHED]: Use ... |
1509 |
struct nlattr *nest; |
1da177e4c Linux-2.6.12-rc2 |
1510 1511 |
if (cl->tparent) |
d77fea2eb net-sched: sch_cb... |
1512 |
tcm->tcm_parent = cl->tparent->common.classid; |
1da177e4c Linux-2.6.12-rc2 |
1513 1514 |
else tcm->tcm_parent = TC_H_ROOT; |
d77fea2eb net-sched: sch_cb... |
1515 |
tcm->tcm_handle = cl->common.classid; |
1da177e4c Linux-2.6.12-rc2 |
1516 |
tcm->tcm_info = cl->q->handle; |
4b3550ef5 [NET_SCHED]: Use ... |
1517 1518 1519 |
nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; |
1da177e4c Linux-2.6.12-rc2 |
1520 |
if (cbq_dump_attr(skb, cl) < 0) |
1e90474c3 [NET_SCHED]: Conv... |
1521 |
goto nla_put_failure; |
4b3550ef5 [NET_SCHED]: Use ... |
1522 |
nla_nest_end(skb, nest); |
1da177e4c Linux-2.6.12-rc2 |
1523 |
return skb->len; |
1e90474c3 [NET_SCHED]: Conv... |
1524 |
nla_put_failure: |
4b3550ef5 [NET_SCHED]: Use ... |
1525 |
nla_nest_cancel(skb, nest); |
1da177e4c Linux-2.6.12-rc2 |
1526 1527 1528 1529 1530 1531 1532 1533 |
return -1; } static int cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct cbq_sched_data *q = qdisc_priv(sch); |
cc7ec456f net_sched: cleanups |
1534 |
struct cbq_class *cl = (struct cbq_class *)arg; |
1da177e4c Linux-2.6.12-rc2 |
1535 1536 1537 1538 |
cl->qstats.qlen = cl->q->q.qlen; cl->xstats.avgidle = cl->avgidle; cl->xstats.undertime = 0; |
a084980dc [NET_SCHED]: kill... |
1539 |
if (cl->undertime != PSCHED_PASTPERFECT) |
8edc0c31d [NET_SCHED]: kill... |
1540 |
cl->xstats.undertime = cl->undertime - q->now; |
1da177e4c Linux-2.6.12-rc2 |
1541 1542 |
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || |
d250a5f90 pkt_sched: gen_es... |
1543 |
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 || |
1da177e4c Linux-2.6.12-rc2 |
1544 1545 1546 1547 1548 1549 1550 1551 1552 |
gnet_stats_copy_queue(d, &cl->qstats) < 0) return -1; return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); } static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { |
cc7ec456f net_sched: cleanups |
1553 |
struct cbq_class *cl = (struct cbq_class *)arg; |
1da177e4c Linux-2.6.12-rc2 |
1554 |
|
5b9a9ccfa net_sched: remove... |
1555 |
if (new == NULL) { |
3511c9132 net_sched: remove... |
1556 |
new = qdisc_create_dflt(sch->dev_queue, |
5b9a9ccfa net_sched: remove... |
1557 1558 1559 1560 |
&pfifo_qdisc_ops, cl->common.classid); if (new == NULL) return -ENOBUFS; } else { |
c3bc7cff8 [NET_SCHED]: Kill... |
1561 |
#ifdef CONFIG_NET_CLS_ACT |
5b9a9ccfa net_sched: remove... |
1562 1563 |
if (cl->police == TC_POLICE_RECLASSIFY) new->reshape_fail = cbq_reshape_fail; |
1da177e4c Linux-2.6.12-rc2 |
1564 |
#endif |
1da177e4c Linux-2.6.12-rc2 |
1565 |
} |
5b9a9ccfa net_sched: remove... |
1566 1567 1568 1569 1570 1571 1572 1573 |
sch_tree_lock(sch); *old = cl->q; cl->q = new; qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); sch_tree_unlock(sch); return 0; |
1da177e4c Linux-2.6.12-rc2 |
1574 |
} |
cc7ec456f net_sched: cleanups |
1575 |
static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg) |
1da177e4c Linux-2.6.12-rc2 |
1576 |
{ |
cc7ec456f net_sched: cleanups |
1577 |
struct cbq_class *cl = (struct cbq_class *)arg; |
1da177e4c Linux-2.6.12-rc2 |
1578 |
|
5b9a9ccfa net_sched: remove... |
1579 |
return cl->q; |
1da177e4c Linux-2.6.12-rc2 |
1580 |
} |
a37ef2e32 [NET_SCHED] sch_c... |
1581 1582 1583 1584 1585 1586 1587 |
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct cbq_class *cl = (struct cbq_class *)arg; if (cl->q->q.qlen == 0) cbq_deactivate_class(cl); } |
1da177e4c Linux-2.6.12-rc2 |
1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 |
static unsigned long cbq_get(struct Qdisc *sch, u32 classid) { struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl = cbq_class_lookup(q, classid); if (cl) { cl->refcnt++; return (unsigned long)cl; } return 0; } |
1da177e4c Linux-2.6.12-rc2 |
1599 1600 1601 |
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl) { struct cbq_sched_data *q = qdisc_priv(sch); |
547b792ca net: convert BUG_... |
1602 |
WARN_ON(cl->filters); |
1da177e4c Linux-2.6.12-rc2 |
1603 |
|
ff31ab56c net-sched: change... |
1604 |
tcf_destroy_chain(&cl->filter_list); |
1da177e4c Linux-2.6.12-rc2 |
1605 1606 |
qdisc_destroy(cl->q); qdisc_put_rtab(cl->R_tab); |
1da177e4c Linux-2.6.12-rc2 |
1607 |
gen_kill_estimator(&cl->bstats, &cl->rate_est); |
1da177e4c Linux-2.6.12-rc2 |
1608 1609 1610 |
if (cl != &q->link) kfree(cl); } |
cc7ec456f net_sched: cleanups |
1611 |
static void cbq_destroy(struct Qdisc *sch) |
1da177e4c Linux-2.6.12-rc2 |
1612 1613 |
{ struct cbq_sched_data *q = qdisc_priv(sch); |
d77fea2eb net-sched: sch_cb... |
1614 |
struct hlist_node *n, *next; |
1da177e4c Linux-2.6.12-rc2 |
1615 |
struct cbq_class *cl; |
cc7ec456f net_sched: cleanups |
1616 |
unsigned int h; |
1da177e4c Linux-2.6.12-rc2 |
1617 |
|
c3bc7cff8 [NET_SCHED]: Kill... |
1618 |
#ifdef CONFIG_NET_CLS_ACT |
1da177e4c Linux-2.6.12-rc2 |
1619 1620 1621 1622 1623 1624 1625 |
q->rx_class = NULL; #endif /* * Filters must be destroyed first because we don't destroy the * classes from root to leafs which means that filters can still * be bound to classes which have been destroyed already. --TGR '04 */ |
d77fea2eb net-sched: sch_cb... |
1626 1627 |
for (h = 0; h < q->clhash.hashsize; h++) { hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) |
ff31ab56c net-sched: change... |
1628 |
tcf_destroy_chain(&cl->filter_list); |
b00b4bf94 [NET_SCHED]: Fix ... |
1629 |
} |
d77fea2eb net-sched: sch_cb... |
1630 1631 1632 |
for (h = 0; h < q->clhash.hashsize; h++) { hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h], common.hnode) |
1da177e4c Linux-2.6.12-rc2 |
1633 |
cbq_destroy_class(sch, cl); |
1da177e4c Linux-2.6.12-rc2 |
1634 |
} |
d77fea2eb net-sched: sch_cb... |
1635 |
qdisc_class_hash_destroy(&q->clhash); |
1da177e4c Linux-2.6.12-rc2 |
1636 1637 1638 1639 |
}

/*
 * Drop one reference on a CBQ class; destroy it when the count hits zero.
 * Called via cops->put (e.g. from tc_ctl_tclass after a cops->get).
 */
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		/*
		 * rx_class may still point at this class (set on the enqueue
		 * path); clear it under the root lock so the classifier path
		 * cannot observe a freed class.
		 */
		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

static int
1e90474c3 [NET_SCHED]: Conv... |
1657 |
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, |
1da177e4c Linux-2.6.12-rc2 |
1658 1659 1660 1661 |
unsigned long *arg) { int err; struct cbq_sched_data *q = qdisc_priv(sch); |
cc7ec456f net_sched: cleanups |
1662 |
struct cbq_class *cl = (struct cbq_class *)*arg; |
1e90474c3 [NET_SCHED]: Conv... |
1663 1664 |
struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_CBQ_MAX + 1]; |
1da177e4c Linux-2.6.12-rc2 |
1665 1666 |
struct cbq_class *parent; struct qdisc_rate_table *rtab = NULL; |
cee63723b [NET_SCHED]: Prop... |
1667 |
if (opt == NULL) |
1da177e4c Linux-2.6.12-rc2 |
1668 |
return -EINVAL; |
27a3421e4 [NET_SCHED]: Use ... |
1669 |
err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); |
cee63723b [NET_SCHED]: Prop... |
1670 1671 |
if (err < 0) return err; |
1da177e4c Linux-2.6.12-rc2 |
1672 1673 1674 |
if (cl) { /* Check parent */ if (parentid) { |
d77fea2eb net-sched: sch_cb... |
1675 1676 |
if (cl->tparent && cl->tparent->common.classid != parentid) |
1da177e4c Linux-2.6.12-rc2 |
1677 1678 1679 1680 |
return -EINVAL; if (!cl->tparent && parentid != TC_H_ROOT) return -EINVAL; } |
1e90474c3 [NET_SCHED]: Conv... |
1681 |
if (tb[TCA_CBQ_RATE]) { |
71bcb09a5 tc: check for err... |
1682 1683 |
rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]); |
1da177e4c Linux-2.6.12-rc2 |
1684 1685 1686 |
if (rtab == NULL) return -EINVAL; } |
71bcb09a5 tc: check for err... |
1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 |
if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { if (rtab) qdisc_put_rtab(rtab); return err; } } |
1da177e4c Linux-2.6.12-rc2 |
1697 1698 1699 1700 1701 1702 1703 |
/* Change class parameters */ sch_tree_lock(sch); if (cl->next_alive != NULL) cbq_deactivate_class(cl); if (rtab) { |
b94c8afcb pkt_sched: remove... |
1704 1705 |
qdisc_put_rtab(cl->R_tab); cl->R_tab = rtab; |
1da177e4c Linux-2.6.12-rc2 |
1706 |
} |
1e90474c3 [NET_SCHED]: Conv... |
1707 1708 |
if (tb[TCA_CBQ_LSSOPT]) cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); |
1da177e4c Linux-2.6.12-rc2 |
1709 |
|
1e90474c3 [NET_SCHED]: Conv... |
1710 |
if (tb[TCA_CBQ_WRROPT]) { |
1da177e4c Linux-2.6.12-rc2 |
1711 |
cbq_rmprio(q, cl); |
1e90474c3 [NET_SCHED]: Conv... |
1712 |
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); |
1da177e4c Linux-2.6.12-rc2 |
1713 |
} |
1e90474c3 [NET_SCHED]: Conv... |
1714 1715 |
if (tb[TCA_CBQ_OVL_STRATEGY]) cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); |
1da177e4c Linux-2.6.12-rc2 |
1716 |
|
c3bc7cff8 [NET_SCHED]: Kill... |
1717 |
#ifdef CONFIG_NET_CLS_ACT |
1e90474c3 [NET_SCHED]: Conv... |
1718 1719 |
if (tb[TCA_CBQ_POLICE]) cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); |
1da177e4c Linux-2.6.12-rc2 |
1720 |
#endif |
1e90474c3 [NET_SCHED]: Conv... |
1721 1722 |
if (tb[TCA_CBQ_FOPT]) cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); |
1da177e4c Linux-2.6.12-rc2 |
1723 1724 1725 1726 1727 |
if (cl->q->q.qlen) cbq_activate_class(cl); sch_tree_unlock(sch); |
1da177e4c Linux-2.6.12-rc2 |
1728 1729 1730 1731 1732 |
return 0; } if (parentid == TC_H_ROOT) return -EINVAL; |
1e90474c3 [NET_SCHED]: Conv... |
1733 1734 |
if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL || tb[TCA_CBQ_LSSOPT] == NULL) |
1da177e4c Linux-2.6.12-rc2 |
1735 |
return -EINVAL; |
1e90474c3 [NET_SCHED]: Conv... |
1736 |
rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]); |
1da177e4c Linux-2.6.12-rc2 |
1737 1738 1739 1740 1741 |
if (rtab == NULL) return -EINVAL; if (classid) { err = -EINVAL; |
cc7ec456f net_sched: cleanups |
1742 1743 |
if (TC_H_MAJ(classid ^ sch->handle) || cbq_class_lookup(q, classid)) |
1da177e4c Linux-2.6.12-rc2 |
1744 1745 1746 |
goto failure; } else { int i; |
cc7ec456f net_sched: cleanups |
1747 |
classid = TC_H_MAKE(sch->handle, 0x8000); |
1da177e4c Linux-2.6.12-rc2 |
1748 |
|
cc7ec456f net_sched: cleanups |
1749 |
for (i = 0; i < 0x8000; i++) { |
1da177e4c Linux-2.6.12-rc2 |
1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 |
if (++q->hgenerator >= 0x8000) q->hgenerator = 1; if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) break; } err = -ENOSR; if (i >= 0x8000) goto failure; classid = classid|q->hgenerator; } parent = &q->link; if (parentid) { parent = cbq_class_lookup(q, parentid); err = -EINVAL; if (parent == NULL) goto failure; } err = -ENOBUFS; |
0da974f4f [NET]: Conversion... |
1770 |
cl = kzalloc(sizeof(*cl), GFP_KERNEL); |
1da177e4c Linux-2.6.12-rc2 |
1771 1772 |
if (cl == NULL) goto failure; |
71bcb09a5 tc: check for err... |
1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 |
if (tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { kfree(cl); goto failure; } } |
1da177e4c Linux-2.6.12-rc2 |
1783 1784 1785 |
cl->R_tab = rtab; rtab = NULL; cl->refcnt = 1; |
3511c9132 net_sched: remove... |
1786 1787 |
cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); if (!cl->q) |
1da177e4c Linux-2.6.12-rc2 |
1788 |
cl->q = &noop_qdisc; |
d77fea2eb net-sched: sch_cb... |
1789 |
cl->common.classid = classid; |
1da177e4c Linux-2.6.12-rc2 |
1790 1791 1792 1793 1794 |
cl->tparent = parent; cl->qdisc = sch; cl->allot = parent->allot; cl->quantum = cl->allot; cl->weight = cl->R_tab->rate.rate; |
1da177e4c Linux-2.6.12-rc2 |
1795 1796 1797 1798 1799 1800 1801 1802 |
sch_tree_lock(sch); cbq_link_class(cl); cl->borrow = cl->tparent; if (cl->tparent != &q->link) cl->share = cl->tparent; cbq_adjust_levels(parent); cl->minidle = -0x7FFFFFFF; |
1e90474c3 [NET_SCHED]: Conv... |
1803 1804 |
cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); |
cc7ec456f net_sched: cleanups |
1805 |
if (cl->ewma_log == 0) |
1da177e4c Linux-2.6.12-rc2 |
1806 |
cl->ewma_log = q->link.ewma_log; |
cc7ec456f net_sched: cleanups |
1807 |
if (cl->maxidle == 0) |
1da177e4c Linux-2.6.12-rc2 |
1808 |
cl->maxidle = q->link.maxidle; |
cc7ec456f net_sched: cleanups |
1809 |
if (cl->avpkt == 0) |
1da177e4c Linux-2.6.12-rc2 |
1810 1811 |
cl->avpkt = q->link.avpkt; cl->overlimit = cbq_ovl_classic; |
1e90474c3 [NET_SCHED]: Conv... |
1812 1813 |
if (tb[TCA_CBQ_OVL_STRATEGY]) cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); |
c3bc7cff8 [NET_SCHED]: Kill... |
1814 |
#ifdef CONFIG_NET_CLS_ACT |
1e90474c3 [NET_SCHED]: Conv... |
1815 1816 |
if (tb[TCA_CBQ_POLICE]) cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); |
1da177e4c Linux-2.6.12-rc2 |
1817 |
#endif |
1e90474c3 [NET_SCHED]: Conv... |
1818 1819 |
if (tb[TCA_CBQ_FOPT]) cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); |
1da177e4c Linux-2.6.12-rc2 |
1820 |
sch_tree_unlock(sch); |
d77fea2eb net-sched: sch_cb... |
1821 |
qdisc_class_hash_grow(sch, &q->clhash); |
1da177e4c Linux-2.6.12-rc2 |
1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 |
*arg = (unsigned long)cl; return 0; failure: qdisc_put_rtab(rtab); return err; } static int cbq_delete(struct Qdisc *sch, unsigned long arg) { struct cbq_sched_data *q = qdisc_priv(sch); |
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen;

	/* Refuse to delete a class that is still referenced or is the root. */
	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	/* Flush the class queue and propagate the qlen change upwards. */
	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	/* Drop any scheduler-state pointers that still reference this class. */
	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

/*
 * Return the filter-list anchor for a class (cops->tcf_chain);
 * arg == 0 selects the root (link) class.
 */
static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

/*
 * Bind a filter to the class identified by classid (cops->bind_tcf).
 * Takes a filter reference (cl->filters) on success; returns 0 if the
 * class doesn't exist or the binding would be to an ancestor level.
 */
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

/* Release the filter reference taken in cbq_bind_filter(). */
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

/*
 * Iterate over all classes for a dump (cops->walk), honouring the
 * walker's skip/count/stop protocol.
 */
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	struct hlist_node *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
/* Class-level operations exposed to the tc class/filter API. */
static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};
/* Qdisc-level operations registered with the packet scheduler core. */
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

/* Register the CBQ qdisc with the scheduler core at module load. */
static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
/* Unregister the CBQ qdisc on module unload. */
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");