Blame view
net/sched/sch_multiq.c
9.29 KB
92651940a
|
1 2 3 4 5 6 7 8 9 10 11 12 13 |
/* * Copyright (c) 2008, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with |
c057b190b
|
14 |
* this program; if not, see <http://www.gnu.org/licenses/>. |
92651940a
|
15 16 17 18 19 |
* * Author: Alexander Duyck <alexander.h.duyck@intel.com> */ #include <linux/module.h> |
5a0e3ad6a
|
20 |
#include <linux/slab.h> |
92651940a
|
21 22 23 24 25 26 27 |
#include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/pkt_sched.h> |
cf1facda2
|
28 |
#include <net/pkt_cls.h> |
92651940a
|
29 30 31 32 33 |
/* Per-qdisc private state for the multiq scheduler. */
struct multiq_sched_data {
	u16 bands;			/* number of active bands (== real_num_tx_queues) */
	u16 max_bands;			/* allocated size of queues[] (== num_tx_queues) */
	u16 curband;			/* last band serviced by dequeue (round-robin cursor) */
	struct tcf_proto __rcu *filter_list;	/* classifier chain, RCU-protected */
	struct tcf_block *block;	/* shared filter block owning filter_list */
	struct Qdisc **queues;		/* one child qdisc per band */
};


/* multiq_classify - pick the child qdisc for @skb.
 *
 * Runs the attached tc filters first (their verdict can steal/drop the
 * packet), then falls back to the skb's queue mapping.  Out-of-range
 * mappings are directed to band 0.
 *
 * Returns the chosen child, or NULL when the packet was consumed by a
 * filter action; in that case *qerr carries the NET_XMIT_* result.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	/* Default: a NULL return means "bypassed", not "stolen". */
	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		/* Packet was taken by an action; report success-but-stolen
		 * so the caller does not count it as a drop.
		 */
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);
	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

/* multiq_enqueue - classify @skb and enqueue it on the chosen child.
 *
 * Mirrors the child's NET_XMIT_* result to the caller and keeps this
 * qdisc's qlen and drop statistics in sync with the outcome.
 */
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	       struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		/* Stolen packets are not drops; only count a drop when the
		 * classifier bypassed without consuming the skb.
		 */
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* Child refused the packet; only congestion-type failures count
	 * as drops at this level.
	 */
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
92651940a
|
96 97 98 99 100 101 102 103 104 105 106 107 108 109 |
/* multiq_dequeue - round-robin dequeue across the active bands.
 *
 * Advances q->curband before each attempt so successive calls service
 * the bands fairly.  Bands whose underlying hardware TX queue is
 * stopped are skipped entirely so one stalled subqueue cannot block
 * the others.  Returns the dequeued skb, or NULL if every band is
 * empty or stopped.
 */
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				/* Account the packet at this level too. */
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;
}
8e3af9789
|
126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 |
static struct sk_buff *multiq_peek(struct Qdisc *sch) { struct multiq_sched_data *q = qdisc_priv(sch); unsigned int curband = q->curband; struct Qdisc *qdisc; struct sk_buff *skb; int band; for (band = 0; band < q->bands; band++) { /* cycle through bands to ensure fairness */ curband++; if (curband >= q->bands) curband = 0; /* Check that target subqueue is available before |
f30ab418a
|
141 |
* pulling an skb to avoid head-of-line blocking. |
8e3af9789
|
142 |
*/ |
734664982
|
143 144 |
if (!netif_xmit_stopped( netdev_get_tx_queue(qdisc_dev(sch), curband))) { |
8e3af9789
|
145 146 147 148 149 150 151 152 153 |
qdisc = q->queues[curband]; skb = qdisc->ops->peek(qdisc); if (skb) return skb; } } return NULL; } |
92651940a
|
154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 |
/* multiq_reset - flush every child and restart the round-robin scan. */
static void
multiq_reset(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u16 i;

	for (i = 0; i < q->bands; i++)
		qdisc_reset(q->queues[i]);
	sch->q.qlen = 0;
	q->curband = 0;
}

/* multiq_destroy - release filters, child qdiscs and the band array. */
static void
multiq_destroy(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i;

	/* Drop the classifier block before tearing down the children. */
	tcf_block_put(q->block);
	for (i = 0; i < q->bands; i++)
		qdisc_destroy(q->queues[i]);

	kfree(q->queues);
}
2030721cc
|
177 178 |
/* multiq_tune - (re)configure the number of active bands.
 *
 * The band count is forced to the device's real_num_tx_queues (the
 * value supplied in @opt is overwritten in place).  Bands falling out
 * of range are replaced by &noop_qdisc and destroyed; bands coming
 * into range get a fresh default pfifo child.  All swaps of the
 * queues[] slots happen under sch_tree_lock(); the (sleeping)
 * qdisc_create_dflt() allocation is deliberately done outside it.
 *
 * Returns 0 on success or a negative errno.
 */
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	/* Ignore the requested value: bands always track the device. */
	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	/* Retire children beyond the new band count. */
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			/* Propagate the removed backlog up the tree
			 * before destroying the child.
			 */
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	/* Populate newly active bands with default pfifo children. */
	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			/* Allocation may sleep; must run unlocked. */
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1), extack);
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;
				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
e63d7dfd2
|
233 234 |
/* multiq_init - one-time setup: filter block, band array, then tune.
 *
 * Allocates queues[] sized for every possible TX queue of the device
 * (num_tx_queues), points each slot at &noop_qdisc, and delegates the
 * actual band activation to multiq_tune().  On failure the core calls
 * multiq_destroy(), which copes with q->queues == NULL.
 */
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	return multiq_tune(sch, opt, extack);
}

/* multiq_dump - emit the qdisc's TCA_OPTIONS (band counts) to @skb.
 * Returns the new message length, or -1 after trimming on overflow.
 */
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	/* Roll the message back to where we started. */
	nlmsg_trim(skb, b);
	return -1;
}

/* multiq_graft - replace the child of class @arg (1-based) with @new.
 * A NULL @new detaches the band by installing &noop_qdisc.  The old
 * child is returned through @old for the caller to dispose of.
 */
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

/* multiq_leaf - return the child qdisc of class @arg (1-based). */
static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}
143976ce9
|
291 |
/* multiq_find - map a class id to its 1-based band handle.
 * Returns 0 when the minor id does not name an active band.
 */
static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	/* Valid handles are 1..bands; for band == 0 the unsigned
	 * subtraction wraps, so this single compare rejects it too.
	 */
	return (band - 1 < q->bands) ? band : 0;
}

/* multiq_bind - binding a filter to a class is just a lookup here. */
static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_find(sch, classid);
}

/* multiq_unbind - nothing to release; multiq_bind() takes no reference. */
static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}
92651940a
|
309 310 311 312 |
/* multiq_dump_class - fill @tcm with the identity of class @cl.
 * The class handle minor is the 1-based band number; tcm_info reports
 * the attached child qdisc's handle.
 */
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

/* multiq_dump_class_stats - copy the child's byte/packet and queue
 * statistics into the dump; returns -1 if either copy fails.
 */
static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}

/* multiq_walk - iterate the walker callback over classes 1..bands,
 * honouring the standard skip/count/stop walker protocol.
 */
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		/* Consume the skip budget before issuing callbacks. */
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
cbaacc4e8
|
353 354 |
/* multiq_tcf_block - expose the filter block.  Only the qdisc itself
 * (cl == 0) carries filters; individual band classes have none.
 */
static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

/* Class operations: one class per band, addressed by 1-based minor. */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.find		=	multiq_find,
	.walk		=	multiq_walk,
	.tcf_block	=	multiq_tcf_block,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_unbind,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};

/* Qdisc operations table registered under the "multiq" id. */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};

/* Register the qdisc type with the packet scheduler core on load. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");