Blame view
net/sched/sch_netem.c
22.6 KB
1da177e4c
|
1 2 3 4 5 6 |
/* * net/sched/sch_netem.c Network emulator * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version |
798b6b19d
|
7 |
* 2 of the License. |
1da177e4c
|
8 9 |
* * Many of the algorithms and ideas for this came from |
10297b993
|
10 |
* NIST Net which is not copyrighted. |
1da177e4c
|
11 12 13 14 |
* * Authors: Stephen Hemminger <shemminger@osdl.org> * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> */ |
b7f080cfe
|
15 |
#include <linux/mm.h> |
1da177e4c
|
16 |
#include <linux/module.h> |
5a0e3ad6a
|
17 |
#include <linux/slab.h> |
1da177e4c
|
18 19 20 |
#include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> |
1da177e4c
|
21 |
#include <linux/skbuff.h> |
78776d3f2
|
22 |
#include <linux/vmalloc.h> |
1da177e4c
|
23 |
#include <linux/rtnetlink.h> |
dc5fc579b
|
24 |
#include <net/netlink.h> |
1da177e4c
|
25 |
#include <net/pkt_sched.h> |
250a65f78
|
26 |
#define VERSION "1.3" |
eb229c4cd
|
27 |
|
1da177e4c
|
28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
/* Network Emulation Queuing algorithm. ==================================== Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based Network Emulation Tool [2] Luigi Rizzo, DummyNet for FreeBSD ---------------------------------------------------------------- This started out as a simple way to delay outgoing packets to test TCP but has grown to include most of the functionality of a full blown network emulator like NISTnet. It can delay packets and add random jitter (and correlation). The random distribution can be loaded from a table as well to provide normal, Pareto, or experimental curves. Packet loss, duplication, and reordering can also be emulated. This qdisc does not do classification that can be handled in layering other disciplines. It does not need to do bandwidth control either since that can be handled by using token bucket or other rate control. |
661b79725
|
49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
Correlated Loss Generator models Added generation of correlated loss according to the "Gilbert-Elliot" model, a 4-state markov model. References: [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general and intuitive loss model for packet networks and its implementation in the Netem module in the Linux kernel", available in [1] Authors: Stefano Salsano <stefano.salsano at uniroma2.it Fabio Ludovici <fabio.ludovici at yahoo.it> |
1da177e4c
|
63 64 65 66 |
 */

/* Per-qdisc private state for the netem network emulator. */
struct netem_sched_data {
	/* Inner child qdisc (a private tfifo by default) that actually
	 * holds the delayed packets, ordered by time_to_send. */
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;	/* timer used to wake dequeue */

	psched_tdiff_t latency;		/* base added delay */
	psched_tdiff_t jitter;		/* delay variation (sigma) */

	u32 loss;			/* random-loss probability (scaled) */
	u32 limit;			/* max queue length */
	u32 counter;			/* packets since last reorder event */
	u32 gap;			/* reorder every Nth packet */
	u32 duplicate;			/* duplication probability (scaled) */
	u32 reorder;			/* reorder probability (scaled) */
	u32 corrupt;			/* corruption probability (scaled) */

	/* State for the correlated pseudo-random generators; one
	 * instance per emulated impairment so they stay independent. */
	struct crndstate {
		u32 last;		/* previous output, for correlation */
		u32 rho;		/* correlation coefficient, scaled */
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* Optional user-loaded delay distribution table.
	 * NOTE: [0] is the pre-C99 flexible-array idiom; allocated with
	 * trailing storage in get_dist_table(). */
	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	/* Which loss generator is active (see loss_event()). */
	enum  {
		CLG_RANDOM,		/* legacy Bernoulli loss */
		CLG_4_STATES,		/* 4-state Markov (GI model) */
		CLG_GILB_ELL,		/* Gilbert-Elliot model */
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;	/* absolute release time */
};
5f86173bd
|
113 114 |
/* Return netem's private control block inside skb->cb.
 * It lives after the generic qdisc_skb_cb; the BUILD_BUG_ON proves at
 * compile time that both fit in the fixed-size skb->cb array. */
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct qdisc_skb_cb) +
				sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
1da177e4c
|
119 120 121 122 123 124 125 126 127 128 129 130 131 |
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	/* 32.32 fixed-point blend of the fresh random value with the
	 * previous output: answer = value*(1-rho) + last*rho, with rho
	 * scaled so that ~0 means full correlation. */
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
661b79725
|
145 146 147 148 149 150 151 152 153 154 |
/* loss_4state - 4-state model loss generator * Generates losses according to the 4-state Markov chain adopted in * the GI (General and Intuitive) loss model. */ static bool loss_4state(struct netem_sched_data *q) { struct clgstate *clg = &q->clg; u32 rnd = net_random(); /* |
25985edce
|
155 |
* Makes a comparison between rnd and the transition |
661b79725
|
156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 |
* probabilities outgoing from the current state, then decides the * next state and if the next packet has to be transmitted or lost. * The four states correspond to: * 1 => successfully transmitted packets within a gap period * 4 => isolated losses within a gap period * 3 => lost packets within a burst period * 2 => successfully transmitted packets within a burst period */ switch (clg->state) { case 1: if (rnd < clg->a4) { clg->state = 4; return true; } else if (clg->a4 < rnd && rnd < clg->a1) { clg->state = 3; return true; } else if (clg->a1 < rnd) clg->state = 1; break; case 2: if (rnd < clg->a5) { clg->state = 3; return true; } else clg->state = 2; break; case 3: if (rnd < clg->a3) clg->state = 2; else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { clg->state = 1; return true; } else if (clg->a2 + clg->a3 < rnd) { clg->state = 3; return true; } break; case 4: clg->state = 1; break; } return false; } /* loss_gilb_ell - Gilbert-Elliot model loss generator * Generates losses according to the Gilbert-Elliot loss model or * its special cases (Gilbert or Simple Gilbert) * |
25985edce
|
207 |
* Makes a comparison between random number and the transition |
661b79725
|
208 |
* probabilities outgoing from the current state, then decides the |
25985edce
|
209 |
* next state. A second random number is extracted and the comparison |
661b79725
|
210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 |
* with the loss probability of the current state decides if the next * packet will be transmitted or lost. */ static bool loss_gilb_ell(struct netem_sched_data *q) { struct clgstate *clg = &q->clg; switch (clg->state) { case 1: if (net_random() < clg->a1) clg->state = 2; if (net_random() < clg->a4) return true; case 2: if (net_random() < clg->a2) clg->state = 1; if (clg->a3 > net_random()) return true; } return false; } static bool loss_event(struct netem_sched_data *q) { switch (q->loss_model) { case CLG_RANDOM: /* Random packet drop 0 => none, ~0 => all */ return q->loss && q->loss >= get_crandom(&q->loss_cor); case CLG_4_STATES: /* 4state loss model algorithm (used also for GI model) * Extracts a value from the markov 4 state loss generator, * if it is 1 drops a packet and if needed writes the event in * the kernel logs */ return loss_4state(q); case CLG_GILB_ELL: /* Gilbert-Elliot loss model algorithm * Extracts a value from the Gilbert-Elliot loss generator, * if it is 1 drops a packet and if needed writes the event in * the kernel logs */ return loss_gilb_ell(q); } return false; /* not reached */ } |
1da177e4c
|
259 260 261 262 |
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;	/* no jitter configured */

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	/* Table entries are scaled by NETEM_DIST_SCALE; split sigma into
	 * quotient and remainder so the multiply cannot overflow and the
	 * result is rounded to the nearest unit. */
	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
0afb51e72
|
289 290 291 292 293 294 |
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;	/* copies of this packet to queue (0 = drop) */

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	/* Drop cancels a duplicate and vice versa; count==0 means the
	 * single packet was lost. Reported as SUCCESS|BYPASS so the
	 * parent does not bump its own drop/backlog accounting. */
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit in one random header-area byte */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		/* bypass the child's enqueue: jump the queue directly */
		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
cc7ec456f
|
381 |
/* Drop one packet from the child qdisc (if it supports ->drop) and
 * mirror the accounting on this qdisc. Returns the dropped packet's
 * length, or 0 if nothing could be dropped. */
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}
1da177e4c
|
392 393 394 395 |
/* Release the head packet once its scheduled time_to_send has passed;
 * otherwise arm the watchdog to retry at that time and return NULL. */
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;	/* watchdog already pending */

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* if more time remaining? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		/* head not due yet: sleep until its release time */
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}
1da177e4c
|
428 429 430 431 432 |
/* Flush all queued packets and cancel the pending watchdog timer. */
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}
6373a9a28
|
436 437 438 439 440 441 442 443 444 |
/* Free a distribution table from get_dist_table().
 * The table may live in kmalloc or vmalloc space, so the matching
 * release function is chosen at runtime; NULL is tolerated. */
static void dist_free(struct disttable *d)
{
	if (!d)
		return;

	if (is_vmalloc_addr(d))
		vfree(d);
	else
		kfree(d);
}
1da177e4c
|
445 446 447 448 |
/* * Distribution data is a variable size payload containing * signed 16 bit values. */ |
1e90474c3
|
449 |
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr) |
1da177e4c
|
450 451 |
{ struct netem_sched_data *q = qdisc_priv(sch); |
6373a9a28
|
452 |
size_t n = nla_len(attr)/sizeof(__s16); |
1e90474c3
|
453 |
const __s16 *data = nla_data(attr); |
7698b4fca
|
454 |
spinlock_t *root_lock; |
1da177e4c
|
455 456 |
struct disttable *d; int i; |
6373a9a28
|
457 |
size_t s; |
1da177e4c
|
458 |
|
df173bda2
|
459 |
if (n > NETEM_DIST_MAX) |
1da177e4c
|
460 |
return -EINVAL; |
6373a9a28
|
461 462 463 464 |
s = sizeof(struct disttable) + n * sizeof(s16); d = kmalloc(s, GFP_KERNEL); if (!d) d = vmalloc(s); |
1da177e4c
|
465 466 467 468 469 470 |
if (!d) return -ENOMEM; d->size = n; for (i = 0; i < n; i++) d->table[i] = data[i]; |
10297b993
|
471 |
|
102396ae6
|
472 |
root_lock = qdisc_root_sleeping_lock(sch); |
7698b4fca
|
473 474 |
spin_lock_bh(root_lock); |
6373a9a28
|
475 |
dist_free(q->delay_dist); |
b94c8afcb
|
476 |
q->delay_dist = d; |
7698b4fca
|
477 |
spin_unlock_bh(root_lock); |
1da177e4c
|
478 479 |
return 0; } |
265eb67fb
|
480 |
/* Apply TCA_NETEM_CORR: (re)seed the delay/loss/duplication correlated
 * random generators with the user-supplied correlation coefficients. */
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}
265eb67fb
|
489 |
/* Apply TCA_NETEM_REORDER: reorder probability and its correlation. */
static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}
265eb67fb
|
497 |
/* Apply TCA_NETEM_CORRUPT: corruption probability and its correlation. */
static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}
661b79725
|
505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 |
/* Parse the nested TCA_NETEM_LOSS attribute and configure the
 * correlated-loss generator (4-state GI or Gilbert-Elliot).
 * Returns 0 on success or -EINVAL on malformed/unknown attributes. */
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch(type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			/* chain always starts in state 1 (gap period) */
			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
				/* FIX: message said "gi" (copy-paste from
				 * the branch above) for the GE model. */
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;

			/* start in the good state */
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
27a3421e4
|
562 563 564 565 |
/* Netlink validation policy for netem options; fixed-size structs for
 * the legacy attributes, a nested attribute for the loss models. */
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
};
2c10b32bf
|
568 569 570 571 |
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, const struct nla_policy *policy, int len) { int nested_len = nla_len(nla) - NLA_ALIGN(len); |
661b79725
|
572 573 574 |
if (nested_len < 0) { pr_info("netem: invalid attributes len %d ", nested_len); |
2c10b32bf
|
575 |
return -EINVAL; |
661b79725
|
576 |
} |
2c10b32bf
|
577 578 579 |
if (nested_len >= nla_attr_size(0)) return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), nested_len, policy); |
661b79725
|
580 |
|
2c10b32bf
|
581 582 583 |
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); return 0; } |
c865e5d99
|
584 |
/* Parse netlink message to set options */ |
1e90474c3
|
585 |
static int netem_change(struct Qdisc *sch, struct nlattr *opt) |
1da177e4c
|
586 587 |
{ struct netem_sched_data *q = qdisc_priv(sch); |
b03f46720
|
588 |
struct nlattr *tb[TCA_NETEM_MAX + 1]; |
1da177e4c
|
589 590 |
struct tc_netem_qopt *qopt; int ret; |
10297b993
|
591 |
|
b03f46720
|
592 |
if (opt == NULL) |
1da177e4c
|
593 |
return -EINVAL; |
2c10b32bf
|
594 595 |
qopt = nla_data(opt); ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); |
b03f46720
|
596 597 |
if (ret < 0) return ret; |
fb0305ce1
|
598 |
ret = fifo_set_limit(q->qdisc, qopt->limit); |
1da177e4c
|
599 |
if (ret) { |
250a65f78
|
600 601 |
pr_info("netem: can't set fifo limit "); |
1da177e4c
|
602 603 |
return ret; } |
10297b993
|
604 |
|
1da177e4c
|
605 606 607 608 |
q->latency = qopt->latency; q->jitter = qopt->jitter; q->limit = qopt->limit; q->gap = qopt->gap; |
0dca51d36
|
609 |
q->counter = 0; |
1da177e4c
|
610 611 |
q->loss = qopt->loss; q->duplicate = qopt->duplicate; |
bb2f8cc0e
|
612 613 |
/* for compatibility with earlier versions. * if gap is set, need to assume 100% probability |
0dca51d36
|
614 |
*/ |
a362e0a78
|
615 616 |
if (q->gap) q->reorder = ~0; |
0dca51d36
|
617 |
|
265eb67fb
|
618 619 |
if (tb[TCA_NETEM_CORR]) get_correlation(sch, tb[TCA_NETEM_CORR]); |
1da177e4c
|
620 |
|
b03f46720
|
621 622 623 624 625 |
if (tb[TCA_NETEM_DELAY_DIST]) { ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); if (ret) return ret; } |
c865e5d99
|
626 |
|
265eb67fb
|
627 628 |
if (tb[TCA_NETEM_REORDER]) get_reorder(sch, tb[TCA_NETEM_REORDER]); |
1da177e4c
|
629 |
|
265eb67fb
|
630 631 |
if (tb[TCA_NETEM_CORRUPT]) get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); |
1da177e4c
|
632 |
|
661b79725
|
633 634 635 636 637 |
q->loss_model = CLG_RANDOM; if (tb[TCA_NETEM_LOSS]) ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); return ret; |
1da177e4c
|
638 |
} |
300ce174e
|
639 640 641 642 643 644 |
/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skb's
 */
struct fifo_sched_data {
	u32 limit;		/* max queue length in packets */
	psched_time_t oldest;	/* largest time_to_send seen at the tail */
};

/* Insert nskb keeping the queue sorted by time_to_send (ascending).
 * Common case -- monotonically increasing timestamps -- is O(1). */
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		/* walk backwards to find the insertion point */
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
1e90474c3
|
675 |
/* Initialize (or change) the tfifo: take the limit from netlink options
 * when given, otherwise derive it from the device's tx_queue_len. */
static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

/* Dump the tfifo's limit as a tc_fifo_qopt TLV. */
static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}
20fea08b5
|
701 |
/* Operations for netem's private time-sorted FIFO; only enqueue and
 * init/dump are special, the rest use the generic queue helpers. */
static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};
1e90474c3
|
713 |
static int netem_init(struct Qdisc *sch, struct nlattr *opt) |
1da177e4c
|
714 715 716 717 718 719 |
{ struct netem_sched_data *q = qdisc_priv(sch); int ret; if (!opt) return -EINVAL; |
59cb5c673
|
720 |
qdisc_watchdog_init(&q->watchdog, sch); |
1da177e4c
|
721 |
|
661b79725
|
722 |
q->loss_model = CLG_RANDOM; |
3511c9132
|
723 |
q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops, |
9f9afec48
|
724 |
TC_H_MAKE(sch->handle, 1)); |
1da177e4c
|
725 |
if (!q->qdisc) { |
250a65f78
|
726 727 |
pr_notice("netem: qdisc create tfifo qdisc failed "); |
1da177e4c
|
728 729 730 731 732 |
return -ENOMEM; } ret = netem_change(sch, opt); if (ret) { |
250a65f78
|
733 734 |
pr_info("netem: change failed "); |
1da177e4c
|
735 736 737 738 739 740 741 742 |
qdisc_destroy(q->qdisc); } return ret; } static void netem_destroy(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); |
59cb5c673
|
743 |
qdisc_watchdog_cancel(&q->watchdog); |
1da177e4c
|
744 |
qdisc_destroy(q->qdisc); |
6373a9a28
|
745 |
dist_free(q->delay_dist); |
1da177e4c
|
746 |
} |
661b79725
|
747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 |
/* Emit the active loss model as a nested TCA_NETEM_LOSS attribute.
 * The legacy random model emits nothing (the nest is cancelled).
 * Returns 0 on success, -1 when the skb ran out of room. */
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		/* translate the generic a1..a5 back to GI parameters */
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
		break;
	}
	case CLG_GILB_ELL: {
		/* translate the generic a1..a4 back to GE parameters */
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
1da177e4c
|
794 795 796 |
/* Dump the full netem configuration: the tc_netem_qopt header followed
 * by correlation, reorder, corrupt and loss-model attributes.
 * Returns the new message length, or -1 after trimming on overflow. */
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	/* remember the tail so a partial dump can be trimmed away */
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
10f6dfcfd
|
832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 |
/* netem exposes exactly one class (minor 1) wrapping the child qdisc;
 * the class ops below implement that single-class view. */
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

/* Replace the child qdisc; the old child is drained and returned. */
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

/* Class handles are not refcounted; the single class is always "1". */
static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

/* Walk the (single-element) class list for dump/iteration. */
static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};
20fea08b5
|
899 |
static struct Qdisc_ops netem_qdisc_ops __read_mostly = { |
1da177e4c
|
900 |
.id = "netem", |
10f6dfcfd
|
901 |
.cl_ops = &netem_class_ops, |
1da177e4c
|
902 903 904 |
.priv_size = sizeof(struct netem_sched_data), .enqueue = netem_enqueue, .dequeue = netem_dequeue, |
77be155cb
|
905 |
.peek = qdisc_peek_dequeued, |
1da177e4c
|
906 907 908 909 910 911 912 913 914 915 916 917 |
.drop = netem_drop, .init = netem_init, .reset = netem_reset, .destroy = netem_destroy, .change = netem_change, .dump = netem_dump, .owner = THIS_MODULE, }; static int __init netem_module_init(void) { |
eb229c4cd
|
918 919 |
pr_info("netem: version " VERSION " "); |
1da177e4c
|
920 921 922 923 924 925 926 927 928 |
return register_qdisc(&netem_qdisc_ops); } static void __exit netem_module_exit(void) { unregister_qdisc(&netem_qdisc_ops); } module_init(netem_module_init) module_exit(netem_module_exit) MODULE_LICENSE("GPL"); |