Blame view

net/sched/sch_codel.c 8.38 KB
76e3cc126   Eric Dumazet   codel: Controlled...
1
2
3
4
5
6
7
8
  /*
   * Codel - The Controlled-Delay Active Queue Management algorithm
   *
   *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
   *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
   *
   *  Implemented on linux by :
   *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
80ba92fa1   Eric Dumazet   codel: add ce_thr...
9
   *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
76e3cc126   Eric Dumazet   codel: Controlled...
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
   *
   * Redistribution and use in source and binary forms, with or without
   * modification, are permitted provided that the following conditions
   * are met:
   * 1. Redistributions of source code must retain the above copyright
   *    notice, this list of conditions, and the following disclaimer,
   *    without modification.
   * 2. Redistributions in binary form must reproduce the above copyright
   *    notice, this list of conditions and the following disclaimer in the
   *    documentation and/or other materials provided with the distribution.
   * 3. The names of the authors may not be used to endorse or promote products
   *    derived from this software without specific prior written permission.
   *
   * Alternatively, provided that this notice is retained in full, this
   * software may be distributed under the terms of the GNU General
   * Public License ("GPL") version 2, in which case the provisions of the
   * GPL apply INSTEAD OF those given above.
   *
   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   * DAMAGE.
   *
   */
  
  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/types.h>
  #include <linux/kernel.h>
  #include <linux/errno.h>
  #include <linux/skbuff.h>
ce5b4b977   Geert Uytterhoeven   net/codel: Add mi...
49
  #include <linux/prefetch.h>
76e3cc126   Eric Dumazet   codel: Controlled...
50
51
  #include <net/pkt_sched.h>
  #include <net/codel.h>
d068ca2ae   Michal Kazior   codel: split into...
52
53
  #include <net/codel_impl.h>
  #include <net/codel_qdisc.h>
76e3cc126   Eric Dumazet   codel: Controlled...
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
  
  
/* Default hard limit on queue length, in packets. */
#define DEFAULT_CODEL_LIMIT 1000

/* Per-qdisc private state, obtained via qdisc_priv(sch). */
struct codel_sched_data {
	struct codel_params	params;		/* user tunables: target, interval, ecn, ce_threshold, mtu */
	struct codel_vars	vars;		/* CoDel control-law state machine variables */
	struct codel_stats	stats;		/* drop/mark counters shared with codel_dequeue() */
	u32			drop_overlimit;	/* packets dropped at enqueue because the queue was full */
};
  
  /* This is the specific function called from codel_dequeue()
   * to dequeue a packet from queue. Note: backlog is handled in
   * codel, we dont need to reduce it here.
   */
79bdc4c86   Michal Kazior   codel: generalize...
69
  static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
76e3cc126   Eric Dumazet   codel: Controlled...
70
  {
79bdc4c86   Michal Kazior   codel: generalize...
71
  	struct Qdisc *sch = ctx;
ed760cb8a   Florian Westphal   sched: replace __...
72
  	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
76e3cc126   Eric Dumazet   codel: Controlled...
73

79bdc4c86   Michal Kazior   codel: generalize...
74
75
  	if (skb)
  		sch->qstats.backlog -= qdisc_pkt_len(skb);
76e3cc126   Eric Dumazet   codel: Controlled...
76
77
78
  	prefetch(&skb->end); /* we'll need skb_shinfo() */
  	return skb;
  }
79bdc4c86   Michal Kazior   codel: generalize...
79
80
81
/* Drop callback handed to codel_dequeue(): free a packet the
 * algorithm decided to drop and account it in the qdisc drop stats.
 */
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}
76e3cc126   Eric Dumazet   codel: Controlled...
85
86
87
88
/* Qdisc_ops.dequeue: run the CoDel state machine, which may drop
 * (via drop_func) and/or ECN-mark packets before returning the one
 * to transmit. Also flushes deferred drop accounting to the parent.
 */
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	/* Update byte/packet counters only for a packet actually handed out. */
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}
520ac30f4   Eric Dumazet   net_sched: drop p...
105
106
  static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
  			       struct sk_buff **to_free)
76e3cc126   Eric Dumazet   codel: Controlled...
107
108
109
110
111
112
113
114
115
  {
  	struct codel_sched_data *q;
  
  	if (likely(qdisc_qlen(sch) < sch->limit)) {
  		codel_set_enqueue_time(skb);
  		return qdisc_enqueue_tail(skb, sch);
  	}
  	q = qdisc_priv(sch);
  	q->drop_overlimit++;
520ac30f4   Eric Dumazet   net_sched: drop p...
116
  	return qdisc_drop(skb, sch, to_free);
76e3cc126   Eric Dumazet   codel: Controlled...
117
118
119
120
121
122
123
  }
  
/* Netlink attribute policy: every CoDel attribute is a u32
 * (times are expressed in usec by userspace).
 */
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};
  
/* Qdisc_ops.change: parse netlink parameters and apply them under the
 * qdisc tree lock, trimming the queue if the new limit requires it.
 * Returns 0 on success or a negative errno.
 */
static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* Userspace supplies times in usec; convert to codel_time_t.
	 * The u64 cast keeps the multiply from overflowing 32 bits.
	 */
	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

	/* If the new limit is below the current queue length, drop from
	 * the head until the queue fits again.
	 */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	/* Propagate the qlen/backlog change to ancestor qdiscs. */
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}
  
/* Qdisc_ops.init: set defaults, then apply any user-supplied
 * parameters via codel_change(). Returns 0 or a negative errno.
 */
static int codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;
	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt);

		if (err)
			return err;
	}

	/* Allow the fast bypass path only when queuing is possible at all
	 * (a zero limit means every packet is dropped at enqueue).
	 */
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}
  
  static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
  {
  	struct codel_sched_data *q = qdisc_priv(sch);
  	struct nlattr *opts;
  
  	opts = nla_nest_start(skb, TCA_OPTIONS);
  	if (opts == NULL)
  		goto nla_put_failure;
  
  	if (nla_put_u32(skb, TCA_CODEL_TARGET,
  			codel_time_to_us(q->params.target)) ||
  	    nla_put_u32(skb, TCA_CODEL_LIMIT,
  			sch->limit) ||
  	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
  			codel_time_to_us(q->params.interval)) ||
  	    nla_put_u32(skb, TCA_CODEL_ECN,
  			q->params.ecn))
  		goto nla_put_failure;
80ba92fa1   Eric Dumazet   codel: add ce_thr...
221
222
223
224
  	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
  	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
  			codel_time_to_us(q->params.ce_threshold)))
  		goto nla_put_failure;
76e3cc126   Eric Dumazet   codel: Controlled...
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
  	return nla_nest_end(skb, opts);
  
  nla_put_failure:
  	nla_nest_cancel(skb, opts);
  	return -1;
  }
  
  static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
  {
  	const struct codel_sched_data *q = qdisc_priv(sch);
  	struct tc_codel_xstats st = {
  		.maxpacket	= q->stats.maxpacket,
  		.count		= q->vars.count,
  		.lastcount	= q->vars.lastcount,
  		.drop_overlimit = q->drop_overlimit,
  		.ldelay		= codel_time_to_us(q->vars.ldelay),
  		.dropping	= q->vars.dropping,
  		.ecn_mark	= q->stats.ecn_mark,
80ba92fa1   Eric Dumazet   codel: add ce_thr...
243
  		.ce_mark	= q->stats.ce_mark,
76e3cc126   Eric Dumazet   codel: Controlled...
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
  	};
  
  	if (q->vars.dropping) {
  		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
  
  		if (delta >= 0)
  			st.drop_next = codel_time_to_us(delta);
  		else
  			st.drop_next = -codel_time_to_us(-delta);
  	}
  
  	return gnet_stats_copy_app(d, &st, sizeof(st));
  }
  
/* Qdisc_ops.reset: purge all queued packets and restart the CoDel
 * state machine (parameters and stats are preserved).
 */
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}
  
/* Qdisc operations table registered under the "codel" identifier. */
static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change 	=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
  
/* Module entry point: register the codel qdisc with the scheduler core. */
static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}
  
/* Module exit point: unregister the codel qdisc. */
static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");