Blame view
net/sched/cls_flow.c
16.1 KB
e5dfb8151 [NET_SCHED]: Add ... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 |
/* * net/sched/cls_flow.c Generic flow classifier * * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/pkt_cls.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> |
9ec138101 [NET_SCHED]: cls_... |
22 |
#include <linux/if_vlan.h> |
5a0e3ad6a include cleanup: ... |
23 |
#include <linux/slab.h> |
3a9a231d9 net: Fix files ex... |
24 |
#include <linux/module.h> |
e5dfb8151 [NET_SCHED]: Add ... |
25 26 27 28 |
#include <net/pkt_cls.h> #include <net/ip.h> #include <net/route.h> |
6bd2a9af1 cls_flow: use skb... |
29 |
#include <net/flow_keys.h> |
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

/* Per-classifier root: an RCU-protected list of flow filters. */
struct flow_head {
	struct list_head	filters;	/* list of struct flow_filter */
	struct rcu_head		rcu;		/* deferred free of the head */
};

/* One "flow" filter instance; lives on flow_head.filters. */
struct flow_filter {
	struct list_head	list;		/* linkage into flow_head.filters (RCU) */
	struct tcf_exts		exts;		/* attached actions/policer */
	struct tcf_ematch_tree	ematches;	/* optional ematch pre-filter */
	struct tcf_proto	*tp;		/* owning classifier instance */
	struct timer_list	perturb_timer;	/* periodically re-seeds hashrnd */
	u32			perturb_period;	/* re-seed interval in jiffies; 0 = off */
	u32			handle;		/* filter handle (identity) */
	u32			nkeys;		/* number of bits set in keymask */
	u32			keymask;	/* bitmask of FLOW_KEY_* to extract */
	u32			mode;		/* FLOW_MODE_HASH or FLOW_MODE_MAP */
	/* MAP-mode transform: classid = (((key & mask) ^ xor) >> rshift) + addend */
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;	/* optional final modulo; 0 = none */
	u32			baseclass;	/* base classid the result is added to */
	u32			hashrnd;	/* jhash seed for FLOW_MODE_HASH */
	struct rcu_head		rcu;		/* deferred destruction (flow_destroy_filter) */
};
e5dfb8151 [NET_SCHED]: Add ... |
60 61 62 63 64 65 |
static inline u32 addr_fold(void *addr) { unsigned long a = (unsigned long)addr; return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0); } |
6bd2a9af1 cls_flow: use skb... |
66 |
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) |
e5dfb8151 [NET_SCHED]: Add ... |
67 |
{ |
6bd2a9af1 cls_flow: use skb... |
68 69 |
if (flow->src) return ntohl(flow->src); |
4b95c3d40 cls_flow: add san... |
70 |
return addr_fold(skb->sk); |
e5dfb8151 [NET_SCHED]: Add ... |
71 |
} |
6bd2a9af1 cls_flow: use skb... |
72 |
static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) |
e5dfb8151 [NET_SCHED]: Add ... |
73 |
{ |
6bd2a9af1 cls_flow: use skb... |
74 75 |
if (flow->dst) return ntohl(flow->dst); |
4b95c3d40 cls_flow: add san... |
76 |
return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; |
e5dfb8151 [NET_SCHED]: Add ... |
77 |
} |
/* Transport-protocol key (IPPROTO_*), as reported by the flow dissector. */
static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return flow->ip_proto;
}
6bd2a9af1 cls_flow: use skb... |
82 |
static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) |
e5dfb8151 [NET_SCHED]: Add ... |
83 |
{ |
6bd2a9af1 cls_flow: use skb... |
84 85 |
if (flow->ports) return ntohs(flow->port16[0]); |
e5dfb8151 [NET_SCHED]: Add ... |
86 |
|
859c20123 net_sched: cls_fl... |
87 88 |
return addr_fold(skb->sk); } |
/* Destination-port key; falls back to dst entry ^ protocol when no ports. */
static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports)
		return ntohs(flow->port16[1]);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

/* Incoming interface index. */
static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

/* skb priority field. */
static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

/* Netfilter/firewall mark. */
static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

/* Conntrack entry pointer folded to 32 bits (0 without conntrack support). */
static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

/*
 * CTTUPLE(skb, member): evaluate to the given member of the skb's
 * conntrack tuple.  NOTE: this is a statement expression that performs a
 * non-local "goto fallback" when no conntrack entry is attached (or when
 * conntrack is compiled out) — callers MUST provide a "fallback:" label.
 */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
/*
 * Conntrack source address (IPv4, or last 32 bits of IPv6); falls back to
 * the plain dissected source key when no conntrack entry is attached.
 */
static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:	/* target of CTTUPLE()'s hidden goto */
	return flow_get_src(skb, flow);
}
/*
 * Conntrack destination address; falls back to the plain dissected
 * destination key when no conntrack entry is attached.
 */
static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:	/* target of CTTUPLE()'s hidden goto */
	return flow_get_dst(skb, flow);
}
/* Conntrack source port; falls back to the dissected source-port key. */
static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:	/* target of CTTUPLE()'s hidden goto */
	return flow_get_proto_src(skb, flow);
}
/* Conntrack destination port; falls back to the dissected dest-port key. */
static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:	/* target of CTTUPLE()'s hidden goto */
	return flow_get_proto_dst(skb, flow);
}

/* Routing realm (tclassid) of the skb's dst entry, 0 if unavailable. */
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

/* UID of the socket owner's credentials, mapped into init_user_ns; 0 if none. */
static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
		kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;
		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

/* GID of the socket owner's credentials, mapped into init_user_ns; 0 if none. */
static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
		kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;
		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}
/* VLAN ID of the packet (VID bits only), or 0 when no VLAN tag is present. */
static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}
/* Flow hash of the skb (computed on demand by skb_get_hash()). */
static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}
/*
 * Dispatch a single FLOW_KEY_* selector to its extractor.
 * @flow is only guaranteed to be dissected for keys in FLOW_KEYS_NEEDED;
 * flow_classify() is responsible for that invariant.
 */
static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		/* flow_change() validated keymask, so this is unreachable. */
		WARN_ON(1);
		return 0;
	}
}
/* Keys whose extractors read struct flow_keys and thus require dissection. */
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \
			  (1 << FLOW_KEY_DST) | \
			  (1 << FLOW_KEY_PROTO) | \
			  (1 << FLOW_KEY_PROTO_SRC) | \
			  (1 << FLOW_KEY_PROTO_DST) | \
			  (1 << FLOW_KEY_NFCT_SRC) | \
			  (1 << FLOW_KEY_NFCT_DST) | \
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) | \
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
/*
 * Classify @skb: for each filter (in RCU read-side, softirq context),
 * extract the configured keys, map them to a classid (jhash in HASH mode,
 * mask/xor/shift/add transform of the first key in MAP mode), and return
 * the first filter whose actions accept the packet.  Returns -1 when no
 * filter matches.
 */
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		/* Dissect once per filter, only if some key will read it. */
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect(skb, &flow_keys);

		/* Walk set bits of keymask from lowest to highest. */
		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;	/* actions rejected: try next filter */
		return r;
	}
	return -1;
}
/* Timer callback: re-seed the hash and re-arm while perturbation is on. */
static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}
/* Netlink attribute validation policy for TCA_FLOW_* options. */
static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
/*
 * RCU callback: tear down a filter after readers are done with it.
 * NOTE(review): del_timer_sync() can sleep-wait and RCU callbacks run in
 * softirq context — looks risky; confirm this is safe on this kernel.
 */
static void flow_destroy_filter(struct rcu_head *head)
{
	struct flow_filter *f = container_of(head, struct flow_filter, rcu);

	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	kfree(f);
}
c1b52739e pkt_sched: namesp... |
341 |
static int flow_change(struct net *net, struct sk_buff *in_skb, |
af4c6641f net sched: Pass t... |
342 |
struct tcf_proto *tp, unsigned long base, |
e5dfb8151 [NET_SCHED]: Add ... |
343 |
u32 handle, struct nlattr **tca, |
2f7ef2f87 sched, cls: check... |
344 |
unsigned long *arg, bool ovr) |
e5dfb8151 [NET_SCHED]: Add ... |
345 |
{ |
70da9f0bf net: sched: cls_f... |
346 347 |
struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *fold, *fnew; |
e5dfb8151 [NET_SCHED]: Add ... |
348 349 350 351 352 |
struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FLOW_MAX + 1]; struct tcf_exts e; struct tcf_ematch_tree t; unsigned int nkeys = 0; |
72d9794f4 net-sched: cls_fl... |
353 |
unsigned int perturb_period = 0; |
e5dfb8151 [NET_SCHED]: Add ... |
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 |
u32 baseclass = 0; u32 keymask = 0; u32 mode; int err; if (opt == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy); if (err < 0) return err; if (tb[TCA_FLOW_BASECLASS]) { baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]); if (TC_H_MIN(baseclass) == 0) return -EINVAL; } if (tb[TCA_FLOW_KEYS]) { keymask = nla_get_u32(tb[TCA_FLOW_KEYS]); |
e5dfb8151 [NET_SCHED]: Add ... |
374 375 376 377 |
nkeys = hweight32(keymask); if (nkeys == 0) return -EINVAL; |
4f2504910 [NET_SCHED]: cls_... |
378 379 380 |
if (fls(keymask) - 1 > FLOW_KEY_MAX) return -EOPNOTSUPP; |
a6c6796c7 userns: Convert c... |
381 382 |
if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) && |
e32123e59 netlink: rename s... |
383 |
sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns) |
a6c6796c7 userns: Convert c... |
384 |
return -EOPNOTSUPP; |
e5dfb8151 [NET_SCHED]: Add ... |
385 |
} |
5da57f422 net_sched: cls: r... |
386 |
tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE); |
2f7ef2f87 sched, cls: check... |
387 |
err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); |
e5dfb8151 [NET_SCHED]: Add ... |
388 389 390 391 392 393 |
if (err < 0) return err; err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t); if (err < 0) goto err1; |
70da9f0bf net: sched: cls_f... |
394 395 396 397 398 399 400 |
err = -ENOBUFS; fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); if (!fnew) goto err2; fold = (struct flow_filter *)*arg; if (fold) { |
e5dfb8151 [NET_SCHED]: Add ... |
401 |
err = -EINVAL; |
70da9f0bf net: sched: cls_f... |
402 |
if (fold->handle != handle && handle) |
e5dfb8151 [NET_SCHED]: Add ... |
403 |
goto err2; |
70da9f0bf net: sched: cls_f... |
404 |
/* Copy fold into fnew */ |
70da9f0bf net: sched: cls_f... |
405 |
fnew->tp = fold->tp; |
70da9f0bf net: sched: cls_f... |
406 407 408 409 410 411 412 413 414 415 416 417 418 |
fnew->handle = fold->handle; fnew->nkeys = fold->nkeys; fnew->keymask = fold->keymask; fnew->mode = fold->mode; fnew->mask = fold->mask; fnew->xor = fold->xor; fnew->rshift = fold->rshift; fnew->addend = fold->addend; fnew->divisor = fold->divisor; fnew->baseclass = fold->baseclass; fnew->hashrnd = fold->hashrnd; mode = fold->mode; |
e5dfb8151 [NET_SCHED]: Add ... |
419 420 421 422 |
if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) goto err2; |
72d9794f4 net-sched: cls_fl... |
423 424 |
if (mode == FLOW_MODE_HASH) |
70da9f0bf net: sched: cls_f... |
425 |
perturb_period = fold->perturb_period; |
72d9794f4 net-sched: cls_fl... |
426 427 428 429 430 |
if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } |
e5dfb8151 [NET_SCHED]: Add ... |
431 432 433 434 435 436 437 438 439 440 441 442 |
} else { err = -EINVAL; if (!handle) goto err2; if (!tb[TCA_FLOW_KEYS]) goto err2; mode = FLOW_MODE_MAP; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) goto err2; |
72d9794f4 net-sched: cls_fl... |
443 444 445 446 447 |
if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } |
e5dfb8151 [NET_SCHED]: Add ... |
448 449 450 451 |
if (TC_H_MAJ(baseclass) == 0) baseclass = TC_H_MAKE(tp->q->handle, baseclass); if (TC_H_MIN(baseclass) == 0) baseclass = TC_H_MAKE(baseclass, 1); |
70da9f0bf net: sched: cls_f... |
452 453 454 455 456 |
fnew->handle = handle; fnew->mask = ~0U; fnew->tp = tp; get_random_bytes(&fnew->hashrnd, 4); tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); |
e5dfb8151 [NET_SCHED]: Add ... |
457 |
} |
70da9f0bf net: sched: cls_f... |
458 459 460 |
fnew->perturb_timer.function = flow_perturbation; fnew->perturb_timer.data = (unsigned long)fnew; init_timer_deferrable(&fnew->perturb_timer); |
e5dfb8151 [NET_SCHED]: Add ... |
461 |
|
70da9f0bf net: sched: cls_f... |
462 463 |
tcf_exts_change(tp, &fnew->exts, &e); tcf_em_tree_change(tp, &fnew->ematches, &t); |
e5dfb8151 [NET_SCHED]: Add ... |
464 |
|
028758788 net: better IFF_X... |
465 |
netif_keep_dst(qdisc_dev(tp->q)); |
e5dfb8151 [NET_SCHED]: Add ... |
466 |
if (tb[TCA_FLOW_KEYS]) { |
70da9f0bf net: sched: cls_f... |
467 468 |
fnew->keymask = keymask; fnew->nkeys = nkeys; |
e5dfb8151 [NET_SCHED]: Add ... |
469 |
} |
70da9f0bf net: sched: cls_f... |
470 |
fnew->mode = mode; |
e5dfb8151 [NET_SCHED]: Add ... |
471 472 |
if (tb[TCA_FLOW_MASK]) |
70da9f0bf net: sched: cls_f... |
473 |
fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]); |
e5dfb8151 [NET_SCHED]: Add ... |
474 |
if (tb[TCA_FLOW_XOR]) |
70da9f0bf net: sched: cls_f... |
475 |
fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]); |
e5dfb8151 [NET_SCHED]: Add ... |
476 |
if (tb[TCA_FLOW_RSHIFT]) |
70da9f0bf net: sched: cls_f... |
477 |
fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); |
e5dfb8151 [NET_SCHED]: Add ... |
478 |
if (tb[TCA_FLOW_ADDEND]) |
70da9f0bf net: sched: cls_f... |
479 |
fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); |
e5dfb8151 [NET_SCHED]: Add ... |
480 481 |
if (tb[TCA_FLOW_DIVISOR]) |
70da9f0bf net: sched: cls_f... |
482 |
fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); |
e5dfb8151 [NET_SCHED]: Add ... |
483 |
if (baseclass) |
70da9f0bf net: sched: cls_f... |
484 |
fnew->baseclass = baseclass; |
e5dfb8151 [NET_SCHED]: Add ... |
485 |
|
70da9f0bf net: sched: cls_f... |
486 |
fnew->perturb_period = perturb_period; |
72d9794f4 net-sched: cls_fl... |
487 |
if (perturb_period) |
70da9f0bf net: sched: cls_f... |
488 |
mod_timer(&fnew->perturb_timer, jiffies + perturb_period); |
72d9794f4 net-sched: cls_fl... |
489 |
|
e5dfb8151 [NET_SCHED]: Add ... |
490 |
if (*arg == 0) |
70da9f0bf net: sched: cls_f... |
491 492 493 |
list_add_tail_rcu(&fnew->list, &head->filters); else list_replace_rcu(&fnew->list, &fold->list); |
e5dfb8151 [NET_SCHED]: Add ... |
494 |
|
70da9f0bf net: sched: cls_f... |
495 |
*arg = (unsigned long)fnew; |
e5dfb8151 [NET_SCHED]: Add ... |
496 |
|
70da9f0bf net: sched: cls_f... |
497 498 |
if (fold) call_rcu(&fold->rcu, flow_destroy_filter); |
e5dfb8151 [NET_SCHED]: Add ... |
499 500 501 |
return 0; err2: |
82a470f11 net: sched: remov... |
502 |
tcf_em_tree_destroy(&t); |
70da9f0bf net: sched: cls_f... |
503 |
kfree(fnew); |
e5dfb8151 [NET_SCHED]: Add ... |
504 |
err1: |
18d0264f6 net_sched: remove... |
505 |
tcf_exts_destroy(&e); |
e5dfb8151 [NET_SCHED]: Add ... |
506 507 |
return err; } |
/* Unlink one filter (RTNL held) and free it after an RCU grace period. */
static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	list_del_rcu(&f->list);
	call_rcu(&f->rcu, flow_destroy_filter);
	return 0;
}

/* Allocate and publish the per-instance filter list head. */
static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

/* Tear down the whole instance: drop every filter, then the head (RTNL held). */
static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, flow_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
}

/* Look up a filter by handle; returns it as an opaque cookie or 0. */
static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}
/*
 * Dump one filter's configuration back to userspace as nested TCA_FLOW_*
 * attributes.  Attributes equal to their defaults are omitted.  Returns
 * skb->len on success, -1 if the skb ran out of room.
 */
static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

/* Iterate all filters for dump/walk callbacks, honoring skip/count/stop. */
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

/* Classifier operations table registered with the tc core. */
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");