net/sched/cls_flower.c

  // SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * net/sched/cls_flower.c		Flower classifier
   *
   * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
   */
  
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/rhashtable.h>
  #include <linux/workqueue.h>
  #include <linux/refcount.h>
  
  #include <linux/if_ether.h>
  #include <linux/in6.h>
  #include <linux/ip.h>
  #include <linux/mpls.h>
  
  #include <net/sch_generic.h>
  #include <net/pkt_cls.h>
  #include <net/ip.h>
  #include <net/flow_dissector.h>
  #include <net/geneve.h>
  #include <net/vxlan.h>
  #include <net/erspan.h>

  #include <net/dst.h>
  #include <net/dst_metadata.h>
  #include <uapi/linux/netfilter/nf_conntrack_common.h>
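  /* Aggregate flow key: one field for every flow_dissector key that flower
   * can match on, including the tunnel (enc_*) and conntrack variants.
   */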
  struct fl_flow_key {
  	struct flow_dissector_key_meta meta;
  	struct flow_dissector_key_control control;
  	struct flow_dissector_key_control enc_control;
  	struct flow_dissector_key_basic basic;
  	struct flow_dissector_key_eth_addrs eth;
  	struct flow_dissector_key_vlan vlan;
  	struct flow_dissector_key_vlan cvlan;
  	union {
  		struct flow_dissector_key_ipv4_addrs ipv4;
  		struct flow_dissector_key_ipv6_addrs ipv6;
  	};
  	struct flow_dissector_key_ports tp;
  	struct flow_dissector_key_icmp icmp;
  	struct flow_dissector_key_arp arp;
  	struct flow_dissector_key_keyid enc_key_id;
  	union {
  		struct flow_dissector_key_ipv4_addrs enc_ipv4;
  		struct flow_dissector_key_ipv6_addrs enc_ipv6;
  	};
  	struct flow_dissector_key_ports enc_tp;
  	struct flow_dissector_key_mpls mpls;
  	struct flow_dissector_key_tcp tcp;
  	struct flow_dissector_key_ip ip;
  	struct flow_dissector_key_ip enc_ip;
  	struct flow_dissector_key_enc_opts enc_opts;
  	union {
  		struct flow_dissector_key_ports tp;
  		struct {
  			struct flow_dissector_key_ports tp_min;
  			struct flow_dissector_key_ports tp_max;
  		};
  	} tp_range;
  	struct flow_dissector_key_ct ct;
  } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
  
  struct fl_flow_mask_range {
  	unsigned short int start;
  	unsigned short int end;
  };
  
  struct fl_flow_mask {
  	struct fl_flow_key key;
  	struct fl_flow_mask_range range;
  	u32 flags;
  	struct rhash_head ht_node;
  	struct rhashtable ht;
  	struct rhashtable_params filter_ht_params;
  	struct flow_dissector dissector;
  	struct list_head filters;
  	struct rcu_work rwork;
  	struct list_head list;
  	refcount_t refcnt;
  };
  struct fl_flow_tmplt {
  	struct fl_flow_key dummy_key;
  	struct fl_flow_key mask;
  	struct flow_dissector dissector;
  	struct tcf_chain *chain;
  };
  struct cls_fl_head {
  	struct rhashtable ht;
  	spinlock_t masks_lock; /* Protect masks list */
  	struct list_head masks;
  	struct list_head hw_filters;
  	struct rcu_work rwork;
  	struct idr handle_idr;
  };
  
  struct cls_fl_filter {
  	struct fl_flow_mask *mask;
  	struct rhash_head ht_node;
  	struct fl_flow_key mkey;
  	struct tcf_exts exts;
  	struct tcf_result res;
  	struct fl_flow_key key;
  	struct list_head list;
  	struct list_head hw_list;
  	u32 handle;
  	u32 flags;
  	u32 in_hw_count;
  	struct rcu_work rwork;
  	struct net_device *hw_dev;
  	/* Flower classifier is unlocked, which means that its reference counter
  	 * can be changed concurrently without any kind of external
  	 * synchronization. Use atomic reference counter to be concurrency-safe.
  	 */
  	refcount_t refcnt;
  	bool deleted;
  };
  static const struct rhashtable_params mask_ht_params = {
  	.key_offset = offsetof(struct fl_flow_mask, key),
  	.key_len = sizeof(struct fl_flow_key),
  	.head_offset = offsetof(struct fl_flow_mask, ht_node),
  	.automatic_shrinking = true,
  };
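  /* A mask's "range" is the smallest span of bytes, rounded out to long
   * boundaries, that covers all of its non-zero bytes; lookups and copies
   * only touch that span.
   */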
  static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  {
  	return mask->range.end - mask->range.start;
  }
  
  static void fl_mask_update_range(struct fl_flow_mask *mask)
  {
  	const u8 *bytes = (const u8 *) &mask->key;
  	size_t size = sizeof(mask->key);
  	size_t i, first = 0, last;

  	for (i = 0; i < size; i++) {
  		if (bytes[i]) {
  			first = i;
  			break;
  		}
  	}
  	last = first;
  	for (i = size - 1; i != first; i--) {
  		if (bytes[i]) {
  			last = i;
  			break;
  		}
  	}
  	mask->range.start = rounddown(first, sizeof(long));
  	mask->range.end = roundup(last + 1, sizeof(long));
  }
  
  static void *fl_key_get_start(struct fl_flow_key *key,
  			      const struct fl_flow_mask *mask)
  {
  	return (u8 *) key + mask->range.start;
  }
  
  static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
  			      struct fl_flow_mask *mask)
  {
  	const long *lkey = fl_key_get_start(key, mask);
  	const long *lmask = fl_key_get_start(&mask->key, mask);
  	long *lmkey = fl_key_get_start(mkey, mask);
  	int i;
  
  	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
  		*lmkey++ = *lkey++ & *lmask++;
  }
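  /* A mask fits a chain template if every bit it matches on is also
   * covered by the template's mask.
   */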
  static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
  			       struct fl_flow_mask *mask)
  {
  	const long *lmask = fl_key_get_start(&mask->key, mask);
  	const long *ltmplt;
  	int i;
  
  	if (!tmplt)
  		return true;
  	ltmplt = fl_key_get_start(&tmplt->mask, mask);
  	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
  		if (~*ltmplt++ & *lmask++)
  			return false;
  	}
  	return true;
  }
  static void fl_clear_masked_range(struct fl_flow_key *key,
  				  struct fl_flow_mask *mask)
  {
  	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
  }
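  /* Port-range filters cannot be matched by hashing alone: compare the
   * skb's port against the filter's [min, max] and, on a hit, copy the
   * filter's masked min/max into the lookup key.
   */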
  static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
  				  struct fl_flow_key *key,
  				  struct fl_flow_key *mkey)
  {
  	__be16 min_mask, max_mask, min_val, max_val;
  	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
  	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
  	min_val = htons(filter->key.tp_range.tp_min.dst);
  	max_val = htons(filter->key.tp_range.tp_max.dst);

  	if (min_mask && max_mask) {
  		if (htons(key->tp_range.tp.dst) < min_val ||
  		    htons(key->tp_range.tp.dst) > max_val)
  			return false;

  		/* skb does not have min and max values */
  		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
  		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
  	}
  	return true;
  }
  
  static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
  				  struct fl_flow_key *key,
  				  struct fl_flow_key *mkey)
  {
  	__be16 min_mask, max_mask, min_val, max_val;

  	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
  	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
  	min_val = htons(filter->key.tp_range.tp_min.src);
  	max_val = htons(filter->key.tp_range.tp_max.src);

  	if (min_mask && max_mask) {
  		if (htons(key->tp_range.tp.src) < min_val ||
  		    htons(key->tp_range.tp.src) > max_val)
  			return false;

  		/* skb does not have min and max values */
  		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
  		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
  	}
  	return true;
  }
  
  static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
  					 struct fl_flow_key *mkey)
  {
  	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
  				      mask->filter_ht_params);
  }
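  /* When a mask carries port-range filters, walk them and fix up the masked
   * key per filter before falling back to the ordinary hash lookup.
   */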
  static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
  					     struct fl_flow_key *mkey,
  					     struct fl_flow_key *key)
  {
  	struct cls_fl_filter *filter, *f;
  
  	list_for_each_entry_rcu(filter, &mask->filters, list) {
  		if (!fl_range_port_dst_cmp(filter, key, mkey))
  			continue;
  
  		if (!fl_range_port_src_cmp(filter, key, mkey))
  			continue;
  
  		f = __fl_lookup(mask, mkey);
  		if (f)
  			return f;
  	}
  	return NULL;
  }
  
  static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
  				       struct fl_flow_key *mkey,
  				       struct fl_flow_key *key)
  {
  	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
  		return fl_lookup_range(mask, mkey, key);
  
  	return __fl_lookup(mask, mkey);
  }
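  /* Map conntrack info states to the TCA_FLOWER_KEY_CT_FLAGS_* bits that
   * flower exposes for matching.
   */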
  static u16 fl_ct_info_to_flower_map[] = {
  	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
  					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
  	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
  					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
  	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
  					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
  	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
  					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
  	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
  					TCA_FLOWER_KEY_CT_FLAGS_NEW,
  };
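  /* RX fast path: for each mask, dissect the skb, AND the result with the
   * mask and look the masked key up in that mask's hash table.
   */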
  static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
  		       struct tcf_result *res)
  {
  	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
  	struct fl_flow_key skb_mkey;
  	struct fl_flow_key skb_key;
  	struct fl_flow_mask *mask;
  	struct cls_fl_filter *f;

  	list_for_each_entry_rcu(mask, &head->masks, list) {
  		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
  		fl_clear_masked_range(&skb_key, mask);

  		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
  		/* skb_flow_dissect() does not set n_proto in case of an
  		 * unknown protocol, so do it here.
  		 */
  		skb_key.basic.n_proto = skb->protocol;
  		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
  		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
  				    fl_ct_info_to_flower_map,
  				    ARRAY_SIZE(fl_ct_info_to_flower_map));
  		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

  		fl_set_masked_key(&skb_mkey, &skb_key, mask);

  		f = fl_lookup(mask, &skb_mkey, &skb_key);
  		if (f && !tc_skip_sw(f->flags)) {
  			*res = f->res;
  			return tcf_exts_exec(skb, &f->exts, res);
  		}
  	}
  	return -1;
  }
  
  static int fl_init(struct tcf_proto *tp)
  {
  	struct cls_fl_head *head;
  
  	head = kzalloc(sizeof(*head), GFP_KERNEL);
  	if (!head)
  		return -ENOBUFS;
  	spin_lock_init(&head->masks_lock);
  	INIT_LIST_HEAD_RCU(&head->masks);
  	INIT_LIST_HEAD(&head->hw_filters);
  	rcu_assign_pointer(tp->root, head);
  	idr_init(&head->handle_idr);

  	return rhashtable_init(&head->ht, &mask_ht_params);
  }

  static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
  {
  	/* temporary masks don't have their filters list and ht initialized */
  	if (mask_init_done) {
  		WARN_ON(!list_empty(&mask->filters));
  		rhashtable_destroy(&mask->ht);
  	}
  	kfree(mask);
  }

  static void fl_mask_free_work(struct work_struct *work)
  {
  	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
  						 struct fl_flow_mask, rwork);

  	fl_mask_free(mask, true);
  }

  static void fl_uninit_mask_free_work(struct work_struct *work)
  {
  	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
  						 struct fl_flow_mask, rwork);

  	fl_mask_free(mask, false);
  }

  static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
  {
  	if (!refcount_dec_and_test(&mask->refcnt))
  		return false;

  	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

  	spin_lock(&head->masks_lock);
  	list_del_rcu(&mask->list);
  	spin_unlock(&head->masks_lock);

  	tcf_queue_work(&mask->rwork, fl_mask_free_work);

  	return true;
  }
  static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
  {
  	/* Flower classifier only changes root pointer during init and destroy.
  	 * Users must obtain reference to tcf_proto instance before calling its
  	 * API, so tp->root pointer is protected from concurrent call to
  	 * fl_destroy() by reference counting.
  	 */
  	return rcu_dereference_raw(tp->root);
  }
  static void __fl_destroy_filter(struct cls_fl_filter *f)
  {
  	tcf_exts_destroy(&f->exts);
  	tcf_exts_put_net(&f->exts);
  	kfree(f);
  }

  static void fl_destroy_filter_work(struct work_struct *work)
  {
  	struct cls_fl_filter *f = container_of(to_rcu_work(work),
  					struct cls_fl_filter, rwork);

  	__fl_destroy_filter(f);
  }

  static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
  				 bool rtnl_held, struct netlink_ext_ack *extack)
  {
  	struct tcf_block *block = tp->chain->block;
  	struct flow_cls_offload cls_flower = {};

  	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
  	cls_flower.command = FLOW_CLS_DESTROY;
  	cls_flower.cookie = (unsigned long) f;

  	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
  			    &f->flags, &f->in_hw_count, rtnl_held);

  }
  static int fl_hw_replace_filter(struct tcf_proto *tp,
  				struct cls_fl_filter *f, bool rtnl_held,
  				struct netlink_ext_ack *extack)
  {
  	struct tcf_block *block = tp->chain->block;
  	struct flow_cls_offload cls_flower = {};
  	bool skip_sw = tc_skip_sw(f->flags);
  	int err = 0;

  	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
  	if (!cls_flower.rule)
  		return -ENOMEM;

  	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
  	cls_flower.command = FLOW_CLS_REPLACE;
  	cls_flower.cookie = (unsigned long) f;
  	cls_flower.rule->match.dissector = &f->mask->dissector;
  	cls_flower.rule->match.mask = &f->mask->key;
  	cls_flower.rule->match.key = &f->mkey;
  	cls_flower.classid = f->res.classid;

  	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
  	if (err) {
  		kfree(cls_flower.rule);
  		if (skip_sw) {
  			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
  			return err;
  		}
  		return 0;
  	}

  	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
  			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
  	tc_cleanup_flow_action(&cls_flower.rule->action);
  	kfree(cls_flower.rule);

  	if (err) {
  		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
  		return err;
  	}

  	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
  		return -EINVAL;

  	return 0;
  }
  static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
  			       bool rtnl_held)
  {
  	struct tcf_block *block = tp->chain->block;
  	struct flow_cls_offload cls_flower = {};

  	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
  	cls_flower.command = FLOW_CLS_STATS;
  	cls_flower.cookie = (unsigned long) f;
  	cls_flower.classid = f->res.classid;

  	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
  			 rtnl_held);

  	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
  			      cls_flower.stats.pkts,
  			      cls_flower.stats.lastused,
  			      cls_flower.stats.used_hw_stats,
  			      cls_flower.stats.used_hw_stats_valid);
  }
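  /* Filters are reference-counted: __fl_put() frees the filter on the last
   * put (via the workqueue when a netns reference can still be taken), and
   * __fl_get() looks up a handle and takes a reference under RCU.
   */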
  static void __fl_put(struct cls_fl_filter *f)
  {
  	if (!refcount_dec_and_test(&f->refcnt))
  		return;
  
  	if (tcf_exts_get_net(&f->exts))
  		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
  	else
  		__fl_destroy_filter(f);
  }
  
  static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
  {
  	struct cls_fl_filter *f;
  
  	rcu_read_lock();
  	f = idr_find(&head->handle_idr, handle);
  	if (f && !refcount_inc_not_zero(&f->refcnt))
  		f = NULL;
  	rcu_read_unlock();
  
  	return f;
  }
  static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
  		       bool *last, bool rtnl_held,
  		       struct netlink_ext_ack *extack)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);

  	*last = false;

  	spin_lock(&tp->lock);
  	if (f->deleted) {
  		spin_unlock(&tp->lock);
  		return -ENOENT;
  	}
  
  	f->deleted = true;
  	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
  			       f->mask->filter_ht_params);
  	idr_remove(&head->handle_idr, f->handle);
  	list_del_rcu(&f->list);
  	spin_unlock(&tp->lock);
  	*last = fl_mask_put(head, f->mask);
  	if (!tc_skip_hw(f->flags))
  		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
  	tcf_unbind_filter(tp, &f->res);
  	__fl_put(f);

  	return 0;
  }

  static void fl_destroy_sleepable(struct work_struct *work)
  {
  	struct cls_fl_head *head = container_of(to_rcu_work(work),
  						struct cls_fl_head,
  						rwork);

  	rhashtable_destroy(&head->ht);
  	kfree(head);
  	module_put(THIS_MODULE);
  }

  static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
  		       struct netlink_ext_ack *extack)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);
  	struct fl_flow_mask *mask, *next_mask;
  	struct cls_fl_filter *f, *next;
  	bool last;

  	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
  		list_for_each_entry_safe(f, next, &mask->filters, list) {
  			__fl_delete(tp, f, &last, rtnl_held, extack);
  			if (last)
  				break;
  		}
  	}
  	idr_destroy(&head->handle_idr);

  	__module_get(THIS_MODULE);
  	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
  }

  static void fl_put(struct tcf_proto *tp, void *arg)
  {
  	struct cls_fl_filter *f = arg;

  	__fl_put(f);
  }

  static void *fl_get(struct tcf_proto *tp, u32 handle)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);

  	return __fl_get(head, handle);
  }
  
  static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
  	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
  	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
  	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
  					    .len = IFNAMSIZ },
  	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
  	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
  	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
  	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
  	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
  					    .len = 128 / BITS_PER_BYTE },
  	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
  					    .len = 128 / BITS_PER_BYTE },
  	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
  };
  
  static const struct nla_policy
  enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
  	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
  		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
  	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
  	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
  	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
  };
  
  static const struct nla_policy
  geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
  	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
  	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
  						       .len = 128 },
  };
  static const struct nla_policy
  vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
  	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
  };
  static const struct nla_policy
  erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
  	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
  	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
  	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
  };
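  /* Copy a key attribute and its mask from netlink; an absent mask
   * attribute means an exact match (all-ones mask).
   */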
  static void fl_set_key_val(struct nlattr **tb,
  			   void *val, int val_type,
  			   void *mask, int mask_type, int len)
  {
  	if (!tb[val_type])
  		return;
  	nla_memcpy(val, tb[val_type], len);
  	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
  		memset(mask, 0xff, len);
  	else
  		nla_memcpy(mask, tb[mask_type], len);
  }
  static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
  				 struct fl_flow_key *mask,
  				 struct netlink_ext_ack *extack)
  {
  	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
  		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
  		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
  	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
  		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
  		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
  	fl_set_key_val(tb, &key->tp_range.tp_min.src,
  		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
  		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
  	fl_set_key_val(tb, &key->tp_range.tp_max.src,
  		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
  		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

  	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
  	    htons(key->tp_range.tp_max.dst) <=
  	    htons(key->tp_range.tp_min.dst)) {
  		NL_SET_ERR_MSG_ATTR(extack,
  				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
  				    "Invalid destination port range (min must be strictly smaller than max)");
  		return -EINVAL;
  	}
  	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
  	    htons(key->tp_range.tp_max.src) <=
  	    htons(key->tp_range.tp_min.src)) {
  		NL_SET_ERR_MSG_ATTR(extack,
  				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
  				    "Invalid source port range (min must be strictly smaller than max)");
  		return -EINVAL;
  	}
  
  	return 0;
  }
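  /* Parse the MPLS attributes, validating each field against its
   * on-the-wire width; setting a field implies a full mask for it.
   */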
  static int fl_set_key_mpls(struct nlattr **tb,
  			   struct flow_dissector_key_mpls *key_val,
  			   struct flow_dissector_key_mpls *key_mask,
  			   struct netlink_ext_ack *extack)
  {
  	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
  		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
  		key_mask->mpls_ttl = MPLS_TTL_MASK;
  	}
  	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
  		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

  		if (bos & ~MPLS_BOS_MASK) {
  			NL_SET_ERR_MSG_ATTR(extack,
  					    tb[TCA_FLOWER_KEY_MPLS_BOS],
  					    "Bottom Of Stack (BOS) must be 0 or 1");
  			return -EINVAL;
  		}
  		key_val->mpls_bos = bos;
  		key_mask->mpls_bos = MPLS_BOS_MASK;
  	}
  	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
  		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

  		if (tc & ~MPLS_TC_MASK) {
  			NL_SET_ERR_MSG_ATTR(extack,
  					    tb[TCA_FLOWER_KEY_MPLS_TC],
  					    "Traffic Class (TC) must be between 0 and 7");
  			return -EINVAL;
  		}
  		key_val->mpls_tc = tc;
  		key_mask->mpls_tc = MPLS_TC_MASK;
  	}
  	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
  		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

  		if (label & ~MPLS_LABEL_MASK) {
  			NL_SET_ERR_MSG_ATTR(extack,
  					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
  					    "Label must be between 0 and 1048575");
  			return -EINVAL;
  		}
  		key_val->mpls_label = label;
  		key_mask->mpls_label = MPLS_LABEL_MASK;
  	}
  	return 0;
  }

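  /* Fill VLAN id/priority for the outer (vlan) or inner (cvlan) tag; the
   * TPID is always matched exactly.
   */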
  static void fl_set_key_vlan(struct nlattr **tb,
  			    __be16 ethertype,
  			    int vlan_id_key, int vlan_prio_key,
  			    struct flow_dissector_key_vlan *key_val,
  			    struct flow_dissector_key_vlan *key_mask)
  {
  #define VLAN_PRIORITY_MASK	0x7

  	if (tb[vlan_id_key]) {
  		key_val->vlan_id =
  			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
  		key_mask->vlan_id = VLAN_VID_MASK;
  	}
  	if (tb[vlan_prio_key]) {
  		key_val->vlan_priority =
  			nla_get_u8(tb[vlan_prio_key]) &
  			VLAN_PRIORITY_MASK;
  		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
  	}
  	key_val->vlan_tpid = ethertype;
  	key_mask->vlan_tpid = cpu_to_be16(~0);
  }
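  /* Translate TCA_FLOWER_KEY_FLAGS_* bits into the corresponding
   * FLOW_DIS_* bits; the flags mask attribute is mandatory.
   */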
  static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
  			    u32 *dissector_key, u32 *dissector_mask,
  			    u32 flower_flag_bit, u32 dissector_flag_bit)
  {
  	if (flower_mask & flower_flag_bit) {
  		*dissector_mask |= dissector_flag_bit;
  		if (flower_key & flower_flag_bit)
  			*dissector_key |= dissector_flag_bit;
  	}
  }

  static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
  			    u32 *flags_mask, struct netlink_ext_ack *extack)
  {
  	u32 key, mask;

  	/* mask is mandatory for flags */
  	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
  		NL_SET_ERR_MSG(extack, "Missing flags mask");
  		return -EINVAL;
  	}

  	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
  	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

  	*flags_key  = 0;
  	*flags_mask = 0;

  	fl_set_key_flag(key, mask, flags_key, flags_mask,
  			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  	fl_set_key_flag(key, mask, flags_key, flags_mask,
  			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
  			FLOW_DIS_FIRST_FRAG);

  	return 0;
  }
  static void fl_set_key_ip(struct nlattr **tb, bool encap,
  			  struct flow_dissector_key_ip *key,
  			  struct flow_dissector_key_ip *mask)
  {
  	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
  	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
  	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
  	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

  	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
  	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
  }
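  /* Append one GENEVE option to enc_opts; called once for the key and once
   * for the mask. Returns the number of bytes consumed or a negative errno.
   */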
  static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
  			     int depth, int option_len,
  			     struct netlink_ext_ack *extack)
  {
  	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
  	struct nlattr *class = NULL, *type = NULL, *data = NULL;
  	struct geneve_opt *opt;
  	int err, data_len = 0;
  
  	if (option_len > sizeof(struct geneve_opt))
  		data_len = option_len - sizeof(struct geneve_opt);
  
  	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
  	memset(opt, 0xff, option_len);
  	opt->length = data_len / 4;
  	opt->r1 = 0;
  	opt->r2 = 0;
  	opt->r3 = 0;
  
  	/* If no mask has been provided we assume an exact match. */
  	if (!depth)
  		return sizeof(struct geneve_opt) + data_len;
  
  	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
  		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
  		return -EINVAL;
  	}
  	err = nla_parse_nested_deprecated(tb,
  					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
  					  nla, geneve_opt_policy, extack);
  	if (err < 0)
  		return err;
  
  	/* We are not allowed to omit any of CLASS, TYPE or DATA
  	 * fields from the key.
  	 */
  	if (!option_len &&
  	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
  	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
  	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
  		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
  		return -EINVAL;
  	}
  
  	/* Omitting any of CLASS, TYPE or DATA fields is allowed
  	 * for the mask.
  	 */
  	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
  		int new_len = key->enc_opts.len;
  
  		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
  		data_len = nla_len(data);
  		if (data_len < 4) {
  			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
  			return -ERANGE;
  		}
  		if (data_len % 4) {
  			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
  			return -ERANGE;
  		}
  
  		new_len += sizeof(struct geneve_opt) + data_len;
  		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
  		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
  			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
  			return -ERANGE;
  		}
  		opt->length = data_len / 4;
  		memcpy(opt->opt_data, nla_data(data), data_len);
  	}
  
  	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
  		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
  		opt->opt_class = nla_get_be16(class);
  	}
  
  	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
  		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
  		opt->type = nla_get_u8(type);
  	}
  
  	return sizeof(struct geneve_opt) + data_len;
  }
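  /* Parse the VXLAN GBP option into enc_opts; returns the number of bytes
   * consumed or a negative errno.
   */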
  static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
  			    int depth, int option_len,
  			    struct netlink_ext_ack *extack)
  {
  	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
  	struct vxlan_metadata *md;
  	int err;
  
  	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
  	memset(md, 0xff, sizeof(*md));
  
  	if (!depth)
  		return sizeof(*md);
  
  	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
  		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
  		return -EINVAL;
  	}
  
  	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
  			       vxlan_opt_policy, extack);
  	if (err < 0)
  		return err;
  
  	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
  		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
  		return -EINVAL;
  	}
  
  	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
  		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
  
  	return sizeof(*md);
  }
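  /* Parse ERSPAN options into enc_opts: version 1 carries an index, version
   * 2 a direction and hwid. Returns bytes consumed or a negative errno.
   */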
  static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
  			     int depth, int option_len,
  			     struct netlink_ext_ack *extack)
  {
  	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
  	struct erspan_metadata *md;
  	int err;
  
  	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
  	memset(md, 0xff, sizeof(*md));
  	md->version = 1;
  
  	if (!depth)
  		return sizeof(*md);
  
  	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
  		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
  		return -EINVAL;
  	}
  
  	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
  			       erspan_opt_policy, extack);
  	if (err < 0)
  		return err;
  
  	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
  		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
  		return -EINVAL;
  	}
  
  	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
  		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
  
  	if (md->version == 1) {
  		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
  			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
  			return -EINVAL;
  		}
  		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
  			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
  			md->u.index = nla_get_be32(nla);
  		}
  	} else if (md->version == 2) {
  		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
  				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
  			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
  			return -EINVAL;
  		}
  		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
  			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
  			md->u.md2.dir = nla_get_u8(nla);
  		}
  		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
  			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
  			set_hwid(&md->u.md2, nla_get_u8(nla));
  		}
  	} else {
  		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
  		return -EINVAL;
  	}
  
  	return sizeof(*md);
  }
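  /* Walk the ENC_OPTS (and optional ENC_OPTS_MASK) nests, dispatching each
   * option to its per-type parser and keeping key and mask lengths in sync.
   */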
  static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
  			  struct fl_flow_key *mask,
  			  struct netlink_ext_ack *extack)
  {
  	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
  	int err, option_len, key_depth, msk_depth = 0;
  	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
  					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
  					     enc_opts_policy, extack);
  	if (err)
  		return err;
  
  	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
  
  	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
  		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
  						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
  						     enc_opts_policy, extack);
63c82997f   Jakub Kicinski   net: sched: cls_f...
1045
1046
  		if (err)
  			return err;
0a6e77784   Pieter Jansen van Vuuren   net/sched: allow ...
1047
1048
1049
1050
1051
1052
1053
1054
  		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
  		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
  	}
  
  	nla_for_each_attr(nla_opt_key, nla_enc_key,
  			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
  		switch (nla_type(nla_opt_key)) {
  		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
d8f9dfae4   Xin Long   net: sched: allow...
1055
1056
1057
1058
1059
  			if (key->enc_opts.dst_opt_type &&
  			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
  				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
  				return -EINVAL;
  			}
0a6e77784   Pieter Jansen van Vuuren   net/sched: allow ...
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
  			option_len = 0;
  			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
  			option_len = fl_set_geneve_opt(nla_opt_key, key,
  						       key_depth, option_len,
  						       extack);
  			if (option_len < 0)
  				return option_len;
  
  			key->enc_opts.len += option_len;
  			/* At the same time we need to parse through the mask
  			 * in order to verify exact and mask attribute lengths.
  			 */
  			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
  			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
  						       msk_depth, option_len,
  						       extack);
  			if (option_len < 0)
  				return option_len;
  
  			mask->enc_opts.len += option_len;
  			if (key->enc_opts.len != mask->enc_opts.len) {
  				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
  				return -EINVAL;
  			}
  
  			if (msk_depth)
  				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
  			break;
d8f9dfae4   Xin Long   net: sched: allow...
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
  		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
  			if (key->enc_opts.dst_opt_type) {
  				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
  				return -EINVAL;
  			}
  			option_len = 0;
  			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
  			option_len = fl_set_vxlan_opt(nla_opt_key, key,
  						      key_depth, option_len,
  						      extack);
  			if (option_len < 0)
  				return option_len;
  
  			key->enc_opts.len += option_len;
  			/* At the same time we need to parse through the mask
  			 * in order to verify exact and mask attribute lengths.
  			 */
  			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
  			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
  						      msk_depth, option_len,
  						      extack);
  			if (option_len < 0)
  				return option_len;
  
  			mask->enc_opts.len += option_len;
  			if (key->enc_opts.len != mask->enc_opts.len) {
  				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
  				return -EINVAL;
  			}
  
  			if (msk_depth)
  				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
  			break;
79b1011cb   Xin Long   net: sched: allow...
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
  		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
  			if (key->enc_opts.dst_opt_type) {
  				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
  				return -EINVAL;
  			}
  			option_len = 0;
  			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
  			option_len = fl_set_erspan_opt(nla_opt_key, key,
  						       key_depth, option_len,
  						       extack);
  			if (option_len < 0)
  				return option_len;
  
  			key->enc_opts.len += option_len;
  			/* At the same time we need to parse through the mask
  			 * in order to verify exact and mask attribute lengths.
  			 */
  			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
  			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
  						       msk_depth, option_len,
  						       extack);
  			if (option_len < 0)
  				return option_len;
  
  			mask->enc_opts.len += option_len;
  			if (key->enc_opts.len != mask->enc_opts.len) {
  				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
  				return -EINVAL;
  			}
  
  			if (msk_depth)
  				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
  			break;
0a6e77784   Pieter Jansen van Vuuren   net/sched: allow ...
1154
1155
1156
1157
1158
1159
1160
1161
  		default:
  			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
  			return -EINVAL;
  		}
  	}
  
  	return 0;
  }
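
  /* A minimal sketch of the invariant fl_set_enc_opt() enforces for each
   * option (shown for geneve; the same holds for vxlan and erspan):
   *
   *   key->enc_opts.dst_opt_type  == TUNNEL_GENEVE_OPT
   *   mask->enc_opts.dst_opt_type == TUNNEL_GENEVE_OPT
   *   key->enc_opts.len           == mask->enc_opts.len   (else -EINVAL)
   *
   * i.e. the mask stream is walked in lockstep with the key stream and
   * must describe options of identical type and length.
   */
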
  static int fl_set_key_ct(struct nlattr **tb,
  			 struct flow_dissector_key_ct *key,
  			 struct flow_dissector_key_ct *mask,
  			 struct netlink_ext_ack *extack)
  {
  	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
  		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
  			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
  			return -EOPNOTSUPP;
  		}
  		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
  			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
  			       sizeof(key->ct_state));
  	}
  	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
  		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
  			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
  			return -EOPNOTSUPP;
  		}
  		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
  			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
  			       sizeof(key->ct_zone));
  	}
  	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
  		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
  			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
  			return -EOPNOTSUPP;
  		}
  		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
  			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
  			       sizeof(key->ct_mark));
  	}
  	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
  		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
  			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
  			return -EOPNOTSUPP;
  		}
  		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
  			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
  			       sizeof(key->ct_labels));
  	}
  
  	return 0;
  }
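
  /* Illustrative only -- a conntrack-state match as typically written
   * from userspace (syntax per tc-flower(8); requires
   * CONFIG_NF_CONNTRACK and is normally paired with an act_ct rule):
   *
   *   tc filter add dev eth0 ingress protocol ip flower \
   *       ct_state +trk+est \
   *       action pass
   */
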
  static int fl_set_key(struct net *net, struct nlattr **tb,
  		      struct fl_flow_key *key, struct fl_flow_key *mask,
  		      struct netlink_ext_ack *extack)
  {
  	__be16 ethertype;
  	int ret = 0;
  
  	if (tb[TCA_FLOWER_INDEV]) {
  		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
  
  		if (err < 0)
  			return err;
  		key->meta.ingress_ifindex = err;
  		mask->meta.ingress_ifindex = 0xffffffff;
  	}
  
  	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
  		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
  		       sizeof(key->eth.dst));
  	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
  		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
  		       sizeof(key->eth.src));
  
  	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
  		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
  
  		if (eth_type_vlan(ethertype)) {
  			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
  					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
  					&mask->vlan);
  
  			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
  				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
  				if (eth_type_vlan(ethertype)) {
  					fl_set_key_vlan(tb, ethertype,
  							TCA_FLOWER_KEY_CVLAN_ID,
  							TCA_FLOWER_KEY_CVLAN_PRIO,
  							&key->cvlan, &mask->cvlan);
  					fl_set_key_val(tb, &key->basic.n_proto,
  						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
  						       &mask->basic.n_proto,
  						       TCA_FLOWER_UNSPEC,
  						       sizeof(key->basic.n_proto));
  				} else {
  					key->basic.n_proto = ethertype;
  					mask->basic.n_proto = cpu_to_be16(~0);
  				}
  			}
  		} else {
  			key->basic.n_proto = ethertype;
  			mask->basic.n_proto = cpu_to_be16(~0);
  		}
  	}
  
  	if (key->basic.n_proto == htons(ETH_P_IP) ||
  	    key->basic.n_proto == htons(ETH_P_IPV6)) {
  		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
  			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
  			       sizeof(key->basic.ip_proto));
  		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
  	}
  
  	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
  		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  		mask->control.addr_type = ~0;
  		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
  			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
  			       sizeof(key->ipv4.src));
  		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
  			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
  			       sizeof(key->ipv4.dst));
  	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
  		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  		mask->control.addr_type = ~0;
  		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
  			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
  			       sizeof(key->ipv6.src));
  		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
  			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
  			       sizeof(key->ipv6.dst));
  	}
  
  	if (key->basic.ip_proto == IPPROTO_TCP) {
  		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
  			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
  			       sizeof(key->tp.src));
  		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
  			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
  			       sizeof(key->tp.dst));
  		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
  			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
  			       sizeof(key->tcp.flags));
  	} else if (key->basic.ip_proto == IPPROTO_UDP) {
  		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
  			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
  			       sizeof(key->tp.src));
  		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
  			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
  			       sizeof(key->tp.dst));
  	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
  		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
  			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
  			       sizeof(key->tp.src));
  		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
  			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
  			       sizeof(key->tp.dst));
  	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
  		   key->basic.ip_proto == IPPROTO_ICMP) {
  		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
  			       &mask->icmp.type,
  			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
  			       sizeof(key->icmp.type));
  		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
  			       &mask->icmp.code,
  			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  			       sizeof(key->icmp.code));
  	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
  		   key->basic.ip_proto == IPPROTO_ICMPV6) {
  		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
  			       &mask->icmp.type,
  			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
  			       sizeof(key->icmp.type));
  		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
  			       &mask->icmp.code,
  			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
  			       sizeof(key->icmp.code));
  	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
  		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
  		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
  		if (ret)
  			return ret;
  	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
  		   key->basic.n_proto == htons(ETH_P_RARP)) {
  		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
  			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
  			       sizeof(key->arp.sip));
  		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
  			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
  			       sizeof(key->arp.tip));
  		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
  			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
  			       sizeof(key->arp.op));
  		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
  			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
  			       sizeof(key->arp.sha));
  		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
  			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
  			       sizeof(key->arp.tha));
  	}
  
  	if (key->basic.ip_proto == IPPROTO_TCP ||
  	    key->basic.ip_proto == IPPROTO_UDP ||
  	    key->basic.ip_proto == IPPROTO_SCTP) {
  		ret = fl_set_key_port_range(tb, key, mask, extack);
  		if (ret)
  			return ret;
  	}
  
  	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
  	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
  		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
  		mask->enc_control.addr_type = ~0;
  		fl_set_key_val(tb, &key->enc_ipv4.src,
  			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
  			       &mask->enc_ipv4.src,
  			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
  			       sizeof(key->enc_ipv4.src));
  		fl_set_key_val(tb, &key->enc_ipv4.dst,
  			       TCA_FLOWER_KEY_ENC_IPV4_DST,
  			       &mask->enc_ipv4.dst,
  			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
  			       sizeof(key->enc_ipv4.dst));
  	}
  
  	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
  	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
  		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
  		mask->enc_control.addr_type = ~0;
  		fl_set_key_val(tb, &key->enc_ipv6.src,
  			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
  			       &mask->enc_ipv6.src,
  			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
  			       sizeof(key->enc_ipv6.src));
  		fl_set_key_val(tb, &key->enc_ipv6.dst,
  			       TCA_FLOWER_KEY_ENC_IPV6_DST,
  			       &mask->enc_ipv6.dst,
  			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
  			       sizeof(key->enc_ipv6.dst));
  	}
  
  	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
  		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
  		       sizeof(key->enc_key_id.keyid));
  
  	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
  		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
  		       sizeof(key->enc_tp.src));
  
  	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
  		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
  		       sizeof(key->enc_tp.dst));
  
  	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
  
  	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
  		ret = fl_set_enc_opt(tb, key, mask, extack);
  		if (ret)
  			return ret;
  	}
  
  	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
  	if (ret)
  		return ret;
  
  	if (tb[TCA_FLOWER_KEY_FLAGS])
  		ret = fl_set_key_flags(tb, &key->control.flags,
  				       &mask->control.flags, extack);
  
  	return ret;
  }
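
  /* Illustrative end-to-end example of the attributes fl_set_key()
   * consumes (device and values made up):
   *
   *   tc filter add dev eth0 ingress protocol ip flower \
   *       ip_proto tcp dst_ip 192.0.2.0/24 dst_port 80 \
   *       action drop
   *
   * This sets basic.n_proto/ip_proto, the ipv4 dst with a /24 mask, and
   * tp.dst with an all-ones mask; everything else stays zero-masked.
   */
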
  static void fl_mask_copy(struct fl_flow_mask *dst,
  			 struct fl_flow_mask *src)
  {
  	const void *psrc = fl_key_get_start(&src->key, src);
  	void *pdst = fl_key_get_start(&dst->key, src);
  
  	memcpy(pdst, psrc, fl_mask_range(src));
  	dst->range = src->range;
  }
  
  static const struct rhashtable_params fl_ht_params = {
  	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
  	.head_offset = offsetof(struct cls_fl_filter, ht_node),
  	.automatic_shrinking = true,
  };
  
  static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
  {
  	mask->filter_ht_params = fl_ht_params;
  	mask->filter_ht_params.key_len = fl_mask_range(mask);
  	mask->filter_ht_params.key_offset += mask->range.start;
  
  	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
  }
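
  /* Sketch of the arithmetic above, assuming fl_mask_range() returns
   * range.end - range.start: each filter in this mask's table is hashed
   * on the masked byte span of its mkey, i.e. key_len bytes starting at
   *
   *   offsetof(struct cls_fl_filter, mkey) + mask->range.start
   *
   * so filters sharing a mask hash only the bytes that mask can match.
   */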
  
  #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
  #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
  
  #define FL_KEY_IS_MASKED(mask, member)						\
  	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
  		   0, FL_KEY_MEMBER_SIZE(member))
  
  #define FL_KEY_SET(keys, cnt, id, member)					\
  	do {									\
  		keys[cnt].key_id = id;						\
  		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
  		cnt++;								\
  	} while (0)
  
  #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
  	do {									\
  		if (FL_KEY_IS_MASKED(mask, member))				\
  			FL_KEY_SET(keys, cnt, id, member);			\
  	} while (0)
  
  static void fl_init_dissector(struct flow_dissector *dissector,
  			      struct fl_flow_key *mask)
  {
  	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
  	size_t cnt = 0;
  
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_META, meta);
  	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
  	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_PORTS, tp);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_IP, ip);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_TCP, tcp);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ICMP, icmp);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ARP, arp);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_MPLS, mpls);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_VLAN, vlan);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
  	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
  	    FL_KEY_IS_MASKED(mask, enc_ipv6))
  		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
  			   enc_control);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
  	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
  			     FLOW_DISSECTOR_KEY_CT, ct);
  
  	skb_flow_dissector_init(dissector, keys, cnt);
  }
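
  /* For reference, a single FL_KEY_SET_IF_MASKED() use expands roughly
   * to (illustrative expansion, not generated code):
   *
   *   if (memchr_inv((char *)mask + offsetof(struct fl_flow_key, vlan),
   *                  0, sizeof_field(struct fl_flow_key, vlan))) {
   *           keys[cnt].key_id = FLOW_DISSECTOR_KEY_VLAN;
   *           keys[cnt].offset = offsetof(struct fl_flow_key, vlan);
   *           cnt++;
   *   }
   *
   * so only fields with at least one nonzero mask byte reach
   * skb_flow_dissector_init().
   */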
  
  static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
  					       struct fl_flow_mask *mask)
  {
  	struct fl_flow_mask *newmask;
  	int err;
  
  	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
  	if (!newmask)
  		return ERR_PTR(-ENOMEM);
  
  	fl_mask_copy(newmask, mask);

  	if ((newmask->key.tp_range.tp_min.dst &&
  	     newmask->key.tp_range.tp_max.dst) ||
  	    (newmask->key.tp_range.tp_min.src &&
  	     newmask->key.tp_range.tp_max.src))
  		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
  
  	err = fl_init_mask_hashtable(newmask);
  	if (err)
  		goto errout_free;
  
  	fl_init_dissector(&newmask->dissector, &newmask->key);
  
  	INIT_LIST_HEAD_RCU(&newmask->filters);
  
  	refcount_set(&newmask->refcnt, 1);
  	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
  				      &newmask->ht_node, mask_ht_params);
  	if (err)
  		goto errout_destroy;
  
  	spin_lock(&head->masks_lock);
  	list_add_tail_rcu(&newmask->list, &head->masks);
  	spin_unlock(&head->masks_lock);
  
  	return newmask;
  
  errout_destroy:
  	rhashtable_destroy(&newmask->ht);
  errout_free:
  	kfree(newmask);
  
  	return ERR_PTR(err);
  }
  
  static int fl_check_assign_mask(struct cls_fl_head *head,
  				struct cls_fl_filter *fnew,
  				struct cls_fl_filter *fold,
  				struct fl_flow_mask *mask)
  {
  	struct fl_flow_mask *newmask;
  	int ret = 0;
  
  	rcu_read_lock();
  
  	/* Insert mask as temporary node to prevent concurrent creation of mask
  	 * with same key. Any concurrent lookups with same key will return
  	 * -EAGAIN because mask's refcnt is zero.
  	 */
  	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
  						       &mask->ht_node,
  						       mask_ht_params);
  	if (!fnew->mask) {
  		rcu_read_unlock();
  		if (fold) {
  			ret = -EINVAL;
  			goto errout_cleanup;
  		}
  
  		newmask = fl_create_new_mask(head, mask);
  		if (IS_ERR(newmask)) {
  			ret = PTR_ERR(newmask);
  			goto errout_cleanup;
  		}
  
  		fnew->mask = newmask;
  		return 0;
  	} else if (IS_ERR(fnew->mask)) {
  		ret = PTR_ERR(fnew->mask);
  	} else if (fold && fold->mask != fnew->mask) {
  		ret = -EINVAL;
  	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
  		/* Mask was deleted concurrently, try again */
  		ret = -EAGAIN;
  	}
  	rcu_read_unlock();
  	return ret;
  
  errout_cleanup:
  	rhashtable_remove_fast(&head->ht, &mask->ht_node,
  			       mask_ht_params);
  	return ret;
  }
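
  /* Note: the -EAGAIN above is propagated out of fl_change(); the
   * classifier core is expected to restart the whole change operation
   * rather than retry this helper in isolation.
   */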
  
  static int fl_set_parms(struct net *net, struct tcf_proto *tp,
  			struct cls_fl_filter *f, struct fl_flow_mask *mask,
  			unsigned long base, struct nlattr **tb,
  			struct nlattr *est, bool ovr,
  			struct fl_flow_tmplt *tmplt, bool rtnl_held,
  			struct netlink_ext_ack *extack)
  {
  	int err;
  
  	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
  				extack);
  	if (err < 0)
  		return err;
  
  	if (tb[TCA_FLOWER_CLASSID]) {
  		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
  		if (!rtnl_held)
  			rtnl_lock();
  		tcf_bind_filter(tp, &f->res, base);
  		if (!rtnl_held)
  			rtnl_unlock();
  	}
  
  	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
  	if (err)
  		return err;
  
  	fl_mask_update_range(mask);
  	fl_set_masked_key(&f->mkey, &f->key, mask);
  
  	if (!fl_mask_fits_tmplt(tmplt, mask)) {
  		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
  		return -EINVAL;
  	}
  
  	return 0;
  }

  static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
  			       struct cls_fl_filter *fold,
  			       bool *in_ht)
  {
  	struct fl_flow_mask *mask = fnew->mask;
  	int err;
  
  	err = rhashtable_lookup_insert_fast(&mask->ht,
  					    &fnew->ht_node,
  					    mask->filter_ht_params);
  	if (err) {
  		*in_ht = false;
  		/* It is okay if filter with same key exists when
  		 * overwriting.
  		 */
  		return fold && err == -EEXIST ? 0 : err;
  	}
  
  	*in_ht = true;
  	return 0;
  }

  static int fl_change(struct net *net, struct sk_buff *in_skb,
  		     struct tcf_proto *tp, unsigned long base,
  		     u32 handle, struct nlattr **tca,
  		     void **arg, bool ovr, bool rtnl_held,
  		     struct netlink_ext_ack *extack)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);
  	struct cls_fl_filter *fold = *arg;
  	struct cls_fl_filter *fnew;
  	struct fl_flow_mask *mask;
  	struct nlattr **tb;
  	bool in_ht;
  	int err;
  
  	if (!tca[TCA_OPTIONS]) {
  		err = -EINVAL;
  		goto errout_fold;
  	}
  
  	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
  	if (!mask) {
  		err = -ENOBUFS;
  		goto errout_fold;
  	}
  
  	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
  	if (!tb) {
  		err = -ENOBUFS;
  		goto errout_mask_alloc;
  	}
  
  	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
  					  tca[TCA_OPTIONS], fl_policy, NULL);
  	if (err < 0)
  		goto errout_tb;
  
  	if (fold && handle && fold->handle != handle) {
  		err = -EINVAL;
  		goto errout_tb;
  	}
  
  	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
  	if (!fnew) {
  		err = -ENOBUFS;
  		goto errout_tb;
  	}
  	INIT_LIST_HEAD(&fnew->hw_list);
  	refcount_set(&fnew->refcnt, 1);
  
  	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
  	if (err < 0)
  		goto errout;
  
  	if (tb[TCA_FLOWER_FLAGS]) {
  		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
  
  		if (!tc_flags_valid(fnew->flags)) {
  			err = -EINVAL;
  			goto errout;
  		}
  	}
  
  	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
  			   tp->chain->tmplt_priv, rtnl_held, extack);
  	if (err)
  		goto errout;
  
  	err = fl_check_assign_mask(head, fnew, fold, mask);
  	if (err)
  		goto errout;
  
  	err = fl_ht_insert_unique(fnew, fold, &in_ht);
  	if (err)
  		goto errout_mask;
  
  	if (!tc_skip_hw(fnew->flags)) {
  		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
  		if (err)
  			goto errout_ht;
  	}
  
  	if (!tc_in_hw(fnew->flags))
  		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
  
  	spin_lock(&tp->lock);
  
  	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
  	 * proto again or create new one, if necessary.
  	 */
  	if (tp->deleting) {
  		err = -EAGAIN;
  		goto errout_hw;
  	}
  
  	if (fold) {
  		/* Fold filter was deleted concurrently. Retry lookup. */
  		if (fold->deleted) {
  			err = -EAGAIN;
  			goto errout_hw;
  		}
  
  		fnew->handle = handle;
  
  		if (!in_ht) {
  			struct rhashtable_params params =
  				fnew->mask->filter_ht_params;
  
  			err = rhashtable_insert_fast(&fnew->mask->ht,
  						     &fnew->ht_node,
  						     params);
  			if (err)
  				goto errout_hw;
  			in_ht = true;
  		}
  
  		refcount_inc(&fnew->refcnt);
  		rhashtable_remove_fast(&fold->mask->ht,
  				       &fold->ht_node,
  				       fold->mask->filter_ht_params);
  		idr_replace(&head->handle_idr, fnew, fnew->handle);
  		list_replace_rcu(&fold->list, &fnew->list);
  		fold->deleted = true;
  
  		spin_unlock(&tp->lock);
  
  		fl_mask_put(head, fold->mask);
  		if (!tc_skip_hw(fold->flags))
  			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
  		tcf_unbind_filter(tp, &fold->res);
  		/* Caller holds reference to fold, so refcnt is always > 0
  		 * after this.
  		 */
  		refcount_dec(&fold->refcnt);
  		__fl_put(fold);
  	} else {
  		if (handle) {
  			/* user specifies a handle and it doesn't exist */
  			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
  					    handle, GFP_ATOMIC);
  
  			/* Filter with specified handle was concurrently
  			 * inserted after initial check in cls_api. This is not
  			 * necessarily an error if NLM_F_EXCL is not set in
  			 * message flags. Returning EAGAIN will cause cls_api to
  			 * try to update concurrently inserted rule.
  			 */
  			if (err == -ENOSPC)
  				err = -EAGAIN;
  		} else {
  			handle = 1;
  			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
  					    INT_MAX, GFP_ATOMIC);
  		}
  		if (err)
  			goto errout_hw;
  
  		refcount_inc(&fnew->refcnt);
  		fnew->handle = handle;
  		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
  		spin_unlock(&tp->lock);
  	}
  
  	*arg = fnew;
  
  	kfree(tb);
  	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
  	return 0;
  
  errout_ht:
  	spin_lock(&tp->lock);
  errout_hw:
  	fnew->deleted = true;
  	spin_unlock(&tp->lock);
  	if (!tc_skip_hw(fnew->flags))
  		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
  	if (in_ht)
  		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
  				       fnew->mask->filter_ht_params);
  errout_mask:
  	fl_mask_put(head, fnew->mask);
  errout:
  	__fl_put(fnew);
  errout_tb:
  	kfree(tb);
  errout_mask_alloc:
  	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
  errout_fold:
  	if (fold)
  		__fl_put(fold);
  	return err;
  }
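
  /* Illustrative only -- the fold path above corresponds to an in-place
   * replace from userspace (values made up):
   *
   *   tc filter replace dev eth0 ingress protocol ip pref 1 handle 0x1 \
   *       flower dst_ip 198.51.100.1 action drop
   *
   * The old filter remains visible to readers until list_replace_rcu(),
   * so concurrent classification never sees an intermediate state.
   */
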
  static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
  		     bool rtnl_held, struct netlink_ext_ack *extack)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);
  	struct cls_fl_filter *f = arg;
  	bool last_on_mask;
  	int err = 0;
  
  	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
  	*last = list_empty(&head->masks);
  	__fl_put(f);
  
  	return err;
  }

  static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
  		    bool rtnl_held)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);
  	unsigned long id = arg->cookie, tmp;
  	struct cls_fl_filter *f;
  
  	arg->count = arg->skip;
  
  	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
  		/* don't return filters that are being deleted */
  		if (!refcount_inc_not_zero(&f->refcnt))
  			continue;
  		if (arg->fn(tp, f, arg) < 0) {
  			__fl_put(f);
  			arg->stop = 1;
  			break;
  		}
  		__fl_put(f);
  		arg->count++;
  	}
  	arg->cookie = id;
  }
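
  /* fl_walk() backs filter dumps, e.g. (illustrative):
   *
   *   tc -s filter show dev eth0 ingress
   *
   * arg->cookie carries the last visited handle so an interrupted
   * netlink dump can resume where it stopped.
   */
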
  static struct cls_fl_filter *
  fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);
  
  	spin_lock(&tp->lock);
  	if (list_empty(&head->hw_filters)) {
  		spin_unlock(&tp->lock);
  		return NULL;
  	}
  
  	if (!f)
  		f = list_entry(&head->hw_filters, struct cls_fl_filter,
  			       hw_list);
  	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
  		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
  			spin_unlock(&tp->lock);
  			return f;
  		}
  	}
  
  	spin_unlock(&tp->lock);
  	return NULL;
  }

  static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
  			void *cb_priv, struct netlink_ext_ack *extack)
  {
  	struct tcf_block *block = tp->chain->block;
  	struct flow_cls_offload cls_flower = {};
  	struct cls_fl_filter *f = NULL;
  	int err;
  
  	/* hw_filters list can only be changed by hw offload functions after
  	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
  	 * iterating it.
  	 */
  	ASSERT_RTNL();
  
  	while ((f = fl_get_next_hw_filter(tp, f, add))) {
  		cls_flower.rule =
  			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
  		if (!cls_flower.rule) {
  			__fl_put(f);
  			return -ENOMEM;
  		}
  
  		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
  					   extack);
  		cls_flower.command = add ?
  			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
  		cls_flower.cookie = (unsigned long)f;
  		cls_flower.rule->match.dissector = &f->mask->dissector;
  		cls_flower.rule->match.mask = &f->mask->key;
  		cls_flower.rule->match.key = &f->mkey;
  
  		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
  		if (err) {
  			kfree(cls_flower.rule);
  			if (tc_skip_sw(f->flags)) {
  				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
  				__fl_put(f);
  				return err;
  			}
  			goto next_flow;
  		}
  
  		cls_flower.classid = f->res.classid;
  
  		err = tc_setup_cb_reoffload(block, tp, add, cb,
  					    TC_SETUP_CLSFLOWER, &cls_flower,
  					    cb_priv, &f->flags,
  					    &f->in_hw_count);
  		tc_cleanup_flow_action(&cls_flower.rule->action);
  		kfree(cls_flower.rule);
  
  		if (err) {
  			__fl_put(f);
  			return err;
  		}
  next_flow:
  		__fl_put(f);
  	}
  
  	return 0;
  }

  static void fl_hw_add(struct tcf_proto *tp, void *type_data)
  {
  	struct flow_cls_offload *cls_flower = type_data;
  	struct cls_fl_filter *f =
  		(struct cls_fl_filter *) cls_flower->cookie;
  	struct cls_fl_head *head = fl_head_dereference(tp);
  
  	spin_lock(&tp->lock);
  	list_add(&f->hw_list, &head->hw_filters);
  	spin_unlock(&tp->lock);
  }
  
  static void fl_hw_del(struct tcf_proto *tp, void *type_data)
  {
  	struct flow_cls_offload *cls_flower = type_data;
  	struct cls_fl_filter *f =
  		(struct cls_fl_filter *) cls_flower->cookie;
  
  	spin_lock(&tp->lock);
  	if (!list_empty(&f->hw_list))
  		list_del_init(&f->hw_list);
  	spin_unlock(&tp->lock);
  }
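
  /* Assumption worth stating: fl_hw_add()/fl_hw_del() are wired up as
   * this classifier's hw_add/hw_del callbacks, keeping head->hw_filters
   * in sync with what drivers actually accepted so that fl_reoffload()
   * above iterates only offloaded filters.
   */
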
  static int fl_hw_create_tmplt(struct tcf_chain *chain,
  			      struct fl_flow_tmplt *tmplt)
  {
  	struct flow_cls_offload cls_flower = {};
  	struct tcf_block *block = chain->block;
  
  	cls_flower.rule = flow_rule_alloc(0);
  	if (!cls_flower.rule)
  		return -ENOMEM;
  
  	cls_flower.common.chain_index = chain->index;
  	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
  	cls_flower.cookie = (unsigned long) tmplt;
  	cls_flower.rule->match.dissector = &tmplt->dissector;
  	cls_flower.rule->match.mask = &tmplt->mask;
  	cls_flower.rule->match.key = &tmplt->dummy_key;
  
  	/* We don't care if driver (any of them) fails to handle this
  	 * call. It serves just as a hint for it.
  	 */
  	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
  	kfree(cls_flower.rule);
  
  	return 0;
  }
  
  static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
  				struct fl_flow_tmplt *tmplt)
  {
  	struct flow_cls_offload cls_flower = {};
  	struct tcf_block *block = chain->block;
  
  	cls_flower.common.chain_index = chain->index;
  	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
  	cls_flower.cookie = (unsigned long) tmplt;
  	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
  }

  static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
  			     struct nlattr **tca,
  			     struct netlink_ext_ack *extack)
  {
  	struct fl_flow_tmplt *tmplt;
  	struct nlattr **tb;
  	int err;
  
  	if (!tca[TCA_OPTIONS])
  		return ERR_PTR(-EINVAL);
  
  	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
  	if (!tb)
  		return ERR_PTR(-ENOBUFS);
  	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
  					  tca[TCA_OPTIONS], fl_policy, NULL);
  	if (err)
  		goto errout_tb;
  
  	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
  	if (!tmplt) {
  		err = -ENOMEM;
  		goto errout_tb;
  	}
  	tmplt->chain = chain;
  	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
  	if (err)
  		goto errout_tmplt;
  
  	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
  
  	err = fl_hw_create_tmplt(chain, tmplt);
  	if (err)
  		goto errout_tmplt;
  
  	kfree(tb);
  	return tmplt;
  
  errout_tmplt:
  	kfree(tmplt);
  errout_tb:
  	kfree(tb);
  	return ERR_PTR(err);
  }
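
  /* Illustrative only -- templates are created per chain from userspace,
   * roughly (exact syntax depends on the iproute2 version):
   *
   *   tc chain add dev eth0 ingress protocol ip \
   *       flower dst_ip 192.0.2.0/24
   *
   * Filters later added to that chain must fit the template's mask,
   * which fl_set_parms() enforces via fl_mask_fits_tmplt().
   */
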
  static void fl_tmplt_destroy(void *tmplt_priv)
  {
  	struct fl_flow_tmplt *tmplt = tmplt_priv;
  
  	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
  	kfree(tmplt);
  }

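
  /* Dump one key/mask pair as netlink attributes. A field whose mask is
   * all zeroes was never set by the user and is omitted; passing
   * TCA_FLOWER_UNSPEC as mask_type suppresses the mask attribute.
   */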
  static int fl_dump_key_val(struct sk_buff *skb,
  			   void *val, int val_type,
  			   void *mask, int mask_type, int len)
  {
  	int err;
  
  	if (!memchr_inv(mask, 0, len))
  		return 0;
  	err = nla_put(skb, val_type, len, val);
  	if (err)
  		return err;
  	if (mask_type != TCA_FLOWER_UNSPEC) {
  		err = nla_put(skb, mask_type, len, mask);
  		if (err)
  			return err;
  	}
  	return 0;
  }
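
  /* Dump the min/max destination and source ports of a port-range match. */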
  static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
  				  struct fl_flow_key *mask)
  {
  	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
  			    TCA_FLOWER_KEY_PORT_DST_MIN,
  			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
  			    sizeof(key->tp_range.tp_min.dst)) ||
  	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
  			    TCA_FLOWER_KEY_PORT_DST_MAX,
  			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
  			    sizeof(key->tp_range.tp_max.dst)) ||
  	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
  			    TCA_FLOWER_KEY_PORT_SRC_MIN,
  			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
  			    sizeof(key->tp_range.tp_min.src)) ||
  	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
  			    TCA_FLOWER_KEY_PORT_SRC_MAX,
  			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
  			    sizeof(key->tp_range.tp_max.src)))
  		return -1;
  
  	return 0;
  }
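
  /* Dump the MPLS TTL, TC, label and bottom-of-stack fields, skipping
   * those whose mask is zero.
   */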
  static int fl_dump_key_mpls(struct sk_buff *skb,
  			    struct flow_dissector_key_mpls *mpls_key,
  			    struct flow_dissector_key_mpls *mpls_mask)
  {
  	int err;
  
  	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
  		return 0;
  	if (mpls_mask->mpls_ttl) {
  		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
  				 mpls_key->mpls_ttl);
  		if (err)
  			return err;
  	}
  	if (mpls_mask->mpls_tc) {
  		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
  				 mpls_key->mpls_tc);
  		if (err)
  			return err;
  	}
  	if (mpls_mask->mpls_label) {
  		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
  				  mpls_key->mpls_label);
  		if (err)
  			return err;
  	}
  	if (mpls_mask->mpls_bos) {
  		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
  				 mpls_key->mpls_bos);
  		if (err)
  			return err;
  	}
  	return 0;
  }
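
  /* Dump the IP TOS/TTL pair, using the encapsulation attribute types
   * when @encap is true.
   */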
  static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
  			  struct flow_dissector_key_ip *key,
  			  struct flow_dissector_key_ip *mask)
  {
  	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
  	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
  	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
  	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
  
  	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
  	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
  		return -1;
  
  	return 0;
  }
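
  /* Dump VLAN id and priority; the attribute types are passed in so the
   * same helper serves both the outer and the inner (customer) VLAN tag.
   */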
  static int fl_dump_key_vlan(struct sk_buff *skb,
  			    int vlan_id_key, int vlan_prio_key,
  			    struct flow_dissector_key_vlan *vlan_key,
  			    struct flow_dissector_key_vlan *vlan_mask)
  {
  	int err;
  
  	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
  		return 0;
  	if (vlan_mask->vlan_id) {
  		err = nla_put_u16(skb, vlan_id_key,
  				  vlan_key->vlan_id);
  		if (err)
  			return err;
  	}
  	if (vlan_mask->vlan_priority) {
  		err = nla_put_u8(skb, vlan_prio_key,
  				 vlan_key->vlan_priority);
  		if (err)
  			return err;
  	}
  	return 0;
  }
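
  /* Copy a single flag bit from the flow dissector encoding into the
   * TCA_FLOWER_KEY_FLAGS encoding, key and mask alike.
   */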
  static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
  			    u32 *flower_key, u32 *flower_mask,
  			    u32 flower_flag_bit, u32 dissector_flag_bit)
  {
  	if (dissector_mask & dissector_flag_bit) {
  		*flower_mask |= flower_flag_bit;
  		if (dissector_key & dissector_flag_bit)
  			*flower_key |= flower_flag_bit;
  	}
  }
  
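  /* Dump the matched control flags and their mask as big-endian u32
   * attributes.
   */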
  static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
  {
  	u32 key, mask;
  	__be32 _key, _mask;
  	int err;
  
  	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
  		return 0;
  
  	key = 0;
  	mask = 0;
  
  	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
  			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
  	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
  			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
  			FLOW_DIS_FIRST_FRAG);
  
  	_key = cpu_to_be32(key);
  	_mask = cpu_to_be32(mask);
  
  	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
  	if (err)
  		return err;
  
  	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
  }
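
  /* Dump GENEVE tunnel options as a nested attribute, one
   * class/type/data triple per option.
   */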
  static int fl_dump_key_geneve_opt(struct sk_buff *skb,
  				  struct flow_dissector_key_enc_opts *enc_opts)
  {
  	struct geneve_opt *opt;
  	struct nlattr *nest;
  	int opt_off = 0;
  	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
  	if (!nest)
  		goto nla_put_failure;
  
  	while (enc_opts->len > opt_off) {
  		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
  
  		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
  				 opt->opt_class))
  			goto nla_put_failure;
  		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
  			       opt->type))
  			goto nla_put_failure;
  		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
  			    opt->length * 4, opt->opt_data))
  			goto nla_put_failure;
  
  		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
  	}
  	nla_nest_end(skb, nest);
  	return 0;
  
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -EMSGSIZE;
  }
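
  /* Dump the VXLAN GBP tunnel option as a nested attribute. */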
  static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
  				 struct flow_dissector_key_enc_opts *enc_opts)
  {
  	struct vxlan_metadata *md;
  	struct nlattr *nest;
  
  	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
  	if (!nest)
  		goto nla_put_failure;
  
  	md = (struct vxlan_metadata *)&enc_opts->data[0];
  	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
  		goto nla_put_failure;
  
  	nla_nest_end(skb, nest);
  	return 0;
  
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -EMSGSIZE;
  }
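
  /* Dump ERSPAN tunnel metadata: version 1 carries an index, version 2
   * a direction and hardware ID.
   */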
  static int fl_dump_key_erspan_opt(struct sk_buff *skb,
  				  struct flow_dissector_key_enc_opts *enc_opts)
  {
  	struct erspan_metadata *md;
  	struct nlattr *nest;
  
  	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
  	if (!nest)
  		goto nla_put_failure;
  
  	md = (struct erspan_metadata *)&enc_opts->data[0];
  	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
  		goto nla_put_failure;
  
  	if (md->version == 1 &&
  	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
  		goto nla_put_failure;
  
  	if (md->version == 2 &&
  	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
  			md->u.md2.dir) ||
  	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
  			get_hwid(&md->u.md2))))
  		goto nla_put_failure;
  
  	nla_nest_end(skb, nest);
  	return 0;
  
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -EMSGSIZE;
  }
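
  /* Dump conntrack state, zone, mark and labels; each part is emitted
   * only when the corresponding conntrack feature is compiled in.
   */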
  static int fl_dump_key_ct(struct sk_buff *skb,
  			  struct flow_dissector_key_ct *key,
  			  struct flow_dissector_key_ct *mask)
  {
  	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
  	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
  			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
  			    sizeof(key->ct_state)))
  		goto nla_put_failure;
  
  	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
  	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
  			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
  			    sizeof(key->ct_zone)))
  		goto nla_put_failure;
  
  	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
  	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
  			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
  			    sizeof(key->ct_mark)))
  		goto nla_put_failure;
  
  	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
  	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
  			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
  			    sizeof(key->ct_labels)))
  		goto nla_put_failure;
  
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
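
  /* Dump tunnel options nested under @enc_opt_type, dispatching on the
   * tunnel type the options were parsed for.
   */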
  static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
  			       struct flow_dissector_key_enc_opts *enc_opts)
  {
  	struct nlattr *nest;
  	int err;
  
  	if (!enc_opts->len)
  		return 0;
  	nest = nla_nest_start_noflag(skb, enc_opt_type);
  	if (!nest)
  		goto nla_put_failure;
  
  	switch (enc_opts->dst_opt_type) {
  	case TUNNEL_GENEVE_OPT:
  		err = fl_dump_key_geneve_opt(skb, enc_opts);
  		if (err)
  			goto nla_put_failure;
  		break;
  	case TUNNEL_VXLAN_OPT:
  		err = fl_dump_key_vxlan_opt(skb, enc_opts);
  		if (err)
  			goto nla_put_failure;
  		break;
  	case TUNNEL_ERSPAN_OPT:
  		err = fl_dump_key_erspan_opt(skb, enc_opts);
  		if (err)
  			goto nla_put_failure;
  		break;
  	default:
  		goto nla_put_failure;
  	}
  	nla_nest_end(skb, nest);
  	return 0;
  
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -EMSGSIZE;
  }
  
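  /* Dump both the tunnel option key and its mask. */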
  static int fl_dump_key_enc_opt(struct sk_buff *skb,
  			       struct flow_dissector_key_enc_opts *key_opts,
  			       struct flow_dissector_key_enc_opts *msk_opts)
  {
  	int err;
  
  	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
  	if (err)
  		return err;
  
  	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
  }
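
  /* Dump a complete flower key/mask pair as netlink attributes, one
   * group of attributes per supported match field.
   */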
  static int fl_dump_key(struct sk_buff *skb, struct net *net,
  		       struct fl_flow_key *key, struct fl_flow_key *mask)
  {
  	if (mask->meta.ingress_ifindex) {
  		struct net_device *dev;
  		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
  		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
  			goto nla_put_failure;
  	}
  
  	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
  			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
  			    sizeof(key->eth.dst)) ||
  	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
  			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
  			    sizeof(key->eth.src)) ||
  	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
  			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
  			    sizeof(key->basic.n_proto)))
  		goto nla_put_failure;

  	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
  		goto nla_put_failure;
  	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
  			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
  		goto nla_put_failure;
  	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
  			     TCA_FLOWER_KEY_CVLAN_PRIO,
  			     &key->cvlan, &mask->cvlan) ||
  	    (mask->cvlan.vlan_tpid &&
  	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
  			  key->cvlan.vlan_tpid)))
  		goto nla_put_failure;
  	if (mask->basic.n_proto) {
  		if (mask->cvlan.vlan_tpid) {
  			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
  					 key->basic.n_proto))
  				goto nla_put_failure;
  		} else if (mask->vlan.vlan_tpid) {
  			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
  					 key->basic.n_proto))
  				goto nla_put_failure;
  		}
  	}
  	if ((key->basic.n_proto == htons(ETH_P_IP) ||
  	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
  	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
  			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
  			    sizeof(key->basic.ip_proto)) ||
  	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
  		goto nla_put_failure;
  	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
  	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
  			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
  			     sizeof(key->ipv4.src)) ||
  	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
  			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
  			     sizeof(key->ipv4.dst))))
  		goto nla_put_failure;
  	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
  		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
  				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
  				  sizeof(key->ipv6.src)) ||
  		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
  				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
  				  sizeof(key->ipv6.dst))))
  		goto nla_put_failure;
  
  	if (key->basic.ip_proto == IPPROTO_TCP &&
  	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
  			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
  			     sizeof(key->tp.src)) ||
  	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
  			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
  			     sizeof(key->tp.dst)) ||
  	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
  			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
  			     sizeof(key->tcp.flags))))
  		goto nla_put_failure;
  	else if (key->basic.ip_proto == IPPROTO_UDP &&
  		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
  				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
  				  sizeof(key->tp.src)) ||
  		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
  				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
  				  sizeof(key->tp.dst))))
  		goto nla_put_failure;
  	else if (key->basic.ip_proto == IPPROTO_SCTP &&
  		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
  				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
  				  sizeof(key->tp.src)) ||
  		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
  				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
  				  sizeof(key->tp.dst))))
  		goto nla_put_failure;
  	else if (key->basic.n_proto == htons(ETH_P_IP) &&
  		 key->basic.ip_proto == IPPROTO_ICMP &&
  		 (fl_dump_key_val(skb, &key->icmp.type,
  				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
  				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
  				  sizeof(key->icmp.type)) ||
  		  fl_dump_key_val(skb, &key->icmp.code,
  				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
  				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
  				  sizeof(key->icmp.code))))
  		goto nla_put_failure;
  	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
  		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
  		 (fl_dump_key_val(skb, &key->icmp.type,
  				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
  				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
  				  sizeof(key->icmp.type)) ||
  		  fl_dump_key_val(skb, &key->icmp.code,
  				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
  				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
  				  sizeof(key->icmp.code))))
  		goto nla_put_failure;
  	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
  		  key->basic.n_proto == htons(ETH_P_RARP)) &&
  		 (fl_dump_key_val(skb, &key->arp.sip,
  				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
  				  TCA_FLOWER_KEY_ARP_SIP_MASK,
  				  sizeof(key->arp.sip)) ||
  		  fl_dump_key_val(skb, &key->arp.tip,
  				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
  				  TCA_FLOWER_KEY_ARP_TIP_MASK,
  				  sizeof(key->arp.tip)) ||
  		  fl_dump_key_val(skb, &key->arp.op,
  				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
  				  TCA_FLOWER_KEY_ARP_OP_MASK,
  				  sizeof(key->arp.op)) ||
  		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
  				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
  				  sizeof(key->arp.sha)) ||
  		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
  				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
  				  sizeof(key->arp.tha))))
  		goto nla_put_failure;

  	if ((key->basic.ip_proto == IPPROTO_TCP ||
  	     key->basic.ip_proto == IPPROTO_UDP ||
  	     key->basic.ip_proto == IPPROTO_SCTP) &&
  	     fl_dump_key_port_range(skb, key, mask))
  		goto nla_put_failure;
  	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
  	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
  			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
  			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
  			    sizeof(key->enc_ipv4.src)) ||
  	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
  			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
  			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
  			     sizeof(key->enc_ipv4.dst))))
  		goto nla_put_failure;
  	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
  		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
  			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
  			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
  			    sizeof(key->enc_ipv6.src)) ||
  		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
  				 TCA_FLOWER_KEY_ENC_IPV6_DST,
  				 &mask->enc_ipv6.dst,
  				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
  			    sizeof(key->enc_ipv6.dst))))
  		goto nla_put_failure;
  
  	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
  			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
  			    sizeof(key->enc_key_id)) ||
  	    fl_dump_key_val(skb, &key->enc_tp.src,
  			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
  			    &mask->enc_tp.src,
  			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
  			    sizeof(key->enc_tp.src)) ||
  	    fl_dump_key_val(skb, &key->enc_tp.dst,
  			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
  			    &mask->enc_tp.dst,
  			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
  			    sizeof(key->enc_tp.dst)) ||
  	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
  	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
  		goto nla_put_failure;
  	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
  		goto nla_put_failure;
  	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
  		goto nla_put_failure;
  	return 0;
  
  nla_put_failure:
  	return -EMSGSIZE;
  }
  
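  /* Dump one filter to user space. Fields that a concurrent change can
   * modify are read under tp->lock; hardware stats are refreshed before
   * the extension stats are dumped, unless the filter was installed
   * with skip_hw.
   */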
  static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
  		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
  {
  	struct cls_fl_filter *f = fh;
  	struct nlattr *nest;
  	struct fl_flow_key *key, *mask;
  	bool skip_hw;
  
  	if (!f)
  		return skb->len;
  
  	t->tcm_handle = f->handle;
  	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
  	if (!nest)
  		goto nla_put_failure;
  	spin_lock(&tp->lock);
  	if (f->res.classid &&
  	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
  		goto nla_put_failure_locked;
  
  	key = &f->key;
  	mask = &f->mask->key;
  	skip_hw = tc_skip_hw(f->flags);
  
  	if (fl_dump_key(skb, net, key, mask))
  		goto nla_put_failure_locked;

  	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
  		goto nla_put_failure_locked;
  
  	spin_unlock(&tp->lock);
  
  	if (!skip_hw)
  		fl_hw_update_stats(tp, f, rtnl_held);

  	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
  		goto nla_put_failure;
  	if (tcf_exts_dump(skb, &f->exts))
  		goto nla_put_failure;
  
  	nla_nest_end(skb, nest);
  
  	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
  		goto nla_put_failure;
  
  	return skb->len;
  nla_put_failure_locked:
  	spin_unlock(&tp->lock);
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -1;
  }
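
  /* Dump a chain template's dummy key and mask. */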
  static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
  {
  	struct fl_flow_tmplt *tmplt = tmplt_priv;
  	struct fl_flow_key *key, *mask;
  	struct nlattr *nest;
  	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
  	if (!nest)
  		goto nla_put_failure;
  
  	key = &tmplt->dummy_key;
  	mask = &tmplt->mask;
  
  	if (fl_dump_key(skb, net, key, mask))
  		goto nla_put_failure;
  
  	nla_nest_end(skb, nest);
  
  	return skb->len;
  
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -EMSGSIZE;
  }
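
  /* Update the class a filter's result points to when that class is
   * bound or unbound.
   */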
  static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
  			  unsigned long base)
  {
  	struct cls_fl_filter *f = fh;
  	if (f && f->res.classid == classid) {
  		if (cl)
  			__tcf_bind_filter(q, &f->res, base);
  		else
  			__tcf_unbind_filter(q, &f->res);
  	}
  }
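
  /* Flag the classifier instance for deletion once it no longer holds
   * any filters.
   */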
  static bool fl_delete_empty(struct tcf_proto *tp)
  {
  	struct cls_fl_head *head = fl_head_dereference(tp);
  
  	spin_lock(&tp->lock);
  	tp->deleting = idr_is_empty(&head->handle_idr);
  	spin_unlock(&tp->lock);
  
  	return tp->deleting;
  }
  static struct tcf_proto_ops cls_fl_ops __read_mostly = {
  	.kind		= "flower",
  	.classify	= fl_classify,
  	.init		= fl_init,
  	.destroy	= fl_destroy,
  	.get		= fl_get,
  	.put		= fl_put,
  	.change		= fl_change,
  	.delete		= fl_delete,
  	.delete_empty	= fl_delete_empty,
  	.walk		= fl_walk,
  	.reoffload	= fl_reoffload,
  	.hw_add		= fl_hw_add,
  	.hw_del		= fl_hw_del,
  	.dump		= fl_dump,
  	.bind_class	= fl_bind_class,
  	.tmplt_create	= fl_tmplt_create,
  	.tmplt_destroy	= fl_tmplt_destroy,
  	.tmplt_dump	= fl_tmplt_dump,
  	.owner		= THIS_MODULE,
  	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
  };
  
  static int __init cls_fl_init(void)
  {
  	return register_tcf_proto_ops(&cls_fl_ops);
  }
  
  static void __exit cls_fl_exit(void)
  {
  	unregister_tcf_proto_ops(&cls_fl_ops);
  }
  
  module_init(cls_fl_init);
  module_exit(cls_fl_exit);
  
  MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
  MODULE_DESCRIPTION("Flower classifier");
  MODULE_LICENSE("GPL v2");