net/sched/cls_bpf.c

  // SPDX-License-Identifier: GPL-2.0-only
  /*
   * Berkeley Packet Filter based traffic classifier
   *
   * Might be used to classify traffic through flexible, user-defined and
   * possibly JIT-ed BPF filters for traffic control as an alternative to
   * ematches.
   *
   * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
   */
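
  /* Example usage (a sketch, not part of this file; "em1", "prog.o" and the
   * section name are placeholders, and exact flag syntax depends on the
   * iproute2 version in use):
   *
   *   tc qdisc add dev em1 clsact
   *   tc filter add dev em1 ingress bpf direct-action obj prog.o sec cls
   *
   * This attaches the eBPF program from section "cls" of prog.o as a
   * direct-action classifier on the ingress hook. A minimal program for
   * that section could be:
   *
   *   __section("cls") int cls_main(struct __sk_buff *skb)
   *   {
   *           return TC_ACT_OK;
   *   }
   */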
  
  #include <linux/module.h>
  #include <linux/types.h>
  #include <linux/skbuff.h>
  #include <linux/filter.h>
  #include <linux/bpf.h>
  #include <linux/idr.h>

  #include <net/rtnetlink.h>
  #include <net/pkt_cls.h>
  #include <net/sock.h>
  
  MODULE_LICENSE("GPL");
  MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
  MODULE_DESCRIPTION("TC BPF based classifier");
  #define CLS_BPF_NAME_LEN	256
  #define CLS_BPF_SUPPORTED_GEN_FLAGS		\
  	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

  struct cls_bpf_head {
  	struct list_head plist;
  	struct idr handle_idr;
  	struct rcu_head rcu;
  };
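
  /* One attached program instance. bpf_ops/bpf_num_ops are only set for
   * classic BPF loaded via TCA_BPF_OPS, bpf_name only for eBPF loaded by
   * fd; cls_bpf_is_ebpf() below distinguishes the two cases.
   */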
  
  struct cls_bpf_prog {
  	struct bpf_prog *filter;
  	struct list_head link;
  	struct tcf_result res;
  	bool exts_integrated;
  	u32 gen_flags;
  	unsigned int in_hw_count;
  	struct tcf_exts exts;
  	u32 handle;
  	u16 bpf_num_ops;
  	struct sock_filter *bpf_ops;
  	const char *bpf_name;
  	struct tcf_proto *tp;
  	struct rcu_work rwork;
  };
  
  static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
  	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
  	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
  	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
  	[TCA_BPF_FD]		= { .type = NLA_U32 },
  	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
  				    .len = CLS_BPF_NAME_LEN },
  	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
  	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
  				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
  };
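
  /* Return codes an exts-integrated (direct-action) program may pass back
   * to the classify loop; anything unrecognized is mapped to TC_ACT_UNSPEC,
   * which makes the loop move on to the next program.
   */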
  static int cls_bpf_exec_opcode(int code)
  {
  	switch (code) {
  	case TC_ACT_OK:
  	case TC_ACT_SHOT:
  	case TC_ACT_STOLEN:
  	case TC_ACT_TRAP:
  	case TC_ACT_REDIRECT:
  	case TC_ACT_UNSPEC:
  		return code;
  	default:
  		return TC_ACT_UNSPEC;
  	}
  }
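
  /* Classify path: walk the attached programs under RCU and run each one on
   * the skb. At ingress the MAC header is pushed back first so that data
   * offsets match what the program would see at egress. The return value is
   * either decoded as a TC opcode (direct-action mode) or read as 0 (no
   * match, try the next program), -1 (use the classid from prog->res) or an
   * explicit classid.
   */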
  static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
  			    struct tcf_result *res)
  {
  	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
  	bool at_ingress = skb_at_tc_ingress(skb);
  	struct cls_bpf_prog *prog;
  	int ret = -1;

  	/* Needed here for accessing maps. */
  	rcu_read_lock();
  	list_for_each_entry_rcu(prog, &head->plist, link) {
  		int filter_res;
  		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
  		if (tc_skip_sw(prog->gen_flags)) {
  			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
  		} else if (at_ingress) {
  			/* It is safe to push/pull even if skb_shared() */
  			__skb_push(skb, skb->mac_len);
  			bpf_compute_data_pointers(skb);
  			filter_res = BPF_PROG_RUN(prog->filter, skb);
  			__skb_pull(skb, skb->mac_len);
  		} else {
  			bpf_compute_data_pointers(skb);
  			filter_res = BPF_PROG_RUN(prog->filter, skb);
  		}

  		if (prog->exts_integrated) {
  			res->class   = 0;
  			res->classid = TC_H_MAJ(prog->res.classid) |
  				       qdisc_skb_cb(skb)->tc_classid;
  
  			ret = cls_bpf_exec_opcode(filter_res);
  			if (ret == TC_ACT_UNSPEC)
  				continue;
  			break;
  		}
  		if (filter_res == 0)
  			continue;
  		if (filter_res != -1) {
  			res->class   = 0;
  			res->classid = filter_res;
  		} else {
  			*res = prog->res;
  		}
  
  		ret = tcf_exts_exec(skb, &prog->exts, res);
  		if (ret < 0)
  			continue;
  		break;
  	}
  	rcu_read_unlock();

  	return ret;
  }
  static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
  {
  	return !prog->bpf_ops;
  }
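
  /* Push one command to the block's offload callbacks: prog and oldprog set
   * means replace, prog alone add, oldprog alone destroy. A failed
   * add/replace is rolled back by issuing the inverse command, and -EINVAL
   * is returned when skip_sw was requested but no device bound the program.
   */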
  static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
  			       struct cls_bpf_prog *oldprog,
  			       struct netlink_ext_ack *extack)
  {
  	struct tcf_block *block = tp->chain->block;
  	struct tc_cls_bpf_offload cls_bpf = {};
  	struct cls_bpf_prog *obj;
  	bool skip_sw;
  	int err;

  	skip_sw = prog && tc_skip_sw(prog->gen_flags);
  	obj = prog ?: oldprog;
  	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
  	cls_bpf.command = TC_CLSBPF_OFFLOAD;
  	cls_bpf.exts = &obj->exts;
  	cls_bpf.prog = prog ? prog->filter : NULL;
  	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
  	cls_bpf.name = obj->bpf_name;
  	cls_bpf.exts_integrated = obj->exts_integrated;

  	if (oldprog && prog)
  		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
  					  skip_sw, &oldprog->gen_flags,
  					  &oldprog->in_hw_count,
  					  &prog->gen_flags, &prog->in_hw_count,
  					  true);
  	else if (prog)
  		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
  				      skip_sw, &prog->gen_flags,
  				      &prog->in_hw_count, true);
  	else
  		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
  					  skip_sw, &oldprog->gen_flags,
  					  &oldprog->in_hw_count, true);

  	if (prog && err) {
  		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
  		return err;
  	}
  	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
  		return -EINVAL;

  	return 0;
  }
  static u32 cls_bpf_flags(u32 flags)
  {
  	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
  }
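
  /* Decide whether the hardware needs to be involved at all: the
   * skip_hw/skip_sw placement flags must not change across a replace, and
   * programs marked skip_hw are treated as absent from the device's point
   * of view.
   */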
  static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
  			   struct cls_bpf_prog *oldprog,
  			   struct netlink_ext_ack *extack)
  {
  	if (prog && oldprog &&
  	    cls_bpf_flags(prog->gen_flags) !=
  	    cls_bpf_flags(oldprog->gen_flags))
  		return -EINVAL;

  	if (prog && tc_skip_hw(prog->gen_flags))
  		prog = NULL;
  	if (oldprog && tc_skip_hw(oldprog->gen_flags))
  		oldprog = NULL;
  	if (!prog && !oldprog)
  		return 0;

  	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
  }
  
  static void cls_bpf_stop_offload(struct tcf_proto *tp,
  				 struct cls_bpf_prog *prog,
  				 struct netlink_ext_ack *extack)
  {
  	int err;
  	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
  	if (err)
  		pr_err("Stopping hardware offload failed: %d\n", err);
  }
  static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
  					 struct cls_bpf_prog *prog)
  {
  	struct tcf_block *block = tp->chain->block;
  	struct tc_cls_bpf_offload cls_bpf = {};
  	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
  	cls_bpf.command = TC_CLSBPF_STATS;
  	cls_bpf.exts = &prog->exts;
  	cls_bpf.prog = prog->filter;
  	cls_bpf.name = prog->bpf_name;
  	cls_bpf.exts_integrated = prog->exts_integrated;

  	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
  }
  static int cls_bpf_init(struct tcf_proto *tp)
  {
  	struct cls_bpf_head *head;
  
  	head = kzalloc(sizeof(*head), GFP_KERNEL);
  	if (head == NULL)
  		return -ENOBUFS;
  	INIT_LIST_HEAD_RCU(&head->plist);
  	idr_init(&head->handle_idr);
  	rcu_assign_pointer(tp->root, head);
  
  	return 0;
  }
  static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
  {
  	if (cls_bpf_is_ebpf(prog))
  		bpf_prog_put(prog->filter);
  	else
  		bpf_prog_destroy(prog->filter);

  	kfree(prog->bpf_name);
  	kfree(prog->bpf_ops);
  }
  
  static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
  {
  	tcf_exts_destroy(&prog->exts);
  	tcf_exts_put_net(&prog->exts);
  
  	cls_bpf_free_parms(prog);
  	kfree(prog);
  }
  static void cls_bpf_delete_prog_work(struct work_struct *work)
  {
  	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
  						 struct cls_bpf_prog,
  						 rwork);
  	rtnl_lock();
  	__cls_bpf_delete_prog(prog);
  	rtnl_unlock();
  }
  static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
  			     struct netlink_ext_ack *extack)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	idr_remove(&head->handle_idr, prog->handle);
  	cls_bpf_stop_offload(tp, prog, extack);
  	list_del_rcu(&prog->link);
  	tcf_unbind_filter(tp, &prog->res);
  	if (tcf_exts_get_net(&prog->exts))
  		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
  	else
  		__cls_bpf_delete_prog(prog);
  }

  static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
  			  bool rtnl_held, struct netlink_ext_ack *extack)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	__cls_bpf_delete(tp, arg, extack);
  	*last = list_empty(&head->plist);
  	return 0;
  }
  static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
  			    struct netlink_ext_ack *extack)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *prog, *tmp;
  	list_for_each_entry_safe(prog, tmp, &head->plist, link)
  		__cls_bpf_delete(tp, prog, extack);

  	idr_destroy(&head->handle_idr);
  	kfree_rcu(head, rcu);
  }
  static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *prog;

  	list_for_each_entry(prog, &head->plist, link) {
  		if (prog->handle == handle)
  			return prog;
  	}
  	return NULL;
  }
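
  /* Build a classic BPF program from the TCA_BPF_OPS/TCA_BPF_OPS_LEN
   * attributes: validate the instruction count, duplicate the opcode array
   * and let bpf_prog_create() perform the usual checks and conversion.
   */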
  static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
  {
  	struct sock_filter *bpf_ops;
  	struct sock_fprog_kern fprog_tmp;
  	struct bpf_prog *fp;
  	u16 bpf_size, bpf_num_ops;
  	int ret;
  	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
  	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
  		return -EINVAL;

  	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
  	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
  		return -EINVAL;

  	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
  	if (bpf_ops == NULL)
  		return -ENOMEM;

  	fprog_tmp.len = bpf_num_ops;
  	fprog_tmp.filter = bpf_ops;

  	ret = bpf_prog_create(&fp, &fprog_tmp);
  	if (ret < 0) {
  		kfree(bpf_ops);
  		return ret;
  	}

  	prog->bpf_ops = bpf_ops;
  	prog->bpf_num_ops = bpf_num_ops;
  	prog->bpf_name = NULL;
  	prog->filter = fp;

  	return 0;
  }
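
  /* Attach an already loaded eBPF program via the fd in TCA_BPF_FD. The
   * program must be of type BPF_PROG_TYPE_SCHED_CLS; with SKIP_SW set, a
   * device-bound (offloaded) program is also accepted. An optional
   * TCA_BPF_NAME is duplicated for later dumps.
   */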
  static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
  				 u32 gen_flags, const struct tcf_proto *tp)
  {
  	struct bpf_prog *fp;
  	char *name = NULL;
  	bool skip_sw;
  	u32 bpf_fd;
  
  	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
  	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

  	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
  	if (IS_ERR(fp))
  		return PTR_ERR(fp);
  	if (tb[TCA_BPF_NAME]) {
  		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
  		if (!name) {
  			bpf_prog_put(fp);
  			return -ENOMEM;
  		}
  	}
  
  	prog->bpf_ops = NULL;
  	prog->bpf_name = name;
  	prog->filter = fp;

  	if (fp->dst_needed)
  		tcf_block_netif_keep_dst(tp->chain->block);

  	return 0;
  }
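
  /* Parameter handling shared by create and replace: validate the actions,
   * parse the TCA_BPF_* flags, load the program (classic ops or eBPF fd,
   * mutually exclusive) and bind the classid if one was given.
   */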
  static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
  			     struct cls_bpf_prog *prog, unsigned long base,
  			     struct nlattr **tb, struct nlattr *est, bool ovr,
  			     struct netlink_ext_ack *extack)
  {
  	bool is_bpf, is_ebpf, have_exts = false;
  	u32 gen_flags = 0;
  	int ret;
  
  	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
  	is_ebpf = tb[TCA_BPF_FD];
  	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
  		return -EINVAL;
  	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
  				extack);
  	if (ret < 0)
  		return ret;
  	if (tb[TCA_BPF_FLAGS]) {
  		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
  		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
  			return -EINVAL;
  
  		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
  	}
  	if (tb[TCA_BPF_FLAGS_GEN]) {
  		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
  		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
  		    !tc_flags_valid(gen_flags))
  			return -EINVAL;
  	}

  	prog->exts_integrated = have_exts;
  	prog->gen_flags = gen_flags;

  	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
  		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
  	if (ret < 0)
  		return ret;

  	if (tb[TCA_BPF_CLASSID]) {
  		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
  		tcf_bind_filter(tp, &prog->res, base);
  	}

  	return 0;
  }
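
  /* Create or replace a filter. A free handle is allocated from the IDR
   * when none was supplied; on replace, the new program takes the old one's
   * place in both the IDR and the RCU list, and the old program is freed
   * via the workqueue once concurrent readers are done with it.
   */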
  static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
  			  struct tcf_proto *tp, unsigned long base,
  			  u32 handle, struct nlattr **tca,
  			  void **arg, bool ovr, bool rtnl_held,
  			  struct netlink_ext_ack *extack)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *oldprog = *arg;
  	struct nlattr *tb[TCA_BPF_MAX + 1];
  	struct cls_bpf_prog *prog;
  	int ret;
  
  	if (tca[TCA_OPTIONS] == NULL)
  		return -EINVAL;
  	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
  					  bpf_policy, NULL);
  	if (ret < 0)
  		return ret;
  	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
  	if (!prog)
  		return -ENOBUFS;
  	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
  	if (ret < 0)
  		goto errout;
  
  	if (oldprog) {
  		if (handle && oldprog->handle != handle) {
  			ret = -EINVAL;
  			goto errout;
  		}
  	}
  	if (handle == 0) {
  		handle = 1;
  		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
  				    INT_MAX, GFP_KERNEL);
  	} else if (!oldprog) {
  		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
  				    handle, GFP_KERNEL);
  	}
  	if (ret)
  		goto errout;
  	prog->handle = handle;
  	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
  				extack);
  	if (ret < 0)
  		goto errout_idr;

  	ret = cls_bpf_offload(tp, prog, oldprog, extack);
  	if (ret)
  		goto errout_parms;

  	if (!tc_in_hw(prog->gen_flags))
  		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
  	if (oldprog) {
  		idr_replace(&head->handle_idr, prog, handle);
  		list_replace_rcu(&oldprog->link, &prog->link);
  		tcf_unbind_filter(tp, &oldprog->res);
  		tcf_exts_get_net(&oldprog->exts);
  		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
  	} else {
  		list_add_rcu(&prog->link, &head->plist);
  	}

  	*arg = prog;
  	return 0;

  errout_parms:
  	cls_bpf_free_parms(prog);
  errout_idr:
  	if (!oldprog)
  		idr_remove(&head->handle_idr, prog->handle);
  errout:
  	tcf_exts_destroy(&prog->exts);
  	kfree(prog);
  	return ret;
  }
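
  /* Dump helpers: classic programs are reported as raw opcodes, eBPF
   * programs by name, id and tag, so userspace (e.g. bpftool) can correlate
   * the filter with the loaded program.
   */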
  static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
  				 struct sk_buff *skb)
  {
  	struct nlattr *nla;
  
  	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
  		return -EMSGSIZE;
  
  	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
  			  sizeof(struct sock_filter));
  	if (nla == NULL)
  		return -EMSGSIZE;
  
  	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
  
  	return 0;
  }
  
  static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
  				  struct sk_buff *skb)
  {
  	struct nlattr *nla;
  	if (prog->bpf_name &&
  	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
  		return -EMSGSIZE;
  	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
  		return -EMSGSIZE;
  	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
  	if (nla == NULL)
  		return -EMSGSIZE;
  	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

  	return 0;
  }
  static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
  			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
  {
  	struct cls_bpf_prog *prog = fh;
  	struct nlattr *nest;
  	u32 bpf_flags = 0;
  	int ret;
  
  	if (prog == NULL)
  		return skb->len;
  
  	tm->tcm_handle = prog->handle;
  	cls_bpf_offload_update_stats(tp, prog);
  	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
  	if (nest == NULL)
  		goto nla_put_failure;
  	if (prog->res.classid &&
  	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
  		goto nla_put_failure;

  	if (cls_bpf_is_ebpf(prog))
  		ret = cls_bpf_dump_ebpf_info(prog, skb);
  	else
  		ret = cls_bpf_dump_bpf_info(prog, skb);
  	if (ret)
  		goto nla_put_failure;
  	if (tcf_exts_dump(skb, &prog->exts) < 0)
  		goto nla_put_failure;
  	if (prog->exts_integrated)
  		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
  	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
  		goto nla_put_failure;
  	if (prog->gen_flags &&
  	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
  		goto nla_put_failure;

  	nla_nest_end(skb, nest);
  	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
  		goto nla_put_failure;
  
  	return skb->len;
  
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -1;
  }
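
  /* Invoked when the class a filter points at is grafted or removed, so
   * that the res.class reference can be re-bound or dropped.
   */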
  static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
  			       void *q, unsigned long base)
  {
  	struct cls_bpf_prog *prog = fh;
  	if (prog && prog->res.classid == classid) {
  		if (cl)
  			__tcf_bind_filter(q, &prog->res, base);
  		else
  			__tcf_unbind_filter(q, &prog->res);
  	}
  }
  static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
  			 bool rtnl_held)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *prog;
  	list_for_each_entry(prog, &head->plist, link) {
  		if (arg->count < arg->skip)
  			goto skip;
  		if (arg->fn(tp, prog, arg) < 0) {
  			arg->stop = 1;
  			break;
  		}
  skip:
  		arg->count++;
  	}
  }
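
  /* Replay all programs of this classifier to a single offload callback;
   * used when a callback registers or unregisters so that late-bound
   * hardware converges to the same state as the software path.
   */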
  static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
  			     void *cb_priv, struct netlink_ext_ack *extack)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct tcf_block *block = tp->chain->block;
  	struct tc_cls_bpf_offload cls_bpf = {};
  	struct cls_bpf_prog *prog;
  	int err;
  
  	list_for_each_entry(prog, &head->plist, link) {
  		if (tc_skip_hw(prog->gen_flags))
  			continue;
  
  		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
  					   extack);
  		cls_bpf.command = TC_CLSBPF_OFFLOAD;
  		cls_bpf.exts = &prog->exts;
  		cls_bpf.prog = add ? prog->filter : NULL;
  		cls_bpf.oldprog = add ? NULL : prog->filter;
  		cls_bpf.name = prog->bpf_name;
  		cls_bpf.exts_integrated = prog->exts_integrated;
  		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
  					    &cls_bpf, cb_priv, &prog->gen_flags,
  					    &prog->in_hw_count);
  		if (err)
  			return err;
  	}
  
  	return 0;
  }
  static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
  	.kind		=	"bpf",
  	.owner		=	THIS_MODULE,
  	.classify	=	cls_bpf_classify,
  	.init		=	cls_bpf_init,
  	.destroy	=	cls_bpf_destroy,
  	.get		=	cls_bpf_get,
  	.change		=	cls_bpf_change,
  	.delete		=	cls_bpf_delete,
  	.walk		=	cls_bpf_walk,
  	.reoffload	=	cls_bpf_reoffload,
  	.dump		=	cls_bpf_dump,
  	.bind_class	=	cls_bpf_bind_class,
  };
  
  static int __init cls_bpf_init_mod(void)
  {
  	return register_tcf_proto_ops(&cls_bpf_ops);
  }
  
  static void __exit cls_bpf_exit_mod(void)
  {
  	unregister_tcf_proto_ops(&cls_bpf_ops);
  }
  
  module_init(cls_bpf_init_mod);
  module_exit(cls_bpf_exit_mod);