net/sched/cls_bpf.c

  /*
   * Berkeley Packet Filter based traffic classifier
   *
   * Might be used to classify traffic through flexible, user-defined and
   * possibly JIT-ed BPF filters for traffic control as an alternative to
   * ematches.
   *
   * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
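/* Example usage from user space (a sketch, not part of this file;
 * exact iproute2 syntax varies by version, and prog.o / "classifier"
 * are placeholder object and section names):
 *
 *   tc qdisc add dev eth0 clsact
 *   # eBPF in direct-action mode (sets exts_integrated below):
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *   # classic BPF bytecode, always matching (ret -1), with a fixed classid:
 *   tc filter add dev eth0 ingress bpf bytecode '1,6 0 0 4294967295,' flowid 1:1
 */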
  
  #include <linux/module.h>
  #include <linux/types.h>
  #include <linux/skbuff.h>
  #include <linux/filter.h>
  #include <linux/bpf.h>
  #include <net/rtnetlink.h>
  #include <net/pkt_cls.h>
  #include <net/sock.h>
  
  MODULE_LICENSE("GPL");
  MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
  MODULE_DESCRIPTION("TC BPF based classifier");
  #define CLS_BPF_NAME_LEN	256
  #define CLS_BPF_SUPPORTED_GEN_FLAGS		\
  	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

  struct cls_bpf_head {
  	struct list_head plist;
  	u32 hgen;
  	struct rcu_head rcu;
  };
  
  struct cls_bpf_prog {
  	struct bpf_prog *filter;
  	struct list_head link;
  	struct tcf_result res;
  	bool exts_integrated;
  	bool offloaded;
  	u32 gen_flags;
  	struct tcf_exts exts;
  	u32 handle;
  	u16 bpf_num_ops;
  	struct sock_filter *bpf_ops;
  	const char *bpf_name;
  	struct tcf_proto *tp;
  	union {
  		struct work_struct work;
  		struct rcu_head rcu;
  	};
  };
  
  static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
  	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
  	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
  	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
  	[TCA_BPF_FD]		= { .type = NLA_U32 },
  	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
  				    .len = CLS_BPF_NAME_LEN },
  	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
  	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
  				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
  };
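/* TCA_BPF_OPS/TCA_BPF_OPS_LEN carry classic BPF bytecode, while
 * TCA_BPF_FD (optionally with TCA_BPF_NAME) refers to an eBPF program
 * already loaded via bpf(2); cls_bpf_set_parms() below rejects requests
 * that mix the two forms.
 */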
  static int cls_bpf_exec_opcode(int code)
  {
  	switch (code) {
  	case TC_ACT_OK:
  	case TC_ACT_SHOT:
  	case TC_ACT_STOLEN:
  	case TC_ACT_TRAP:
  	case TC_ACT_REDIRECT:
  	case TC_ACT_UNSPEC:
  		return code;
  	default:
  		return TC_ACT_UNSPEC;
  	}
  }
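/* Run the attached programs over the skb in list order.  A classic
 * cls_bpf program signals its verdict through the return value: 0 means
 * no match, -1 selects the classid configured via TCA_BPF_CLASSID, and
 * any other value is taken as the classid directly.  In direct-action
 * mode (exts_integrated) the return value is a TC_ACT_* opcode instead,
 * sanitized by cls_bpf_exec_opcode() above.
 */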
  static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
  			    struct tcf_result *res)
  {
  	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
  	bool at_ingress = skb_at_tc_ingress(skb);
  	struct cls_bpf_prog *prog;
  	int ret = -1;

  	/* Needed here for accessing maps. */
  	rcu_read_lock();
  	list_for_each_entry_rcu(prog, &head->plist, link) {
  		int filter_res;
  		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
  		if (tc_skip_sw(prog->gen_flags)) {
  			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
  		} else if (at_ingress) {
  			/* It is safe to push/pull even if skb_shared() */
  			__skb_push(skb, skb->mac_len);
  			bpf_compute_data_end(skb);
  			filter_res = BPF_PROG_RUN(prog->filter, skb);
  			__skb_pull(skb, skb->mac_len);
  		} else {
  			bpf_compute_data_end(skb);
  			filter_res = BPF_PROG_RUN(prog->filter, skb);
  		}

  		if (prog->exts_integrated) {
  			res->class   = 0;
  			res->classid = TC_H_MAJ(prog->res.classid) |
  				       qdisc_skb_cb(skb)->tc_classid;
  
  			ret = cls_bpf_exec_opcode(filter_res);
  			if (ret == TC_ACT_UNSPEC)
  				continue;
  			break;
  		}
  		if (filter_res == 0)
  			continue;
  		if (filter_res != -1) {
  			res->class   = 0;
  			res->classid = filter_res;
  		} else {
  			*res = prog->res;
  		}
  
  		ret = tcf_exts_exec(skb, &prog->exts, res);
  		if (ret < 0)
  			continue;
  		break;
  	}
  	rcu_read_unlock();

  	return ret;
  }
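/* For illustration, a minimal direct-action program on the user-space
 * side could look like the sketch below (cls_main and the section name
 * are placeholders, not mandated by this file):
 *
 *	SEC("classifier")
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		// minor handle; TC_H_MAJ() of TCA_BPF_CLASSID is OR'ed in
 *		skb->tc_classid = 1;
 *		return TC_ACT_OK;
 *	}
 */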
  static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
  {
  	return !prog->bpf_ops;
  }
  static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
  			       enum tc_clsbpf_command cmd)
  {
  	struct net_device *dev = tp->q->dev_queue->dev;
  	struct tc_cls_bpf_offload cls_bpf = {};
  	int err;

  	tc_cls_common_offload_init(&cls_bpf.common, tp);
  	cls_bpf.command = cmd;
  	cls_bpf.exts = &prog->exts;
  	cls_bpf.prog = prog->filter;
  	cls_bpf.name = prog->bpf_name;
  	cls_bpf.exts_integrated = prog->exts_integrated;
  	cls_bpf.gen_flags = prog->gen_flags;

  	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
  	if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
  		prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
  
  	return err;
  }
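/* Map a filter change onto an offload command.  A skip_sw filter that
 * the device cannot offload is an error; anything else falls back to
 * the software path silently.
 */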
  static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
  			   struct cls_bpf_prog *oldprog)
  {
  	struct net_device *dev = tp->q->dev_queue->dev;
  	struct cls_bpf_prog *obj = prog;
  	enum tc_clsbpf_command cmd;
  	bool skip_sw;
  	int ret;
  
  	skip_sw = tc_skip_sw(prog->gen_flags) ||
  		(oldprog && tc_skip_sw(oldprog->gen_flags));
  
  	if (oldprog && oldprog->offloaded) {
  		if (tc_should_offload(dev, prog->gen_flags)) {
  			cmd = TC_CLSBPF_REPLACE;
  		} else if (!tc_skip_sw(prog->gen_flags)) {
  			obj = oldprog;
  			cmd = TC_CLSBPF_DESTROY;
  		} else {
  			return -EINVAL;
  		}
  	} else {
  		if (!tc_should_offload(dev, prog->gen_flags))
  			return skip_sw ? -EINVAL : 0;
  		cmd = TC_CLSBPF_ADD;
  	}
  	ret = cls_bpf_offload_cmd(tp, obj, cmd);
  	if (ret)
  		return skip_sw ? ret : 0;
  
  	obj->offloaded = true;
  	if (oldprog)
  		oldprog->offloaded = false;
  
  	return 0;
  }
  
  static void cls_bpf_stop_offload(struct tcf_proto *tp,
  				 struct cls_bpf_prog *prog)
  {
  	int err;
  
  	if (!prog->offloaded)
  		return;
  
  	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
  	if (err) {
  		pr_err("Stopping hardware offload failed: %d
  ", err);
  		return;
  	}
  
  	prog->offloaded = false;
  }
  static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
  					 struct cls_bpf_prog *prog)
  {
  	if (!prog->offloaded)
  		return;
  
  	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
  }
  static int cls_bpf_init(struct tcf_proto *tp)
  {
  	struct cls_bpf_head *head;
  
  	head = kzalloc(sizeof(*head), GFP_KERNEL);
  	if (head == NULL)
  		return -ENOBUFS;
  	INIT_LIST_HEAD_RCU(&head->plist);
  	rcu_assign_pointer(tp->root, head);
  
  	return 0;
  }
  static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
  {
  	if (cls_bpf_is_ebpf(prog))
  		bpf_prog_put(prog->filter);
  	else
  		bpf_prog_destroy(prog->filter);

  	kfree(prog->bpf_name);
  	kfree(prog->bpf_ops);
  }
  
  static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
  {
  	tcf_exts_destroy(&prog->exts);
  	tcf_exts_put_net(&prog->exts);
  
  	cls_bpf_free_parms(prog);
  	kfree(prog);
  }
  static void cls_bpf_delete_prog_work(struct work_struct *work)
  {
  	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
  
  	rtnl_lock();
  	__cls_bpf_delete_prog(prog);
  	rtnl_unlock();
  }
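/* Freeing happens in two deferred stages: the RCU callback below hands
 * the prog to a workqueue (above), which may then take the RTNL lock.
 */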
  static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
  {
  	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
  
  	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
  	tcf_queue_work(&prog->work);
  }
  static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
  {
  	cls_bpf_stop_offload(tp, prog);
  	list_del_rcu(&prog->link);
  	tcf_unbind_filter(tp, &prog->res);
  	if (tcf_exts_get_net(&prog->exts))
  		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
  	else
  		__cls_bpf_delete_prog(prog);
  }

  static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	__cls_bpf_delete(tp, arg);
  	*last = list_empty(&head->plist);
  	return 0;
  }
  static void cls_bpf_destroy(struct tcf_proto *tp)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *prog, *tmp;
  	list_for_each_entry_safe(prog, tmp, &head->plist, link)
  		__cls_bpf_delete(tp, prog);

  	kfree_rcu(head, rcu);
  }
  static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *prog;

  	list_for_each_entry(prog, &head->plist, link) {
  		if (prog->handle == handle)
  			return prog;
  	}
  	return NULL;
  }
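/* Build a classifier from raw classic BPF bytecode; the sock_filter
 * array is kept in prog->bpf_ops so it can be dumped back to user
 * space later.
 */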
  static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
  {
  	struct sock_filter *bpf_ops;
  	struct sock_fprog_kern fprog_tmp;
  	struct bpf_prog *fp;
  	u16 bpf_size, bpf_num_ops;
  	int ret;
  	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
  	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
  		return -EINVAL;

  	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
  	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
  		return -EINVAL;

  	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
  	if (bpf_ops == NULL)
  		return -ENOMEM;
  
  	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
  	fprog_tmp.len = bpf_num_ops;
  	fprog_tmp.filter = bpf_ops;

  	ret = bpf_prog_create(&fp, &fprog_tmp);
  	if (ret < 0) {
  		kfree(bpf_ops);
  		return ret;
  	}

  	prog->bpf_ops = bpf_ops;
  	prog->bpf_num_ops = bpf_num_ops;
  	prog->bpf_name = NULL;
  	prog->filter = fp;

  	return 0;
  }
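/* Take a reference on an eBPF program that user space already loaded
 * via bpf(2); the verifier ran at load time, so only the program type
 * is checked here.
 */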
  static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
  				 const struct tcf_proto *tp)
  {
  	struct bpf_prog *fp;
  	char *name = NULL;
  	u32 bpf_fd;
  
  	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
  	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
  	if (IS_ERR(fp))
  		return PTR_ERR(fp);
  	if (tb[TCA_BPF_NAME]) {
  		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
  		if (!name) {
  			bpf_prog_put(fp);
  			return -ENOMEM;
  		}
  	}
  
  	prog->bpf_ops = NULL;
  	prog->bpf_name = name;
  	prog->filter = fp;

  	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
  		netif_keep_dst(qdisc_dev(tp->q));
  	return 0;
  }
  static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
  			     struct cls_bpf_prog *prog, unsigned long base,
  			     struct nlattr **tb, struct nlattr *est, bool ovr)
  {
  	bool is_bpf, is_ebpf, have_exts = false;
  	u32 gen_flags = 0;
  	int ret;
  
  	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
  	is_ebpf = tb[TCA_BPF_FD];
  	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
  		return -EINVAL;
  	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
  	if (ret < 0)
  		return ret;
  	if (tb[TCA_BPF_FLAGS]) {
  		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
  		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
  			return -EINVAL;
  
  		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
  	}
  	if (tb[TCA_BPF_FLAGS_GEN]) {
  		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
  		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
  		    !tc_flags_valid(gen_flags))
  			return -EINVAL;
  	}

  	prog->exts_integrated = have_exts;
  	prog->gen_flags = gen_flags;

  	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
  		       cls_bpf_prog_from_efd(tb, prog, tp);
  	if (ret < 0)
  		return ret;

  	if (tb[TCA_BPF_CLASSID]) {
  		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
  		tcf_bind_filter(tp, &prog->res, base);
  	}

  	return 0;
  }
  
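/* Pick an unused handle in [1, 0x7FFFFFFE], probing at most 2^31
 * candidates; returns 0 once the space is exhausted.
 */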
  static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
  				   struct cls_bpf_head *head)
  {
  	unsigned int i = 0x80000000;
  	u32 handle;
  
  	do {
  		if (++head->hgen == 0x7FFFFFFF)
  			head->hgen = 1;
  	} while (--i > 0 && cls_bpf_get(tp, head->hgen));
  
  	if (unlikely(i == 0)) {
  		pr_err("Insufficient number of handles
  ");
  		handle = 0;
  	} else {
  		handle = head->hgen;
  	}

  	return handle;
  }
  
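/* Netlink entry point for adding or replacing a filter.  On replace,
 * the new prog is swapped into the list via RCU and the old one is only
 * freed once readers are done with it.
 */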
  static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
  			  struct tcf_proto *tp, unsigned long base,
  			  u32 handle, struct nlattr **tca,
  			  void **arg, bool ovr)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *oldprog = *arg;
  	struct nlattr *tb[TCA_BPF_MAX + 1];
  	struct cls_bpf_prog *prog;
  	int ret;
  
  	if (tca[TCA_OPTIONS] == NULL)
  		return -EINVAL;
  	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
  			       NULL);
  	if (ret < 0)
  		return ret;
  	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
  	if (!prog)
  		return -ENOBUFS;
  	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
  	if (ret < 0)
  		goto errout;
  
  	if (oldprog) {
  		if (handle && oldprog->handle != handle) {
  			ret = -EINVAL;
  			goto errout;
  		}
  	}
  	if (handle == 0)
  		prog->handle = cls_bpf_grab_new_handle(tp, head);
  	else
  		prog->handle = handle;
  	if (prog->handle == 0) {
  		ret = -EINVAL;
  		goto errout;
  	}
  	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
  	if (ret < 0)
  		goto errout;
  	ret = cls_bpf_offload(tp, prog, oldprog);
  	if (ret)
  		goto errout_parms;

  	if (!tc_in_hw(prog->gen_flags))
  		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
  	if (oldprog) {
  		list_replace_rcu(&oldprog->link, &prog->link);
  		tcf_unbind_filter(tp, &oldprog->res);
  		tcf_exts_get_net(&oldprog->exts);
  		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
  	} else {
  		list_add_rcu(&prog->link, &head->plist);
  	}

  	*arg = prog;
  	return 0;

  errout_parms:
  	cls_bpf_free_parms(prog);
  errout:
  	tcf_exts_destroy(&prog->exts);
  	kfree(prog);
  	return ret;
  }
  static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
  				 struct sk_buff *skb)
  {
  	struct nlattr *nla;
  
  	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
  		return -EMSGSIZE;
  
  	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
  			  sizeof(struct sock_filter));
  	if (nla == NULL)
  		return -EMSGSIZE;
  
  	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
  
  	return 0;
  }
  
  static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
  				  struct sk_buff *skb)
  {
  	struct nlattr *nla;
  	if (prog->bpf_name &&
  	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
  		return -EMSGSIZE;
  	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
  		return -EMSGSIZE;
  	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
  	if (nla == NULL)
  		return -EMSGSIZE;
  	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

  	return 0;
  }
  static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
  			struct sk_buff *skb, struct tcmsg *tm)
  {
  	struct cls_bpf_prog *prog = fh;
  	struct nlattr *nest;
  	u32 bpf_flags = 0;
  	int ret;
  
  	if (prog == NULL)
  		return skb->len;
  
  	tm->tcm_handle = prog->handle;
  	cls_bpf_offload_update_stats(tp, prog);
  	nest = nla_nest_start(skb, TCA_OPTIONS);
  	if (nest == NULL)
  		goto nla_put_failure;
  	if (prog->res.classid &&
  	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
  		goto nla_put_failure;

  	if (cls_bpf_is_ebpf(prog))
  		ret = cls_bpf_dump_ebpf_info(prog, skb);
  	else
  		ret = cls_bpf_dump_bpf_info(prog, skb);
  	if (ret)
  		goto nla_put_failure;
  	if (tcf_exts_dump(skb, &prog->exts) < 0)
  		goto nla_put_failure;
  	if (prog->exts_integrated)
  		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
  	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
  		goto nla_put_failure;
  	if (prog->gen_flags &&
  	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
  		goto nla_put_failure;

  	nla_nest_end(skb, nest);
  	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
  		goto nla_put_failure;
  
  	return skb->len;
  
  nla_put_failure:
  	nla_nest_cancel(skb, nest);
  	return -1;
  }
  static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
  {
  	struct cls_bpf_prog *prog = fh;
  
  	if (prog && prog->res.classid == classid)
  		prog->res.class = cl;
  }
  static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
  {
  	struct cls_bpf_head *head = rtnl_dereference(tp->root);
  	struct cls_bpf_prog *prog;
  	list_for_each_entry(prog, &head->plist, link) {
  		if (arg->count < arg->skip)
  			goto skip;
  		if (arg->fn(tp, prog, arg) < 0) {
  			arg->stop = 1;
  			break;
  		}
  skip:
  		arg->count++;
  	}
  }
  
  static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
  	.kind		=	"bpf",
  	.owner		=	THIS_MODULE,
  	.classify	=	cls_bpf_classify,
  	.init		=	cls_bpf_init,
  	.destroy	=	cls_bpf_destroy,
  	.get		=	cls_bpf_get,
  	.change		=	cls_bpf_change,
  	.delete		=	cls_bpf_delete,
  	.walk		=	cls_bpf_walk,
  	.dump		=	cls_bpf_dump,
  	.bind_class	=	cls_bpf_bind_class,
  };
  
  static int __init cls_bpf_init_mod(void)
  {
  	return register_tcf_proto_ops(&cls_bpf_ops);
  }
  
  static void __exit cls_bpf_exit_mod(void)
  {
  	unregister_tcf_proto_ops(&cls_bpf_ops);
  }
  
  module_init(cls_bpf_init_mod);
  module_exit(cls_bpf_exit_mod);