include/linux/bpf.h

  /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of version 2 of the GNU General Public
   * License as published by the Free Software Foundation.
   */
  #ifndef _LINUX_BPF_H
  #define _LINUX_BPF_H 1
  
  #include <uapi/linux/bpf.h>
  #include <linux/workqueue.h>
  #include <linux/file.h>
  #include <linux/percpu.h>
  #include <linux/err.h>

  struct perf_event;
  struct bpf_map;
  
/* map is generic key/value storage optionally accessible by eBPF programs */
  struct bpf_map_ops {
  	/* funcs callable from userspace (via syscall) */
  	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
  	void (*map_release)(struct bpf_map *map, struct file *map_file);
  	void (*map_free)(struct bpf_map *map);
  	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
  
  	/* funcs callable from userspace and from eBPF programs */
  	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
  	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
  	int (*map_delete_elem)(struct bpf_map *map, void *key);
  
  	/* funcs called by prog_array and perf_event_array map */
  	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
  				int fd);
  	void (*map_fd_put_ptr)(void *ptr);
  };
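
/* Example (editor's illustrative sketch, not part of the original header):
 * a map implementation provides a static ops table along these lines,
 * roughly as kernel/bpf/arraymap.c does for BPF_MAP_TYPE_ARRAY (the
 * array_map_*() helpers below live in that file, not in this header):
 *
 *	static const struct bpf_map_ops array_ops = {
 *		.map_alloc = array_map_alloc,
 *		.map_free = array_map_free,
 *		.map_get_next_key = array_map_get_next_key,
 *		.map_lookup_elem = array_map_lookup_elem,
 *		.map_update_elem = array_map_update_elem,
 *		.map_delete_elem = array_map_delete_elem,
 *	};
 */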
  
  struct bpf_map {
  	atomic_t refcnt;
  	enum bpf_map_type map_type;
  	u32 key_size;
  	u32 value_size;
  	u32 max_entries;
  	u32 map_flags;
  	u32 pages;
  	struct user_struct *user;
  	const struct bpf_map_ops *ops;
  	struct work_struct work;
  	atomic_t usercnt;
  };
  
  struct bpf_map_type_list {
  	struct list_head list_node;
  	const struct bpf_map_ops *ops;
  	enum bpf_map_type type;
  };
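
/* Example (editor's sketch): a map type registers itself by filling in a
 * bpf_map_type_list and calling bpf_register_map_type() (declared further
 * below), roughly the way kernel/bpf/arraymap.c registers the array map:
 *
 *	static struct bpf_map_type_list array_type __read_mostly = {
 *		.ops	= &array_ops,
 *		.type	= BPF_MAP_TYPE_ARRAY,
 *	};
 *
 *	static int __init register_array_map(void)
 *	{
 *		bpf_register_map_type(&array_type);
 *		return 0;
 *	}
 *	late_initcall(register_array_map);
 */
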
  /* function argument constraints */
  enum bpf_arg_type {
  	ARG_DONTCARE = 0,	/* unused argument in helper function */
  
	/* the following constraints are used to prototype
  	 * bpf_map_lookup/update/delete_elem() functions
  	 */
  	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
  	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
  	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
  
	/* the following constraints are used to prototype bpf_memcmp() and other
  	 * functions that access data on eBPF program stack
  	 */
  	ARG_PTR_TO_STACK,	/* any pointer to eBPF program stack */
  	ARG_PTR_TO_RAW_STACK,	/* any pointer to eBPF program stack, area does not
  				 * need to be initialized, helper function must fill
  				 * all bytes or clear them in error case.
  				 */
  	ARG_CONST_STACK_SIZE,	/* number of bytes accessed from stack */
  	ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */

  	ARG_PTR_TO_CTX,		/* pointer to context */
  	ARG_ANYTHING,		/* any (initialized) argument is ok */
  };
  
  /* type of values returned from helper functions */
  enum bpf_return_type {
  	RET_INTEGER,			/* function returns integer */
  	RET_VOID,			/* function doesn't return anything */
  	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
  };
/* eBPF function prototype used by the verifier to allow BPF_CALLs from eBPF programs
   * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
   * instructions after verifying
   */
  struct bpf_func_proto {
  	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  	bool gpl_only;
  	bool pkt_access;
  	enum bpf_return_type ret_type;
  	enum bpf_arg_type arg1_type;
  	enum bpf_arg_type arg2_type;
  	enum bpf_arg_type arg3_type;
  	enum bpf_arg_type arg4_type;
  	enum bpf_arg_type arg5_type;
  };
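
/* Example (editor's sketch): a helper's bpf_func_proto ties the arg/ret
 * enums above together; bpf_map_lookup_elem_proto (declared at the end of
 * this header) is defined in kernel/bpf/helpers.c roughly as:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */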
  
/* bpf_context is an intentionally undefined structure. A pointer to bpf_context is
   * the first argument to eBPF programs.
   * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
   */
  struct bpf_context;
  
  enum bpf_access_type {
  	BPF_READ = 1,
  	BPF_WRITE = 2
  };
  /* types of values stored in eBPF registers */
  enum bpf_reg_type {
  	NOT_INIT = 0,		 /* nothing was written into register */
  	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
  	PTR_TO_CTX,		 /* reg points to bpf_context */
  	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
  	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
  	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
  	FRAME_PTR,		 /* reg == frame_pointer */
  	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
  	CONST_IMM,		 /* constant integer value */
  
  	/* PTR_TO_PACKET represents:
  	 * skb->data
  	 * skb->data + imm
  	 * skb->data + (u16) var
  	 * skb->data + (u16) var + imm
  	 * if (range > 0) then [ptr, ptr + range - off) is safe to access
  	 * if (id > 0) means that some 'var' was added
	 * if (off > 0) means that 'imm' was added
  	 */
  	PTR_TO_PACKET,
  	PTR_TO_PACKET_END,	 /* skb->data + headlen */
  
  	/* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
  	 * elem value.  We only allow this if we can statically verify that
	 * accesses from this register are going to fall within the size of the
  	 * map element.
  	 */
  	PTR_TO_MAP_VALUE_ADJ,
  };
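
/* Example (editor's sketch): PTR_TO_PACKET/PTR_TO_PACKET_END model the
 * bounds check a direct packet access program must perform before reading
 * packet bytes, e.g. in a tc classifier:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return 0;
 *	return eth->h_proto;
 *
 * After the branch the verifier knows 'range' covers sizeof(*eth) bytes,
 * so the eth->h_proto load is accepted.
 */
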
  struct bpf_prog;
  struct bpf_verifier_ops {
  	/* return eBPF function prototype for verification */
  	const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
  
  	/* return true if 'size' wide access at offset 'off' within bpf_context
  	 * with 'type' (read or write) is allowed
  	 */
  	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
  				enum bpf_reg_type *reg_type);
  	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
  			    const struct bpf_prog *prog);
  	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
  				  int src_reg, int ctx_off,
  				  struct bpf_insn *insn, struct bpf_prog *prog);
  };
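
/* Example (editor's sketch, hypothetical names): an is_valid_access()
 * callback typically rejects out-of-range context offsets and may restrict
 * writes, e.g.:
 *
 *	static bool foo_is_valid_access(int off, int size,
 *					enum bpf_access_type type,
 *					enum bpf_reg_type *reg_type)
 *	{
 *		if (type == BPF_WRITE)
 *			return false;
 *		if (off < 0 || off + size > sizeof(struct __sk_buff))
 *			return false;
 *		return true;
 *	}
 */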
  
  struct bpf_prog_type_list {
  	struct list_head list_node;
  	const struct bpf_verifier_ops *ops;
  	enum bpf_prog_type type;
  };
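
/* Example (editor's sketch): program types are registered the same way as
 * map types, roughly as net/core/filter.c does for socket filters:
 *
 *	static struct bpf_prog_type_list sk_filter_type __read_mostly = {
 *		.ops	= &sk_filter_ops,
 *		.type	= BPF_PROG_TYPE_SOCKET_FILTER,
 *	};
 *
 *	static int __init register_sk_filter_ops(void)
 *	{
 *		bpf_register_prog_type(&sk_filter_type);
 *		return 0;
 *	}
 *	late_initcall(register_sk_filter_ops);
 */
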
  struct bpf_prog_aux {
  	atomic_t refcnt;
  	u32 used_map_cnt;
  	u32 max_ctx_offset;
  	const struct bpf_verifier_ops *ops;
  	struct bpf_map **used_maps;
  	struct bpf_prog *prog;
  	struct user_struct *user;
  	union {
  		struct work_struct work;
  		struct rcu_head	rcu;
  	};
  };
  struct bpf_array {
  	struct bpf_map map;
  	u32 elem_size;
  	/* 'ownership' of prog_array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is stored
  	 * in the map to make sure that all callers and callees have the same
  	 * prog_type and JITed flag
  	 */
  	enum bpf_prog_type owner_prog_type;
  	bool owner_jited;
  	union {
  		char value[0] __aligned(8);
  		void *ptrs[0] __aligned(8);
  		void __percpu *pptrs[0] __aligned(8);
  	};
  };

  #define MAX_TAIL_CALL_CNT 32
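
/* Example (editor's sketch): from the eBPF program side, a tail call into a
 * BPF_MAP_TYPE_PROG_ARRAY slot looks roughly like
 *
 *	bpf_tail_call(ctx, &jmp_table, index);
 *
 * where 'jmp_table' is a placeholder prog_array map. If the slot is empty,
 * or more than MAX_TAIL_CALL_CNT nested tail calls have already been made,
 * the call is a no-op and execution falls through to the next instruction.
 * The ownership rules in struct bpf_array above guarantee that caller and
 * callee share the same prog_type and JITed flag.
 */
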
  struct bpf_event_entry {
  	struct perf_event *event;
  	struct file *perf_file;
  	struct file *map_file;
  	struct rcu_head rcu;
  };
  u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
  u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

  bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);

  const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
  
  typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
  					unsigned long off, unsigned long len);
  
  u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
  		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

  #ifdef CONFIG_BPF_SYSCALL
  DECLARE_PER_CPU(int, bpf_prog_active);
  void bpf_register_prog_type(struct bpf_prog_type_list *tl);
  void bpf_register_map_type(struct bpf_map_type_list *tl);

  struct bpf_prog *bpf_prog_get(u32 ufd);
  struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
  struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i);
  struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
  void bpf_prog_put(struct bpf_prog *prog);
  struct bpf_map *bpf_map_get_with_uref(u32 ufd);
  struct bpf_map *__bpf_map_get(struct fd f);
  struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
  void bpf_map_put_with_uref(struct bpf_map *map);
  void bpf_map_put(struct bpf_map *map);
  int bpf_map_precharge_memlock(u32 pages);

  extern int sysctl_unprivileged_bpf_disabled;
  int bpf_map_new_fd(struct bpf_map *map);
  int bpf_prog_new_fd(struct bpf_prog *prog);
  
  int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
  int bpf_obj_get_user(const char __user *pathname);
  int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
  int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
  int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
  			   u64 flags);
  int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
  			    u64 flags);

  int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

  int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
  				 void *key, void *value, u64 map_flags);
  void bpf_fd_array_map_clear(struct bpf_map *map);
  /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
   * forced to use 'long' read/writes to try to atomically copy long counters.
   * Best-effort only.  No barriers here, since it _will_ race with concurrent
   * updates from BPF programs. Called from bpf syscall and mostly used with
   * size 8 or 16 bytes, so ask compiler to inline it.
   */
  static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
  {
  	const long *lsrc = src;
  	long *ldst = dst;
  
  	size /= sizeof(long);
  	while (size--)
  		*ldst++ = *lsrc++;
  }
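
/* Example (editor's sketch): the per-cpu copy helpers declared above use it
 * when flattening per-cpu values for the syscall, roughly the way
 * kernel/bpf/hashtab.c copies a per-cpu hash element:
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */
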
  /* verify correctness of eBPF program */
  int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
  #else
  static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
  {
  }
  
  static inline struct bpf_prog *bpf_prog_get(u32 ufd)
  {
  	return ERR_PTR(-EOPNOTSUPP);
  }
  static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
  						 enum bpf_prog_type type)
  {
  	return ERR_PTR(-EOPNOTSUPP);
  }
  static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
  {
  	return ERR_PTR(-EOPNOTSUPP);
  }

  static inline void bpf_prog_put(struct bpf_prog *prog)
  {
  }
  static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
  {
  	return ERR_PTR(-EOPNOTSUPP);
  }
  #endif /* CONFIG_BPF_SYSCALL */

  /* verifier prototypes for helper functions called from eBPF programs */
  extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
  extern const struct bpf_func_proto bpf_map_update_elem_proto;
  extern const struct bpf_func_proto bpf_map_delete_elem_proto;

  extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
  extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
  extern const struct bpf_func_proto bpf_tail_call_proto;
  extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
  extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
  extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
  extern const struct bpf_func_proto bpf_get_current_comm_proto;
  extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
  extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
  extern const struct bpf_func_proto bpf_get_stackid_proto;

  /* Shared helpers among cBPF and eBPF. */
  void bpf_user_rnd_init_once(void);
  u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  #endif /* _LINUX_BPF_H */