kernel/bpf/syscall.c

  /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of version 2 of the GNU General Public
   * License as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful, but
   * WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   * General Public License for more details.
   */
  #include <linux/bpf.h>
  #include <linux/bpf_trace.h>
  #include <linux/syscalls.h>
  #include <linux/slab.h>
  #include <linux/sched/signal.h>
  #include <linux/vmalloc.h>
  #include <linux/mmzone.h>
  #include <linux/anon_inodes.h>
  #include <linux/file.h>
  #include <linux/license.h>
  #include <linux/filter.h>
  #include <linux/version.h>
  #include <linux/kernel.h>
  #include <linux/idr.h>

  #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
  			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
  			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
  			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
  #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
  #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
  DEFINE_PER_CPU(int, bpf_prog_active);
  static DEFINE_IDR(prog_idr);
  static DEFINE_SPINLOCK(prog_idr_lock);
  static DEFINE_IDR(map_idr);
  static DEFINE_SPINLOCK(map_idr_lock);

  int sysctl_unprivileged_bpf_disabled __read_mostly;
  static const struct bpf_map_ops * const bpf_map_types[] = {
  #define BPF_PROG_TYPE(_id, _ops)
  #define BPF_MAP_TYPE(_id, _ops) \
  	[_id] = &_ops,
  #include <linux/bpf_types.h>
  #undef BPF_PROG_TYPE
  #undef BPF_MAP_TYPE
  };
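
/* For reference: each BPF_MAP_TYPE() entry in <linux/bpf_types.h> expands
 * to one slot of the table above. A sketch, assuming the usual entry for
 * array maps:
 *
 *	BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 *
 * becomes [BPF_MAP_TYPE_ARRAY] = &array_map_ops, while BPF_PROG_TYPE()
 * entries expand to nothing here.
 */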

  /*
   * If we're handed a bigger struct than we know of, ensure all the unknown bits
   * are 0 - i.e. new user-space does not rely on any kernel feature extensions
   * we don't know about yet.
   *
   * There is a ToCToU between this function call and the following
   * copy_from_user() call. However, this is not a concern since this function is
   * meant to be a future-proofing of bits.
   */
  static int check_uarg_tail_zero(void __user *uaddr,
  				size_t expected_size,
  				size_t actual_size)
  {
  	unsigned char __user *addr;
  	unsigned char __user *end;
  	unsigned char val;
  	int err;
  	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
  		return -E2BIG;
  
  	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
  		return -EFAULT;
  	if (actual_size <= expected_size)
  		return 0;
  
  	addr = uaddr + expected_size;
  	end  = uaddr + actual_size;
  
  	for (; addr < end; addr++) {
  		err = get_user(val, addr);
  		if (err)
  			return err;
  		if (val)
  			return -E2BIG;
  	}
  
  	return 0;
  }
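
/* A sketch of the intended caller pattern (names are illustrative): check
 * the tail of the user struct first, then copy only the part this kernel
 * knows about:
 *
 *	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
 *	if (err)
 *		return err;
 *	info_len = min_t(u32, sizeof(info), info_len);
 *	memset(&info, 0, sizeof(info));
 *	if (copy_from_user(&info, uinfo, info_len))
 *		return -EFAULT;
 */
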
  static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
  {
  	struct bpf_map *map;
  	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
  	    !bpf_map_types[attr->map_type])
  		return ERR_PTR(-EINVAL);

  	map = bpf_map_types[attr->map_type]->map_alloc(attr);
  	if (IS_ERR(map))
  		return map;
  	map->ops = bpf_map_types[attr->map_type];
  	map->map_type = attr->map_type;
  	return map;
  }
  void *bpf_map_area_alloc(size_t size, int numa_node)
  {
  	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
  	 * trigger under memory pressure as we really just want to
  	 * fail instead.
  	 */
  	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
  	void *area;
  
  	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
  		area = kmalloc_node(size, GFP_USER | flags, numa_node);
  		if (area != NULL)
  			return area;
  	}
  	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
  					   __builtin_return_address(0));
  }
  
  void bpf_map_area_free(void *area)
  {
  	kvfree(area);
  }
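
/* Map implementations are expected to pair the two helpers above; a sketch
 * of a typical map_alloc() path (illustrative, not code from this file):
 *
 *	array = bpf_map_area_alloc(array_size, numa_node);
 *	if (!array)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	bpf_map_area_free(array);	// later, in the map_free() callback
 */
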
  int bpf_map_precharge_memlock(u32 pages)
  {
  	struct user_struct *user = get_current_user();
  	unsigned long memlock_limit, cur;
  
  	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  	cur = atomic_long_read(&user->locked_vm);
  	free_uid(user);
  	if (cur + pages > memlock_limit)
  		return -EPERM;
  	return 0;
  }
  static int bpf_map_charge_memlock(struct bpf_map *map)
  {
  	struct user_struct *user = get_current_user();
  	unsigned long memlock_limit;
  
  	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  
  	atomic_long_add(map->pages, &user->locked_vm);
  
  	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
  		atomic_long_sub(map->pages, &user->locked_vm);
  		free_uid(user);
  		return -EPERM;
  	}
  	map->user = user;
  	return 0;
  }
  
  static void bpf_map_uncharge_memlock(struct bpf_map *map)
  {
  	struct user_struct *user = map->user;
  
  	atomic_long_sub(map->pages, &user->locked_vm);
  	free_uid(user);
  }
  static int bpf_map_alloc_id(struct bpf_map *map)
  {
  	int id;
  
  	spin_lock_bh(&map_idr_lock);
  	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
  	if (id > 0)
  		map->id = id;
  	spin_unlock_bh(&map_idr_lock);
  
  	if (WARN_ON_ONCE(!id))
  		return -ENOSPC;
  
  	return id > 0 ? 0 : id;
  }
  static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
  {
  	unsigned long flags;
  	if (do_idr_lock)
  		spin_lock_irqsave(&map_idr_lock, flags);
  	else
  		__acquire(&map_idr_lock);
  	idr_remove(&map_idr, map->id);
  
  	if (do_idr_lock)
  		spin_unlock_irqrestore(&map_idr_lock, flags);
  	else
  		__release(&map_idr_lock);
  }
  /* called from workqueue */
  static void bpf_map_free_deferred(struct work_struct *work)
  {
  	struct bpf_map *map = container_of(work, struct bpf_map, work);
  	bpf_map_uncharge_memlock(map);
  	/* implementation dependent freeing */
  	map->ops->map_free(map);
  }
  static void bpf_map_put_uref(struct bpf_map *map)
  {
  	if (atomic_dec_and_test(&map->usercnt)) {
  		if (map->ops->map_release_uref)
  			map->ops->map_release_uref(map);
  	}
  }
  /* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
   */
  static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
  {
  	if (atomic_dec_and_test(&map->refcnt)) {
  		/* bpf_map_free_id() must be called first */
  		bpf_map_free_id(map, do_idr_lock);
  		INIT_WORK(&map->work, bpf_map_free_deferred);
  		schedule_work(&map->work);
  	}
  }
  void bpf_map_put(struct bpf_map *map)
  {
  	__bpf_map_put(map, true);
  }
  void bpf_map_put_with_uref(struct bpf_map *map)
  {
  	bpf_map_put_uref(map);
  	bpf_map_put(map);
  }
  
  static int bpf_map_release(struct inode *inode, struct file *filp)
  {
  	struct bpf_map *map = filp->private_data;
  
  	if (map->ops->map_release)
  		map->ops->map_release(map, filp);
  
  	bpf_map_put_with_uref(map);
  	return 0;
  }
  #ifdef CONFIG_PROC_FS
  static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
  {
  	const struct bpf_map *map = filp->private_data;
  	const struct bpf_array *array;
  	u32 owner_prog_type = 0;
  	u32 owner_jited = 0;
  
  	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
  		array = container_of(map, struct bpf_array, map);
  		owner_prog_type = array->owner_prog_type;
  		owner_jited = array->owner_jited;
  	}
  
	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
  #endif
  static const struct file_operations bpf_map_fops = {
  #ifdef CONFIG_PROC_FS
  	.show_fdinfo	= bpf_map_show_fdinfo,
  #endif
  	.release	= bpf_map_release,
  };
  int bpf_map_new_fd(struct bpf_map *map)
  {
  	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
  				O_RDWR | O_CLOEXEC);
  }
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
  #define CHECK_ATTR(CMD) \
  	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
  		   sizeof(attr->CMD##_LAST_FIELD), 0, \
  		   sizeof(*attr) - \
  		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
  		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
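
/* Example (sketch): with BPF_MAP_CREATE_LAST_FIELD defined as numa_node
 * below, CHECK_ATTR(BPF_MAP_CREATE) is true if any byte of *attr past
 * attr->numa_node is non-zero, i.e. userspace filled in a field this
 * kernel does not know about.
 */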
  #define BPF_MAP_CREATE_LAST_FIELD numa_node
  /* called via syscall */
  static int map_create(union bpf_attr *attr)
  {
  	int numa_node = bpf_map_attr_numa_node(attr);
  	struct bpf_map *map;
  	int err;
  
  	err = CHECK_ATTR(BPF_MAP_CREATE);
  	if (err)
  		return -EINVAL;
  	if (numa_node != NUMA_NO_NODE &&
  	    ((unsigned int)numa_node >= nr_node_ids ||
  	     !node_online(numa_node)))
  		return -EINVAL;
  	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
  	map = find_and_alloc_map(attr);
  	if (IS_ERR(map))
  		return PTR_ERR(map);
  
  	atomic_set(&map->refcnt, 1);
  	atomic_set(&map->usercnt, 1);

  	err = bpf_map_charge_memlock(map);
  	if (err)
  		goto free_map_nouncharge;

  	err = bpf_map_alloc_id(map);
  	if (err)
  		goto free_map;
  	err = bpf_map_new_fd(map);
  	if (err < 0) {
  		/* failed to allocate fd.
  		 * bpf_map_put() is needed because the above
  		 * bpf_map_alloc_id() has published the map
  		 * to the userspace and the userspace may
  		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
  		 */
  		bpf_map_put(map);
  		return err;
  	}

  	trace_bpf_map_create(map, err);
  	return err;
  
  free_map:
  	bpf_map_uncharge_memlock(map);
  free_map_nouncharge:
  	map->ops->map_free(map);
  	return err;
  }
  /* if error is returned, fd is released.
   * On success caller should complete fd access with matching fdput()
   */
  struct bpf_map *__bpf_map_get(struct fd f)
  {
  	if (!f.file)
  		return ERR_PTR(-EBADF);
  	if (f.file->f_op != &bpf_map_fops) {
  		fdput(f);
  		return ERR_PTR(-EINVAL);
  	}
  	return f.file->private_data;
  }
  /* prog's and map's refcnt limit */
  #define BPF_MAX_REFCNT 32768
  
  struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
  {
  	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
  		atomic_dec(&map->refcnt);
  		return ERR_PTR(-EBUSY);
  	}
  	if (uref)
  		atomic_inc(&map->usercnt);
  	return map;
  }
  
  struct bpf_map *bpf_map_get_with_uref(u32 ufd)
  {
  	struct fd f = fdget(ufd);
  	struct bpf_map *map;
  
  	map = __bpf_map_get(f);
  	if (IS_ERR(map))
  		return map;
  	map = bpf_map_inc(map, true);
  	fdput(f);
  
  	return map;
  }
  /* map_idr_lock should have been held */
  static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
  					    bool uref)
  {
  	int refold;
  
  	refold = __atomic_add_unless(&map->refcnt, 1, 0);
  
  	if (refold >= BPF_MAX_REFCNT) {
  		__bpf_map_put(map, false);
  		return ERR_PTR(-EBUSY);
  	}
  
  	if (!refold)
  		return ERR_PTR(-ENOENT);
  
  	if (uref)
  		atomic_inc(&map->usercnt);
  
  	return map;
  }
  int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
  {
  	return -ENOTSUPP;
  }
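
/* The strong definition lives in kernel/bpf/stackmap.c (only built when
 * CONFIG_PERF_EVENTS is enabled); this weak stub lets map_lookup_elem()
 * below return -ENOTSUPP when stack maps are compiled out.
 */
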
  /* last field in 'union bpf_attr' used by this command */
  #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
  
  static int map_lookup_elem(union bpf_attr *attr)
  {
  	void __user *ukey = u64_to_user_ptr(attr->key);
  	void __user *uvalue = u64_to_user_ptr(attr->value);
  	int ufd = attr->map_fd;
  	struct bpf_map *map;
  	void *key, *value, *ptr;
  	u32 value_size;
  	struct fd f;
  	int err;
  
  	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
  		return -EINVAL;
  	f = fdget(ufd);
  	map = __bpf_map_get(f);
  	if (IS_ERR(map))
  		return PTR_ERR(map);
  	key = memdup_user(ukey, map->key_size);
  	if (IS_ERR(key)) {
  		err = PTR_ERR(key);
  		goto err_put;
  	}

  	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
  	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
  		value_size = round_up(map->value_size, 8) * num_possible_cpus();
  	else if (IS_FD_MAP(map))
  		value_size = sizeof(u32);
  	else
  		value_size = map->value_size;
  	err = -ENOMEM;
  	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
  	if (!value)
  		goto free_key;
  	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
  		err = bpf_percpu_hash_copy(map, key, value);
  	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
  		err = bpf_percpu_array_copy(map, key, value);
  	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
  		err = bpf_stackmap_copy(map, key, value);
  	} else if (IS_FD_ARRAY(map)) {
  		err = bpf_fd_array_map_lookup_elem(map, key, value);
  	} else if (IS_FD_HASH(map)) {
  		err = bpf_fd_htab_map_lookup_elem(map, key, value);
  	} else {
  		rcu_read_lock();
  		ptr = map->ops->map_lookup_elem(map, key);
  		if (ptr)
  			memcpy(value, ptr, value_size);
  		rcu_read_unlock();
  		err = ptr ? 0 : -ENOENT;
  	}

  	if (err)
  		goto free_value;
  
  	err = -EFAULT;
  	if (copy_to_user(uvalue, value, value_size) != 0)
  		goto free_value;

  	trace_bpf_map_lookup_elem(map, ufd, key, value);
  	err = 0;
  free_value:
  	kfree(value);
  free_key:
  	kfree(key);
  err_put:
  	fdput(f);
  	return err;
  }
  static void maybe_wait_bpf_programs(struct bpf_map *map)
  {
  	/* Wait for any running BPF programs to complete so that
  	 * userspace, when we return to it, knows that all programs
  	 * that could be running use the new map value.
  	 */
  	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
  	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
  		synchronize_rcu();
  }
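
/* (The wait matters for the map-in-map types because programs dereference
 * the inner map pointer under rcu_read_lock(); once synchronize_rcu()
 * returns, no program can still be using a just-replaced inner map.)
 */
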
  #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
  
  static int map_update_elem(union bpf_attr *attr)
  {
  	void __user *ukey = u64_to_user_ptr(attr->key);
  	void __user *uvalue = u64_to_user_ptr(attr->value);
  	int ufd = attr->map_fd;
  	struct bpf_map *map;
  	void *key, *value;
  	u32 value_size;
  	struct fd f;
  	int err;
  
  	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
  		return -EINVAL;
  	f = fdget(ufd);
  	map = __bpf_map_get(f);
  	if (IS_ERR(map))
  		return PTR_ERR(map);
  	key = memdup_user(ukey, map->key_size);
  	if (IS_ERR(key)) {
  		err = PTR_ERR(key);
  		goto err_put;
  	}

  	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
  	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
  		value_size = round_up(map->value_size, 8) * num_possible_cpus();
  	else
  		value_size = map->value_size;
  	err = -ENOMEM;
  	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
  	if (!value)
  		goto free_key;
  
  	err = -EFAULT;
  	if (copy_from_user(value, uvalue, value_size) != 0)
  		goto free_value;
  	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
  	 */
  	preempt_disable();
  	__this_cpu_inc(bpf_prog_active);
  	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
  	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
  		err = bpf_percpu_hash_update(map, key, value, attr->flags);
  	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
  		err = bpf_percpu_array_update(map, key, value, attr->flags);
  	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
  		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
  		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
  		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
  		rcu_read_lock();
  		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
  						   attr->flags);
  		rcu_read_unlock();
  	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
  		rcu_read_lock();
  		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
  						  attr->flags);
  		rcu_read_unlock();
  	} else {
  		rcu_read_lock();
  		err = map->ops->map_update_elem(map, key, value, attr->flags);
  		rcu_read_unlock();
  	}
  	__this_cpu_dec(bpf_prog_active);
  	preempt_enable();
  	maybe_wait_bpf_programs(map);

  	if (!err)
  		trace_bpf_map_update_elem(map, ufd, key, value);
  free_value:
  	kfree(value);
  free_key:
  	kfree(key);
  err_put:
  	fdput(f);
  	return err;
  }
  
  #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
  
  static int map_delete_elem(union bpf_attr *attr)
  {
  	void __user *ukey = u64_to_user_ptr(attr->key);
  	int ufd = attr->map_fd;
  	struct bpf_map *map;
  	struct fd f;
  	void *key;
  	int err;
  
  	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
  		return -EINVAL;
  	f = fdget(ufd);
  	map = __bpf_map_get(f);
  	if (IS_ERR(map))
  		return PTR_ERR(map);
  	key = memdup_user(ukey, map->key_size);
  	if (IS_ERR(key)) {
  		err = PTR_ERR(key);
  		goto err_put;
  	}

  	preempt_disable();
  	__this_cpu_inc(bpf_prog_active);
  	rcu_read_lock();
  	err = map->ops->map_delete_elem(map, key);
  	rcu_read_unlock();
  	__this_cpu_dec(bpf_prog_active);
  	preempt_enable();
  	maybe_wait_bpf_programs(map);

  	if (!err)
  		trace_bpf_map_delete_elem(map, ufd, key);
  	kfree(key);
  err_put:
  	fdput(f);
  	return err;
  }
  
  /* last field in 'union bpf_attr' used by this command */
  #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
  
  static int map_get_next_key(union bpf_attr *attr)
  {
  	void __user *ukey = u64_to_user_ptr(attr->key);
  	void __user *unext_key = u64_to_user_ptr(attr->next_key);
  	int ufd = attr->map_fd;
  	struct bpf_map *map;
  	void *key, *next_key;
  	struct fd f;
  	int err;
  
  	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
  		return -EINVAL;
  	f = fdget(ufd);
  	map = __bpf_map_get(f);
  	if (IS_ERR(map))
  		return PTR_ERR(map);
  	if (ukey) {
  		key = memdup_user(ukey, map->key_size);
  		if (IS_ERR(key)) {
  			err = PTR_ERR(key);
  			goto err_put;
  		}
  	} else {
  		key = NULL;
  	}
  
  	err = -ENOMEM;
  	next_key = kmalloc(map->key_size, GFP_USER);
  	if (!next_key)
  		goto free_key;
  
  	rcu_read_lock();
  	err = map->ops->map_get_next_key(map, key, next_key);
  	rcu_read_unlock();
  	if (err)
  		goto free_next_key;
  
  	err = -EFAULT;
  	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
  		goto free_next_key;
  	trace_bpf_map_next_key(map, ufd, key, next_key);
  	err = 0;
  
  free_next_key:
  	kfree(next_key);
  free_key:
  	kfree(key);
  err_put:
  	fdput(f);
  	return err;
  }
  static const struct bpf_verifier_ops * const bpf_prog_types[] = {
  #define BPF_PROG_TYPE(_id, _ops) \
  	[_id] = &_ops,
  #define BPF_MAP_TYPE(_id, _ops)
  #include <linux/bpf_types.h>
  #undef BPF_PROG_TYPE
  #undef BPF_MAP_TYPE
  };
  
  static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
  {
  	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
  		return -EINVAL;

  	prog->aux->ops = bpf_prog_types[type];
  	prog->type = type;
  	return 0;
  }
  
/* drop refcnt on maps used by eBPF program and free auxiliary data */
  static void free_used_maps(struct bpf_prog_aux *aux)
  {
  	int i;
  
  	for (i = 0; i < aux->used_map_cnt; i++)
  		bpf_map_put(aux->used_maps[i]);
  
  	kfree(aux->used_maps);
  }
  int __bpf_prog_charge(struct user_struct *user, u32 pages)
  {
  	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
  	unsigned long user_bufs;
  
  	if (user) {
  		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
  		if (user_bufs > memlock_limit) {
  			atomic_long_sub(pages, &user->locked_vm);
  			return -EPERM;
  		}
  	}
  
  	return 0;
  }
  
  void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
  {
  	if (user)
  		atomic_long_sub(pages, &user->locked_vm);
  }
  static int bpf_prog_charge_memlock(struct bpf_prog *prog)
  {
  	struct user_struct *user = get_current_user();
  	int ret;

  	ret = __bpf_prog_charge(user, prog->pages);
  	if (ret) {
  		free_uid(user);
  		return ret;
  	}

  	prog->aux->user = user;
  	return 0;
  }
  
  static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
  {
  	struct user_struct *user = prog->aux->user;
  	__bpf_prog_uncharge(user, prog->pages);
  	free_uid(user);
  }
  static int bpf_prog_alloc_id(struct bpf_prog *prog)
  {
  	int id;
  
  	spin_lock_bh(&prog_idr_lock);
  	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
  	if (id > 0)
  		prog->aux->id = id;
  	spin_unlock_bh(&prog_idr_lock);
  
  	/* id is in [1, INT_MAX) */
  	if (WARN_ON_ONCE(!id))
  		return -ENOSPC;
  
  	return id > 0 ? 0 : id;
  }
  static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
  {
  	/* cBPF to eBPF migrations are currently not in the idr store. */
  	if (!prog->aux->id)
  		return;
  	if (do_idr_lock)
  		spin_lock_bh(&prog_idr_lock);
  	else
  		__acquire(&prog_idr_lock);
  	idr_remove(&prog_idr, prog->aux->id);
  
  	if (do_idr_lock)
  		spin_unlock_bh(&prog_idr_lock);
  	else
  		__release(&prog_idr_lock);
  }
  static void __bpf_prog_put_rcu(struct rcu_head *rcu)
  {
  	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
  
  	free_used_maps(aux);
  	bpf_prog_uncharge_memlock(aux->prog);
  	bpf_prog_free(aux->prog);
  }
  static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
  {
  	if (atomic_dec_and_test(&prog->aux->refcnt)) {
  		trace_bpf_prog_put_rcu(prog);
  		/* bpf_prog_free_id() must be called first */
  		bpf_prog_free_id(prog, do_idr_lock);
  		bpf_prog_kallsyms_del(prog);
  		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
  	}
  }
  
  void bpf_prog_put(struct bpf_prog *prog)
  {
  	__bpf_prog_put(prog, true);
  }
  EXPORT_SYMBOL_GPL(bpf_prog_put);
  
  static int bpf_prog_release(struct inode *inode, struct file *filp)
  {
  	struct bpf_prog *prog = filp->private_data;
  	bpf_prog_put(prog);
  	return 0;
  }
  #ifdef CONFIG_PROC_FS
  static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
  {
  	const struct bpf_prog *prog = filp->private_data;
  	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

  	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
  #endif
  static const struct file_operations bpf_prog_fops = {
  #ifdef CONFIG_PROC_FS
  	.show_fdinfo	= bpf_prog_show_fdinfo,
  #endif
  	.release	= bpf_prog_release,
  };
  int bpf_prog_new_fd(struct bpf_prog *prog)
  {
  	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
  				O_RDWR | O_CLOEXEC);
  }
  static struct bpf_prog *____bpf_prog_get(struct fd f)
  {
  	if (!f.file)
  		return ERR_PTR(-EBADF);
  	if (f.file->f_op != &bpf_prog_fops) {
  		fdput(f);
  		return ERR_PTR(-EINVAL);
  	}
  	return f.file->private_data;
  }
  struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
  {
  	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
  		atomic_sub(i, &prog->aux->refcnt);
  		return ERR_PTR(-EBUSY);
  	}
  	return prog;
  }
  EXPORT_SYMBOL_GPL(bpf_prog_add);
  void bpf_prog_sub(struct bpf_prog *prog, int i)
  {
  	/* Only to be used for undoing previous bpf_prog_add() in some
  	 * error path. We still know that another entity in our call
  	 * path holds a reference to the program, thus atomic_sub() can
  	 * be safely used in such cases!
  	 */
  	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
  }
  EXPORT_SYMBOL_GPL(bpf_prog_sub);
  struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
  {
  	return bpf_prog_add(prog, 1);
  }
  EXPORT_SYMBOL_GPL(bpf_prog_inc);

  /* prog_idr_lock should have been held */
  struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
  {
  	int refold;
  
  	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
  
  	if (refold >= BPF_MAX_REFCNT) {
  		__bpf_prog_put(prog, false);
  		return ERR_PTR(-EBUSY);
  	}
  
  	if (!refold)
  		return ERR_PTR(-ENOENT);
  
  	return prog;
  }
  EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

  static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
  {
  	struct fd f = fdget(ufd);
  	struct bpf_prog *prog;
  	prog = ____bpf_prog_get(f);
  	if (IS_ERR(prog))
  		return prog;
  	if (type && prog->type != *type) {
  		prog = ERR_PTR(-EINVAL);
  		goto out;
  	}

  	prog = bpf_prog_inc(prog);
  out:
  	fdput(f);
  	return prog;
  }
  
  struct bpf_prog *bpf_prog_get(u32 ufd)
  {
  	return __bpf_prog_get(ufd, NULL);
  }
  
  struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
  {
  	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
  
  	if (!IS_ERR(prog))
  		trace_bpf_prog_get_type(prog);
  	return prog;
  }
  EXPORT_SYMBOL_GPL(bpf_prog_get_type);
  
  /* last field in 'union bpf_attr' used by this command */
  #define	BPF_PROG_LOAD_LAST_FIELD prog_flags
  
  static int bpf_prog_load(union bpf_attr *attr)
  {
  	enum bpf_prog_type type = attr->prog_type;
  	struct bpf_prog *prog;
  	int err;
  	char license[128];
  	bool is_gpl;
  
  	if (CHECK_ATTR(BPF_PROG_LOAD))
  		return -EINVAL;
  	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
  		return -EINVAL;
  	/* copy eBPF program license from user space */
  	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
  			      sizeof(license) - 1) < 0)
  		return -EFAULT;
  	license[sizeof(license) - 1] = 0;
  
  	/* eBPF programs must be GPL compatible to use GPL-ed functions */
  	is_gpl = license_is_gpl_compatible(license);
  	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
  		return -E2BIG;

  	if (type == BPF_PROG_TYPE_KPROBE &&
  	    attr->kern_version != LINUX_VERSION_CODE)
  		return -EINVAL;
  	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
  	    type != BPF_PROG_TYPE_CGROUP_SKB &&
  	    !capable(CAP_SYS_ADMIN))
  		return -EPERM;
  	/* plain bpf_prog allocation */
  	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
  	if (!prog)
  		return -ENOMEM;
  	err = bpf_prog_charge_memlock(prog);
  	if (err)
  		goto free_prog_nouncharge;
  	prog->len = attr->insn_cnt;
  
  	err = -EFAULT;
  	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
  			   bpf_prog_insn_size(prog)) != 0)
  		goto free_prog;
  
  	prog->orig_prog = NULL;
  	prog->jited = 0;
  
  	atomic_set(&prog->aux->refcnt, 1);
  	prog->gpl_compatible = is_gpl ? 1 : 0;
  
  	/* find program type: socket_filter vs tracing_filter */
  	err = find_prog_type(type, prog);
  	if (err < 0)
  		goto free_prog;
  
  	/* run eBPF verifier */
  	err = bpf_check(&prog, attr);
  	if (err < 0)
  		goto free_used_maps;
  
  	/* eBPF program is ready to be JITed */
  	prog = bpf_prog_select_runtime(prog, &err);
  	if (err < 0)
  		goto free_used_maps;

  	err = bpf_prog_alloc_id(prog);
  	if (err)
  		goto free_used_maps;
  	err = bpf_prog_new_fd(prog);
  	if (err < 0) {
  		/* failed to allocate fd.
  		 * bpf_prog_put() is needed because the above
  		 * bpf_prog_alloc_id() has published the prog
  		 * to the userspace and the userspace may
  		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
  		 */
  		bpf_prog_put(prog);
  		return err;
  	}

  	bpf_prog_kallsyms_add(prog);
  	trace_bpf_prog_load(prog, err);
  	return err;
  
  free_used_maps:
  	free_used_maps(prog->aux);
  free_prog:
  	bpf_prog_uncharge_memlock(prog);
  free_prog_nouncharge:
  	bpf_prog_free(prog);
  	return err;
  }
  #define BPF_OBJ_LAST_FIELD bpf_fd
  
  static int bpf_obj_pin(const union bpf_attr *attr)
  {
  	if (CHECK_ATTR(BPF_OBJ))
  		return -EINVAL;
  	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
  }
  
  static int bpf_obj_get(const union bpf_attr *attr)
  {
  	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
  		return -EINVAL;
  	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
  }
  #ifdef CONFIG_CGROUP_BPF
  #define BPF_PROG_ATTACH_LAST_FIELD attach_flags

  static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
  {
  	struct bpf_prog *prog = NULL;
  	int ufd = attr->target_fd;
  	struct bpf_map *map;
  	struct fd f;
  	int err;
  
  	f = fdget(ufd);
  	map = __bpf_map_get(f);
  	if (IS_ERR(map))
  		return PTR_ERR(map);
  	if (attach) {
  		prog = bpf_prog_get_type(attr->attach_bpf_fd,
  					 BPF_PROG_TYPE_SK_SKB);
  		if (IS_ERR(prog)) {
  			fdput(f);
  			return PTR_ERR(prog);
  		}
  	}
  	err = sock_map_prog(map, prog, attr->attach_type);
  	if (err) {
  		fdput(f);
  		if (prog)
  			bpf_prog_put(prog);
  		return err;
  	}
  
  	fdput(f);
  	return 0;
  }
  
  static int bpf_prog_attach(const union bpf_attr *attr)
  {
  	enum bpf_prog_type ptype;
  	struct bpf_prog *prog;
  	struct cgroup *cgrp;
  	int ret;
  
  	if (!capable(CAP_NET_ADMIN))
  		return -EPERM;
  
  	if (CHECK_ATTR(BPF_PROG_ATTACH))
  		return -EINVAL;
  	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
  		return -EINVAL;
  	switch (attr->attach_type) {
  	case BPF_CGROUP_INET_INGRESS:
  	case BPF_CGROUP_INET_EGRESS:
  		ptype = BPF_PROG_TYPE_CGROUP_SKB;
  		break;
  	case BPF_CGROUP_INET_SOCK_CREATE:
  		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
  		break;
  	case BPF_CGROUP_SOCK_OPS:
  		ptype = BPF_PROG_TYPE_SOCK_OPS;
  		break;
  	case BPF_SK_SKB_STREAM_PARSER:
  	case BPF_SK_SKB_STREAM_VERDICT:
  		return sockmap_get_from_fd(attr, true);
  	default:
  		return -EINVAL;
  	}
  	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
  	if (IS_ERR(prog))
  		return PTR_ERR(prog);
  
  	cgrp = cgroup_get_from_fd(attr->target_fd);
  	if (IS_ERR(cgrp)) {
  		bpf_prog_put(prog);
  		return PTR_ERR(cgrp);
  	}
  	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
  				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
  	if (ret)
  		bpf_prog_put(prog);
  	cgroup_put(cgrp);
  	return ret;
  }
  
  #define BPF_PROG_DETACH_LAST_FIELD attach_type
  
  static int bpf_prog_detach(const union bpf_attr *attr)
  {
  	struct cgroup *cgrp;
  	int ret;
  
  	if (!capable(CAP_NET_ADMIN))
  		return -EPERM;
  
  	if (CHECK_ATTR(BPF_PROG_DETACH))
  		return -EINVAL;
  
  	switch (attr->attach_type) {
  	case BPF_CGROUP_INET_INGRESS:
  	case BPF_CGROUP_INET_EGRESS:
  	case BPF_CGROUP_INET_SOCK_CREATE:
  	case BPF_CGROUP_SOCK_OPS:
  		cgrp = cgroup_get_from_fd(attr->target_fd);
  		if (IS_ERR(cgrp))
  			return PTR_ERR(cgrp);
  		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
  		cgroup_put(cgrp);
  		break;
  	case BPF_SK_SKB_STREAM_PARSER:
  	case BPF_SK_SKB_STREAM_VERDICT:
  		ret = sockmap_get_from_fd(attr, false);
  		break;
  	default:
  		return -EINVAL;
  	}
  	return ret;
  }

  #endif /* CONFIG_CGROUP_BPF */
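
/* BPF_PROG_TEST_RUN executes a loaded program against input supplied in
 * the attr and reports the result back through uattr.  Only program types
 * that implement an ->test_run callback support this; everything else
 * gets -ENOTSUPP.
 */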
  #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
  
  static int bpf_prog_test_run(const union bpf_attr *attr,
  			     union bpf_attr __user *uattr)
  {
  	struct bpf_prog *prog;
  	int ret = -ENOTSUPP;
  
  	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
  		return -EINVAL;
  
  	prog = bpf_prog_get(attr->test.prog_fd);
  	if (IS_ERR(prog))
  		return PTR_ERR(prog);
  
  	if (prog->aux->ops->test_run)
  		ret = prog->aux->ops->test_run(prog, attr, uattr);
  
  	bpf_prog_put(prog);
  	return ret;
  }
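
/* Shared helper for BPF_PROG_GET_NEXT_ID and BPF_MAP_GET_NEXT_ID: find
 * the smallest ID strictly greater than attr->start_id in the given IDR
 * and report it through uattr->next_id, or -ENOENT when the walk is done.
 */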
  #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
  
  static int bpf_obj_get_next_id(const union bpf_attr *attr,
  			       union bpf_attr __user *uattr,
  			       struct idr *idr,
  			       spinlock_t *lock)
  {
  	u32 next_id = attr->start_id;
  	int err = 0;
  
  	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
  		return -EINVAL;
  
  	if (!capable(CAP_SYS_ADMIN))
  		return -EPERM;
  
  	next_id++;
  	spin_lock_bh(lock);
  	if (!idr_get_next(idr, &next_id))
  		err = -ENOENT;
  	spin_unlock_bh(lock);
  
  	if (!err)
  		err = put_user(next_id, &uattr->next_id);
  
  	return err;
  }
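
/* A minimal sketch of how userspace can walk all loaded program IDs with
 * BPF_PROG_GET_NEXT_ID (illustrative only; the snippet is not part of
 * this file and assumes <unistd.h>, <sys/syscall.h> and <linux/bpf.h>):
 *
 *	union bpf_attr attr = {};
 *	__u32 id = 0;
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			    sizeof(attr)) < 0)
 *			break;			// errno == ENOENT: done
 *		id = attr.next_id;		// feed back as next start_id
 *	}
 *
 * Each returned ID can be turned into an fd via BPF_PROG_GET_FD_BY_ID
 * below (CAP_SYS_ADMIN is required for both commands).
 */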
  #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
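
/* Convert a program ID into a new fd.  CAP_SYS_ADMIN is required since
 * IDs are global; bpf_prog_inc_not_zero() is used under the IDR lock so
 * a program whose refcount has already hit zero cannot be resurrected.
 */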
  
  static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
  {
  	struct bpf_prog *prog;
  	u32 id = attr->prog_id;
  	int fd;
  
  	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
  		return -EINVAL;
  
  	if (!capable(CAP_SYS_ADMIN))
  		return -EPERM;
  
  	spin_lock_bh(&prog_idr_lock);
  	prog = idr_find(&prog_idr, id);
  	if (prog)
  		prog = bpf_prog_inc_not_zero(prog);
  	else
  		prog = ERR_PTR(-ENOENT);
  	spin_unlock_bh(&prog_idr_lock);
  
  	if (IS_ERR(prog))
  		return PTR_ERR(prog);
  
  	fd = bpf_prog_new_fd(prog);
  	if (fd < 0)
  		bpf_prog_put(prog);
  
  	return fd;
  }
  #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
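
/* Same as above, but for maps.  bpf_map_inc_not_zero() is called with
 * uref == true because a new user-space fd is being handed out.
 */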
  
  static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
  {
  	struct bpf_map *map;
  	u32 id = attr->map_id;
  	int fd;
  
  	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
  		return -EINVAL;
  
  	if (!capable(CAP_SYS_ADMIN))
  		return -EPERM;
  
  	spin_lock_bh(&map_idr_lock);
  	map = idr_find(&map_idr, id);
  	if (map)
  		map = bpf_map_inc_not_zero(map, true);
  	else
  		map = ERR_PTR(-ENOENT);
  	spin_unlock_bh(&map_idr_lock);
  
  	if (IS_ERR(map))
  		return PTR_ERR(map);
  
  	fd = bpf_map_new_fd(map);
  	if (fd < 0)
  		bpf_map_put(map);
  
  	return fd;
  }
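
/* Fill a bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD.  The caller's
 * info_len may be smaller or larger than the kernel's struct: anything
 * beyond what the kernel knows must be zero (check_uarg_tail_zero), and
 * the copy back is truncated to the smaller size.  Instruction dumps are
 * only provided to CAP_SYS_ADMIN.
 */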
  static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
  				   const union bpf_attr *attr,
  				   union bpf_attr __user *uattr)
  {
  	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
  	struct bpf_prog_info info = {};
  	u32 info_len = attr->info.info_len;
  	char __user *uinsns;
  	u32 ulen;
  	int err;
  
  	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
  	if (err)
  		return err;
  	info_len = min_t(u32, sizeof(info), info_len);
  
  	if (copy_from_user(&info, uinfo, info_len))
  		return -EFAULT;
  
  	info.type = prog->type;
  	info.id = prog->aux->id;
  
  	memcpy(info.tag, prog->tag, sizeof(prog->tag));
  
  	if (!capable(CAP_SYS_ADMIN)) {
  		info.jited_prog_len = 0;
  		info.xlated_prog_len = 0;
  		goto done;
  	}
  
  	ulen = info.jited_prog_len;
  	info.jited_prog_len = prog->jited_len;
  	if (info.jited_prog_len && ulen) {
  		uinsns = u64_to_user_ptr(info.jited_prog_insns);
  		ulen = min_t(u32, info.jited_prog_len, ulen);
  		if (copy_to_user(uinsns, prog->bpf_func, ulen))
  			return -EFAULT;
  	}
  
  	ulen = info.xlated_prog_len;
  	info.xlated_prog_len = bpf_prog_insn_size(prog);
  	if (info.xlated_prog_len && ulen) {
  		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
  		ulen = min_t(u32, info.xlated_prog_len, ulen);
  		if (copy_to_user(uinsns, prog->insnsi, ulen))
  			return -EFAULT;
  	}
  
  done:
  	if (copy_to_user(uinfo, &info, info_len) ||
  	    put_user(info_len, &uattr->info.info_len))
  		return -EFAULT;
  
  	return 0;
  }
  
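/* The map counterpart is simpler: bpf_map_info has no variable-length
 * members, so nothing needs to be read from the user's buffer first.
 */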
  static int bpf_map_get_info_by_fd(struct bpf_map *map,
  				  const union bpf_attr *attr,
  				  union bpf_attr __user *uattr)
  {
  	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
  	struct bpf_map_info info = {};
  	u32 info_len = attr->info.info_len;
  	int err;
  
  	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
  	if (err)
  		return err;
  	info_len = min_t(u32, sizeof(info), info_len);
  
  	info.type = map->map_type;
  	info.id = map->id;
  	info.key_size = map->key_size;
  	info.value_size = map->value_size;
  	info.max_entries = map->max_entries;
  	info.map_flags = map->map_flags;
  
  	if (copy_to_user(uinfo, &info, info_len) ||
  	    put_user(info_len, &uattr->info.info_len))
  		return -EFAULT;
  
  	return 0;
  }
  
  #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
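
/* Dispatch on the fd's file operations to decide whether it refers to a
 * program or a map, then fill the matching info structure.
 */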
  
  static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
  				  union bpf_attr __user *uattr)
  {
  	int ufd = attr->info.bpf_fd;
  	struct fd f;
  	int err;
  
  	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
  		return -EINVAL;
  
  	f = fdget(ufd);
  	if (!f.file)
  		return -EBADFD;
  
  	if (f.file->f_op == &bpf_prog_fops)
  		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
  					      uattr);
  	else if (f.file->f_op == &bpf_map_fops)
  		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
  					     uattr);
  	else
  		err = -EINVAL;
  
  	fdput(f);
  	return err;
  }
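
/* Main entry point of the bpf(2) syscall.  The attr union is copied in
 * with the same forward-compatibility rule used elsewhere in this file:
 * a larger-than-known attr is accepted as long as the unknown tail is
 * zero, and a smaller one is zero-extended.
 */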
  SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
  {
  	union bpf_attr attr = {};
  	int err;
  	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
  		return -EPERM;
  	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
  	if (err)
  		return err;
  	size = min_t(u32, size, sizeof(attr));
  
  	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
  	if (copy_from_user(&attr, uattr, size) != 0)
  		return -EFAULT;
  
  	switch (cmd) {
  	case BPF_MAP_CREATE:
  		err = map_create(&attr);
  		break;
  	case BPF_MAP_LOOKUP_ELEM:
  		err = map_lookup_elem(&attr);
  		break;
  	case BPF_MAP_UPDATE_ELEM:
  		err = map_update_elem(&attr);
  		break;
  	case BPF_MAP_DELETE_ELEM:
  		err = map_delete_elem(&attr);
  		break;
  	case BPF_MAP_GET_NEXT_KEY:
  		err = map_get_next_key(&attr);
  		break;
  	case BPF_PROG_LOAD:
  		err = bpf_prog_load(&attr);
  		break;
  	case BPF_OBJ_PIN:
  		err = bpf_obj_pin(&attr);
  		break;
  	case BPF_OBJ_GET:
  		err = bpf_obj_get(&attr);
  		break;
  #ifdef CONFIG_CGROUP_BPF
  	case BPF_PROG_ATTACH:
  		err = bpf_prog_attach(&attr);
  		break;
  	case BPF_PROG_DETACH:
  		err = bpf_prog_detach(&attr);
  		break;
  #endif
  	case BPF_PROG_TEST_RUN:
  		err = bpf_prog_test_run(&attr, uattr);
  		break;
  	case BPF_PROG_GET_NEXT_ID:
  		err = bpf_obj_get_next_id(&attr, uattr,
  					  &prog_idr, &prog_idr_lock);
  		break;
  	case BPF_MAP_GET_NEXT_ID:
  		err = bpf_obj_get_next_id(&attr, uattr,
  					  &map_idr, &map_idr_lock);
  		break;
  	case BPF_PROG_GET_FD_BY_ID:
  		err = bpf_prog_get_fd_by_id(&attr);
  		break;
  	case BPF_MAP_GET_FD_BY_ID:
  		err = bpf_map_get_fd_by_id(&attr);
  		break;
  	case BPF_OBJ_GET_INFO_BY_FD:
  		err = bpf_obj_get_info_by_fd(&attr, uattr);
  		break;
  	default:
  		err = -EINVAL;
  		break;
  	}
  
  	return err;
  }