kernel/bpf/arraymap.c

  /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of version 2 of the GNU General Public
   * License as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful, but
   * WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   * General Public License for more details.
   */
  #include <linux/bpf.h>
  #include <linux/err.h>
  #include <linux/vmalloc.h>
  #include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/filter.h>
  #include <linux/perf_event.h>
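
  /* Per-cpu storage helpers for BPF_MAP_TYPE_PERCPU_ARRAY: every array index
   * owns a separate per-cpu allocation of elem_size bytes, referenced through
   * array->pptrs[].
   */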

  static void bpf_array_free_percpu(struct bpf_array *array)
  {
  	int i;
  
  	for (i = 0; i < array->map.max_entries; i++)
  		free_percpu(array->pptrs[i]);
  }
  
  static int bpf_array_alloc_percpu(struct bpf_array *array)
  {
  	void __percpu *ptr;
  	int i;
  
  	for (i = 0; i < array->map.max_entries; i++) {
  		ptr = __alloc_percpu_gfp(array->elem_size, 8,
  					 GFP_USER | __GFP_NOWARN);
  		if (!ptr) {
  			bpf_array_free_percpu(array);
  			return -ENOMEM;
  		}
  		array->pptrs[i] = ptr;
  	}
  
  	return 0;
  }
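
  /* A plain BPF_MAP_TYPE_ARRAY stores its values inline in array->value[];
   * a BPF_MAP_TYPE_PERCPU_ARRAY stores only per-cpu pointers in array->pptrs[]
   * and allocates the values themselves in bpf_array_alloc_percpu().
   */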
  /* Called from syscall */
  static struct bpf_map *array_map_alloc(union bpf_attr *attr)
  {
  	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
  	struct bpf_array *array;
  	u64 array_size;
  	u32 elem_size;
  
  	/* check sanity of attributes */
  	if (attr->max_entries == 0 || attr->key_size != 4 ||
  	    attr->value_size == 0 || attr->map_flags)
  		return ERR_PTR(-EINVAL);
  	if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
  		/* if value_size is bigger, the user space won't be able to
  		 * access the elements.
  		 */
  		return ERR_PTR(-E2BIG);
  	elem_size = round_up(attr->value_size, 8);
  	array_size = sizeof(*array);
  	if (percpu)
  		array_size += (u64) attr->max_entries * sizeof(void *);
  	else
  		array_size += (u64) attr->max_entries * elem_size;
  
  	/* make sure there is no u32 overflow later in round_up() */
  	if (array_size >= U32_MAX - PAGE_SIZE)
  		return ERR_PTR(-ENOMEM);

  	/* allocate all map elements and zero-initialize them */
  	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
  	if (!array) {
  		array = vzalloc(array_size);
  		if (!array)
  			return ERR_PTR(-ENOMEM);
  	}
  
  	/* copy mandatory map attributes */
  	array->map.map_type = attr->map_type;
  	array->map.key_size = attr->key_size;
  	array->map.value_size = attr->value_size;
  	array->map.max_entries = attr->max_entries;
  	array->elem_size = elem_size;
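
  	/* for a per-cpu array the values live in separate per-cpu allocations;
  	 * array_size is grown below only so that map.pages also accounts for
  	 * that per-cpu storage
  	 */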
  	if (!percpu)
  		goto out;
  
  	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
  
  	if (array_size >= U32_MAX - PAGE_SIZE ||
  	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
  		kvfree(array);
  		return ERR_PTR(-ENOMEM);
  	}
  out:
  	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
  	return &array->map;
  }
  
  /* Called from syscall or from eBPF program */
  static void *array_map_lookup_elem(struct bpf_map *map, void *key)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	u32 index = *(u32 *)key;
  	if (unlikely(index >= array->map.max_entries))
  		return NULL;
  
  	return array->value + array->elem_size * index;
  }
  /* Called from eBPF program */
  static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	u32 index = *(u32 *)key;
  
  	if (unlikely(index >= array->map.max_entries))
  		return NULL;
  
  	return this_cpu_ptr(array->pptrs[index]);
  }
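
  /* Called from syscall: copy one index's value for every possible CPU into
   * the user-supplied buffer, round_up(value_size, 8) bytes per CPU.
   */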
  int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	u32 index = *(u32 *)key;
  	void __percpu *pptr;
  	int cpu, off = 0;
  	u32 size;
  
  	if (unlikely(index >= array->map.max_entries))
  		return -ENOENT;
  
  	/* per_cpu areas are zero-filled and bpf programs can only
  	 * access 'value_size' of them, so copying rounded areas
  	 * will not leak any kernel data
  	 */
  	size = round_up(map->value_size, 8);
  	rcu_read_lock();
  	pptr = array->pptrs[index];
  	for_each_possible_cpu(cpu) {
  		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
  		off += size;
  	}
  	rcu_read_unlock();
  	return 0;
  }
  /* Called from syscall */
  static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	u32 index = *(u32 *)key;
  	u32 *next = (u32 *)next_key;
  
  	if (index >= array->map.max_entries) {
  		*next = 0;
  		return 0;
  	}
  
  	if (index == array->map.max_entries - 1)
  		return -ENOENT;
  
  	*next = index + 1;
  	return 0;
  }
  
  /* Called from syscall or from eBPF program */
  static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
  				 u64 map_flags)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	u32 index = *(u32 *)key;
  	if (unlikely(map_flags > BPF_EXIST))
  		/* unknown flags */
  		return -EINVAL;
  	if (unlikely(index >= array->map.max_entries))
  		/* all elements were pre-allocated, cannot insert a new one */
  		return -E2BIG;
  	if (unlikely(map_flags == BPF_NOEXIST))
  		/* all elements already exist */
  		return -EEXIST;
  	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
  		memcpy(this_cpu_ptr(array->pptrs[index]),
  		       value, map->value_size);
  	else
  		memcpy(array->value + array->elem_size * index,
  		       value, map->value_size);
  	return 0;
  }
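
  /* Called from syscall: update one index on every possible CPU from a user
   * buffer laid out as round_up(value_size, 8) bytes per CPU.
   */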
  int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
  			    u64 map_flags)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	u32 index = *(u32 *)key;
  	void __percpu *pptr;
  	int cpu, off = 0;
  	u32 size;
  
  	if (unlikely(map_flags > BPF_EXIST))
  		/* unknown flags */
  		return -EINVAL;
  
  	if (unlikely(index >= array->map.max_entries))
  		/* all elements were pre-allocated, cannot insert a new one */
  		return -E2BIG;
  
  	if (unlikely(map_flags == BPF_NOEXIST))
  		/* all elements already exist */
  		return -EEXIST;
  
  	/* user space provides round_up(value_size, 8) bytes that are
  	 * copied into the per-cpu area. bpf programs can only access
  	 * value_size of it. During lookup the same extra bytes are
  	 * returned, or zeros that percpu_alloc already zero-filled,
  	 * so no kernel data can leak
  	 */
  	size = round_up(map->value_size, 8);
  	rcu_read_lock();
  	pptr = array->pptrs[index];
  	for_each_possible_cpu(cpu) {
  		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
  		off += size;
  	}
  	rcu_read_unlock();
  	return 0;
  }
  /* Called from syscall or from eBPF program */
  static int array_map_delete_elem(struct bpf_map *map, void *key)
  {
  	return -EINVAL;
  }
  
  /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
  static void array_map_free(struct bpf_map *map)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  
  	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
  	 * so the programs (there can be more than one that used this map) were
  	 * disconnected from events. Wait for outstanding programs to complete
  	 * and then free the array
  	 */
  	 */
  	synchronize_rcu();
  	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
  		bpf_array_free_percpu(array);
  	kvfree(array);
  }
  static const struct bpf_map_ops array_ops = {
  	.map_alloc = array_map_alloc,
  	.map_free = array_map_free,
  	.map_get_next_key = array_map_get_next_key,
  	.map_lookup_elem = array_map_lookup_elem,
  	.map_update_elem = array_map_update_elem,
  	.map_delete_elem = array_map_delete_elem,
  };
  static struct bpf_map_type_list array_type __read_mostly = {
  	.ops = &array_ops,
  	.type = BPF_MAP_TYPE_ARRAY,
  };
  static const struct bpf_map_ops percpu_array_ops = {
  	.map_alloc = array_map_alloc,
  	.map_free = array_map_free,
  	.map_get_next_key = array_map_get_next_key,
  	.map_lookup_elem = percpu_array_map_lookup_elem,
  	.map_update_elem = array_map_update_elem,
  	.map_delete_elem = array_map_delete_elem,
  };
  
  static struct bpf_map_type_list percpu_array_type __read_mostly = {
  	.ops = &percpu_array_ops,
  	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
  };
  static int __init register_array_map(void)
  {
  	bpf_register_map_type(&array_type);
  	bpf_register_map_type(&percpu_array_type);
  	return 0;
  }
  late_initcall(register_array_map);

  static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
  {
  	/* only file descriptors can be stored in this type of map */
  	if (attr->value_size != sizeof(u32))
  		return ERR_PTR(-EINVAL);
  	return array_map_alloc(attr);
  }
  static void fd_array_map_free(struct bpf_map *map)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	int i;
  
  	synchronize_rcu();
  
  	/* make sure it's empty */
  	for (i = 0; i < array->map.max_entries; i++)
  		BUG_ON(array->ptrs[i] != NULL);
  	kvfree(array);
  }
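
  /* fd-array maps store kernel object pointers (programs, perf events,
   * cgroups) rather than plain values, so element lookup returns NULL;
   * the objects are reached through dedicated helpers such as bpf_tail_call().
   */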
  static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
  {
  	return NULL;
  }
  
  /* only called from syscall */
  int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
  				 void *key, void *value, u64 map_flags)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	void *new_ptr, *old_ptr;
  	u32 index = *(u32 *)key, ufd;
  
  	if (map_flags != BPF_ANY)
  		return -EINVAL;
  
  	if (index >= array->map.max_entries)
  		return -E2BIG;
  
  	ufd = *(u32 *)value;
  	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
  	if (IS_ERR(new_ptr))
  		return PTR_ERR(new_ptr);

  	old_ptr = xchg(array->ptrs + index, new_ptr);
  	if (old_ptr)
  		map->ops->map_fd_put_ptr(old_ptr);
  
  	return 0;
  }
  static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	void *old_ptr;
  	u32 index = *(u32 *)key;
  
  	if (index >= array->map.max_entries)
  		return -E2BIG;
  	old_ptr = xchg(array->ptrs + index, NULL);
  	if (old_ptr) {
  		map->ops->map_fd_put_ptr(old_ptr);
  		return 0;
  	} else {
  		return -ENOENT;
  	}
  }
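
  /* Translate a program fd into a held bpf_prog reference; the program must
   * pass the bpf_prog_array_compatible() check for this array, otherwise the
   * reference is dropped again and -EINVAL is returned.
   */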
  static void *prog_fd_array_get_ptr(struct bpf_map *map,
  				   struct file *map_file, int fd)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	struct bpf_prog *prog = bpf_prog_get(fd);

  	if (IS_ERR(prog))
  		return prog;
  
  	if (!bpf_prog_array_compatible(array, prog)) {
  		bpf_prog_put(prog);
  		return ERR_PTR(-EINVAL);
  	}

  	return prog;
  }
  
  static void prog_fd_array_put_ptr(void *ptr)
  {
  	bpf_prog_put(ptr);
  }
  /* decrement refcnt of all bpf_progs that are stored in this map */
  void bpf_fd_array_map_clear(struct bpf_map *map)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	int i;
  
  	for (i = 0; i < array->map.max_entries; i++)
  		fd_array_map_delete_elem(map, &i);
  }
  
  static const struct bpf_map_ops prog_array_ops = {
  	.map_alloc = fd_array_map_alloc,
  	.map_free = fd_array_map_free,
  	.map_get_next_key = array_map_get_next_key,
  	.map_lookup_elem = fd_array_map_lookup_elem,
  	.map_delete_elem = fd_array_map_delete_elem,
  	.map_fd_get_ptr = prog_fd_array_get_ptr,
  	.map_fd_put_ptr = prog_fd_array_put_ptr,
  };
  
  static struct bpf_map_type_list prog_array_type __read_mostly = {
  	.ops = &prog_array_ops,
  	.type = BPF_MAP_TYPE_PROG_ARRAY,
  };
  
  static int __init register_prog_array_map(void)
  {
  	bpf_register_map_type(&prog_array_type);
  	return 0;
  }
  late_initcall(register_prog_array_map);
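
  /* A perf-event array element is a bpf_event_entry tying the perf event file
   * to the map file that inserted it. It is allocated with GFP_ATOMIC because
   * fd-array updates run under rcu_read_lock().
   */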

  static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
  						   struct file *map_file)
  {
  	struct bpf_event_entry *ee;
  	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
  	if (ee) {
  		ee->event = perf_file->private_data;
  		ee->perf_file = perf_file;
  		ee->map_file = map_file;
  	}
  
  	return ee;
  }
  
  static void __bpf_event_entry_free(struct rcu_head *rcu)
  {
  	struct bpf_event_entry *ee;
  
  	ee = container_of(rcu, struct bpf_event_entry, rcu);
  	fput(ee->perf_file);
  	kfree(ee);
  }
  
  static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
  {
  	call_rcu(&ee->rcu, __bpf_event_entry_free);
  }
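
  /* Resolve a perf event fd: only non-inherited hardware, raw and
   * software (BPF_OUTPUT) events may be stored in a perf event array.
   */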
  static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
  					 struct file *map_file, int fd)
  {
  	const struct perf_event_attr *attr;
  	struct bpf_event_entry *ee;
  	struct perf_event *event;
  	struct file *perf_file;

  	perf_file = perf_event_get(fd);
  	if (IS_ERR(perf_file))
  		return perf_file;

  	event = perf_file->private_data;
  	ee = ERR_PTR(-EINVAL);
  
  	attr = perf_event_attrs(event);
  	if (IS_ERR(attr) || attr->inherit)
  		goto err_out;
  
  	switch (attr->type) {
  	case PERF_TYPE_SOFTWARE:
  		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
  			goto err_out;
  		/* fall-through */
  	case PERF_TYPE_RAW:
  	case PERF_TYPE_HARDWARE:
  		ee = bpf_event_entry_gen(perf_file, map_file);
  		if (ee)
  			return ee;
  		ee = ERR_PTR(-ENOMEM);
  		/* fall-through */
  	default:
  		break;
  	}

  err_out:
  	fput(perf_file);
  	return ee;
  }
  
  static void perf_event_fd_array_put_ptr(void *ptr)
  {
  	bpf_event_entry_free_rcu(ptr);
  }
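
  /* map_release callback: when a map file is closed, drop the entries that
   * were installed through that particular file.
   */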
  
  static void perf_event_fd_array_release(struct bpf_map *map,
  					struct file *map_file)
  {
  	struct bpf_array *array = container_of(map, struct bpf_array, map);
  	struct bpf_event_entry *ee;
  	int i;
  
  	rcu_read_lock();
  	for (i = 0; i < array->map.max_entries; i++) {
  		ee = READ_ONCE(array->ptrs[i]);
  		if (ee && ee->map_file == map_file)
  			fd_array_map_delete_elem(map, &i);
  	}
  	rcu_read_unlock();
  }
  
  static const struct bpf_map_ops perf_event_array_ops = {
  	.map_alloc = fd_array_map_alloc,
  	.map_free = fd_array_map_free,
  	.map_get_next_key = array_map_get_next_key,
  	.map_lookup_elem = fd_array_map_lookup_elem,
  	.map_delete_elem = fd_array_map_delete_elem,
  	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
  	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
  	.map_release = perf_event_fd_array_release,
  };
  
  static struct bpf_map_type_list perf_event_array_type __read_mostly = {
  	.ops = &perf_event_array_ops,
  	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
  };
  
  static int __init register_perf_event_array_map(void)
  {
  	bpf_register_map_type(&perf_event_array_type);
  	return 0;
  }
  late_initcall(register_perf_event_array_map);

  #ifdef CONFIG_CGROUPS
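  /* A cgroup array element holds a struct cgroup reference taken from a
   * cgroup fd, for helpers such as bpf_current_task_under_cgroup().
   */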
  static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
  				     struct file *map_file /* not used */,
  				     int fd)
  {
  	return cgroup_get_from_fd(fd);
  }
  
  static void cgroup_fd_array_put_ptr(void *ptr)
  {
  	/* cgroup_put() frees the cgroup after an RCU grace period */
  	cgroup_put(ptr);
  }
  
  static void cgroup_fd_array_free(struct bpf_map *map)
  {
  	bpf_fd_array_map_clear(map);
  	fd_array_map_free(map);
  }
  
  static const struct bpf_map_ops cgroup_array_ops = {
  	.map_alloc = fd_array_map_alloc,
  	.map_free = cgroup_fd_array_free,
  	.map_get_next_key = array_map_get_next_key,
  	.map_lookup_elem = fd_array_map_lookup_elem,
  	.map_delete_elem = fd_array_map_delete_elem,
  	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
  	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
  };
  
  static struct bpf_map_type_list cgroup_array_type __read_mostly = {
  	.ops = &cgroup_array_ops,
  	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
  };
  
  static int __init register_cgroup_array_map(void)
  {
  	bpf_register_map_type(&cgroup_array_type);
  	return 0;
  }
  late_initcall(register_cgroup_array_map);
  #endif