kernel/bpf/local_storage.c

// SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf-cgroup.h>
  #include <linux/bpf.h>
  #include <linux/btf.h>
  #include <linux/bug.h>
  #include <linux/filter.h>
  #include <linux/mm.h>
  #include <linux/rbtree.h>
  #include <linux/slab.h>
  #include <uapi/linux/btf.h>

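/*
 * Per-cpu pointers to the storage of the program currently running on each
 * CPU, one slot per storage type. They back the bpf_get_local_storage()
 * helper and are set up right before a cgroup BPF program is invoked.
 */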
  DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

  #ifdef CONFIG_CGROUP_BPF
  
  #define LOCAL_STORAGE_CREATE_FLAG_MASK					\
  	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
  
  struct bpf_cgroup_storage_map {
  	struct bpf_map map;
  
  	spinlock_t lock;
  	struct bpf_prog *prog;
  	struct rb_root root;
  	struct list_head list;
  };
  
  static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
  {
  	return container_of(map, struct bpf_cgroup_storage_map, map);
  }
  
  static int bpf_cgroup_storage_key_cmp(
  	const struct bpf_cgroup_storage_key *key1,
  	const struct bpf_cgroup_storage_key *key2)
  {
  	if (key1->cgroup_inode_id < key2->cgroup_inode_id)
  		return -1;
  	else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
  		return 1;
  	else if (key1->attach_type < key2->attach_type)
  		return -1;
  	else if (key1->attach_type > key2->attach_type)
  		return 1;
  	return 0;
  }
  
  static struct bpf_cgroup_storage *cgroup_storage_lookup(
  	struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
  	bool locked)
  {
  	struct rb_root *root = &map->root;
  	struct rb_node *node;
  
  	if (!locked)
  		spin_lock_bh(&map->lock);
  
  	node = root->rb_node;
  	while (node) {
  		struct bpf_cgroup_storage *storage;
  
  		storage = container_of(node, struct bpf_cgroup_storage, node);
  
  		switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
  		case -1:
  			node = node->rb_left;
  			break;
  		case 1:
  			node = node->rb_right;
  			break;
  		default:
  			if (!locked)
  				spin_unlock_bh(&map->lock);
  			return storage;
  		}
  	}
  
  	if (!locked)
  		spin_unlock_bh(&map->lock);
  
  	return NULL;
  }
  
  static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
  				 struct bpf_cgroup_storage *storage)
  {
  	struct rb_root *root = &map->root;
  	struct rb_node **new = &(root->rb_node), *parent = NULL;
  
  	while (*new) {
  		struct bpf_cgroup_storage *this;
  
  		this = container_of(*new, struct bpf_cgroup_storage, node);
  
  		parent = *new;
  		switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
  		case -1:
  			new = &((*new)->rb_left);
  			break;
  		case 1:
  			new = &((*new)->rb_right);
  			break;
  		default:
  			return -EEXIST;
  		}
  	}
  
  	rb_link_node(&storage->node, parent, new);
  	rb_insert_color(&storage->node, root);
  
  	return 0;
  }
  
  static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
  {
  	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
  	struct bpf_cgroup_storage_key *key = _key;
  	struct bpf_cgroup_storage *storage;
  
  	storage = cgroup_storage_lookup(map, key, false);
  	if (!storage)
  		return NULL;
  
  	return &READ_ONCE(storage->buf)->data[0];
  }
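
/*
 * A minimal userspace sketch (not part of this file) of reading one entry
 * through the lookup path above. 'map_fd', VALUE_SZ and the attach type are
 * assumptions; the fd would come from bpf_obj_get() or BPF_MAP_GET_FD_BY_ID.
 */
#if 0
#include <bpf/bpf.h>		/* libbpf syscall wrappers */
#include <linux/bpf.h>		/* struct bpf_cgroup_storage_key */
#include <stdio.h>

#define VALUE_SZ 64		/* assumption: must equal map->value_size */

static int dump_one(int map_fd, __u64 cgroup_id)
{
	struct bpf_cgroup_storage_key key = {
		.cgroup_inode_id = cgroup_id,
		.attach_type = BPF_CGROUP_INET_EGRESS,
	};
	char value[VALUE_SZ];

	/* Fails with ENOENT if no storage exists for this key. */
	if (bpf_map_lookup_elem(map_fd, &key, value)) {
		perror("bpf_map_lookup_elem");
		return -1;
	}
	printf("first byte: %d\n", value[0]);
	return 0;
}
#endif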
  
  static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
  				      void *value, u64 flags)
  {
  	struct bpf_cgroup_storage_key *key = _key;
  	struct bpf_cgroup_storage *storage;
  	struct bpf_storage_buffer *new;
  	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST)))
  		return -EINVAL;
  
  	if (unlikely(flags & BPF_NOEXIST))
  		return -EINVAL;
  
  	if (unlikely((flags & BPF_F_LOCK) &&
  		     !map_value_has_spin_lock(map)))
  		return -EINVAL;
  
  	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
  					key, false);
  	if (!storage)
  		return -ENOENT;
  	if (flags & BPF_F_LOCK) {
  		copy_map_value_locked(map, storage->buf->data, value, false);
  		return 0;
  	}
  	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
  			   map->value_size,
  			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
  			   map->numa_node);
  	if (!new)
  		return -ENOMEM;
  
  	memcpy(&new->data[0], value, map->value_size);
  	check_and_init_map_lock(map, new->data);
  
  	new = xchg(&storage->buf, new);
  	kfree_rcu(new, rcu);
  
  	return 0;
  }
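
/*
 * A hedged userspace sketch of the update semantics above: BPF_NOEXIST is
 * rejected because entries are created at attach time, never by update, and
 * BPF_F_LOCK is only valid if the value embeds a struct bpf_spin_lock.
 * 'map_fd', 'key' and 'value' are assumptions as in the lookup sketch.
 */
#if 0
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int overwrite(int map_fd, struct bpf_cgroup_storage_key *key,
		     void *value)
{
	/*
	 * BPF_ANY (0) and BPF_EXIST both replace the existing buffer;
	 * BPF_NOEXIST would fail with EINVAL for this map type.
	 */
	return bpf_map_update_elem(map_fd, key, value, BPF_EXIST);
}
#endif
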
  int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
  				   void *value)
  {
  	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
  	struct bpf_cgroup_storage_key *key = _key;
  	struct bpf_cgroup_storage *storage;
  	int cpu, off = 0;
  	u32 size;
  
  	rcu_read_lock();
  	storage = cgroup_storage_lookup(map, key, false);
  	if (!storage) {
  		rcu_read_unlock();
  		return -ENOENT;
  	}
  
  	/* per_cpu areas are zero-filled and bpf programs can only
  	 * access 'value_size' of them, so copying rounded areas
  	 * will not leak any kernel data
  	 */
  	size = round_up(_map->value_size, 8);
  	for_each_possible_cpu(cpu) {
  		bpf_long_memcpy(value + off,
  				per_cpu_ptr(storage->percpu_buf, cpu), size);
  		off += size;
  	}
  	rcu_read_unlock();
  	return 0;
  }
  
  int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
  				     void *value, u64 map_flags)
  {
  	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
  	struct bpf_cgroup_storage_key *key = _key;
  	struct bpf_cgroup_storage *storage;
  	int cpu, off = 0;
  	u32 size;
  
  	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
  		return -EINVAL;
  
  	rcu_read_lock();
  	storage = cgroup_storage_lookup(map, key, false);
  	if (!storage) {
  		rcu_read_unlock();
  		return -ENOENT;
  	}
  
	/* User space provides round_up(value_size, 8) bytes that are copied
	 * into the per-cpu area; BPF programs can only access value_size of
	 * it. During lookup the same extra bytes are returned, or zeroes
	 * that percpu_alloc zero-filled, so no kernel data can leak.
	 */
  	size = round_up(_map->value_size, 8);
  	for_each_possible_cpu(cpu) {
  		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
  				value + off, size);
  		off += size;
  	}
  	rcu_read_unlock();
  	return 0;
  }
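
/*
 * A hedged sketch of the userspace buffer sizing the two helpers above
 * expect: round_up(value_size, 8) bytes per possible CPU. Assumes a libbpf
 * recent enough to provide libbpf_num_possible_cpus().
 */
#if 0
#include <bpf/libbpf.h>		/* libbpf_num_possible_cpus() */
#include <linux/types.h>
#include <stdlib.h>

static void *alloc_percpu_value(__u32 value_size)
{
	int ncpus = libbpf_num_possible_cpus();
	size_t stride = (value_size + 7) & ~7UL;	/* round_up(size, 8) */

	return ncpus > 0 ? calloc(ncpus, stride) : NULL;
}
#endif
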
  static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
  				       void *_next_key)
  {
  	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
  	struct bpf_cgroup_storage_key *key = _key;
  	struct bpf_cgroup_storage_key *next = _next_key;
  	struct bpf_cgroup_storage *storage;
  
  	spin_lock_bh(&map->lock);
  
  	if (list_empty(&map->list))
  		goto enoent;
  
  	if (key) {
  		storage = cgroup_storage_lookup(map, key, true);
  		if (!storage)
  			goto enoent;
  
		/* list_next_entry() never returns NULL; hitting the list
		 * head means there is no next entry.
		 */
		storage = list_next_entry(storage, list);
		if (&storage->list == &map->list)
			goto enoent;
  	} else {
  		storage = list_first_entry(&map->list,
  					 struct bpf_cgroup_storage, list);
  	}
  
  	spin_unlock_bh(&map->lock);
  	next->attach_type = storage->key.attach_type;
  	next->cgroup_inode_id = storage->key.cgroup_inode_id;
  	return 0;
  
  enoent:
  	spin_unlock_bh(&map->lock);
  	return -ENOENT;
  }
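
/*
 * A minimal iteration sketch (userspace, same assumptions as above): a NULL
 * key yields the first entry and -ENOENT ends the walk, matching
 * cgroup_storage_get_next_key().
 */
#if 0
#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>

static void walk_keys(int map_fd)
{
	struct bpf_cgroup_storage_key key, next;
	void *prev = NULL;	/* NULL -> start from the first entry */

	while (!bpf_map_get_next_key(map_fd, prev, &next)) {
		printf("cgroup %llu, attach_type %u\n",
		       (unsigned long long)next.cgroup_inode_id,
		       next.attach_type);
		key = next;
		prev = &key;
	}
}
#endif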
  
  static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
  {
  	int numa_node = bpf_map_attr_numa_node(attr);
  	struct bpf_cgroup_storage_map *map;
  	struct bpf_map_memory mem;
  	int ret;
  
  	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
  		return ERR_PTR(-EINVAL);
  	if (attr->value_size == 0)
  		return ERR_PTR(-EINVAL);
  	if (attr->value_size > PAGE_SIZE)
  		return ERR_PTR(-E2BIG);
  	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
  	    !bpf_map_flags_access_ok(attr->map_flags))
  		return ERR_PTR(-EINVAL);
  
  	if (attr->max_entries)
  		/* max_entries is not used and enforced to be 0 */
  		return ERR_PTR(-EINVAL);
  	ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
  	if (ret < 0)
  		return ERR_PTR(ret);
  	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
  			   __GFP_ZERO | GFP_USER, numa_node);
  	if (!map) {
  		bpf_map_charge_finish(&mem);
  		return ERR_PTR(-ENOMEM);
  	}

  	bpf_map_charge_move(&map->map.memory, &mem);
  
  	/* copy mandatory map attributes */
  	bpf_map_init_from_attr(&map->map, attr);
  
  	spin_lock_init(&map->lock);
  	map->root = RB_ROOT;
  	INIT_LIST_HEAD(&map->list);
  
  	return &map->map;
  }
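
/*
 * A hedged userspace sketch of creating such a map with libbpf's
 * bpf_create_map_xattr(), honouring the checks above: the key must be
 * struct bpf_cgroup_storage_key, value_size at most PAGE_SIZE, and
 * max_entries must be 0.
 */
#if 0
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int create_cgroup_storage(__u32 value_size)
{
	struct bpf_create_map_attr attr = {
		.name		= "cg_storage",
		.map_type	= BPF_MAP_TYPE_CGROUP_STORAGE,
		.key_size	= sizeof(struct bpf_cgroup_storage_key),
		.value_size	= value_size,	/* 1..PAGE_SIZE */
		.max_entries	= 0,		/* enforced to be 0 */
	};

	return bpf_create_map_xattr(&attr);	/* map fd, or -1 on error */
}
#endif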
  
  static void cgroup_storage_map_free(struct bpf_map *_map)
  {
  	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
  
  	WARN_ON(!RB_EMPTY_ROOT(&map->root));
  	WARN_ON(!list_empty(&map->list));
  
  	kfree(map);
  }
  
  static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
  {
  	return -EINVAL;
  }
  static int cgroup_storage_check_btf(const struct bpf_map *map,
  				    const struct btf *btf,
  				    const struct btf_type *key_type,
  				    const struct btf_type *value_type)
  {
  	struct btf_member *m;
  	u32 offset, size;
  
  	/* Key is expected to be of struct bpf_cgroup_storage_key type,
  	 * which is:
  	 * struct bpf_cgroup_storage_key {
  	 *	__u64	cgroup_inode_id;
  	 *	__u32	attach_type;
  	 * };
  	 */
  
  	/*
  	 * Key_type must be a structure with two fields.
  	 */
  	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
  	    BTF_INFO_VLEN(key_type->info) != 2)
  		return -EINVAL;
  
  	/*
  	 * The first field must be a 64 bit integer at 0 offset.
  	 */
  	m = (struct btf_member *)(key_type + 1);
  	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
  	if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
  		return -EINVAL;
  
  	/*
  	 * The second field must be a 32 bit integer at 64 bit offset.
  	 */
  	m++;
  	offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
  	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
  	if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
  		return -EINVAL;
  
  	return 0;
  }
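
/*
 * On the BPF-program side, a BTF-defined map sketch that satisfies the key
 * layout checked above (assuming a libbpf with BTF-defined map support;
 * the section name and counter logic are illustrative only):
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} cg_storage SEC(".maps");

SEC("cgroup_skb/egress")
int count_bytes(struct __sk_buff *skb)
{
	__u64 *bytes = bpf_get_local_storage(&cg_storage, 0);

	__sync_fetch_and_add(bytes, skb->len);
	return 1;	/* allow the packet */
}
#endif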
  
  static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
  					 struct seq_file *m)
  {
  	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
  	struct bpf_cgroup_storage_key *key = _key;
  	struct bpf_cgroup_storage *storage;
  	int cpu;
  
  	rcu_read_lock();
  	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
  	if (!storage) {
  		rcu_read_unlock();
  		return;
  	}
  
  	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
  	rcu_read_unlock();
  }
  const struct bpf_map_ops cgroup_storage_map_ops = {
  	.map_alloc = cgroup_storage_map_alloc,
  	.map_free = cgroup_storage_map_free,
  	.map_get_next_key = cgroup_storage_get_next_key,
  	.map_lookup_elem = cgroup_storage_lookup_elem,
  	.map_update_elem = cgroup_storage_update_elem,
  	.map_delete_elem = cgroup_storage_delete_elem,
  	.map_check_btf = cgroup_storage_check_btf,
  	.map_seq_show_elem = cgroup_storage_seq_show_elem,
  };
  
  int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
  {
  	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
  	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
  	int ret = -EBUSY;
  
  	spin_lock_bh(&map->lock);
  
  	if (map->prog && map->prog != prog)
  		goto unlock;
  	if (prog->aux->cgroup_storage[stype] &&
  	    prog->aux->cgroup_storage[stype] != _map)
  		goto unlock;
  
  	map->prog = prog;
  	prog->aux->cgroup_storage[stype] = _map;
  	ret = 0;
  unlock:
  	spin_unlock_bh(&map->lock);
  
  	return ret;
  }
  
  void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
  {
  	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
  	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
  
  	spin_lock_bh(&map->lock);
  	if (map->prog == prog) {
  		WARN_ON(prog->aux->cgroup_storage[stype] != _map);
  		map->prog = NULL;
  		prog->aux->cgroup_storage[stype] = NULL;
  	}
  	spin_unlock_bh(&map->lock);
  }
  static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
  {
  	size_t size;
  
  	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
  		size = sizeof(struct bpf_storage_buffer) + map->value_size;
  		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
  				  PAGE_SIZE) >> PAGE_SHIFT;
  	} else {
  		size = map->value_size;
  		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
  				  PAGE_SIZE) >> PAGE_SHIFT;
  	}
  
  	return size;
  }
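
/*
 * Worked example with hypothetical numbers and 4 KiB pages: for
 * value_size == 20, the shared flavour charges one page, and the per-cpu
 * flavour charges round_up(24 * num_possible_cpus(), PAGE_SIZE) >>
 * PAGE_SHIFT pages, i.e. still one page for up to 170 possible CPUs.
 */
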
  struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
  					enum bpf_cgroup_storage_type stype)
  {
  	struct bpf_cgroup_storage *storage;
  	struct bpf_map *map;
  	gfp_t flags;
  	size_t size;
  	u32 pages;
  	map = prog->aux->cgroup_storage[stype];
  	if (!map)
  		return NULL;
  	size = bpf_cgroup_storage_calculate_size(map, &pages);
  	if (bpf_map_charge_memlock(map, pages))
  		return ERR_PTR(-EPERM);
  
  	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
  			       __GFP_ZERO | GFP_USER, map->numa_node);
  	if (!storage)
  		goto enomem;

  	flags = __GFP_ZERO | GFP_USER;
  
  	if (stype == BPF_CGROUP_STORAGE_SHARED) {
  		storage->buf = kmalloc_node(size, flags, map->numa_node);
  		if (!storage->buf)
  			goto enomem;
  		check_and_init_map_lock(map, storage->buf->data);
  	} else {
  		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
  		if (!storage->percpu_buf)
  			goto enomem;
  	}
  
  	storage->map = (struct bpf_cgroup_storage_map *)map;
  
  	return storage;
  
  enomem:
  	bpf_map_uncharge_memlock(map, pages);
  	kfree(storage);
  	return ERR_PTR(-ENOMEM);
  }
  
  static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
  {
  	struct bpf_cgroup_storage *storage =
  		container_of(rcu, struct bpf_cgroup_storage, rcu);
  
  	kfree(storage->buf);
  	kfree(storage);
  }
  
  static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
  {
  	struct bpf_cgroup_storage *storage =
  		container_of(rcu, struct bpf_cgroup_storage, rcu);
  
  	free_percpu(storage->percpu_buf);
  	kfree(storage);
  }
  
  void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
  {
  	enum bpf_cgroup_storage_type stype;
  	struct bpf_map *map;
  	u32 pages;
  
  	if (!storage)
  		return;
  
  	map = &storage->map->map;
  
  	bpf_cgroup_storage_calculate_size(map, &pages);
  	bpf_map_uncharge_memlock(map, pages);
  	stype = cgroup_storage_type(map);
  	if (stype == BPF_CGROUP_STORAGE_SHARED)
  		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
  	else
  		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
  }
  
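/*
 * Attachment glue: storage returned by bpf_cgroup_storage_alloc() is keyed
 * to a concrete (cgroup, attach_type) pair here and becomes visible to
 * lookups; bpf_cgroup_storage_unlink() reverses this on detach.
 */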
  void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
  			     struct cgroup *cgroup,
  			     enum bpf_attach_type type)
  {
  	struct bpf_cgroup_storage_map *map;
  
  	if (!storage)
  		return;
  
  	storage->key.attach_type = type;
  	storage->key.cgroup_inode_id = cgroup->kn->id.id;
  
  	map = storage->map;
  
  	spin_lock_bh(&map->lock);
  	WARN_ON(cgroup_storage_insert(map, storage));
  	list_add(&storage->list, &map->list);
  	spin_unlock_bh(&map->lock);
  }
  
  void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
  {
  	struct bpf_cgroup_storage_map *map;
  	struct rb_root *root;
  
  	if (!storage)
  		return;
  
  	map = storage->map;
  
  	spin_lock_bh(&map->lock);
  	root = &map->root;
  	rb_erase(&storage->node, root);
  
  	list_del(&storage->list);
  	spin_unlock_bh(&map->lock);
  }
  
  #endif