kernel/bpf/stackmap.c
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include "percpu_freelist.h"
struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 ip[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}
/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	u64 cost, n_buckets;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->map_flags)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8 ||
	    value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	smap = bpf_map_area_alloc(cost);
	if (!smap)
		return ERR_PTR(-ENOMEM);

	err = -E2BIG;
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_smap;

	smap->map.map_type = attr->map_type;
	smap->map.key_size = attr->key_size;
	smap->map.value_size = value_size;
	smap->map.max_entries = attr->max_entries;
	smap->map.map_flags = attr->map_flags;
	smap->n_buckets = n_buckets;
	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	err = bpf_map_precharge_memlock(smap->map.pages);
	if (err)
		goto free_smap;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_smap;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_smap:
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}
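The attribute checks above define the user-visible contract for creating a map of this type: key_size must be 4, value_size must be a non-zero multiple of 8, and value_size / 8 may not exceed sysctl_perf_event_max_stack. The following is a minimal user-space sketch of a matching BPF_MAP_CREATE call; it is not part of stackmap.c, and the raw bpf(2) syscall wrapper, the depth of 127 frames (the historical default of sysctl_perf_event_max_stack) and the max_entries value are illustrative assumptions.

/* Illustrative sketch only: create a BPF_MAP_TYPE_STACK_TRACE map from user space. */
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int create_stack_trace_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_STACK_TRACE;
	attr.key_size = sizeof(__u32);		/* stack id returned by bpf_get_stackid() */
	attr.value_size = 127 * sizeof(__u64);	/* value_size / 8 <= sysctl_perf_event_max_stack */
	attr.max_entries = 1024;		/* rounded up to a power of two for n_buckets */
	attr.map_flags = 0;			/* must be 0 for this map type */

	/* returns a map fd on success, -1 with errno on failure */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}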
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / 8;
	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	if (bucket && bucket->hash == hash) {
		if (flags & BPF_F_FAST_STACK_CMP)
			return id;
		if (bucket->nr == trace_nr &&
		    memcmp(bucket->ip, ips, trace_len) == 0)
			return id;
	}

	/* this call stack is not in the map, try to add it */
	if (bucket && !(flags & BPF_F_REUSE_STACKID))
		return -EEXIST;

	new_bucket = (struct stack_map_bucket *)
		pcpu_freelist_pop(&smap->freelist);
	if (unlikely(!new_bucket))
		return -ENOMEM;

	memcpy(new_bucket->ip, ips, trace_len);
	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;
	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
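bpf_get_stackid_proto is what a BPF program reaches when it calls the bpf_get_stackid() helper against a BPF_MAP_TYPE_STACK_TRACE map; because .gpl_only is true, the calling program must carry a GPL-compatible license. Below is one plausible caller sketched in the style of samples/bpf; it is not part of this file, and the map name, the kprobe attach point and the reliance on samples/bpf's bpf_helpers.h are assumptions for illustration.

/* Illustrative sketch only: BPF program side, samples/bpf style. */
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include <uapi/linux/perf_event.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") stack_traces = {
	.type = BPF_MAP_TYPE_STACK_TRACE,
	.key_size = sizeof(__u32),
	.value_size = PERF_MAX_STACK_DEPTH * sizeof(__u64),
	.max_entries = 1024,
};

SEC("kprobe/try_to_wake_up")
int waker(struct pt_regs *ctx)
{
	/* returns an index into stack_traces, or a negative error */
	int stack_id = bpf_get_stackid(ctx, &stack_traces, BPF_F_FAST_STACK_CMP);

	if (stack_id < 0)
		return 0;
	/* ... aggregate samples keyed by stack_id ... */
	return 0;
}

char _license[] SEC("license") = "GPL";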
/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * sizeof(u64);
	memcpy(value, bucket->ip, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}
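bpf_stackmap_copy() backs the BPF_MAP_LOOKUP_ELEM syscall path for this map type, so user space retrieves a stored trace by the id that bpf_get_stackid() returned; unused trailing slots in the value buffer are zeroed. A minimal sketch of such a lookup follows; it is not part of stackmap.c, and the helper name and raw syscall usage are illustrative assumptions.

/* Illustrative sketch only: read one stored stack trace from user space. */
#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int read_stack_trace(int map_fd, __u32 stack_id, __u64 *ips)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&stack_id;
	attr.value = (__u64)(unsigned long)ips;	/* must point to value_size bytes */

	/* 0 on success, -1 with errno (e.g. ENOENT) on failure */
	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}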
static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	return -EINVAL;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	/* wait for bpf programs to complete before freeing stack map */
	synchronize_rcu();

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

static const struct bpf_map_ops stack_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
};

static struct bpf_map_type_list stack_map_type __read_mostly = {
	.ops = &stack_map_ops,
	.type = BPF_MAP_TYPE_STACK_TRACE,
};

static int __init register_stack_map(void)
{
	bpf_register_map_type(&stack_map_type);
	return 0;
}
late_initcall(register_stack_map);