kernel/bpf/trampoline.c

  // SPDX-License-Identifier: GPL-2.0-only
  /* Copyright (c) 2019 Facebook */
  #include <linux/hash.h>
  #include <linux/bpf.h>
  #include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

  /* dummy _ops. The verifier will operate on target program's ops. */
  const struct bpf_verifier_ops bpf_extension_verifier_ops = {
  };
  const struct bpf_prog_ops bpf_extension_prog_ops = {
  };
  /* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
  #define TRAMPOLINE_HASH_BITS 10
  #define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)
  
  static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

  void *bpf_jit_alloc_exec_page(void)
  {
  	void *image;
  
  	image = bpf_jit_alloc_exec(PAGE_SIZE);
  	if (!image)
  		return NULL;
  
  	set_vm_flush_reset_perms(image);
	/* Keep image as writable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
  	set_memory_x((long)image, 1);
  	return image;
  }
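
/* Illustrative sketch (not part of the upstream file): a hypothetical caller
 * pairing the allocator above with bpf_jit_free_exec(). The page comes back
 * writable and executable, so new code can be poked into it at any time
 * without flipping protections.
 */
static __maybe_unused void example_image_lifetime(void)
{
	void *image = bpf_jit_alloc_exec_page();

	if (!image)
		return;
	/* ... emit native code into the writable+executable page ... */
	bpf_jit_free_exec(image);	/* pairs with the allocation above */
}
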
  void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
  {
  	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
  }
  
  void bpf_image_ksym_del(struct bpf_ksym *ksym)
  {
  	bpf_ksym_del(ksym);
  	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
  }
  
  static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
  {
  	struct bpf_ksym *ksym = &tr->ksym;
  
  	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", tr->key);
  	bpf_image_ksym_add(tr->image, ksym);
  }
  static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
  {
  	struct bpf_trampoline *tr;
  	struct hlist_head *head;
  	void *image;
  	int i;
  
  	mutex_lock(&trampoline_mutex);
  	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
  	hlist_for_each_entry(tr, head, hlist) {
  		if (tr->key == key) {
  			refcount_inc(&tr->refcnt);
  			goto out;
  		}
  	}
  	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
  	if (!tr)
  		goto out;
  
  	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
	image = bpf_jit_alloc_exec_page();
  	if (!image) {
  		kfree(tr);
  		tr = NULL;
  		goto out;
  	}
  
  	tr->key = key;
  	INIT_HLIST_NODE(&tr->hlist);
  	hlist_add_head(&tr->hlist, head);
  	refcount_set(&tr->refcnt, 1);
  	mutex_init(&tr->mutex);
  	for (i = 0; i < BPF_TRAMP_MAX; i++)
  		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
	tr->image = image;
	INIT_LIST_HEAD_RCU(&tr->ksym.lnode);
	bpf_trampoline_ksym_add(tr);
  out:
  	mutex_unlock(&trampoline_mutex);
  	return tr;
  }
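
/* Illustrative sketch (not part of the upstream file): the 64-bit key used
 * above is computed on the verifier side; at this point in the tree it is
 * the target's BTF id, with the target prog id folded into the upper 32 bits
 * when attaching to another BPF prog (see bpf_trampoline_compute_key()).
 * A hypothetical lookup for a plain kernel function target:
 */
static __maybe_unused struct bpf_trampoline *example_lookup(u32 btf_id)
{
	/* no target prog: the key is just the BTF id in the low 32 bits */
	return bpf_trampoline_lookup((u64)btf_id);
}
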
  static int is_ftrace_location(void *ip)
  {
  	long addr;
  
  	addr = ftrace_location((long)ip);
  	if (!addr)
  		return 0;
  	if (WARN_ON_ONCE(addr != (long)ip))
  		return -EFAULT;
  	return 1;
  }
  
  static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
  {
  	void *ip = tr->func.addr;
  	int ret;
  
  	if (tr->func.ftrace_managed)
  		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
  	else
  		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
  	return ret;
  }
  
  static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
  {
  	void *ip = tr->func.addr;
  	int ret;
  
  	if (tr->func.ftrace_managed)
  		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
  	else
  		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
  	return ret;
  }
  
  /* first time registering */
  static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
  {
  	void *ip = tr->func.addr;
  	int ret;
  
  	ret = is_ftrace_location(ip);
  	if (ret < 0)
  		return ret;
  	tr->func.ftrace_managed = ret;
  
  	if (tr->func.ftrace_managed)
  		ret = register_ftrace_direct((long)ip, (long)new_addr);
  	else
  		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
  	return ret;
  }
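
/* Summary (editorial note, not in the upstream file) of the text-poke
 * transitions driven by the three helpers above; the ftrace-managed and
 * bpf_arch_text_poke() paths follow the same old/new pattern:
 *
 *   register_fentry:    NULL     -> new_addr   (nop becomes a call)
 *   modify_fentry:      old_addr -> new_addr   (retarget the live call)
 *   unregister_fentry:  old_addr -> NULL       (call becomes a nop again)
 */
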
  static struct bpf_tramp_progs *
  bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
  {
  	const struct bpf_prog_aux *aux;
  	struct bpf_tramp_progs *tprogs;
  	struct bpf_prog **progs;
  	int kind;
  
  	*total = 0;
  	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
  	if (!tprogs)
  		return ERR_PTR(-ENOMEM);
  
  	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
  		tprogs[kind].nr_progs = tr->progs_cnt[kind];
  		*total += tr->progs_cnt[kind];
  		progs = tprogs[kind].progs;
  
  		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
  			*progs++ = aux->prog;
  	}
  	return tprogs;
  }
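
/* Illustrative note (not in the upstream file): the array returned above is
 * indexed by kind. E.g. with two fentry progs and one fexit prog attached:
 *
 *   tprogs[BPF_TRAMP_FENTRY].nr_progs == 2, .progs = {p1, p2}
 *   tprogs[BPF_TRAMP_FEXIT].nr_progs  == 1, .progs = {p3}
 *   *total == 3
 */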
  
  static int bpf_trampoline_update(struct bpf_trampoline *tr)
  {
	void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
	void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	int err, total;

  	tprogs = bpf_trampoline_get_progs(tr, &total);
  	if (IS_ERR(tprogs))
  		return PTR_ERR(tprogs);
  
  	if (total == 0) {
		err = unregister_fentry(tr, old_image);
		tr->selector = 0;
		goto out;
	}
  	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
  	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	/* Though the second half of the trampoline page is unused, a task
	 * could be preempted in the middle of the first half of the
	 * trampoline, and two successive updates would change the code out
	 * from underneath the preempted task. Hence wait for tasks to
	 * voluntarily schedule or go to userspace.
  	 * The same trampoline can hold both sleepable and non-sleepable progs.
  	 * synchronize_rcu_tasks_trace() is needed to make sure all sleepable
  	 * programs finish executing.
  	 * Wait for these two grace periods together.
	 */
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_tasks_trace);

  	err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
  		goto out;
  
  	if (tr->selector)
  		/* progs already running at this address */
		err = modify_fentry(tr, old_image, new_image);
	else
		/* first time registering */
		err = register_fentry(tr, new_image);
  	if (err)
  		goto out;
  	tr->selector++;
  out:
	kfree(tprogs);
  	return err;
  }
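
/* Illustrative sketch (not part of the upstream file): how the selector in
 * bpf_trampoline_update() flips between the two halves of the trampoline
 * page. With selector == 0 the first update writes the lower half; each
 * successful update increments the selector, so the next one writes the
 * other half while progs keep running in the current one.
 */
static __maybe_unused void example_selector_halves(struct bpf_trampoline *tr)
{
	void *old_half = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE / 2;
	void *new_half = tr->image + (tr->selector & 1) * PAGE_SIZE / 2;

	/* the two pointers always land on opposite halves of the page */
	WARN_ON(old_half == new_half);
}
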
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
  	case BPF_TRACE_FENTRY:
  		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
  	case BPF_LSM_MAC:
  		if (!prog->aux->attach_func_proto->type)
  			/* The function returns void, we cannot modify its
  			 * return value.
  			 */
  			return BPF_TRAMP_FEXIT;
  		else
  			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
  	}
  }

int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
  	mutex_lock(&tr->mutex);
  	if (tr->extension_prog) {
  		/* cannot attach fentry/fexit if extension prog is attached.
  		 * cannot overwrite extension prog either.
  		 */
  		err = -EBUSY;
  		goto out;
  	}
  	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
  	if (kind == BPF_TRAMP_REPLACE) {
  		/* Cannot attach extension if fentry/fexit are in use. */
  		if (cnt) {
  			err = -EBUSY;
  			goto out;
  		}
  		tr->extension_prog = prog;
  		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
  					 prog->bpf_func);
  		goto out;
  	}
  	if (cnt >= BPF_MAX_TRAMP_PROGS) {
  		err = -E2BIG;
  		goto out;
  	}
  	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
  		/* prog already linked */
  		err = -EBUSY;
  		goto out;
  	}
  	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
  	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
  	if (err) {
  		hlist_del(&prog->aux->tramp_hlist);
  		tr->progs_cnt[kind]--;
  	}
  out:
  	mutex_unlock(&tr->mutex);
  	return err;
  }
  
  /* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
  	mutex_lock(&tr->mutex);
  	if (kind == BPF_TRAMP_REPLACE) {
  		WARN_ON_ONCE(!tr->extension_prog);
  		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
  					 tr->extension_prog->bpf_func, NULL);
  		tr->extension_prog = NULL;
  		goto out;
  	}
	hlist_del(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
  	mutex_unlock(&tr->mutex);
  	return err;
  }
  struct bpf_trampoline *bpf_trampoline_get(u64 key,
  					  struct bpf_attach_target_info *tgt_info)
  {
  	struct bpf_trampoline *tr;
  
  	tr = bpf_trampoline_lookup(key);
  	if (!tr)
  		return NULL;
  
  	mutex_lock(&tr->mutex);
  	if (tr->func.addr)
  		goto out;
  
  	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
  	tr->func.addr = (void *)tgt_info->tgt_addr;
  out:
  	mutex_unlock(&tr->mutex);
  	return tr;
  }
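
/* Illustrative sketch (not part of the upstream file): the expected attach
 * flow, with the verifier-side target resolution elided. The helper name and
 * error handling are hypothetical; see bpf_tracing_prog_attach() for the
 * real caller.
 */
static __maybe_unused int example_attach_flow(struct bpf_prog *prog, u64 key,
					      struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;
	int err;

	tr = bpf_trampoline_get(key, tgt_info);	/* takes a reference */
	if (!tr)
		return -ENOMEM;
	err = bpf_trampoline_link_prog(prog, tr);
	if (err)
		bpf_trampoline_put(tr);	/* drop the reference on failure */
	return err;
}
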
  void bpf_trampoline_put(struct bpf_trampoline *tr)
  {
  	if (!tr)
  		return;
  	mutex_lock(&trampoline_mutex);
  	if (!refcount_dec_and_test(&tr->refcnt))
  		goto out;
  	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
  	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
  		goto out;
  	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
  		goto out;
	bpf_image_ksym_del(&tr->ksym);
	/* This code is executed only after all bpf progs (both sleepable and
	 * non-sleepable) have gone through
	 * bpf_prog_put()->call_rcu[_tasks_trace]()->bpf_prog_free_deferred().
	 * Hence there is no need for another synchronize_rcu_tasks_trace()
	 * here, but synchronize_rcu_tasks() is still needed, since the
	 * trampoline may not have had any sleepable programs and we need to
	 * wait for tasks to get out of the trampoline code before freeing it.
	 */
	synchronize_rcu_tasks();
	bpf_jit_free_exec(tr->image);
  	hlist_del(&tr->hlist);
  	kfree(tr);
  out:
  	mutex_unlock(&trampoline_mutex);
  }
  /* The logic is similar to BPF_PROG_RUN, but with an explicit
   * rcu_read_lock() and migrate_disable() which are required
   * for the trampoline. The macro is split into
 *
 * call __bpf_prog_enter
   * call prog->bpf_func
   * call __bpf_prog_exit
   */
  u64 notrace __bpf_prog_enter(void)
	__acquires(RCU)
  {
  	u64 start = 0;
  
  	rcu_read_lock();
	migrate_disable();
  	if (static_branch_unlikely(&bpf_stats_enabled_key))
  		start = sched_clock();
  	return start;
  }
  
  void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
  {
  	struct bpf_prog_stats *stats;
  
  	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
  	    /* static_key could be enabled in __bpf_prog_enter
  	     * and disabled in __bpf_prog_exit.
  	     * And vice versa.
  	     * Hence check that 'start' is not zero.
  	     */
  	    start) {
  		stats = this_cpu_ptr(prog->aux->stats);
  		u64_stats_update_begin(&stats->syncp);
  		stats->cnt++;
  		stats->nsecs += sched_clock() - start;
  		u64_stats_update_end(&stats->syncp);
  	}
	migrate_enable();
  	rcu_read_unlock();
  }
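
/* Illustrative sketch (not part of the upstream file): the sequence the
 * JITed trampoline emits around each non-sleepable prog, written out in C.
 * The args pointer stands for the arguments the trampoline saved on its
 * stack.
 */
static __maybe_unused unsigned int example_run_prog(struct bpf_prog *prog,
						    void *args)
{
	unsigned int ret;
	u64 start;

	start = __bpf_prog_enter();	/* rcu_read_lock() + migrate_disable() */
	ret = prog->bpf_func(args, prog->insnsi);
	__bpf_prog_exit(prog, start);	/* stats, migrate_enable(), unlock */
	return ret;
}
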
  void notrace __bpf_prog_enter_sleepable(void)
  {
  	rcu_read_lock_trace();
  	might_fault();
  }
  
  void notrace __bpf_prog_exit_sleepable(void)
  {
  	rcu_read_unlock_trace();
  }
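
/* Illustrative sketch (not part of the upstream file): the sleepable
 * counterpart of the sequence above. Sleepable progs are bracketed by
 * rcu_read_lock_trace() instead of rcu_read_lock(), so they may fault
 * and sleep while executing.
 */
static __maybe_unused unsigned int example_run_sleepable(struct bpf_prog *prog,
							 void *args)
{
	unsigned int ret;

	__bpf_prog_enter_sleepable();
	ret = prog->bpf_func(args, prog->insnsi);
	__bpf_prog_exit_sleepable();
	return ret;
}
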
int __weak
arch_prepare_bpf_trampoline(void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
  			    struct bpf_tramp_progs *tprogs,
  			    void *orig_call)
  {
  	return -ENOTSUPP;
  }
  
  static int __init init_trampolines(void)
  {
  	int i;
  
  	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
  		INIT_HLIST_HEAD(&trampoline_table[i]);
  	return 0;
  }
  late_initcall(init_trampolines);