  /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of version 2 of the GNU General Public
   * License as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful, but
   * WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   * General Public License for more details.
   */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

  /* If kernel subsystem is allowing eBPF programs to call this function,
   * inside its own verifier_ops->get_func_proto() callback it should return
   * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
   *
   * Different map implementations will rely on rcu in map methods
   * lookup/update/delete, therefore eBPF programs must run under rcu lock
   * if program is allowed to access maps, so check rcu_read_lock_held in
   * all three functions.
   */
f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
30
  BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
31
  {
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
32
  	WARN_ON_ONCE(!rcu_read_lock_held());
f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
33
  	return (unsigned long) map->ops->map_lookup_elem(map, key);
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
34
  }
a2c83fff5   Daniel Borkmann   ebpf: constify va...
35
  const struct bpf_func_proto bpf_map_lookup_elem_proto = {
3324b584b   Daniel Borkmann   ebpf: misc core c...
36
37
  	.func		= bpf_map_lookup_elem,
  	.gpl_only	= false,
36bbef52c   Daniel Borkmann   bpf: direct packe...
38
  	.pkt_access	= true,
3324b584b   Daniel Borkmann   ebpf: misc core c...
39
40
41
  	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
  	.arg1_type	= ARG_CONST_MAP_PTR,
  	.arg2_type	= ARG_PTR_TO_MAP_KEY,
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
42
  };
f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
43
44
  BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
  	   void *, value, u64, flags)
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
45
  {
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
46
  	WARN_ON_ONCE(!rcu_read_lock_held());
f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
47
  	return map->ops->map_update_elem(map, key, value, flags);
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
48
  }
a2c83fff5   Daniel Borkmann   ebpf: constify va...
49
  const struct bpf_func_proto bpf_map_update_elem_proto = {
3324b584b   Daniel Borkmann   ebpf: misc core c...
50
51
  	.func		= bpf_map_update_elem,
  	.gpl_only	= false,
36bbef52c   Daniel Borkmann   bpf: direct packe...
52
  	.pkt_access	= true,
3324b584b   Daniel Borkmann   ebpf: misc core c...
53
54
55
56
57
  	.ret_type	= RET_INTEGER,
  	.arg1_type	= ARG_CONST_MAP_PTR,
  	.arg2_type	= ARG_PTR_TO_MAP_KEY,
  	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
  	.arg4_type	= ARG_ANYTHING,
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
58
  };
f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
59
  BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
60
  {
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
61
  	WARN_ON_ONCE(!rcu_read_lock_held());
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
62
63
  	return map->ops->map_delete_elem(map, key);
  }
a2c83fff5   Daniel Borkmann   ebpf: constify va...
64
  const struct bpf_func_proto bpf_map_delete_elem_proto = {
3324b584b   Daniel Borkmann   ebpf: misc core c...
65
66
  	.func		= bpf_map_delete_elem,
  	.gpl_only	= false,
36bbef52c   Daniel Borkmann   bpf: direct packe...
67
  	.pkt_access	= true,
3324b584b   Daniel Borkmann   ebpf: misc core c...
68
69
70
  	.ret_type	= RET_INTEGER,
  	.arg1_type	= ARG_CONST_MAP_PTR,
  	.arg2_type	= ARG_PTR_TO_MAP_KEY,
d0003ec01   Alexei Starovoitov   bpf: allow eBPF p...
71
  };
03e69b508   Daniel Borkmann   ebpf: add prandom...
72

03e69b508   Daniel Borkmann   ebpf: add prandom...
73
  const struct bpf_func_proto bpf_get_prandom_u32_proto = {
3ad004057   Daniel Borkmann   bpf: split state ...
74
  	.func		= bpf_user_rnd_u32,
03e69b508   Daniel Borkmann   ebpf: add prandom...
75
76
77
  	.gpl_only	= false,
  	.ret_type	= RET_INTEGER,
  };
/* Return the id of the CPU the program is currently executing on. */
BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
/* Return the monotonic clock in nanoseconds. */
BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,	/* timekeeping internals are GPL-only */
	.ret_type	= RET_INTEGER,
};
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
101

f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
102
  BPF_CALL_0(bpf_get_current_pid_tgid)
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
103
104
  {
  	struct task_struct *task = current;
6088b5823   Daniel Borkmann   bpf: minor cleanu...
105
  	if (unlikely(!task))
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
106
107
108
109
110
111
112
113
114
115
  		return -EINVAL;
  
  	return (u64) task->tgid << 32 | task->pid;
  }
  
  const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
  	.func		= bpf_get_current_pid_tgid,
  	.gpl_only	= false,
  	.ret_type	= RET_INTEGER,
  };
f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
116
  BPF_CALL_0(bpf_get_current_uid_gid)
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
117
118
119
120
  {
  	struct task_struct *task = current;
  	kuid_t uid;
  	kgid_t gid;
6088b5823   Daniel Borkmann   bpf: minor cleanu...
121
  	if (unlikely(!task))
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
122
123
124
125
  		return -EINVAL;
  
  	current_uid_gid(&uid, &gid);
  	return (u64) from_kgid(&init_user_ns, gid) << 32 |
6088b5823   Daniel Borkmann   bpf: minor cleanu...
126
  		     from_kuid(&init_user_ns, uid);
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
127
128
129
130
131
132
133
  }
  
  const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
  	.func		= bpf_get_current_uid_gid,
  	.gpl_only	= false,
  	.ret_type	= RET_INTEGER,
  };
f3694e001   Daniel Borkmann   bpf: add BPF_CALL...
134
  BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
135
136
  {
  	struct task_struct *task = current;
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
137

074f528ee   Daniel Borkmann   bpf: convert rele...
138
139
  	if (unlikely(!task))
  		goto err_clear;
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
140

074f528ee   Daniel Borkmann   bpf: convert rele...
141
142
143
144
145
146
147
  	strncpy(buf, task->comm, size);
  
  	/* Verifier guarantees that size > 0. For task->comm exceeding
  	 * size, guarantee that buf is %NUL-terminated. Unconditionally
  	 * done here to save the size test.
  	 */
  	buf[size - 1] = 0;
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
148
  	return 0;
074f528ee   Daniel Borkmann   bpf: convert rele...
149
150
151
  err_clear:
  	memset(buf, 0, size);
  	return -EINVAL;
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
152
153
154
155
156
157
  }
  
  const struct bpf_func_proto bpf_get_current_comm_proto = {
  	.func		= bpf_get_current_comm,
  	.gpl_only	= false,
  	.ret_type	= RET_INTEGER,
074f528ee   Daniel Borkmann   bpf: convert rele...
158
  	.arg1_type	= ARG_PTR_TO_RAW_STACK,
ffeedafbf   Alexei Starovoitov   bpf: introduce cu...
159
160
  	.arg2_type	= ARG_CONST_STACK_SIZE,
  };