kernel/bpf/helpers.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

/* If kernel subsystem is allowing eBPF programs to call this function,
 * inside its own verifier_ops->get_func_proto() callback it should return
 * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if program is allowed to access maps, so check rcu_read_lock_held in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
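
/* Illustrative sketch, not part of this file: a subsystem that wants its
 * eBPF programs to use the map helpers would typically hand these protos
 * back to the verifier from its verifier_ops->get_func_proto() callback.
 * The callback name below is hypothetical; the BPF_FUNC_* ids come from
 * include/uapi/linux/bpf.h.
 *
 *	static const struct bpf_func_proto *
 *	sample_func_proto(enum bpf_func_id func_id)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */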
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}
const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}
const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
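
/* Program-side view (sketch, not from this file): the verifier checks calls
 * against the protos above, so a lookup yields either a pointer into the map
 * value or NULL and must be tested before use. The map and key names here are
 * hypothetical.
 *
 *	long *value = bpf_map_lookup_elem(&my_map, &key);
 *	if (value)
 *		__sync_fetch_and_add(value, 1);
 *	else
 *		bpf_map_update_elem(&my_map, &key, &init_val, BPF_ANY);
 */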

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;
	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
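
/* Sketch, not from this file: the packed return value above carries the tgid
 * in the upper 32 bits and the pid in the lower 32 bits, so a program would
 * typically unpack it as:
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;
 *	u32 pid = (u32) id;
 */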
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;
	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
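
/* Sketch, not from this file: program-side use of the helper above. Per the
 * ARG_PTR_TO_RAW_STACK / ARG_CONST_STACK_SIZE proto, the verifier requires a
 * buffer on the program stack with a constant size, e.g.:
 *
 *	char comm[16];
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 */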