Commit 04df41e343db9ca91a278ea14606bbaaf0491f2e
Committed by
David S. Miller
1 parent
6c4a01b278
bpf: update tools/include/uapi/linux/bpf.h
Update tools/include/uapi/linux/bpf.h to include changes related to the new bpf sock_ops program type. Signed-off-by: Lawrence Brakmo <brakmo@fb.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 1 changed file with 65 additions and 1 deletions Inline Diff
tools/include/uapi/linux/bpf.h
1 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com | 1 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
2 | * | 2 | * |
3 | * This program is free software; you can redistribute it and/or | 3 | * This program is free software; you can redistribute it and/or |
4 | * modify it under the terms of version 2 of the GNU General Public | 4 | * modify it under the terms of version 2 of the GNU General Public |
5 | * License as published by the Free Software Foundation. | 5 | * License as published by the Free Software Foundation. |
6 | */ | 6 | */ |
7 | #ifndef _UAPI__LINUX_BPF_H__ | 7 | #ifndef _UAPI__LINUX_BPF_H__ |
8 | #define _UAPI__LINUX_BPF_H__ | 8 | #define _UAPI__LINUX_BPF_H__ |
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/bpf_common.h> | 11 | #include <linux/bpf_common.h> |
12 | 12 | ||
13 | /* Extended instruction set based on top of classic BPF */ | 13 | /* Extended instruction set based on top of classic BPF */ |
14 | 14 | ||
15 | /* instruction classes */ | 15 | /* instruction classes */ |
16 | #define BPF_ALU64 0x07 /* alu mode in double word width */ | 16 | #define BPF_ALU64 0x07 /* alu mode in double word width */ |
17 | 17 | ||
18 | /* ld/ldx fields */ | 18 | /* ld/ldx fields */ |
19 | #define BPF_DW 0x18 /* double word */ | 19 | #define BPF_DW 0x18 /* double word */ |
20 | #define BPF_XADD 0xc0 /* exclusive add */ | 20 | #define BPF_XADD 0xc0 /* exclusive add */ |
21 | 21 | ||
22 | /* alu/jmp fields */ | 22 | /* alu/jmp fields */ |
23 | #define BPF_MOV 0xb0 /* mov reg to reg */ | 23 | #define BPF_MOV 0xb0 /* mov reg to reg */ |
24 | #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ | 24 | #define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ |
25 | 25 | ||
26 | /* change endianness of a register */ | 26 | /* change endianness of a register */ |
27 | #define BPF_END 0xd0 /* flags for endianness conversion: */ | 27 | #define BPF_END 0xd0 /* flags for endianness conversion: */ |
28 | #define BPF_TO_LE 0x00 /* convert to little-endian */ | 28 | #define BPF_TO_LE 0x00 /* convert to little-endian */ |
29 | #define BPF_TO_BE 0x08 /* convert to big-endian */ | 29 | #define BPF_TO_BE 0x08 /* convert to big-endian */ |
30 | #define BPF_FROM_LE BPF_TO_LE | 30 | #define BPF_FROM_LE BPF_TO_LE |
31 | #define BPF_FROM_BE BPF_TO_BE | 31 | #define BPF_FROM_BE BPF_TO_BE |
32 | 32 | ||
33 | #define BPF_JNE 0x50 /* jump != */ | 33 | #define BPF_JNE 0x50 /* jump != */ |
34 | #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ | 34 | #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ |
35 | #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ | 35 | #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ |
36 | #define BPF_CALL 0x80 /* function call */ | 36 | #define BPF_CALL 0x80 /* function call */ |
37 | #define BPF_EXIT 0x90 /* function return */ | 37 | #define BPF_EXIT 0x90 /* function return */ |
38 | 38 | ||
39 | /* Register numbers */ | 39 | /* Register numbers */ |
40 | enum { | 40 | enum { |
41 | BPF_REG_0 = 0, | 41 | BPF_REG_0 = 0, |
42 | BPF_REG_1, | 42 | BPF_REG_1, |
43 | BPF_REG_2, | 43 | BPF_REG_2, |
44 | BPF_REG_3, | 44 | BPF_REG_3, |
45 | BPF_REG_4, | 45 | BPF_REG_4, |
46 | BPF_REG_5, | 46 | BPF_REG_5, |
47 | BPF_REG_6, | 47 | BPF_REG_6, |
48 | BPF_REG_7, | 48 | BPF_REG_7, |
49 | BPF_REG_8, | 49 | BPF_REG_8, |
50 | BPF_REG_9, | 50 | BPF_REG_9, |
51 | BPF_REG_10, | 51 | BPF_REG_10, |
52 | __MAX_BPF_REG, | 52 | __MAX_BPF_REG, |
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* BPF has 10 general purpose 64-bit registers and stack frame. */ | 55 | /* BPF has 10 general purpose 64-bit registers and stack frame. */ |
56 | #define MAX_BPF_REG __MAX_BPF_REG | 56 | #define MAX_BPF_REG __MAX_BPF_REG |
57 | 57 | ||
58 | struct bpf_insn { | 58 | struct bpf_insn { |
59 | __u8 code; /* opcode */ | 59 | __u8 code; /* opcode */ |
60 | __u8 dst_reg:4; /* dest register */ | 60 | __u8 dst_reg:4; /* dest register */ |
61 | __u8 src_reg:4; /* source register */ | 61 | __u8 src_reg:4; /* source register */ |
62 | __s16 off; /* signed offset */ | 62 | __s16 off; /* signed offset */ |
63 | __s32 imm; /* signed immediate constant */ | 63 | __s32 imm; /* signed immediate constant */ |
64 | }; | 64 | }; |
65 | 65 | ||
66 | /* Key of a BPF_MAP_TYPE_LPM_TRIE entry */ | 66 | /* Key of a BPF_MAP_TYPE_LPM_TRIE entry */ |
67 | struct bpf_lpm_trie_key { | 67 | struct bpf_lpm_trie_key { |
68 | __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ | 68 | __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ |
69 | __u8 data[0]; /* Arbitrary size */ | 69 | __u8 data[0]; /* Arbitrary size */ |
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* BPF syscall commands, see bpf(2) man-page for details. */ | 72 | /* BPF syscall commands, see bpf(2) man-page for details. */ |
73 | enum bpf_cmd { | 73 | enum bpf_cmd { |
74 | BPF_MAP_CREATE, | 74 | BPF_MAP_CREATE, |
75 | BPF_MAP_LOOKUP_ELEM, | 75 | BPF_MAP_LOOKUP_ELEM, |
76 | BPF_MAP_UPDATE_ELEM, | 76 | BPF_MAP_UPDATE_ELEM, |
77 | BPF_MAP_DELETE_ELEM, | 77 | BPF_MAP_DELETE_ELEM, |
78 | BPF_MAP_GET_NEXT_KEY, | 78 | BPF_MAP_GET_NEXT_KEY, |
79 | BPF_PROG_LOAD, | 79 | BPF_PROG_LOAD, |
80 | BPF_OBJ_PIN, | 80 | BPF_OBJ_PIN, |
81 | BPF_OBJ_GET, | 81 | BPF_OBJ_GET, |
82 | BPF_PROG_ATTACH, | 82 | BPF_PROG_ATTACH, |
83 | BPF_PROG_DETACH, | 83 | BPF_PROG_DETACH, |
84 | BPF_PROG_TEST_RUN, | 84 | BPF_PROG_TEST_RUN, |
85 | BPF_PROG_GET_NEXT_ID, | 85 | BPF_PROG_GET_NEXT_ID, |
86 | BPF_MAP_GET_NEXT_ID, | 86 | BPF_MAP_GET_NEXT_ID, |
87 | BPF_PROG_GET_FD_BY_ID, | 87 | BPF_PROG_GET_FD_BY_ID, |
88 | BPF_MAP_GET_FD_BY_ID, | 88 | BPF_MAP_GET_FD_BY_ID, |
89 | BPF_OBJ_GET_INFO_BY_FD, | 89 | BPF_OBJ_GET_INFO_BY_FD, |
90 | }; | 90 | }; |
91 | 91 | ||
92 | enum bpf_map_type { | 92 | enum bpf_map_type { |
93 | BPF_MAP_TYPE_UNSPEC, | 93 | BPF_MAP_TYPE_UNSPEC, |
94 | BPF_MAP_TYPE_HASH, | 94 | BPF_MAP_TYPE_HASH, |
95 | BPF_MAP_TYPE_ARRAY, | 95 | BPF_MAP_TYPE_ARRAY, |
96 | BPF_MAP_TYPE_PROG_ARRAY, | 96 | BPF_MAP_TYPE_PROG_ARRAY, |
97 | BPF_MAP_TYPE_PERF_EVENT_ARRAY, | 97 | BPF_MAP_TYPE_PERF_EVENT_ARRAY, |
98 | BPF_MAP_TYPE_PERCPU_HASH, | 98 | BPF_MAP_TYPE_PERCPU_HASH, |
99 | BPF_MAP_TYPE_PERCPU_ARRAY, | 99 | BPF_MAP_TYPE_PERCPU_ARRAY, |
100 | BPF_MAP_TYPE_STACK_TRACE, | 100 | BPF_MAP_TYPE_STACK_TRACE, |
101 | BPF_MAP_TYPE_CGROUP_ARRAY, | 101 | BPF_MAP_TYPE_CGROUP_ARRAY, |
102 | BPF_MAP_TYPE_LRU_HASH, | 102 | BPF_MAP_TYPE_LRU_HASH, |
103 | BPF_MAP_TYPE_LRU_PERCPU_HASH, | 103 | BPF_MAP_TYPE_LRU_PERCPU_HASH, |
104 | BPF_MAP_TYPE_LPM_TRIE, | 104 | BPF_MAP_TYPE_LPM_TRIE, |
105 | BPF_MAP_TYPE_ARRAY_OF_MAPS, | 105 | BPF_MAP_TYPE_ARRAY_OF_MAPS, |
106 | BPF_MAP_TYPE_HASH_OF_MAPS, | 106 | BPF_MAP_TYPE_HASH_OF_MAPS, |
107 | }; | 107 | }; |
108 | 108 | ||
109 | enum bpf_prog_type { | 109 | enum bpf_prog_type { |
110 | BPF_PROG_TYPE_UNSPEC, | 110 | BPF_PROG_TYPE_UNSPEC, |
111 | BPF_PROG_TYPE_SOCKET_FILTER, | 111 | BPF_PROG_TYPE_SOCKET_FILTER, |
112 | BPF_PROG_TYPE_KPROBE, | 112 | BPF_PROG_TYPE_KPROBE, |
113 | BPF_PROG_TYPE_SCHED_CLS, | 113 | BPF_PROG_TYPE_SCHED_CLS, |
114 | BPF_PROG_TYPE_SCHED_ACT, | 114 | BPF_PROG_TYPE_SCHED_ACT, |
115 | BPF_PROG_TYPE_TRACEPOINT, | 115 | BPF_PROG_TYPE_TRACEPOINT, |
116 | BPF_PROG_TYPE_XDP, | 116 | BPF_PROG_TYPE_XDP, |
117 | BPF_PROG_TYPE_PERF_EVENT, | 117 | BPF_PROG_TYPE_PERF_EVENT, |
118 | BPF_PROG_TYPE_CGROUP_SKB, | 118 | BPF_PROG_TYPE_CGROUP_SKB, |
119 | BPF_PROG_TYPE_CGROUP_SOCK, | 119 | BPF_PROG_TYPE_CGROUP_SOCK, |
120 | BPF_PROG_TYPE_LWT_IN, | 120 | BPF_PROG_TYPE_LWT_IN, |
121 | BPF_PROG_TYPE_LWT_OUT, | 121 | BPF_PROG_TYPE_LWT_OUT, |
122 | BPF_PROG_TYPE_LWT_XMIT, | 122 | BPF_PROG_TYPE_LWT_XMIT, |
123 | BPF_PROG_TYPE_SOCK_OPS, | ||
123 | }; | 124 | }; |
124 | 125 | ||
125 | enum bpf_attach_type { | 126 | enum bpf_attach_type { |
126 | BPF_CGROUP_INET_INGRESS, | 127 | BPF_CGROUP_INET_INGRESS, |
127 | BPF_CGROUP_INET_EGRESS, | 128 | BPF_CGROUP_INET_EGRESS, |
128 | BPF_CGROUP_INET_SOCK_CREATE, | 129 | BPF_CGROUP_INET_SOCK_CREATE, |
130 | BPF_CGROUP_SOCK_OPS, | ||
129 | __MAX_BPF_ATTACH_TYPE | 131 | __MAX_BPF_ATTACH_TYPE |
130 | }; | 132 | }; |
131 | 133 | ||
132 | #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE | 134 | #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE |
133 | 135 | ||
134 | /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command | 136 | /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command |
135 | * to the given target_fd cgroup the descendant cgroup will be able to | 137 | * to the given target_fd cgroup the descendant cgroup will be able to |
136 | * override effective bpf program that was inherited from this cgroup | 138 | * override effective bpf program that was inherited from this cgroup |
137 | */ | 139 | */ |
138 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) | 140 | #define BPF_F_ALLOW_OVERRIDE (1U << 0) |
139 | 141 | ||
140 | /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the | 142 | /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the |
141 | * verifier will perform strict alignment checking as if the kernel | 143 | * verifier will perform strict alignment checking as if the kernel |
142 | * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, | 144 | * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, |
143 | * and NET_IP_ALIGN defined to 2. | 145 | * and NET_IP_ALIGN defined to 2. |
144 | */ | 146 | */ |
145 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) | 147 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) |
146 | 148 | ||
147 | #define BPF_PSEUDO_MAP_FD 1 | 149 | #define BPF_PSEUDO_MAP_FD 1 |
148 | 150 | ||
149 | /* flags for BPF_MAP_UPDATE_ELEM command */ | 151 | /* flags for BPF_MAP_UPDATE_ELEM command */ |
150 | #define BPF_ANY 0 /* create new element or update existing */ | 152 | #define BPF_ANY 0 /* create new element or update existing */ |
151 | #define BPF_NOEXIST 1 /* create new element if it didn't exist */ | 153 | #define BPF_NOEXIST 1 /* create new element if it didn't exist */ |
152 | #define BPF_EXIST 2 /* update existing element */ | 154 | #define BPF_EXIST 2 /* update existing element */ |
153 | 155 | ||
154 | #define BPF_F_NO_PREALLOC (1U << 0) | 156 | #define BPF_F_NO_PREALLOC (1U << 0) |
155 | /* Instead of having one common LRU list in the | 157 | /* Instead of having one common LRU list in the |
156 | * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list | 158 | * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list |
157 | * which can scale and perform better. | 159 | * which can scale and perform better. |
158 | * Note, the LRU nodes (including free nodes) cannot be moved | 160 | * Note, the LRU nodes (including free nodes) cannot be moved |
159 | * across different LRU lists. | 161 | * across different LRU lists. |
160 | */ | 162 | */ |
161 | #define BPF_F_NO_COMMON_LRU (1U << 1) | 163 | #define BPF_F_NO_COMMON_LRU (1U << 1) |
162 | 164 | ||
163 | union bpf_attr { | 165 | union bpf_attr { |
164 | struct { /* anonymous struct used by BPF_MAP_CREATE command */ | 166 | struct { /* anonymous struct used by BPF_MAP_CREATE command */ |
165 | __u32 map_type; /* one of enum bpf_map_type */ | 167 | __u32 map_type; /* one of enum bpf_map_type */ |
166 | __u32 key_size; /* size of key in bytes */ | 168 | __u32 key_size; /* size of key in bytes */ |
167 | __u32 value_size; /* size of value in bytes */ | 169 | __u32 value_size; /* size of value in bytes */ |
168 | __u32 max_entries; /* max number of entries in a map */ | 170 | __u32 max_entries; /* max number of entries in a map */ |
169 | __u32 map_flags; /* prealloc or not */ | 171 | __u32 map_flags; /* prealloc or not */ |
170 | __u32 inner_map_fd; /* fd pointing to the inner map */ | 172 | __u32 inner_map_fd; /* fd pointing to the inner map */ |
171 | }; | 173 | }; |
172 | 174 | ||
173 | struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ | 175 | struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ |
174 | __u32 map_fd; | 176 | __u32 map_fd; |
175 | __aligned_u64 key; | 177 | __aligned_u64 key; |
176 | union { | 178 | union { |
177 | __aligned_u64 value; | 179 | __aligned_u64 value; |
178 | __aligned_u64 next_key; | 180 | __aligned_u64 next_key; |
179 | }; | 181 | }; |
180 | __u64 flags; | 182 | __u64 flags; |
181 | }; | 183 | }; |
182 | 184 | ||
183 | struct { /* anonymous struct used by BPF_PROG_LOAD command */ | 185 | struct { /* anonymous struct used by BPF_PROG_LOAD command */ |
184 | __u32 prog_type; /* one of enum bpf_prog_type */ | 186 | __u32 prog_type; /* one of enum bpf_prog_type */ |
185 | __u32 insn_cnt; | 187 | __u32 insn_cnt; |
186 | __aligned_u64 insns; | 188 | __aligned_u64 insns; |
187 | __aligned_u64 license; | 189 | __aligned_u64 license; |
188 | __u32 log_level; /* verbosity level of verifier */ | 190 | __u32 log_level; /* verbosity level of verifier */ |
189 | __u32 log_size; /* size of user buffer */ | 191 | __u32 log_size; /* size of user buffer */ |
190 | __aligned_u64 log_buf; /* user supplied buffer */ | 192 | __aligned_u64 log_buf; /* user supplied buffer */ |
191 | __u32 kern_version; /* checked when prog_type=kprobe */ | 193 | __u32 kern_version; /* checked when prog_type=kprobe */ |
192 | __u32 prog_flags; | 194 | __u32 prog_flags; |
193 | }; | 195 | }; |
194 | 196 | ||
195 | struct { /* anonymous struct used by BPF_OBJ_* commands */ | 197 | struct { /* anonymous struct used by BPF_OBJ_* commands */ |
196 | __aligned_u64 pathname; | 198 | __aligned_u64 pathname; |
197 | __u32 bpf_fd; | 199 | __u32 bpf_fd; |
198 | }; | 200 | }; |
199 | 201 | ||
200 | struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ | 202 | struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ |
201 | __u32 target_fd; /* container object to attach to */ | 203 | __u32 target_fd; /* container object to attach to */ |
202 | __u32 attach_bpf_fd; /* eBPF program to attach */ | 204 | __u32 attach_bpf_fd; /* eBPF program to attach */ |
203 | __u32 attach_type; | 205 | __u32 attach_type; |
204 | __u32 attach_flags; | 206 | __u32 attach_flags; |
205 | }; | 207 | }; |
206 | 208 | ||
207 | struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ | 209 | struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ |
208 | __u32 prog_fd; | 210 | __u32 prog_fd; |
209 | __u32 retval; | 211 | __u32 retval; |
210 | __u32 data_size_in; | 212 | __u32 data_size_in; |
211 | __u32 data_size_out; | 213 | __u32 data_size_out; |
212 | __aligned_u64 data_in; | 214 | __aligned_u64 data_in; |
213 | __aligned_u64 data_out; | 215 | __aligned_u64 data_out; |
214 | __u32 repeat; | 216 | __u32 repeat; |
215 | __u32 duration; | 217 | __u32 duration; |
216 | } test; | 218 | } test; |
217 | 219 | ||
218 | struct { /* anonymous struct used by BPF_*_GET_*_ID */ | 220 | struct { /* anonymous struct used by BPF_*_GET_*_ID */ |
219 | union { | 221 | union { |
220 | __u32 start_id; | 222 | __u32 start_id; |
221 | __u32 prog_id; | 223 | __u32 prog_id; |
222 | __u32 map_id; | 224 | __u32 map_id; |
223 | }; | 225 | }; |
224 | __u32 next_id; | 226 | __u32 next_id; |
225 | }; | 227 | }; |
226 | 228 | ||
227 | struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ | 229 | struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ |
228 | __u32 bpf_fd; | 230 | __u32 bpf_fd; |
229 | __u32 info_len; | 231 | __u32 info_len; |
230 | __aligned_u64 info; | 232 | __aligned_u64 info; |
231 | } info; | 233 | } info; |
232 | } __attribute__((aligned(8))); | 234 | } __attribute__((aligned(8))); |
233 | 235 | ||
234 | /* BPF helper function descriptions: | 236 | /* BPF helper function descriptions: |
235 | * | 237 | * |
236 | * void *bpf_map_lookup_elem(&map, &key) | 238 | * void *bpf_map_lookup_elem(&map, &key) |
237 | * Return: Map value or NULL | 239 | * Return: Map value or NULL |
238 | * | 240 | * |
239 | * int bpf_map_update_elem(&map, &key, &value, flags) | 241 | * int bpf_map_update_elem(&map, &key, &value, flags) |
240 | * Return: 0 on success or negative error | 242 | * Return: 0 on success or negative error |
241 | * | 243 | * |
242 | * int bpf_map_delete_elem(&map, &key) | 244 | * int bpf_map_delete_elem(&map, &key) |
243 | * Return: 0 on success or negative error | 245 | * Return: 0 on success or negative error |
244 | * | 246 | * |
245 | * int bpf_probe_read(void *dst, int size, void *src) | 247 | * int bpf_probe_read(void *dst, int size, void *src) |
246 | * Return: 0 on success or negative error | 248 | * Return: 0 on success or negative error |
247 | * | 249 | * |
248 | * u64 bpf_ktime_get_ns(void) | 250 | * u64 bpf_ktime_get_ns(void) |
249 | * Return: current ktime | 251 | * Return: current ktime |
250 | * | 252 | * |
251 | * int bpf_trace_printk(const char *fmt, int fmt_size, ...) | 253 | * int bpf_trace_printk(const char *fmt, int fmt_size, ...) |
252 | * Return: length of buffer written or negative error | 254 | * Return: length of buffer written or negative error |
253 | * | 255 | * |
254 | * u32 bpf_prandom_u32(void) | 256 | * u32 bpf_prandom_u32(void) |
255 | * Return: random value | 257 | * Return: random value |
256 | * | 258 | * |
257 | * u32 bpf_raw_smp_processor_id(void) | 259 | * u32 bpf_raw_smp_processor_id(void) |
258 | * Return: SMP processor ID | 260 | * Return: SMP processor ID |
259 | * | 261 | * |
260 | * int bpf_skb_store_bytes(skb, offset, from, len, flags) | 262 | * int bpf_skb_store_bytes(skb, offset, from, len, flags) |
261 | * store bytes into packet | 263 | * store bytes into packet |
262 | * @skb: pointer to skb | 264 | * @skb: pointer to skb |
263 | * @offset: offset within packet from skb->mac_header | 265 | * @offset: offset within packet from skb->mac_header |
264 | * @from: pointer where to copy bytes from | 266 | * @from: pointer where to copy bytes from |
265 | * @len: number of bytes to store into packet | 267 | * @len: number of bytes to store into packet |
266 | * @flags: bit 0 - if true, recompute skb->csum | 268 | * @flags: bit 0 - if true, recompute skb->csum |
267 | * other bits - reserved | 269 | * other bits - reserved |
268 | * Return: 0 on success or negative error | 270 | * Return: 0 on success or negative error |
269 | * | 271 | * |
270 | * int bpf_l3_csum_replace(skb, offset, from, to, flags) | 272 | * int bpf_l3_csum_replace(skb, offset, from, to, flags) |
271 | * recompute IP checksum | 273 | * recompute IP checksum |
272 | * @skb: pointer to skb | 274 | * @skb: pointer to skb |
273 | * @offset: offset within packet where IP checksum is located | 275 | * @offset: offset within packet where IP checksum is located |
274 | * @from: old value of header field | 276 | * @from: old value of header field |
275 | * @to: new value of header field | 277 | * @to: new value of header field |
276 | * @flags: bits 0-3 - size of header field | 278 | * @flags: bits 0-3 - size of header field |
277 | * other bits - reserved | 279 | * other bits - reserved |
278 | * Return: 0 on success or negative error | 280 | * Return: 0 on success or negative error |
279 | * | 281 | * |
280 | * int bpf_l4_csum_replace(skb, offset, from, to, flags) | 282 | * int bpf_l4_csum_replace(skb, offset, from, to, flags) |
281 | * recompute TCP/UDP checksum | 283 | * recompute TCP/UDP checksum |
282 | * @skb: pointer to skb | 284 | * @skb: pointer to skb |
283 | * @offset: offset within packet where TCP/UDP checksum is located | 285 | * @offset: offset within packet where TCP/UDP checksum is located |
284 | * @from: old value of header field | 286 | * @from: old value of header field |
285 | * @to: new value of header field | 287 | * @to: new value of header field |
286 | * @flags: bits 0-3 - size of header field | 288 | * @flags: bits 0-3 - size of header field |
287 | * bit 4 - is pseudo header | 289 | * bit 4 - is pseudo header |
288 | * other bits - reserved | 290 | * other bits - reserved |
289 | * Return: 0 on success or negative error | 291 | * Return: 0 on success or negative error |
290 | * | 292 | * |
291 | * int bpf_tail_call(ctx, prog_array_map, index) | 293 | * int bpf_tail_call(ctx, prog_array_map, index) |
292 | * jump into another BPF program | 294 | * jump into another BPF program |
293 | * @ctx: context pointer passed to next program | 295 | * @ctx: context pointer passed to next program |
294 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY | 296 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY |
295 | * @index: index inside array that selects specific program to run | 297 | * @index: index inside array that selects specific program to run |
296 | * Return: 0 on success or negative error | 298 | * Return: 0 on success or negative error |
297 | * | 299 | * |
298 | * int bpf_clone_redirect(skb, ifindex, flags) | 300 | * int bpf_clone_redirect(skb, ifindex, flags) |
299 | * redirect to another netdev | 301 | * redirect to another netdev |
300 | * @skb: pointer to skb | 302 | * @skb: pointer to skb |
301 | * @ifindex: ifindex of the net device | 303 | * @ifindex: ifindex of the net device |
302 | * @flags: bit 0 - if set, redirect to ingress instead of egress | 304 | * @flags: bit 0 - if set, redirect to ingress instead of egress |
303 | * other bits - reserved | 305 | * other bits - reserved |
304 | * Return: 0 on success or negative error | 306 | * Return: 0 on success or negative error |
305 | * | 307 | * |
306 | * u64 bpf_get_current_pid_tgid(void) | 308 | * u64 bpf_get_current_pid_tgid(void) |
307 | * Return: current->tgid << 32 | current->pid | 309 | * Return: current->tgid << 32 | current->pid |
308 | * | 310 | * |
309 | * u64 bpf_get_current_uid_gid(void) | 311 | * u64 bpf_get_current_uid_gid(void) |
310 | * Return: current_gid << 32 | current_uid | 312 | * Return: current_gid << 32 | current_uid |
311 | * | 313 | * |
312 | * int bpf_get_current_comm(char *buf, int size_of_buf) | 314 | * int bpf_get_current_comm(char *buf, int size_of_buf) |
313 | * stores current->comm into buf | 315 | * stores current->comm into buf |
314 | * Return: 0 on success or negative error | 316 | * Return: 0 on success or negative error |
315 | * | 317 | * |
316 | * u32 bpf_get_cgroup_classid(skb) | 318 | * u32 bpf_get_cgroup_classid(skb) |
317 | * retrieve a proc's classid | 319 | * retrieve a proc's classid |
318 | * @skb: pointer to skb | 320 | * @skb: pointer to skb |
319 | * Return: classid if != 0 | 321 | * Return: classid if != 0 |
320 | * | 322 | * |
321 | * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) | 323 | * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) |
322 | * Return: 0 on success or negative error | 324 | * Return: 0 on success or negative error |
323 | * | 325 | * |
324 | * int bpf_skb_vlan_pop(skb) | 326 | * int bpf_skb_vlan_pop(skb) |
325 | * Return: 0 on success or negative error | 327 | * Return: 0 on success or negative error |
326 | * | 328 | * |
327 | * int bpf_skb_get_tunnel_key(skb, key, size, flags) | 329 | * int bpf_skb_get_tunnel_key(skb, key, size, flags) |
328 | * int bpf_skb_set_tunnel_key(skb, key, size, flags) | 330 | * int bpf_skb_set_tunnel_key(skb, key, size, flags) |
329 | * retrieve or populate tunnel metadata | 331 | * retrieve or populate tunnel metadata |
330 | * @skb: pointer to skb | 332 | * @skb: pointer to skb |
331 | * @key: pointer to 'struct bpf_tunnel_key' | 333 | * @key: pointer to 'struct bpf_tunnel_key' |
332 | * @size: size of 'struct bpf_tunnel_key' | 334 | * @size: size of 'struct bpf_tunnel_key' |
333 | * @flags: room for future extensions | 335 | * @flags: room for future extensions |
334 | * Return: 0 on success or negative error | 336 | * Return: 0 on success or negative error |
335 | * | 337 | * |
336 | * u64 bpf_perf_event_read(map, flags) | 338 | * u64 bpf_perf_event_read(map, flags) |
337 | * read perf event counter value | 339 | * read perf event counter value |
338 | * @map: pointer to perf_event_array map | 340 | * @map: pointer to perf_event_array map |
339 | * @flags: index of event in the map or bitmask flags | 341 | * @flags: index of event in the map or bitmask flags |
340 | * Return: value of perf event counter read or error code | 342 | * Return: value of perf event counter read or error code |
341 | * | 343 | * |
342 | * int bpf_redirect(ifindex, flags) | 344 | * int bpf_redirect(ifindex, flags) |
343 | * redirect to another netdev | 345 | * redirect to another netdev |
344 | * @ifindex: ifindex of the net device | 346 | * @ifindex: ifindex of the net device |
345 | * @flags: bit 0 - if set, redirect to ingress instead of egress | 347 | * @flags: bit 0 - if set, redirect to ingress instead of egress |
346 | * other bits - reserved | 348 | * other bits - reserved |
347 | * Return: TC_ACT_REDIRECT | 349 | * Return: TC_ACT_REDIRECT |
348 | * | 350 | * |
349 | * u32 bpf_get_route_realm(skb) | 351 | * u32 bpf_get_route_realm(skb) |
350 | * retrieve a dst's tclassid | 352 | * retrieve a dst's tclassid |
351 | * @skb: pointer to skb | 353 | * @skb: pointer to skb |
352 | * Return: realm if != 0 | 354 | * Return: realm if != 0 |
353 | * | 355 | * |
354 | * int bpf_perf_event_output(ctx, map, flags, data, size) | 356 | * int bpf_perf_event_output(ctx, map, flags, data, size) |
355 | * output perf raw sample | 357 | * output perf raw sample |
356 | * @ctx: struct pt_regs* | 358 | * @ctx: struct pt_regs* |
357 | * @map: pointer to perf_event_array map | 359 | * @map: pointer to perf_event_array map |
358 | * @flags: index of event in the map or bitmask flags | 360 | * @flags: index of event in the map or bitmask flags |
359 | * @data: data on stack to be output as raw data | 361 | * @data: data on stack to be output as raw data |
360 | * @size: size of data | 362 | * @size: size of data |
361 | * Return: 0 on success or negative error | 363 | * Return: 0 on success or negative error |
362 | * | 364 | * |
363 | * int bpf_get_stackid(ctx, map, flags) | 365 | * int bpf_get_stackid(ctx, map, flags) |
364 | * walk user or kernel stack and return id | 366 | * walk user or kernel stack and return id |
365 | * @ctx: struct pt_regs* | 367 | * @ctx: struct pt_regs* |
366 | * @map: pointer to stack_trace map | 368 | * @map: pointer to stack_trace map |
367 | * @flags: bits 0-7 - number of stack frames to skip | 369 | * @flags: bits 0-7 - number of stack frames to skip |
368 | * bit 8 - collect user stack instead of kernel | 370 | * bit 8 - collect user stack instead of kernel |
369 | * bit 9 - compare stacks by hash only | 371 | * bit 9 - compare stacks by hash only |
370 | * bit 10 - if two different stacks hash into the same stackid | 372 | * bit 10 - if two different stacks hash into the same stackid |
371 | * discard old | 373 | * discard old |
372 | * other bits - reserved | 374 | * other bits - reserved |
373 | * Return: >= 0 stackid on success or negative error | 375 | * Return: >= 0 stackid on success or negative error |
374 | * | 376 | * |
375 | * s64 bpf_csum_diff(from, from_size, to, to_size, seed) | 377 | * s64 bpf_csum_diff(from, from_size, to, to_size, seed) |
376 | * calculate csum diff | 378 | * calculate csum diff |
377 | * @from: raw from buffer | 379 | * @from: raw from buffer |
378 | * @from_size: length of from buffer | 380 | * @from_size: length of from buffer |
379 | * @to: raw to buffer | 381 | * @to: raw to buffer |
380 | * @to_size: length of to buffer | 382 | * @to_size: length of to buffer |
381 | * @seed: optional seed | 383 | * @seed: optional seed |
382 | * Return: csum result or negative error code | 384 | * Return: csum result or negative error code |
383 | * | 385 | * |
384 | * int bpf_skb_get_tunnel_opt(skb, opt, size) | 386 | * int bpf_skb_get_tunnel_opt(skb, opt, size) |
385 | * retrieve tunnel options metadata | 387 | * retrieve tunnel options metadata |
386 | * @skb: pointer to skb | 388 | * @skb: pointer to skb |
387 | * @opt: pointer to raw tunnel option data | 389 | * @opt: pointer to raw tunnel option data |
388 | * @size: size of @opt | 390 | * @size: size of @opt |
389 | * Return: option size | 391 | * Return: option size |
390 | * | 392 | * |
391 | * int bpf_skb_set_tunnel_opt(skb, opt, size) | 393 | * int bpf_skb_set_tunnel_opt(skb, opt, size) |
392 | * populate tunnel options metadata | 394 | * populate tunnel options metadata |
393 | * @skb: pointer to skb | 395 | * @skb: pointer to skb |
394 | * @opt: pointer to raw tunnel option data | 396 | * @opt: pointer to raw tunnel option data |
395 | * @size: size of @opt | 397 | * @size: size of @opt |
396 | * Return: 0 on success or negative error | 398 | * Return: 0 on success or negative error |
397 | * | 399 | * |
398 | * int bpf_skb_change_proto(skb, proto, flags) | 400 | * int bpf_skb_change_proto(skb, proto, flags) |
399 | * Change protocol of the skb. Currently supported is v4 -> v6, | 401 | * Change protocol of the skb. Currently supported is v4 -> v6, |
400 | * v6 -> v4 transitions. The helper will also resize the skb. eBPF | 402 | * v6 -> v4 transitions. The helper will also resize the skb. eBPF |
401 | * program is expected to fill the new headers via skb_store_bytes | 403 | * program is expected to fill the new headers via skb_store_bytes |
402 | * and lX_csum_replace. | 404 | * and lX_csum_replace. |
403 | * @skb: pointer to skb | 405 | * @skb: pointer to skb |
404 | * @proto: new skb->protocol type | 406 | * @proto: new skb->protocol type |
405 | * @flags: reserved | 407 | * @flags: reserved |
406 | * Return: 0 on success or negative error | 408 | * Return: 0 on success or negative error |
407 | * | 409 | * |
408 | * int bpf_skb_change_type(skb, type) | 410 | * int bpf_skb_change_type(skb, type) |
409 | * Change packet type of skb. | 411 | * Change packet type of skb. |
410 | * @skb: pointer to skb | 412 | * @skb: pointer to skb |
411 | * @type: new skb->pkt_type type | 413 | * @type: new skb->pkt_type type |
412 | * Return: 0 on success or negative error | 414 | * Return: 0 on success or negative error |
413 | * | 415 | * |
414 | * int bpf_skb_under_cgroup(skb, map, index) | 416 | * int bpf_skb_under_cgroup(skb, map, index) |
415 | * Check cgroup2 membership of skb | 417 | * Check cgroup2 membership of skb |
416 | * @skb: pointer to skb | 418 | * @skb: pointer to skb |
417 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type | 419 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type |
418 | * @index: index of the cgroup in the bpf_map | 420 | * @index: index of the cgroup in the bpf_map |
419 | * Return: | 421 | * Return: |
420 | * == 0 skb failed the cgroup2 descendant test | 422 | * == 0 skb failed the cgroup2 descendant test |
421 | * == 1 skb succeeded the cgroup2 descendant test | 423 | * == 1 skb succeeded the cgroup2 descendant test |
422 | * < 0 error | 424 | * < 0 error |
423 | * | 425 | * |
424 | * u32 bpf_get_hash_recalc(skb) | 426 | * u32 bpf_get_hash_recalc(skb) |
425 | * Retrieve and possibly recalculate skb->hash. | 427 | * Retrieve and possibly recalculate skb->hash. |
426 | * @skb: pointer to skb | 428 | * @skb: pointer to skb |
427 | * Return: hash | 429 | * Return: hash |
428 | * | 430 | * |
429 | * u64 bpf_get_current_task(void) | 431 | * u64 bpf_get_current_task(void) |
430 | * Returns current task_struct | 432 | * Returns current task_struct |
431 | * Return: current | 433 | * Return: current |
432 | * | 434 | * |
433 | * int bpf_probe_write_user(void *dst, void *src, int len) | 435 | * int bpf_probe_write_user(void *dst, void *src, int len) |
434 | * safely attempt to write to a location | 436 | * safely attempt to write to a location |
435 | * @dst: destination address in userspace | 437 | * @dst: destination address in userspace |
436 | * @src: source address on stack | 438 | * @src: source address on stack |
437 | * @len: number of bytes to copy | 439 | * @len: number of bytes to copy |
438 | * Return: 0 on success or negative error | 440 | * Return: 0 on success or negative error |
439 | * | 441 | * |
440 | * int bpf_current_task_under_cgroup(map, index) | 442 | * int bpf_current_task_under_cgroup(map, index) |
441 | * Check cgroup2 membership of current task | 443 | * Check cgroup2 membership of current task |
442 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type | 444 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type |
443 | * @index: index of the cgroup in the bpf_map | 445 | * @index: index of the cgroup in the bpf_map |
444 | * Return: | 446 | * Return: |
445 | * == 0 current failed the cgroup2 descendant test | 447 | * == 0 current failed the cgroup2 descendant test |
446 | * == 1 current succeeded the cgroup2 descendant test | 448 | * == 1 current succeeded the cgroup2 descendant test |
447 | * < 0 error | 449 | * < 0 error |
448 | * | 450 | * |
449 | * int bpf_skb_change_tail(skb, len, flags) | 451 | * int bpf_skb_change_tail(skb, len, flags) |
450 | * The helper will resize the skb to the given new size, to be used f.e. | 452 | * The helper will resize the skb to the given new size, to be used f.e. |
451 | * with control messages. | 453 | * with control messages. |
452 | * @skb: pointer to skb | 454 | * @skb: pointer to skb |
453 | * @len: new skb length | 455 | * @len: new skb length |
454 | * @flags: reserved | 456 | * @flags: reserved |
455 | * Return: 0 on success or negative error | 457 | * Return: 0 on success or negative error |
456 | * | 458 | * |
457 | * int bpf_skb_pull_data(skb, len) | 459 | * int bpf_skb_pull_data(skb, len) |
458 | * The helper will pull in non-linear data in case the skb is non-linear | 460 | * The helper will pull in non-linear data in case the skb is non-linear |
459 | * and not all of len are part of the linear section. Only needed for | 461 | * and not all of len are part of the linear section. Only needed for |
460 | * read/write with direct packet access. | 462 | * read/write with direct packet access. |
461 | * @skb: pointer to skb | 463 | * @skb: pointer to skb |
462 | * @len: len to make read/writeable | 464 | * @len: len to make read/writeable |
463 | * Return: 0 on success or negative error | 465 | * Return: 0 on success or negative error |
464 | * | 466 | * |
465 | * s64 bpf_csum_update(skb, csum) | 467 | * s64 bpf_csum_update(skb, csum) |
466 | * Adds csum into skb->csum in case of CHECKSUM_COMPLETE. | 468 | * Adds csum into skb->csum in case of CHECKSUM_COMPLETE. |
467 | * @skb: pointer to skb | 469 | * @skb: pointer to skb |
468 | * @csum: csum to add | 470 | * @csum: csum to add |
469 | * Return: csum on success or negative error | 471 | * Return: csum on success or negative error |
470 | * | 472 | * |
471 | * void bpf_set_hash_invalid(skb) | 473 | * void bpf_set_hash_invalid(skb) |
472 | * Invalidate current skb->hash. | 474 | * Invalidate current skb->hash. |
473 | * @skb: pointer to skb | 475 | * @skb: pointer to skb |
474 | * | 476 | * |
475 | * int bpf_get_numa_node_id() | 477 | * int bpf_get_numa_node_id() |
476 | * Return: Id of current NUMA node. | 478 | * Return: Id of current NUMA node. |
477 | * | 479 | * |
478 | * int bpf_skb_change_head() | 480 | * int bpf_skb_change_head() |
479 | * Grows headroom of skb and adjusts MAC header offset accordingly. | 481 | * Grows headroom of skb and adjusts MAC header offset accordingly. |
 *     Will extend/reallocate as required automatically.
481 | * May change skb data pointer and will thus invalidate any check | 483 | * May change skb data pointer and will thus invalidate any check |
482 | * performed for direct packet access. | 484 | * performed for direct packet access. |
483 | * @skb: pointer to skb | 485 | * @skb: pointer to skb |
484 | * @len: length of header to be pushed in front | 486 | * @len: length of header to be pushed in front |
485 | * @flags: Flags (unused for now) | 487 | * @flags: Flags (unused for now) |
486 | * Return: 0 on success or negative error | 488 | * Return: 0 on success or negative error |
487 | * | 489 | * |
488 | * int bpf_xdp_adjust_head(xdp_md, delta) | 490 | * int bpf_xdp_adjust_head(xdp_md, delta) |
489 | * Adjust the xdp_md.data by delta | 491 | * Adjust the xdp_md.data by delta |
490 | * @xdp_md: pointer to xdp_md | 492 | * @xdp_md: pointer to xdp_md |
 *     @delta: A positive/negative integer to be added to xdp_md.data
492 | * Return: 0 on success or negative on error | 494 | * Return: 0 on success or negative on error |
493 | * | 495 | * |
494 | * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) | 496 | * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) |
495 | * Copy a NUL terminated string from unsafe address. In case the string | 497 | * Copy a NUL terminated string from unsafe address. In case the string |
496 | * length is smaller than size, the target is not padded with further NUL | 498 | * length is smaller than size, the target is not padded with further NUL |
497 | * bytes. In case the string length is larger than size, just count-1 | 499 | * bytes. In case the string length is larger than size, just count-1 |
498 | * bytes are copied and the last byte is set to NUL. | 500 | * bytes are copied and the last byte is set to NUL. |
499 | * @dst: destination address | 501 | * @dst: destination address |
500 | * @size: maximum number of bytes to copy, including the trailing NUL | 502 | * @size: maximum number of bytes to copy, including the trailing NUL |
501 | * @unsafe_ptr: unsafe address | 503 | * @unsafe_ptr: unsafe address |
502 | * Return: | 504 | * Return: |
503 | * > 0 length of the string including the trailing NUL on success | 505 | * > 0 length of the string including the trailing NUL on success |
504 | * < 0 error | 506 | * < 0 error |
505 | * | 507 | * |
506 | * u64 bpf_get_socket_cookie(skb) | 508 | * u64 bpf_get_socket_cookie(skb) |
507 | * Get the cookie for the socket stored inside sk_buff. | 509 | * Get the cookie for the socket stored inside sk_buff. |
508 | * @skb: pointer to skb | 510 | * @skb: pointer to skb |
509 | * Return: 8 Bytes non-decreasing number on success or 0 if the socket | 511 | * Return: 8 Bytes non-decreasing number on success or 0 if the socket |
510 | * field is missing inside sk_buff | 512 | * field is missing inside sk_buff |
511 | * | 513 | * |
512 | * u32 bpf_get_socket_uid(skb) | 514 | * u32 bpf_get_socket_uid(skb) |
513 | * Get the owner uid of the socket stored inside sk_buff. | 515 | * Get the owner uid of the socket stored inside sk_buff. |
514 | * @skb: pointer to skb | 516 | * @skb: pointer to skb |
515 | * Return: uid of the socket owner on success or overflowuid if failed. | 517 | * Return: uid of the socket owner on success or overflowuid if failed. |
516 | * | 518 | * |
517 | * u32 bpf_set_hash(skb, hash) | 519 | * u32 bpf_set_hash(skb, hash) |
518 | * Set full skb->hash. | 520 | * Set full skb->hash. |
519 | * @skb: pointer to skb | 521 | * @skb: pointer to skb |
520 | * @hash: hash to set | 522 | * @hash: hash to set |
523 | * | ||
524 | * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen) | ||
525 | * Calls setsockopt. Not all opts are available, only those with | ||
526 | * integer optvals plus TCP_CONGESTION. | ||
 *     Supported levels: SOL_SOCKET and IPPROTO_TCP
528 | * @bpf_socket: pointer to bpf_socket | ||
 *     @level: SOL_SOCKET or IPPROTO_TCP
530 | * @optname: option name | ||
531 | * @optval: pointer to option value | ||
 *     @optlen: length of optval in bytes
533 | * Return: 0 or negative error | ||
521 | */ | 534 | */ |
/* Mapper macro that expands FN() once per eBPF helper function.  The
 * position of each entry fixes the helper's numeric ID (see enum
 * bpf_func_id below), which is part of the userspace ABI: new helpers
 * may only be appended at the end, and existing entries must never be
 * reordered or removed.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
	FN(map_lookup_elem),		\
	FN(map_update_elem),		\
	FN(map_delete_elem),		\
	FN(probe_read),			\
	FN(ktime_get_ns),		\
	FN(trace_printk),		\
	FN(get_prandom_u32),		\
	FN(get_smp_processor_id),	\
	FN(skb_store_bytes),		\
	FN(l3_csum_replace),		\
	FN(l4_csum_replace),		\
	FN(tail_call),			\
	FN(clone_redirect),		\
	FN(get_current_pid_tgid),	\
	FN(get_current_uid_gid),	\
	FN(get_current_comm),		\
	FN(get_cgroup_classid),		\
	FN(skb_vlan_push),		\
	FN(skb_vlan_pop),		\
	FN(skb_get_tunnel_key),		\
	FN(skb_set_tunnel_key),		\
	FN(perf_event_read),		\
	FN(redirect),			\
	FN(get_route_realm),		\
	FN(perf_event_output),		\
	FN(skb_load_bytes),		\
	FN(get_stackid),		\
	FN(csum_diff),			\
	FN(skb_get_tunnel_opt),		\
	FN(skb_set_tunnel_opt),		\
	FN(skb_change_proto),		\
	FN(skb_change_type),		\
	FN(skb_under_cgroup),		\
	FN(get_hash_recalc),		\
	FN(get_current_task),		\
	FN(probe_write_user),		\
	FN(current_task_under_cgroup),	\
	FN(skb_change_tail),		\
	FN(skb_pull_data),		\
	FN(csum_update),		\
	FN(set_hash_invalid),		\
	FN(get_numa_node_id),		\
	FN(skb_change_head),		\
	FN(xdp_adjust_head),		\
	FN(probe_read_str),		\
	FN(get_socket_cookie),		\
	FN(get_socket_uid),		\
	FN(set_hash),			\
	FN(setsockopt),
572 | 586 | ||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
/* Generates BPF_FUNC_unspec = 0, BPF_FUNC_map_lookup_elem = 1, ... so
 * each helper's ID is its position in __BPF_FUNC_MAPPER above.
 */
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,	/* number of helpers; not a valid helper ID */
};
#undef __BPF_ENUM_FN
582 | 596 | ||
/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)
#define BPF_F_MARK_ENFORCE		(1ULL << 6)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags.
 * Low 8 bits select how many stack frames to skip; the remaining bits
 * are boolean modifiers.
 */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)

/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK		(0xfffffULL << 32)
620 | 634 | ||
/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 * (appending keeps existing programs' field offsets — and thus the
 * userspace ABI — stable)
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];		/* program scratch space (control buffer) */
	__u32 hash;
	__u32 tc_classid;
	__u32 data;		/* see bpf_xdp_adjust_head/direct packet access */
	__u32 data_end;
	__u32 napi_id;
};
644 | 658 | ||
/* Key used by bpf_skb_{get,set}_tunnel_key to describe tunnel metadata.
 * The address union is interpreted per the BPF_F_TUNINFO_IPV6 flag.
 */
struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;	/* padding/reserved */
	__u32 tunnel_label;
};
656 | 670 | ||
/* Generic BPF return codes which all BPF program types may support.
 * The values are binary compatible with their TC_ACT_* counter-part to
 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
 * programs.
 *
 * XDP is handled separately, see XDP_*.
 */
enum bpf_ret_code {
	BPF_OK = 0,
	/* 1 reserved */
	BPF_DROP = 2,
	/* 3-6 reserved */
	BPF_REDIRECT = 7,
	/* >127 are reserved for prog type specific return codes */
};
672 | 686 | ||
/* User-accessible socket context (e.g. for cgroup sock programs). */
struct bpf_sock {
	__u32 bound_dev_if;
	__u32 family;
	__u32 type;
	__u32 protocol;
};
679 | 693 | ||
/* Headroom reserved in front of an XDP packet, usable by
 * bpf_xdp_adjust_head().
 */
#define XDP_PACKET_HEADROOM 256

/* User return codes for XDP prog type.
 * A valid XDP program must return one of these defined values. All other
 * return codes are reserved for future use. Unknown return codes will result
 * in packet drop.
 */
enum xdp_action {
	XDP_ABORTED = 0,
	XDP_DROP,
	XDP_PASS,
	XDP_TX,
};
693 | 707 | ||
/* user accessible metadata for XDP packet hook
 * new fields must be added to the end of this structure
 * (append-only to keep existing programs' field offsets stable)
 */
struct xdp_md {
	__u32 data;
	__u32 data_end;
};
701 | 715 | ||
/* Size in bytes of a program's tag (digest identifier). */
#define BPF_TAG_SIZE	8

/* Program metadata returned by BPF_OBJ_GET_INFO_BY_FD. */
struct bpf_prog_info {
	__u32 type;
	__u32 id;
	__u8 tag[BPF_TAG_SIZE];
	__u32 jited_prog_len;
	__u32 xlated_prog_len;
	__aligned_u64 jited_prog_insns;
	__aligned_u64 xlated_prog_insns;
} __attribute__((aligned(8)));
713 | 727 | ||
/* Map metadata returned by BPF_OBJ_GET_INFO_BY_FD. */
struct bpf_map_info {
	__u32 type;
	__u32 id;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
} __attribute__((aligned(8)));
736 | |||
/* User bpf_sock_ops struct to access socket values and specify request ops
 * and their replies.
 * New fields can only be added at the end of this structure
 *
 * NOTE(review): byte order of the ip/port fields is not stated here —
 * presumably network byte order for addresses; confirm against the
 * kernel's sock_ops context conversion before relying on it.
 */
struct bpf_sock_ops {
	__u32 op;		/* one of the BPF_SOCK_OPS_* values below */
	union {			/* program's reply to the requested op */
		__u32 reply;
		__u32 replylong[4];
	};
	__u32 family;
	__u32 remote_ip4;
	__u32 local_ip4;
	__u32 remote_ip6[4];
	__u32 local_ip6[4];
	__u32 remote_port;
	__u32 local_port;
};
755 | |||
/* List of known BPF sock_ops operators.
 * New entries can only be added at the end
 */
enum {
	BPF_SOCK_OPS_VOID,
	BPF_SOCK_OPS_TIMEOUT_INIT,	/* Should return SYN-RTO value to use or
					 * -1 if default value should be used
					 */
	BPF_SOCK_OPS_RWND_INIT,		/* Should return initial advertised
					 * window (in packets) or -1 if default
					 * value should be used
					 */
	BPF_SOCK_OPS_TCP_CONNECT_CB,	/* Calls BPF program right before an
					 * active connection is initialized
					 */
	BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB,	/* Calls BPF program when an
						 * active connection is
						 * established
						 */
	BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,	/* Calls BPF program when a
						 * passive connection is
						 * established
						 */
	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
					 * needs ECN
					 */
};
783 | |||
/* BPF-only TCP option names for bpf_setsockopt() at the IPPROTO_TCP
 * level; values chosen above the regular TCP_* socket-option range.
 */
#define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP	1002	/* Set sndcwnd_clamp */
722 | 786 | ||
723 | #endif /* _UAPI__LINUX_BPF_H__ */ | 787 | #endif /* _UAPI__LINUX_BPF_H__ */ |
724 | 788 |