include/linux/filter.h

/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>

#include <net/sch_generic.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code  = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
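
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * BPF_LD_IMM64() and BPF_LD_MAP_FD() expand to *two* struct bpf_insn
 * compound literals, so they are intended for array initializers rather
 * than single assignments. The function and array names below are
 * hypothetical and exist only to show the macro usage.
 */
static inline void __bpf_doc_ld_imm64_sketch(int map_fd)
{
	const struct bpf_insn sketch[] = {
		BPF_LD_IMM64(BPF_REG_2, 0x1122334455667788ULL), /* r2 = 64-bit imm, 2 insns */
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),	/* r1 = pseudo map_fd reference, 2 insns */
		BPF_MOV64_IMM(BPF_REG_0, 0),		/* r0 = 0 */
	};

	(void)sketch;	/* the array only illustrates the initializer form */
}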

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
		.dst_reg = 0, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = OFF, \
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = OFF, \
		.imm   = IMM })

/* Function call */

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code  = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = 0 })
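
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the insn macros above compose into eBPF programs as plain array
 * initializers. This builds a minimal program that returns 1 when its
 * context register r1 is non-zero and 0 otherwise; the function name is
 * hypothetical.
 */
static inline void __bpf_doc_insn_macros_sketch(void)
{
	const struct bpf_insn sketch[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),		/* r0 = 0 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),	/* if (r1 == 0) skip next insn */
		BPF_MOV64_IMM(BPF_REG_0, 1),		/* r0 = 1 */
		BPF_EXIT_INSN(),			/* return r0 */
	};

	(void)sketch;
}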

/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K) \
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF) \
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
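
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * __BPF_STMT()/__BPF_JUMP() build classic struct sock_filter entries the
 * same way, e.g. a filter that accepts only IPv4 frames. The function name
 * is hypothetical; 0x0800 is the IPv4 ethertype.
 */
static inline void __bpf_doc_classic_blocks_sketch(void)
{
	const struct sock_filter sketch[] = {
		__BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),		/* A = ethertype */
		__BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),	/* IPv4? */
		__BPF_STMT(BPF_RET | BPF_K, 0xffff),			/* accept */
		__BPF_STMT(BPF_RET | BPF_K, 0),				/* drop */
	};

	(void)sketch;
}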

#define bytes_to_bpf_size(bytes) \
({ \
	int bpf_size = -EINVAL; \
	\
	if (bytes == sizeof(u8)) \
		bpf_size = BPF_B; \
	else if (bytes == sizeof(u16)) \
		bpf_size = BPF_H; \
	else if (bytes == sizeof(u32)) \
		bpf_size = BPF_W; \
	else if (bytes == sizeof(u64)) \
		bpf_size = BPF_DW; \
	\
	bpf_size; \
})
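
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * bytes_to_bpf_size() maps an access width in bytes to its BPF_SIZE()
 * encoding and yields -EINVAL for unsupported widths. The helper name is
 * hypothetical.
 */
static inline bool __bpf_doc_size_mapping_sketch(void)
{
	return bytes_to_bpf_size(sizeof(u8))  == BPF_B &&
	       bytes_to_bpf_size(sizeof(u16)) == BPF_H &&
	       bytes_to_bpf_size(sizeof(u32)) == BPF_W &&
	       bytes_to_bpf_size(sizeof(u64)) == BPF_DW &&
	       bytes_to_bpf_size(3) == -EINVAL;	/* unsupported width */
}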

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* Is our filter JIT'ed? */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1;	/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	u32			len;		/* Number of filter blocks */
	enum bpf_prog_type	type;		/* Type of BPF program */
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
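
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * BPF_PROG_RUN() invokes the program's bpf_func on a context (here an skb);
 * for socket filters a return value of 0 means "drop the packet". The
 * wrapper name is hypothetical.
 */
static inline bool __bpf_doc_filter_would_drop(const struct sk_filter *fp,
					       struct sk_buff *skb)
{
	return BPF_PROG_RUN(fp->prog, skb) == 0;
}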

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = qdisc_skb_cb(skb)->data;
	u8 saved_cb[QDISC_CB_PRIV_LEN];
	u32 res;

	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     QDISC_CB_PRIV_LEN);

	if (unlikely(prog->cb_access)) {
		memcpy(saved_cb, cb_data, sizeof(saved_cb));
		memset(cb_data, 0, sizeof(saved_cb));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, saved_cb, sizeof(saved_cb));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = qdisc_skb_cb(skb)->data;

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, QDISC_CB_PRIV_LEN);
	return BPF_PROG_RUN(prog, skb);
}
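
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * callers pick bpf_prog_run_save_cb() when the skb control block still
 * carries live data that must survive a prog with cb_access set, and
 * bpf_prog_run_clear_cb() when the cb area can simply be zeroed. The
 * wrapper name and the 'cb_live' parameter are hypothetical.
 */
static inline u32 __bpf_doc_run_on_skb(const struct bpf_prog *prog,
				       struct sk_buff *skb, bool cb_live)
{
	return cb_live ? bpf_prog_run_save_cb(prog, skb) :
			 bpf_prog_run_clear_cb(prog, skb);
}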

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}
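
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * bpf_prog_size() is the allocation size for a program with 'proglen'
 * blocks -- the struct bpf_prog header plus its flexible insns[] array,
 * never less than the bare struct. The helper name is hypothetical.
 */
static inline unsigned int __bpf_doc_prog_alloc_size(unsigned int nr_insns)
{
	return bpf_prog_size(nr_insns);	/* nr_insns == 0 still covers the header */
}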

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
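
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * bpf_classic_proglen() gives the byte count of a classic filter, i.e. the
 * size to copy when duplicating fprog->filter. The helper name is
 * hypothetical.
 */
static inline unsigned int
__bpf_doc_classic_copy_bytes(const struct sock_fprog_kern *fprog)
{
	return bpf_classic_proglen(fprog);	/* len * sizeof(struct sock_filter) */
}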

int sk_filter(struct sock *sk, struct sk_buff *skb);

int bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
bool bpf_helper_changes_skb_data(void *func);

#ifdef CONFIG_BPF_JIT
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}
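
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * bpf_anc_helper() rewrites classic absolute loads at SKF_AD_OFF offsets
 * into BPF_ANC-tagged codes, e.g. k == SKF_AD_OFF + SKF_AD_MARK becomes
 * BPF_ANC | SKF_AD_MARK; ordinary codes pass through unchanged. The helper
 * name is hypothetical.
 */
static inline bool __bpf_doc_is_ancillary_load(const struct sock_filter *ftest)
{
	return (bpf_anc_helper(ftest) & BPF_ANC) != 0;
}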

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k,
					   unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
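
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * bpf_load_pointer() reads packet bytes for classic BPF loads; k >= 0 is a
 * normal offset into the skb data, while negative k selects the special
 * SKF_* ranges handled by bpf_internal_load_pointer_neg_helper(). The
 * wrapper name is hypothetical.
 */
static inline void *__bpf_doc_load_two_bytes(const struct sk_buff *skb,
					     u16 *buffer)
{
	/* two bytes at offset 12 of the packet data (ethertype on a raw Ethernet frame) */
	return bpf_load_pointer(skb, 12, sizeof(*buffer), buffer);
}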

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */