Commit 28035366afe93f7bdb833a7867caccf4b7eda166

Authored by Alexei Starovoitov
Committed by Greg Kroah-Hartman
1 parent 60c7a9cd50

bpf: move fixup_bpf_calls() function

commit e245c5c6a5656e4d61aa7bb08e9694fd6e5b2b9d upstream.

No functional change.
Move fixup_bpf_calls() to verifier.c;
it is being refactored in the next patch.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Cc: Jiri Slaby <jslaby@suse.cz>
[backported to 4.9 - gregkh]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 2 changed files with 54 additions and 54 deletions

kernel/bpf/syscall.c
... ... @@ -565,57 +565,6 @@
565 565 list_add(&tl->list_node, &bpf_prog_types);
566 566 }
567 567  
568   -/* fixup insn->imm field of bpf_call instructions:
569   - * if (insn->imm == BPF_FUNC_map_lookup_elem)
570   - * insn->imm = bpf_map_lookup_elem - __bpf_call_base;
571   - * else if (insn->imm == BPF_FUNC_map_update_elem)
572   - * insn->imm = bpf_map_update_elem - __bpf_call_base;
573   - * else ...
574   - *
575   - * this function is called after eBPF program passed verification
576   - */
577   -static void fixup_bpf_calls(struct bpf_prog *prog)
578   -{
579   - const struct bpf_func_proto *fn;
580   - int i;
581   -
582   - for (i = 0; i < prog->len; i++) {
583   - struct bpf_insn *insn = &prog->insnsi[i];
584   -
585   - if (insn->code == (BPF_JMP | BPF_CALL)) {
586   - /* we reach here when program has bpf_call instructions
587   - * and it passed bpf_check(), means that
588   - * ops->get_func_proto must have been supplied, check it
589   - */
590   - BUG_ON(!prog->aux->ops->get_func_proto);
591   -
592   - if (insn->imm == BPF_FUNC_get_route_realm)
593   - prog->dst_needed = 1;
594   - if (insn->imm == BPF_FUNC_get_prandom_u32)
595   - bpf_user_rnd_init_once();
596   - if (insn->imm == BPF_FUNC_tail_call) {
597   - /* mark bpf_tail_call as different opcode
598   - * to avoid conditional branch in
599   - * interpeter for every normal call
600   - * and to prevent accidental JITing by
601   - * JIT compiler that doesn't support
602   - * bpf_tail_call yet
603   - */
604   - insn->imm = 0;
605   - insn->code |= BPF_X;
606   - continue;
607   - }
608   -
609   - fn = prog->aux->ops->get_func_proto(insn->imm);
610   - /* all functions that have prototype and verifier allowed
611   - * programs to call them, must be real in-kernel functions
612   - */
613   - BUG_ON(!fn->func);
614   - insn->imm = fn->func - __bpf_call_base;
615   - }
616   - }
617   -}
618   -
619 568 /* drop refcnt on maps used by eBPF program and free auxilary data */
620 569 static void free_used_maps(struct bpf_prog_aux *aux)
621 570 {
... ... @@ -807,9 +756,6 @@
807 756 err = bpf_check(&prog, attr);
808 757 if (err < 0)
809 758 goto free_used_maps;
810   -
811   - /* fixup BPF_CALL->imm field */
812   - fixup_bpf_calls(prog);
813 759  
814 760 /* eBPF program is ready to be JITed */
815 761 prog = bpf_prog_select_runtime(prog, &err);
kernel/bpf/verifier.c
... ... @@ -3362,6 +3362,57 @@
3362 3362 return 0;
3363 3363 }
3364 3364  
  3365 +/* fixup insn->imm field of bpf_call instructions:
  3366 + * if (insn->imm == BPF_FUNC_map_lookup_elem)
  3367 + * insn->imm = bpf_map_lookup_elem - __bpf_call_base;
  3368 + * else if (insn->imm == BPF_FUNC_map_update_elem)
  3369 + * insn->imm = bpf_map_update_elem - __bpf_call_base;
  3370 + * else ...
  3371 + *
  3372 + * this function is called after eBPF program passed verification
  3373 + */
  3374 +static void fixup_bpf_calls(struct bpf_prog *prog)
  3375 +{
  3376 + const struct bpf_func_proto *fn;
  3377 + int i;
  3378 +
  3379 + for (i = 0; i < prog->len; i++) {
  3380 + struct bpf_insn *insn = &prog->insnsi[i];
  3381 +
  3382 + if (insn->code == (BPF_JMP | BPF_CALL)) {
  3383 + /* we reach here when program has bpf_call instructions
  3384 + * and it passed bpf_check(), means that
  3385 + * ops->get_func_proto must have been supplied, check it
  3386 + */
  3387 + BUG_ON(!prog->aux->ops->get_func_proto);
  3388 +
  3389 + if (insn->imm == BPF_FUNC_get_route_realm)
  3390 + prog->dst_needed = 1;
  3391 + if (insn->imm == BPF_FUNC_get_prandom_u32)
  3392 + bpf_user_rnd_init_once();
  3393 + if (insn->imm == BPF_FUNC_tail_call) {
  3394 + /* mark bpf_tail_call as different opcode
  3395 + * to avoid conditional branch in
  3396 + * interpeter for every normal call
  3397 + * and to prevent accidental JITing by
  3398 + * JIT compiler that doesn't support
  3399 + * bpf_tail_call yet
  3400 + */
  3401 + insn->imm = 0;
  3402 + insn->code |= BPF_X;
  3403 + continue;
  3404 + }
  3405 +
  3406 + fn = prog->aux->ops->get_func_proto(insn->imm);
  3407 + /* all functions that have prototype and verifier allowed
  3408 + * programs to call them, must be real in-kernel functions
  3409 + */
  3410 + BUG_ON(!fn->func);
  3411 + insn->imm = fn->func - __bpf_call_base;
  3412 + }
  3413 + }
  3414 +}
  3415 +
3365 3416 static void free_states(struct bpf_verifier_env *env)
3366 3417 {
3367 3418 struct bpf_verifier_state_list *sl, *sln;
... ... @@ -3462,6 +3513,9 @@
3462 3513 if (ret == 0)
3463 3514 /* program is valid, convert *(u32*)(ctx + off) accesses */
3464 3515 ret = convert_ctx_accesses(env);
  3516 +
  3517 + if (ret == 0)
  3518 + fixup_bpf_calls(env->prog);
3465 3519  
3466 3520 if (log_level && log_len >= log_size - 1) {
3467 3521 BUG_ON(log_len >= log_size);
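
For readers skimming the diff, here is a minimal user-space sketch of the rewrite that fixup_bpf_calls() performs; the struct, the stand-in helper, and main() below are simplified inventions for illustration, not the kernel's real types or API. The idea, taken from the comments in the moved function, is that after verification each BPF_JMP|BPF_CALL instruction's imm field is converted from a helper ID into the offset of the real in-kernel function from __bpf_call_base, while bpf_tail_call is given a distinct opcode so the interpreter avoids a conditional branch on every normal call and a JIT without tail-call support will not JIT it by accident.

    /* Simplified, self-contained sketch of the imm rewrite; all names and
     * values below are stand-ins, not the kernel's real definitions. */
    #include <stdio.h>
    #include <stdint.h>

    #define BPF_JMP   0x05
    #define BPF_CALL  0x80
    #define BPF_X     0x08

    enum { FUNC_map_lookup_elem = 1, FUNC_tail_call = 12 }; /* stand-in helper IDs */

    struct insn { uint8_t code; int32_t imm; };

    static long map_lookup_elem(void) { return 0; }  /* stand-in helper body */
    static long call_base(void)       { return 0; }  /* stand-in __bpf_call_base */

    static void fixup_calls(struct insn *insns, int len)
    {
    	int i;

    	for (i = 0; i < len; i++) {
    		struct insn *insn = &insns[i];

    		if (insn->code != (BPF_JMP | BPF_CALL))
    			continue;

    		if (insn->imm == FUNC_tail_call) {
    			/* distinct opcode: no per-call branch in the
    			 * interpreter, no accidental JITing by a JIT
    			 * that lacks tail-call support */
    			insn->imm = 0;
    			insn->code |= BPF_X;
    			continue;
    		}

    		/* helper ID -> offset of the real function from the call
    		 * base (the kernel looks the function up through
    		 * ops->get_func_proto(); this sketch hard-codes a single
    		 * helper to stay short) */
    		insn->imm = (int32_t)((long)map_lookup_elem - (long)call_base);
    	}
    }

    int main(void)
    {
    	struct insn prog[] = {
    		{ BPF_JMP | BPF_CALL, FUNC_map_lookup_elem },
    		{ BPF_JMP | BPF_CALL, FUNC_tail_call },
    	};

    	fixup_calls(prog, 2);
    	printf("call imm = %d, tail-call code = %#x\n",
    	       (int)prog[0].imm, (unsigned int)prog[1].code);
    	return 0;
    }

What this backport changes is only where that rewrite runs: it is removed from bpf_prog_load() in kernel/bpf/syscall.c and invoked at the tail of bpf_check() in kernel/bpf/verifier.c, right after convert_ctx_accesses(), with the rewrite itself left untouched.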