diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5d1650af899d0..2d86bd4b0b97e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2366,8 +2366,8 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 					  const struct bpf_insn *insn)
 {
 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
-	 * is not working properly, or interpreter is being used when
-	 * prog->jit_requested is not 0, so warn about it!
+	 * or may_goto may cause stack size > 512 is not working properly,
+	 * so warn about it!
 	 */
 	WARN_ON_ONCE(1);
 	return 0;
@@ -2478,10 +2478,10 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
 	 * But for non-JITed programs, we don't need bpf_func, so no bounds
 	 * check needed.
 	 */
-	if (!fp->jit_requested &&
-	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+	if (idx < ARRAY_SIZE(interpreters)) {
 		fp->bpf_func = interpreters[idx];
 	} else {
+		WARN_ON_ONCE(!fp->jit_requested);
 		fp->bpf_func = __bpf_prog_ret0_warn;
 	}
 #else
@@ -2505,7 +2505,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	/* In case of BPF to BPF calls, verifier did all the prep
 	 * work with regards to JITing, etc.
 	 */
-	bool jit_needed = fp->jit_requested;
+	bool jit_needed = false;
 
 	if (fp->bpf_func)
 		goto finalize;
@@ -2515,6 +2515,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 		jit_needed = true;
 
 	bpf_prog_select_func(fp);
+	if (fp->bpf_func == __bpf_prog_ret0_warn)
+		jit_needed = true;
 
 	/* eBPF JITs can rewrite the program in case constant
 	 * blinding is active. However, in case of error during