Skip to content

Commit 79f047c

Browse files
eddyz87 authored and Alexei Starovoitov committed
bpf: table based bpf_insn_successors()
Converting bpf_insn_successors() to use lookup table makes it ~1.5 times faster. Also remove unnecessary conditionals: - `idx + 1 < prog->len` is unnecessary because after check_cfg() all jump targets are guaranteed to be within a program; - `i == 0 || succ[0] != dst` is unnecessary because any client of bpf_insn_successors() can handle duplicate edges: - compute_live_registers() - compute_scc() Moving bpf_insn_successors() to liveness.c allows its inlining in liveness.c:__update_stack_liveness(). Such inlining speeds up __update_stack_liveness() by ~40%. bpf_insn_successors() is used in both verifier.c and liveness.c. perf shows such move does not negatively impact users in verifier.c, as these are executed only once before the main verification pass. Unlike __update_stack_liveness() which can be triggered multiple times. Signed-off-by: Eduard Zingerman <[email protected]> Link: https://lore.kernel.org/r/20250918-callchain-sensitive-liveness-v3-10-c3cd27bacc60@gmail.com Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 107e169 commit 79f047c

File tree

3 files changed

+58
-71
lines changed

3 files changed

+58
-71
lines changed

include/linux/bpf_verifier.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1049,6 +1049,7 @@ void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_st
10491049
u32 frameno);
10501050

10511051
struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
1052+
int bpf_jmp_offset(struct bpf_insn *insn);
10521053
int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2]);
10531054
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
10541055
bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);

kernel/bpf/liveness.c

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -433,6 +433,62 @@ static void log_mask_change(struct bpf_verifier_env *env, struct callchain *call
433433
bpf_log(&env->log, "\n");
434434
}
435435

436+
int bpf_jmp_offset(struct bpf_insn *insn)
437+
{
438+
u8 code = insn->code;
439+
440+
if (code == (BPF_JMP32 | BPF_JA))
441+
return insn->imm;
442+
return insn->off;
443+
}
444+
445+
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for opcode_info_tbl");

/*
 * Write the indices of up to two possible successors of instruction @idx
 * of @prog into @succ and return how many were written.  A conditional
 * jump yields both the fall-through successor and the jump target, in
 * that order.  The two entries may be identical; per the introducing
 * commit, callers are expected to tolerate duplicate edges.
 *
 * Control-flow properties of an opcode are found via a single lookup in
 * a 256-entry table indexed by BPF_CLASS(code) | BPF_OP(code).
 */
inline int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
{
	static const struct opcode_info {
		bool can_jump;		/* insn may transfer control to a jump target */
		bool can_fallthrough;	/* execution may continue at the next insn */
	} opcode_info_tbl[256] = {
		/* Default: plain straight-line instruction. */
		[0 ... 255] = {.can_jump = false, .can_fallthrough = true},
/* Populate both the BPF_JMP and BPF_JMP32 flavors of @code at once. */
#define _J(code, ...) \
	[BPF_JMP   | code] = __VA_ARGS__, \
	[BPF_JMP32 | code] = __VA_ARGS__

		_J(BPF_EXIT,  {.can_jump = false, .can_fallthrough = false}),
		_J(BPF_JA,    {.can_jump = true,  .can_fallthrough = false}),
		_J(BPF_JEQ,   {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JNE,   {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JLT,   {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JLE,   {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JGT,   {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JGE,   {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JSGT,  {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JSGE,  {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JSLT,  {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JSLE,  {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JCOND, {.can_jump = true,  .can_fallthrough = true}),
		_J(BPF_JSET,  {.can_jump = true,  .can_fallthrough = true}),
#undef _J
	};
	struct bpf_insn *insn = &prog->insnsi[idx];
	const struct opcode_info *opcode_info;
	int i = 0, insn_sz;

	opcode_info = &opcode_info_tbl[BPF_CLASS(insn->code) | BPF_OP(insn->code)];
	/* ldimm64 occupies two instruction slots. */
	insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
	if (opcode_info->can_fallthrough)
		succ[i++] = idx + insn_sz;

	if (opcode_info->can_jump)
		succ[i++] = idx + bpf_jmp_offset(insn) + 1;

	return i;
}

__diag_pop();
491+
436492
static struct func_instance *get_outer_instance(struct bpf_verifier_env *env,
437493
struct func_instance *instance)
438494
{

kernel/bpf/verifier.c

Lines changed: 1 addition & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -3485,15 +3485,6 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
34853485
return 0;
34863486
}
34873487

3488-
/*
 * Jump offset of @insn: BPF_JMP32 | BPF_JA keeps a 32-bit offset in the
 * imm field; all other jump instructions use the 16-bit off field.
 */
static int jmp_offset(struct bpf_insn *insn)
{
	u8 code = insn->code;

	if (code == (BPF_JMP32 | BPF_JA))
		return insn->imm;
	return insn->off;
}
3496-
34973488
static int check_subprogs(struct bpf_verifier_env *env)
34983489
{
34993490
int i, subprog_start, subprog_end, off, cur_subprog = 0;
@@ -3520,7 +3511,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
35203511
goto next;
35213512
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
35223513
goto next;
3523-
off = i + jmp_offset(&insn[i]) + 1;
3514+
off = i + bpf_jmp_offset(&insn[i]) + 1;
35243515
if (off < subprog_start || off >= subprog_end) {
35253516
verbose(env, "jump out of range from insn %d to %d\n", i, off);
35263517
return -EINVAL;
@@ -23944,67 +23935,6 @@ static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr,
2394423935
return 0;
2394523936
}
2394623937

23947-
static bool can_fallthrough(struct bpf_insn *insn)
23948-
{
23949-
u8 class = BPF_CLASS(insn->code);
23950-
u8 opcode = BPF_OP(insn->code);
23951-
23952-
if (class != BPF_JMP && class != BPF_JMP32)
23953-
return true;
23954-
23955-
if (opcode == BPF_EXIT || opcode == BPF_JA)
23956-
return false;
23957-
23958-
return true;
23959-
}
23960-
23961-
static bool can_jump(struct bpf_insn *insn)
23962-
{
23963-
u8 class = BPF_CLASS(insn->code);
23964-
u8 opcode = BPF_OP(insn->code);
23965-
23966-
if (class != BPF_JMP && class != BPF_JMP32)
23967-
return false;
23968-
23969-
switch (opcode) {
23970-
case BPF_JA:
23971-
case BPF_JEQ:
23972-
case BPF_JNE:
23973-
case BPF_JLT:
23974-
case BPF_JLE:
23975-
case BPF_JGT:
23976-
case BPF_JGE:
23977-
case BPF_JSGT:
23978-
case BPF_JSGE:
23979-
case BPF_JSLT:
23980-
case BPF_JSLE:
23981-
case BPF_JCOND:
23982-
case BPF_JSET:
23983-
return true;
23984-
}
23985-
23986-
return false;
23987-
}
23988-
23989-
/*
 * Write the indices of up to two possible successors of instruction @idx
 * of @prog into @succ and return how many were written: the fall-through
 * successor first (when the opcode allows it), then the jump target.
 */
int bpf_insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
{
	struct bpf_insn *insn = &prog->insnsi[idx];
	int i = 0, insn_sz;
	u32 dst;

	/* ldimm64 occupies two instruction slots. */
	insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
	if (can_fallthrough(insn) && idx + 1 < prog->len)
		succ[i++] = idx + insn_sz;

	if (can_jump(insn)) {
		dst = idx + jmp_offset(insn) + 1;
		/* Skip the jump edge when it duplicates the fall-through edge. */
		if (i == 0 || succ[0] != dst)
			succ[i++] = dst;
	}

	return i;
}
24007-
2400823938
/* Each field is a register bitmask */
2400923939
struct insn_live_regs {
2401023940
u16 use; /* registers read by instruction */

0 commit comments

Comments
 (0)