
Commit 6ea5fc9

aspsk authored and Alexei Starovoitov committed
bpf: fix the return value of push_stack
In [1] Eduard mentioned that on push_stack() failure the verifier code should return -ENOMEM instead of -EFAULT. After checking the other call sites I found that the code returns either -ENOMEM or -EFAULT rather arbitrarily. This patch unifies the return values of push_stack() (and the similar push_async_cb()) so that error codes are always assigned properly.

[1] https://lore.kernel.org/bpf/[email protected]

Signed-off-by: Anton Protopopov <[email protected]>
Acked-by: Eduard Zingerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 96d31df commit 6ea5fc9
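
For readers unfamiliar with the pattern used in the diff below: the fix works by encoding the errno inside the returned pointer with the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers from <linux/err.h>, so each caller can propagate the exact error instead of hard-coding -ENOMEM or -EFAULT. The following is only a minimal sketch of that idiom; the demo_* names are illustrative and are not part of the verifier.

#include <linux/err.h>    /* ERR_PTR, IS_ERR, PTR_ERR, PTR_ERR_OR_ZERO */
#include <linux/errno.h>  /* ENOMEM, E2BIG */
#include <linux/slab.h>   /* kzalloc, kfree */

struct demo_state { int depth; };

/* Callee side: report a specific errno through the pointer instead of NULL. */
static struct demo_state *demo_push(int depth, int limit)
{
	struct demo_state *st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);	/* allocation failure */
	if (depth > limit) {
		kfree(st);
		return ERR_PTR(-E2BIG);		/* complexity limit exceeded */
	}
	st->depth = depth;
	return st;
}

/* Caller side: propagate whatever errno the callee chose. */
static int demo_caller(int depth, int limit)
{
	struct demo_state *st = demo_push(depth, limit);

	if (IS_ERR(st))
		return PTR_ERR(st);		/* -ENOMEM or -E2BIG, not a fixed -EFAULT */
	kfree(st);
	return 0;
}

PTR_ERR_OR_ZERO(), used by the reworked sanitize_speculative_path() below, collapses the same check into a single expression: 0 when the pointer is valid, the negative errno otherwise.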

1 file changed: 40 additions, 40 deletions


kernel/bpf/verifier.c

Lines changed: 40 additions & 40 deletions
@@ -2109,7 +2109,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 
 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT);
 	if (!elem)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	elem->insn_idx = insn_idx;
 	elem->prev_insn_idx = prev_insn_idx;
@@ -2119,12 +2119,12 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 	env->stack_size++;
 	err = copy_verifier_state(&elem->st, cur);
 	if (err)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	elem->st.speculative |= speculative;
 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
 		verbose(env, "The sequence of %d jumps is too complex.\n",
 			env->stack_size);
-		return NULL;
+		return ERR_PTR(-E2BIG);
 	}
 	if (elem->st.parent) {
 		++elem->st.parent->branches;
@@ -2919,7 +2919,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 
 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT);
 	if (!elem)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	elem->insn_idx = insn_idx;
 	elem->prev_insn_idx = prev_insn_idx;
@@ -2931,7 +2931,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 		verbose(env,
 			"The sequence of %d jumps is too complex for async cb.\n",
 			env->stack_size);
-		return NULL;
+		return ERR_PTR(-E2BIG);
 	}
 	/* Unlike push_stack() do not copy_verifier_state().
 	 * The caller state doesn't matter.
@@ -2942,7 +2942,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 	elem->st.in_sleepable = is_sleepable;
 	frame = kzalloc(sizeof(*frame), GFP_KERNEL_ACCOUNT);
 	if (!frame)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	init_func_state(env, frame,
 			BPF_MAIN_FUNC /* callsite */,
 			0 /* frameno within this callchain */,
@@ -9045,8 +9045,8 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
 	prev_st = find_prev_entry(env, cur_st->parent, insn_idx);
 	/* branch out active iter state */
 	queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
-	if (!queued_st)
-		return -ENOMEM;
+	if (IS_ERR(queued_st))
+		return PTR_ERR(queued_st);
 
 	queued_iter = get_iter_from_state(queued_st, meta);
 	queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
@@ -10616,8 +10616,8 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
					 insn_idx, subprog,
					 is_async_cb_sleepable(env, insn));
-		if (!async_cb)
-			return -EFAULT;
+		if (IS_ERR(async_cb))
+			return PTR_ERR(async_cb);
 		callee = async_cb->frame[0];
 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
 
@@ -10633,8 +10633,8 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
 	 * proceed with next instruction within current frame.
 	 */
 	callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
-	if (!callback_state)
-		return -ENOMEM;
+	if (IS_ERR(callback_state))
+		return PTR_ERR(callback_state);
 
 	err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,
			       callback_state);
@@ -13859,9 +13859,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		struct bpf_reg_state *regs;
 
 		branch = push_stack(env, env->insn_idx + 1, env->insn_idx, false);
-		if (!branch) {
+		if (IS_ERR(branch)) {
 			verbose(env, "failed to push state for failed lock acquisition\n");
-			return -ENOMEM;
+			return PTR_ERR(branch);
 		}
 
 		regs = branch->frame[branch->curframe]->regs;
@@ -14316,16 +14316,15 @@ struct bpf_sanitize_info {
 	bool mask_to_left;
 };
 
-static struct bpf_verifier_state *
-sanitize_speculative_path(struct bpf_verifier_env *env,
-			  const struct bpf_insn *insn,
-			  u32 next_idx, u32 curr_idx)
+static int sanitize_speculative_path(struct bpf_verifier_env *env,
+				     const struct bpf_insn *insn,
+				     u32 next_idx, u32 curr_idx)
 {
 	struct bpf_verifier_state *branch;
 	struct bpf_reg_state *regs;
 
 	branch = push_stack(env, next_idx, curr_idx, true);
-	if (branch && insn) {
+	if (!IS_ERR(branch) && insn) {
 		regs = branch->frame[branch->curframe]->regs;
 		if (BPF_SRC(insn->code) == BPF_K) {
 			mark_reg_unknown(env, regs, insn->dst_reg);
@@ -14334,7 +14333,7 @@ sanitize_speculative_path(struct bpf_verifier_env *env,
 			mark_reg_unknown(env, regs, insn->src_reg);
 		}
 	}
-	return branch;
+	return PTR_ERR_OR_ZERO(branch);
 }
 
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
@@ -14353,7 +14352,6 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 	u8 opcode = BPF_OP(insn->code);
 	u32 alu_state, alu_limit;
 	struct bpf_reg_state tmp;
-	bool ret;
 	int err;
 
 	if (can_skip_alu_sanitation(env, insn))
@@ -14426,11 +14424,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 		tmp = *dst_reg;
 		copy_register_state(dst_reg, ptr_reg);
 	}
-	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
-					env->insn_idx);
-	if (!ptr_is_dst_reg && ret)
+	err = sanitize_speculative_path(env, NULL, env->insn_idx + 1, env->insn_idx);
+	if (err < 0)
+		return REASON_STACK;
+	if (!ptr_is_dst_reg)
 		*dst_reg = tmp;
-	return !ret ? REASON_STACK : 0;
+	return 0;
 }
 
 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
@@ -16750,8 +16749,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 
 		/* branch out 'fallthrough' insn as a new state to explore */
 		queued_st = push_stack(env, idx + 1, idx, false);
-		if (!queued_st)
-			return -ENOMEM;
+		if (IS_ERR(queued_st))
+			return PTR_ERR(queued_st);
 
 		queued_st->may_goto_depth++;
 		if (prev_st)
@@ -16829,10 +16828,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
			 * the fall-through branch for simulation under speculative
			 * execution.
			 */
-			if (!env->bypass_spec_v1 &&
-			    !sanitize_speculative_path(env, insn, *insn_idx + 1,
-						       *insn_idx))
-				return -EFAULT;
+			if (!env->bypass_spec_v1) {
+				err = sanitize_speculative_path(env, insn, *insn_idx + 1, *insn_idx);
+				if (err < 0)
+					return err;
+			}
 			if (env->log.level & BPF_LOG_LEVEL)
 				print_insn_state(env, this_branch, this_branch->curframe);
 			*insn_idx += insn->off;
@@ -16842,11 +16842,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
			 * program will go. If needed, push the goto branch for
			 * simulation under speculative execution.
			 */
-			if (!env->bypass_spec_v1 &&
-			    !sanitize_speculative_path(env, insn,
-						       *insn_idx + insn->off + 1,
-						       *insn_idx))
-				return -EFAULT;
+			if (!env->bypass_spec_v1) {
+				err = sanitize_speculative_path(env, insn, *insn_idx + insn->off + 1,
								*insn_idx);
+				if (err < 0)
+					return err;
+			}
 			if (env->log.level & BPF_LOG_LEVEL)
 				print_insn_state(env, this_branch, this_branch->curframe);
 			return 0;
@@ -16867,10 +16868,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		return err;
 	}
 
-	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
-				  false);
-	if (!other_branch)
-		return -EFAULT;
+	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false);
+	if (IS_ERR(other_branch))
+		return PTR_ERR(other_branch);
 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
 
 	if (BPF_SRC(insn->code) == BPF_X) {
