Commit 45e9cd3

Yonghong Song authored and Alexei Starovoitov committed
bpf: Reduce stack frame size by using env->insn_buf for bpf insns
Arnd Bergmann reported an issue ([1]) where the clang compiler (older than LLVM 18) may trigger an error where the stack frame size exceeds the limit. I can reproduce the error like below:

  kernel/bpf/verifier.c:24491:5: error: stack frame size (2552) exceeds limit (1280) in 'bpf_check' [-Werror,-Wframe-larger-than]
  kernel/bpf/verifier.c:19921:12: error: stack frame size (1368) exceeds limit (1280) in 'do_check' [-Werror,-Wframe-larger-than]

Use env->insn_buf for bpf insns instead of putting these insns on the stack. This resolves the above 'bpf_check' error; the 'do_check' error will be resolved in the next patch.

  [1] https://lore.kernel.org/bpf/[email protected]/

Reported-by: Arnd Bergmann <[email protected]>
Tested-by: Arnd Bergmann <[email protected]>
Acked-by: Jiri Olsa <[email protected]>
Acked-by: Eduard Zingerman <[email protected]>
Signed-off-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 3b87251 commit 45e9cd3
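The diff below applies a single pattern throughout kernel/bpf/verifier.c: each on-stack struct bpf_insn array is replaced by cursor-style writes into the preallocated env->insn_buf, and the instruction count is recovered by pointer subtraction (cnt = patch - insn_buf). The following is a minimal, self-contained userspace sketch of that idiom, not kernel code; toy_insn, TOY_INSN_BUF_SIZE and emit_nospec_patch are hypothetical names, and the static buffer merely stands in for env->insn_buf.

  /* Toy illustration of the buffer-cursor idiom used by this commit.
   * The patch is built in a preallocated buffer rather than in an
   * on-stack array, so the emitting function's stack frame stays small.
   */
  #include <stddef.h>
  #include <stdio.h>

  struct toy_insn { int code; };   /* hypothetical stand-in for struct bpf_insn */

  #define TOY_INSN_BUF_SIZE 32

  /* plays the role of env->insn_buf */
  static struct toy_insn insn_buf[TOY_INSN_BUF_SIZE];

  static size_t emit_nospec_patch(struct toy_insn *buf, struct toy_insn orig)
  {
          struct toy_insn *patch = buf;                   /* cursor into the shared buffer */

          *patch++ = (struct toy_insn){ .code = 0xc0 };   /* e.g. BPF_ST_NOSPEC()  */
          *patch++ = orig;                                /* keep the original insn */
          return patch - buf;                             /* cnt = patch - insn_buf */
  }

  int main(void)
  {
          struct toy_insn orig = { .code = 0x07 };
          size_t cnt = emit_nospec_patch(insn_buf, orig);

          printf("emitted %zu insns\n", cnt);
          return 0;
  }

Because the emitted instructions now live in a buffer held in the heap-allocated verifier environment rather than in per-call-site stack arrays, the largest patch no longer contributes to the stack frame of bpf_check(), which is what the -Wframe-larger-than error above flagged.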

1 file changed (+92, −98)

kernel/bpf/verifier.c

Lines changed: 92 additions & 98 deletions
@@ -21011,7 +21011,10 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
                                          const union bpf_attr *attr)
 {
-        struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
+        struct bpf_insn *patch;
+        /* use env->insn_buf as two independent buffers */
+        struct bpf_insn *zext_patch = env->insn_buf;
+        struct bpf_insn *rnd_hi32_patch = &env->insn_buf[2];
         struct bpf_insn_aux_data *aux = env->insn_aux_data;
         int i, patch_len, delta = 0, len = env->prog->len;
         struct bpf_insn *insns = env->prog->insnsi;
@@ -21189,13 +21192,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 
                 if (env->insn_aux_data[i + delta].nospec) {
                         WARN_ON_ONCE(env->insn_aux_data[i + delta].alu_state);
-                        struct bpf_insn patch[] = {
-                                BPF_ST_NOSPEC(),
-                                *insn,
-                        };
+                        struct bpf_insn *patch = insn_buf;
 
-                        cnt = ARRAY_SIZE(patch);
-                        new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+                        *patch++ = BPF_ST_NOSPEC();
+                        *patch++ = *insn;
+                        cnt = patch - insn_buf;
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 
@@ -21263,13 +21265,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                         /* nospec_result is only used to mitigate Spectre v4 and
                          * to limit verification-time for Spectre v1.
                          */
-                        struct bpf_insn patch[] = {
-                                *insn,
-                                BPF_ST_NOSPEC(),
-                        };
+                        struct bpf_insn *patch = insn_buf;
 
-                        cnt = ARRAY_SIZE(patch);
-                        new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+                        *patch++ = *insn;
+                        *patch++ = BPF_ST_NOSPEC();
+                        cnt = patch - insn_buf;
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 
@@ -21939,13 +21940,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
         u16 stack_depth_extra = 0;
 
         if (env->seen_exception && !env->exception_callback_subprog) {
-                struct bpf_insn patch[] = {
-                        env->prog->insnsi[insn_cnt - 1],
-                        BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
-                        BPF_EXIT_INSN(),
-                };
+                struct bpf_insn *patch = insn_buf;
 
-                ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch));
+                *patch++ = env->prog->insnsi[insn_cnt - 1];
+                *patch++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
+                *patch++ = BPF_EXIT_INSN();
+                ret = add_hidden_subprog(env, insn_buf, patch - insn_buf);
                 if (ret < 0)
                         return ret;
                 prog = env->prog;
@@ -21981,20 +21981,18 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                     insn->off == 1 && insn->imm == -1) {
                         bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
                         bool isdiv = BPF_OP(insn->code) == BPF_DIV;
-                        struct bpf_insn *patchlet;
-                        struct bpf_insn chk_and_sdiv[] = {
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_NEG | BPF_K, insn->dst_reg,
-                                             0, 0, 0),
-                        };
-                        struct bpf_insn chk_and_smod[] = {
-                                BPF_MOV32_IMM(insn->dst_reg, 0),
-                        };
+                        struct bpf_insn *patch = insn_buf;
 
-                        patchlet = isdiv ? chk_and_sdiv : chk_and_smod;
-                        cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod);
+                        if (isdiv)
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_NEG | BPF_K, insn->dst_reg,
+                                                        0, 0, 0);
+                        else
+                                *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
+
+                        cnt = patch - insn_buf;
 
-                        new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 
@@ -22013,83 +22011,79 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         bool isdiv = BPF_OP(insn->code) == BPF_DIV;
                         bool is_sdiv = isdiv && insn->off == 1;
                         bool is_smod = !isdiv && insn->off == 1;
-                        struct bpf_insn *patchlet;
-                        struct bpf_insn chk_and_div[] = {
-                                /* [R,W]x div 0 -> 0 */
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JNE | BPF_K, insn->src_reg,
-                                             0, 2, 0),
-                                BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                *insn,
-                        };
-                        struct bpf_insn chk_and_mod[] = {
-                                /* [R,W]x mod 0 -> [R,W]x */
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JEQ | BPF_K, insn->src_reg,
-                                             0, 1 + (is64 ? 0 : 1), 0),
-                                *insn,
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-                        };
-                        struct bpf_insn chk_and_sdiv[] = {
+                        struct bpf_insn *patch = insn_buf;
+
+                        if (is_sdiv) {
                                 /* [R,W]x sdiv 0 -> 0
                                  * LLONG_MIN sdiv -1 -> LLONG_MIN
                                  * INT_MIN sdiv -1 -> INT_MIN
                                  */
-                                BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_ADD | BPF_K, BPF_REG_AX,
-                                             0, 0, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JGT | BPF_K, BPF_REG_AX,
-                                             0, 4, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JEQ | BPF_K, BPF_REG_AX,
-                                             0, 1, 0),
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_MOV | BPF_K, insn->dst_reg,
-                                             0, 0, 0),
+                                *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_ADD | BPF_K, BPF_REG_AX,
+                                                        0, 0, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JGT | BPF_K, BPF_REG_AX,
+                                                        0, 4, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JEQ | BPF_K, BPF_REG_AX,
+                                                        0, 1, 0);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_MOV | BPF_K, insn->dst_reg,
+                                                        0, 0, 0);
                                 /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_NEG | BPF_K, insn->dst_reg,
-                                             0, 0, 0),
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                *insn,
-                        };
-                        struct bpf_insn chk_and_smod[] = {
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_NEG | BPF_K, insn->dst_reg,
+                                                        0, 0, 0);
+                                *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                *patch++ = *insn;
+                                cnt = patch - insn_buf;
+                        } else if (is_smod) {
                                 /* [R,W]x mod 0 -> [R,W]x */
                                 /* [R,W]x mod -1 -> 0 */
-                                BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_ADD | BPF_K, BPF_REG_AX,
-                                             0, 0, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JGT | BPF_K, BPF_REG_AX,
-                                             0, 3, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JEQ | BPF_K, BPF_REG_AX,
-                                             0, 3 + (is64 ? 0 : 1), 1),
-                                BPF_MOV32_IMM(insn->dst_reg, 0),
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                *insn,
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-                        };
-
-                        if (is_sdiv) {
-                                patchlet = chk_and_sdiv;
-                                cnt = ARRAY_SIZE(chk_and_sdiv);
-                        } else if (is_smod) {
-                                patchlet = chk_and_smod;
-                                cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0);
+                                *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_ADD | BPF_K, BPF_REG_AX,
+                                                        0, 0, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JGT | BPF_K, BPF_REG_AX,
+                                                        0, 3, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JEQ | BPF_K, BPF_REG_AX,
+                                                        0, 3 + (is64 ? 0 : 1), 1);
+                                *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
+                                *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                *patch++ = *insn;
+
+                                if (!is64) {
+                                        *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                        *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+                                }
+                                cnt = patch - insn_buf;
+                        } else if (isdiv) {
+                                /* [R,W]x div 0 -> 0 */
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JNE | BPF_K, insn->src_reg,
+                                                        0, 2, 0);
+                                *patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg);
+                                *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                *patch++ = *insn;
+                                cnt = patch - insn_buf;
                         } else {
-                                patchlet = isdiv ? chk_and_div : chk_and_mod;
-                                cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
-                                              ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+                                /* [R,W]x mod 0 -> [R,W]x */
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JEQ | BPF_K, insn->src_reg,
+                                                        0, 1 + (is64 ? 0 : 1), 0);
+                                *patch++ = *insn;
+
+                                if (!is64) {
+                                        *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                        *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+                                }
+                                cnt = patch - insn_buf;
                         }
 
-                        new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 