@@ -1914,19 +1914,19 @@ static char *format_callchain(struct bpf_verifier_env *env, struct bpf_scc_callc
  */
 static int maybe_enter_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
 {
-        struct bpf_scc_callchain callchain;
+        struct bpf_scc_callchain *callchain = &env->callchain_buf;
         struct bpf_scc_visit *visit;
 
-        if (!compute_scc_callchain(env, st, &callchain))
+        if (!compute_scc_callchain(env, st, callchain))
                 return 0;
-        visit = scc_visit_lookup(env, &callchain);
-        visit = visit ?: scc_visit_alloc(env, &callchain);
+        visit = scc_visit_lookup(env, callchain);
+        visit = visit ?: scc_visit_alloc(env, callchain);
         if (!visit)
                 return -ENOMEM;
         if (!visit->entry_state) {
                 visit->entry_state = st;
                 if (env->log.level & BPF_LOG_LEVEL2)
-                        verbose(env, "SCC enter %s\n", format_callchain(env, &callchain));
+                        verbose(env, "SCC enter %s\n", format_callchain(env, callchain));
         }
         return 0;
 }
@@ -1939,21 +1939,21 @@ static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visi
  */
 static int maybe_exit_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
 {
-        struct bpf_scc_callchain callchain;
+        struct bpf_scc_callchain *callchain = &env->callchain_buf;
         struct bpf_scc_visit *visit;
 
-        if (!compute_scc_callchain(env, st, &callchain))
+        if (!compute_scc_callchain(env, st, callchain))
                 return 0;
-        visit = scc_visit_lookup(env, &callchain);
+        visit = scc_visit_lookup(env, callchain);
         if (!visit) {
                 verifier_bug(env, "scc exit: no visit info for call chain %s",
-                             format_callchain(env, &callchain));
+                             format_callchain(env, callchain));
                 return -EFAULT;
         }
         if (visit->entry_state != st)
                 return 0;
         if (env->log.level & BPF_LOG_LEVEL2)
-                verbose(env, "SCC exit %s\n", format_callchain(env, &callchain));
+                verbose(env, "SCC exit %s\n", format_callchain(env, callchain));
         visit->entry_state = NULL;
         env->num_backedges -= visit->num_backedges;
         visit->num_backedges = 0;
@@ -1968,22 +1968,22 @@ static int add_scc_backedge(struct bpf_verifier_env *env,
                             struct bpf_verifier_state *st,
                             struct bpf_scc_backedge *backedge)
 {
-        struct bpf_scc_callchain callchain;
+        struct bpf_scc_callchain *callchain = &env->callchain_buf;
         struct bpf_scc_visit *visit;
 
-        if (!compute_scc_callchain(env, st, &callchain)) {
+        if (!compute_scc_callchain(env, st, callchain)) {
                 verifier_bug(env, "add backedge: no SCC in verification path, insn_idx %d",
                              st->insn_idx);
                 return -EFAULT;
         }
-        visit = scc_visit_lookup(env, &callchain);
+        visit = scc_visit_lookup(env, callchain);
         if (!visit) {
                 verifier_bug(env, "add backedge: no visit info for call chain %s",
-                             format_callchain(env, &callchain));
+                             format_callchain(env, callchain));
                 return -EFAULT;
         }
         if (env->log.level & BPF_LOG_LEVEL2)
-                verbose(env, "SCC backedge %s\n", format_callchain(env, &callchain));
+                verbose(env, "SCC backedge %s\n", format_callchain(env, callchain));
         backedge->next = visit->backedges;
         visit->backedges = backedge;
         visit->num_backedges++;
@@ -1999,12 +1999,12 @@ static int add_scc_backedge(struct bpf_verifier_env *env,
 static bool incomplete_read_marks(struct bpf_verifier_env *env,
                                   struct bpf_verifier_state *st)
 {
-        struct bpf_scc_callchain callchain;
+        struct bpf_scc_callchain *callchain = &env->callchain_buf;
         struct bpf_scc_visit *visit;
 
-        if (!compute_scc_callchain(env, st, &callchain))
+        if (!compute_scc_callchain(env, st, callchain))
                 return false;
-        visit = scc_visit_lookup(env, &callchain);
+        visit = scc_visit_lookup(env, callchain);
         if (!visit)
                 return false;
         return !!visit->backedges;
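
The four hunks above are one mechanical change: a struct bpf_scc_callchain scratch value moves off the kernel stack into a buffer embedded in struct bpf_verifier_env, and the helpers borrow it through a pointer. Below is a minimal, self-contained C sketch of that pattern; the type and field names are illustrative stand-ins, not the kernel's, and it assumes (as this usage implies but the diff does not show) that only one verification pass touches a given env at a time, so no two users of the scratch buffer are ever live at once.

#include <stdio.h>

struct callchain {
        unsigned int frames[8];         /* stand-in for bpf_scc_callchain's contents */
};

struct env {
        /* Scratch area owned by the long-lived context; helpers borrow it
         * instead of each carving another copy out of the kernel stack.
         */
        struct callchain callchain_buf;
};

/* Callee fills a caller-supplied buffer; it does not care where it lives. */
static void compute_callchain(struct env *env, struct callchain *cc)
{
        (void)env;
        cc->frames[0] = 42;
}

static void maybe_enter(struct env *env)
{
        /* Before: "struct callchain cc;" on the stack.
         * After: borrow the per-env scratch buffer. Its value only matters
         * between compute_callchain() and the last use below, so strictly
         * serialized callers can all share the one buffer.
         */
        struct callchain *cc = &env->callchain_buf;

        compute_callchain(env, cc);
        printf("frame 0: %u\n", cc->frames[0]);
}

int main(void)
{
        struct env env = { 0 };

        maybe_enter(&env);
        return 0;
}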
@@ -21011,7 +21011,10 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
                                          const union bpf_attr *attr)
 {
-        struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
+        struct bpf_insn *patch;
+        /* use env->insn_buf as two independent buffers */
+        struct bpf_insn *zext_patch = env->insn_buf;
+        struct bpf_insn *rnd_hi32_patch = &env->insn_buf[2];
         struct bpf_insn_aux_data *aux = env->insn_aux_data;
         int i, patch_len, delta = 0, len = env->prog->len;
         struct bpf_insn *insns = env->prog->insnsi;
@@ -21189,13 +21192,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 
                 if (env->insn_aux_data[i + delta].nospec) {
                         WARN_ON_ONCE(env->insn_aux_data[i + delta].alu_state);
-                        struct bpf_insn patch[] = {
-                                BPF_ST_NOSPEC(),
-                                *insn,
-                        };
+                        struct bpf_insn *patch = insn_buf;
 
-                        cnt = ARRAY_SIZE(patch);
-                        new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+                        *patch++ = BPF_ST_NOSPEC();
+                        *patch++ = *insn;
+                        cnt = patch - insn_buf;
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 
@@ -21263,13 +21265,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                         /* nospec_result is only used to mitigate Spectre v4 and
                          * to limit verification-time for Spectre v1.
                          */
-                        struct bpf_insn patch[] = {
-                                *insn,
-                                BPF_ST_NOSPEC(),
-                        };
+                        struct bpf_insn *patch = insn_buf;
 
-                        cnt = ARRAY_SIZE(patch);
-                        new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+                        *patch++ = *insn;
+                        *patch++ = BPF_ST_NOSPEC();
+                        cnt = patch - insn_buf;
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 
@@ -21939,13 +21940,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
         u16 stack_depth_extra = 0;
 
         if (env->seen_exception && !env->exception_callback_subprog) {
-                struct bpf_insn patch[] = {
-                        env->prog->insnsi[insn_cnt - 1],
-                        BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
-                        BPF_EXIT_INSN(),
-                };
+                struct bpf_insn *patch = insn_buf;
 
-                ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch));
+                *patch++ = env->prog->insnsi[insn_cnt - 1];
+                *patch++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
+                *patch++ = BPF_EXIT_INSN();
+                ret = add_hidden_subprog(env, insn_buf, patch - insn_buf);
                 if (ret < 0)
                         return ret;
                 prog = env->prog;
@@ -21981,20 +21981,18 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                     insn->off == 1 && insn->imm == -1) {
                         bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
                         bool isdiv = BPF_OP(insn->code) == BPF_DIV;
-                        struct bpf_insn *patchlet;
-                        struct bpf_insn chk_and_sdiv[] = {
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_NEG | BPF_K, insn->dst_reg,
-                                             0, 0, 0),
-                        };
-                        struct bpf_insn chk_and_smod[] = {
-                                BPF_MOV32_IMM(insn->dst_reg, 0),
-                        };
+                        struct bpf_insn *patch = insn_buf;
+
+                        if (isdiv)
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_NEG | BPF_K, insn->dst_reg,
+                                                        0, 0, 0);
+                        else
+                                *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
 
-                        patchlet = isdiv ? chk_and_sdiv : chk_and_smod;
-                        cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod);
+                        cnt = patch - insn_buf;
 
-                        new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 
@@ -22013,83 +22011,79 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         bool isdiv = BPF_OP(insn->code) == BPF_DIV;
                         bool is_sdiv = isdiv && insn->off == 1;
                         bool is_smod = !isdiv && insn->off == 1;
-                        struct bpf_insn *patchlet;
-                        struct bpf_insn chk_and_div[] = {
-                                /* [R,W]x div 0 -> 0 */
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JNE | BPF_K, insn->src_reg,
-                                             0, 2, 0),
-                                BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                *insn,
-                        };
-                        struct bpf_insn chk_and_mod[] = {
-                                /* [R,W]x mod 0 -> [R,W]x */
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JEQ | BPF_K, insn->src_reg,
-                                             0, 1 + (is64 ? 0 : 1), 0),
-                                *insn,
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-                        };
-                        struct bpf_insn chk_and_sdiv[] = {
+                        struct bpf_insn *patch = insn_buf;
+
+                        if (is_sdiv) {
                                 /* [R,W]x sdiv 0 -> 0
                                  * LLONG_MIN sdiv -1 -> LLONG_MIN
                                  * INT_MIN sdiv -1 -> INT_MIN
                                  */
-                                BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_ADD | BPF_K, BPF_REG_AX,
-                                             0, 0, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JGT | BPF_K, BPF_REG_AX,
-                                             0, 4, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JEQ | BPF_K, BPF_REG_AX,
-                                             0, 1, 0),
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_MOV | BPF_K, insn->dst_reg,
-                                             0, 0, 0),
+                                *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_ADD | BPF_K, BPF_REG_AX,
+                                                        0, 0, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JGT | BPF_K, BPF_REG_AX,
+                                                        0, 4, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JEQ | BPF_K, BPF_REG_AX,
+                                                        0, 1, 0);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_MOV | BPF_K, insn->dst_reg,
+                                                        0, 0, 0);
                                 /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_NEG | BPF_K, insn->dst_reg,
-                                             0, 0, 0),
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                *insn,
-                        };
-                        struct bpf_insn chk_and_smod[] = {
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_NEG | BPF_K, insn->dst_reg,
+                                                        0, 0, 0);
+                                *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                *patch++ = *insn;
+                                cnt = patch - insn_buf;
+                        } else if (is_smod) {
                                 /* [R,W]x mod 0 -> [R,W]x */
                                 /* [R,W]x mod -1 -> 0 */
-                                BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-                                BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-                                             BPF_ADD | BPF_K, BPF_REG_AX,
-                                             0, 0, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JGT | BPF_K, BPF_REG_AX,
-                                             0, 3, 1),
-                                BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-                                             BPF_JEQ | BPF_K, BPF_REG_AX,
-                                             0, 3 + (is64 ? 0 : 1), 1),
-                                BPF_MOV32_IMM(insn->dst_reg, 0),
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                *insn,
-                                BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-                                BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-                        };
-
-                        if (is_sdiv) {
-                                patchlet = chk_and_sdiv;
-                                cnt = ARRAY_SIZE(chk_and_sdiv);
-                        } else if (is_smod) {
-                                patchlet = chk_and_smod;
-                                cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0);
+                                *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+                                                        BPF_ADD | BPF_K, BPF_REG_AX,
+                                                        0, 0, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JGT | BPF_K, BPF_REG_AX,
+                                                        0, 3, 1);
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JEQ | BPF_K, BPF_REG_AX,
+                                                        0, 3 + (is64 ? 0 : 1), 1);
+                                *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
+                                *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                *patch++ = *insn;
+
+                                if (!is64) {
+                                        *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                        *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+                                }
+                                cnt = patch - insn_buf;
+                        } else if (isdiv) {
+                                /* [R,W]x div 0 -> 0 */
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JNE | BPF_K, insn->src_reg,
+                                                        0, 2, 0);
+                                *patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg);
+                                *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                *patch++ = *insn;
+                                cnt = patch - insn_buf;
                         } else {
-                                patchlet = isdiv ? chk_and_div : chk_and_mod;
-                                cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
-                                              ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+                                /* [R,W]x mod 0 -> [R,W]x */
+                                *patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+                                                        BPF_JEQ | BPF_K, insn->src_reg,
+                                                        0, 1 + (is64 ? 0 : 1), 0);
+                                *patch++ = *insn;
+
+                                if (!is64) {
+                                        *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                                        *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+                                }
+                                cnt = patch - insn_buf;
                         }
 
-                        new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
                         if (!new_prog)
                                 return -ENOMEM;
 
@@ -22103,7 +22097,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                 if (BPF_CLASS(insn->code) == BPF_LDX &&
                     (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
                      BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
-                        struct bpf_insn *patch = &insn_buf[0];
+                        struct bpf_insn *patch = insn_buf;
                         u64 uaddress_limit = bpf_arch_uaddress_limit();
 
                         if (!uaddress_limit)
@@ -22154,7 +22148,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                     insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
                         const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
                         const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
-                        struct bpf_insn *patch = &insn_buf[0];
+                        struct bpf_insn *patch = insn_buf;
                         bool issrc, isneg, isimm;
                         u32 off_reg;
 
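
The insn_buf hunks all make the same substitution: per-site struct bpf_insn patch[] arrays on the stack, selected through patchlet and measured with ARRAY_SIZE() (including the ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0) tail-trimming idiom), become appends through a write cursor into the preallocated, env-owned insn_buf, with the emitted length recovered as patch - insn_buf. A toy, compilable sketch of that cursor idiom follows, using made-up instruction types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct insn { int code; };                      /* toy stand-in for struct bpf_insn */

#define INSN(c) ((struct insn){ .code = (c) })

static struct insn insn_buf[16];                /* stand-in for env->insn_buf */

/* Build a variable-length patch and return how many entries it emitted.
 * The conditional tail is just two more appends, instead of a fixed-size
 * array whose declared length must be corrected after the fact with
 * ARRAY_SIZE(...) - (is64 ? 2 : 0).
 */
static size_t build_patch(int is64)
{
        struct insn *patch = insn_buf;          /* write cursor */

        *patch++ = INSN(1);
        *patch++ = INSN(2);
        if (!is64) {
                *patch++ = INSN(3);
                *patch++ = INSN(4);
        }
        return patch - insn_buf;                /* emitted count */
}

int main(void)
{
        printf("is64=1 emits %zu insns, is64=0 emits %zu\n",
               build_patch(1), build_patch(0));
        return 0;
}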