@@ -1914,19 +1914,19 @@ static char *format_callchain(struct bpf_verifier_env *env, struct bpf_scc_callc
  */
 static int maybe_enter_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain))
+	if (!compute_scc_callchain(env, st, callchain))
 		return 0;
-	visit = scc_visit_lookup(env, &callchain);
-	visit = visit ?: scc_visit_alloc(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
+	visit = visit ?: scc_visit_alloc(env, callchain);
 	if (!visit)
 		return -ENOMEM;
 	if (!visit->entry_state) {
 		visit->entry_state = st;
 		if (env->log.level & BPF_LOG_LEVEL2)
-			verbose(env, "SCC enter %s\n", format_callchain(env, &callchain));
+			verbose(env, "SCC enter %s\n", format_callchain(env, callchain));
 	}
 	return 0;
 }
@@ -1939,21 +1939,21 @@ static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visi
  */
 static int maybe_exit_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain))
+	if (!compute_scc_callchain(env, st, callchain))
 		return 0;
-	visit = scc_visit_lookup(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
 	if (!visit) {
 		verifier_bug(env, "scc exit: no visit info for call chain %s",
-			     format_callchain(env, &callchain));
+			     format_callchain(env, callchain));
 		return -EFAULT;
 	}
 	if (visit->entry_state != st)
 		return 0;
 	if (env->log.level & BPF_LOG_LEVEL2)
-		verbose(env, "SCC exit %s\n", format_callchain(env, &callchain));
+		verbose(env, "SCC exit %s\n", format_callchain(env, callchain));
 	visit->entry_state = NULL;
 	env->num_backedges -= visit->num_backedges;
 	visit->num_backedges = 0;
@@ -1968,22 +1968,22 @@ static int add_scc_backedge(struct bpf_verifier_env *env,
 			    struct bpf_verifier_state *st,
 			    struct bpf_scc_backedge *backedge)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain)) {
+	if (!compute_scc_callchain(env, st, callchain)) {
 		verifier_bug(env, "add backedge: no SCC in verification path, insn_idx %d",
 			     st->insn_idx);
 		return -EFAULT;
 	}
-	visit = scc_visit_lookup(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
 	if (!visit) {
 		verifier_bug(env, "add backedge: no visit info for call chain %s",
-			     format_callchain(env, &callchain));
+			     format_callchain(env, callchain));
 		return -EFAULT;
 	}
 	if (env->log.level & BPF_LOG_LEVEL2)
-		verbose(env, "SCC backedge %s\n", format_callchain(env, &callchain));
+		verbose(env, "SCC backedge %s\n", format_callchain(env, callchain));
 	backedge->next = visit->backedges;
 	visit->backedges = backedge;
 	visit->num_backedges++;
@@ -1999,12 +1999,12 @@ static int add_scc_backedge(struct bpf_verifier_env *env,
 static bool incomplete_read_marks(struct bpf_verifier_env *env,
 				  struct bpf_verifier_state *st)
 {
-	struct bpf_scc_callchain callchain;
+	struct bpf_scc_callchain *callchain = &env->callchain_buf;
 	struct bpf_scc_visit *visit;
 
-	if (!compute_scc_callchain(env, st, &callchain))
+	if (!compute_scc_callchain(env, st, callchain))
 		return false;
-	visit = scc_visit_lookup(env, &callchain);
+	visit = scc_visit_lookup(env, callchain);
 	if (!visit)
 		return false;
 	return !!visit->backedges;
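
Note: the four hunks above are one and the same transformation. A `struct bpf_scc_callchain` that each helper used to allocate on the kernel stack now lives once in `struct bpf_verifier_env` as `env->callchain_buf`, which is safe as long as no helper holds the buffer across a call that reuses it, and it trims that struct off several deep verifier call paths. A minimal standalone sketch of the pattern; the names (`struct env`, `compute_callchain`) are illustrative, not the kernel's:

    #include <stdio.h>
    #include <string.h>

    #define MAX_FRAMES 8

    struct callchain {
            unsigned int frame[MAX_FRAMES]; /* one call site per frame */
            unsigned int scc_id;            /* SCC containing the insn */
    };

    /* long-lived context: one scratch callchain shared by all helpers */
    struct env {
            struct callchain callchain_buf;
            int depth;
    };

    /* hypothetical helper: fills *cc from the current state */
    static int compute_callchain(struct env *env, struct callchain *cc)
    {
            memset(cc, 0, sizeof(*cc));
            cc->scc_id = 42;
            cc->frame[0] = (unsigned int)env->depth;
            return 1;
    }

    static void enter_scc(struct env *env)
    {
            /* borrow env's buffer instead of a local: no stack cost here */
            struct callchain *cc = &env->callchain_buf;

            if (!compute_callchain(env, cc))
                    return;
            printf("SCC enter: scc=%u frame0=%u\n", cc->scc_id, cc->frame[0]);
    }

    int main(void)
    {
            struct env env = { .depth = 3 };

            enter_scc(&env); /* helpers never use the buffer reentrantly */
            return 0;
    }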
@@ -21011,7 +21011,10 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 					 const union bpf_attr *attr)
 {
-	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
+	struct bpf_insn *patch;
+	/* use env->insn_buf as two independent buffers */
+	struct bpf_insn *zext_patch = env->insn_buf;
+	struct bpf_insn *rnd_hi32_patch = &env->insn_buf[2];
 	struct bpf_insn_aux_data *aux = env->insn_aux_data;
 	int i, patch_len, delta = 0, len = env->prog->len;
 	struct bpf_insn *insns = env->prog->insnsi;
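
Note: `zext_patch` needed two instructions and `rnd_hi32_patch` four, so the old locals cost six `struct bpf_insn` of stack; the new code carves both out of the single `env->insn_buf` scratch array as non-overlapping slices, the second starting at index 2. A standalone sketch of the slicing; `INSN_BUF_SIZE` and the field names are illustrative, the only real requirement being a backing array of at least 2 + 4 entries:

    #include <assert.h>
    #include <stdio.h>

    #define INSN_BUF_SIZE 32 /* assumed size; must be >= 2 + 4 here */

    struct insn { int code; };

    struct env {
            struct insn insn_buf[INSN_BUF_SIZE];
    };

    int main(void)
    {
            struct env env = { 0 };
            /* two non-overlapping patch areas in one backing array:
             * slots [0,1] for the zext patch, [2..5] for the hi32 patch
             */
            struct insn *zext_patch = env.insn_buf;
            struct insn *rnd_hi32_patch = &env.insn_buf[2];

            assert(INSN_BUF_SIZE >= 2 + 4);
            zext_patch[0].code = 1;
            rnd_hi32_patch[0].code = 2;
            printf("%d %d\n", env.insn_buf[0].code, env.insn_buf[2].code);
            return 0;
    }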
@@ -21189,13 +21192,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 
 		if (env->insn_aux_data[i + delta].nospec) {
 			WARN_ON_ONCE(env->insn_aux_data[i + delta].alu_state);
-			struct bpf_insn patch[] = {
-				BPF_ST_NOSPEC(),
-				*insn,
-			};
+			struct bpf_insn *patch = insn_buf;
 
-			cnt = ARRAY_SIZE(patch);
-			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+			*patch++ = BPF_ST_NOSPEC();
+			*patch++ = *insn;
+			cnt = patch - insn_buf;
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
 
@@ -21263,13 +21265,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			/* nospec_result is only used to mitigate Spectre v4 and
 			 * to limit verification-time for Spectre v1.
 			 */
-			struct bpf_insn patch[] = {
-				*insn,
-				BPF_ST_NOSPEC(),
-			};
+			struct bpf_insn *patch = insn_buf;
 
-			cnt = ARRAY_SIZE(patch);
-			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+			*patch++ = *insn;
+			*patch++ = BPF_ST_NOSPEC();
+			cnt = patch - insn_buf;
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
 
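
Note: these two convert_ctx_accesses() hunks, and the do_misc_fixups() hunks below, all replace a fixed on-stack `struct bpf_insn patch[]` array with a write cursor into the shared `insn_buf`: instructions are appended via `*patch++ = ...` and the patch length falls out of pointer subtraction instead of `ARRAY_SIZE()`. A minimal sketch of the cursor pattern outside the kernel:

    #include <stdio.h>

    #define BUF_SIZE 16

    struct insn { int code; };

    int main(void)
    {
            struct insn insn_buf[BUF_SIZE];
            struct insn *patch = insn_buf;   /* write cursor */
            struct insn cur = { .code = 7 }; /* stands in for *insn */
            long cnt;

            /* append one instruction at a time instead of declaring
             * a fixed array and counting it with ARRAY_SIZE()
             */
            *patch++ = (struct insn){ .code = 0 }; /* e.g. a barrier */
            *patch++ = cur;                        /* the original insn */

            cnt = patch - insn_buf; /* pointer difference = insn count */
            printf("emitting %ld insns\n", cnt);
            return 0;
    }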
@@ -21939,13 +21940,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 	u16 stack_depth_extra = 0;
 
 	if (env->seen_exception && !env->exception_callback_subprog) {
-		struct bpf_insn patch[] = {
-			env->prog->insnsi[insn_cnt - 1],
-			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
-			BPF_EXIT_INSN(),
-		};
+		struct bpf_insn *patch = insn_buf;
 
-		ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch));
+		*patch++ = env->prog->insnsi[insn_cnt - 1];
+		*patch++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
+		*patch++ = BPF_EXIT_INSN();
+		ret = add_hidden_subprog(env, insn_buf, patch - insn_buf);
 		if (ret < 0)
 			return ret;
 		prog = env->prog;
@@ -21981,20 +21981,18 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		    insn->off == 1 && insn->imm == -1) {
 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
-			struct bpf_insn *patchlet;
-			struct bpf_insn chk_and_sdiv[] = {
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_NEG | BPF_K, insn->dst_reg,
-					     0, 0, 0),
-			};
-			struct bpf_insn chk_and_smod[] = {
-				BPF_MOV32_IMM(insn->dst_reg, 0),
-			};
+			struct bpf_insn *patch = insn_buf;
+
+			if (isdiv)
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_NEG | BPF_K, insn->dst_reg,
+							0, 0, 0);
+			else
+				*patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
 
-			patchlet = isdiv ? chk_and_sdiv : chk_and_smod;
-			cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod);
+			cnt = patch - insn_buf;
 
-			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
 
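
Note: before this hunk, both one-instruction templates (`chk_and_sdiv[]` and `chk_and_smod[]`) were materialized on the stack even though only one was ever patched in; with the cursor, only the branch taken writes to `insn_buf`. A sketch of such conditional emission; the opcode values are placeholders, not real BPF encodings:

    #include <stdbool.h>
    #include <stdio.h>

    struct insn { int code; };

    int main(void)
    {
            struct insn insn_buf[4];
            struct insn *patch = insn_buf;
            bool isdiv = true; /* pretend we matched sdiv by -1 */
            long cnt;

            /* only the chosen alternative occupies buffer space;
             * no dead sibling array ever lands on the stack
             */
            if (isdiv)
                    *patch++ = (struct insn){ .code = 0x87 }; /* NEG-ish */
            else
                    *patch++ = (struct insn){ .code = 0xb4 }; /* MOV32-ish */

            cnt = patch - insn_buf;
            printf("patch length: %ld\n", cnt);
            return 0;
    }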
@@ -22013,83 +22011,79 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
 			bool is_sdiv = isdiv && insn->off == 1;
 			bool is_smod = !isdiv && insn->off == 1;
-			struct bpf_insn *patchlet;
-			struct bpf_insn chk_and_div[] = {
-				/* [R,W]x div 0 -> 0 */
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JNE | BPF_K, insn->src_reg,
-					     0, 2, 0),
-				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				*insn,
-			};
-			struct bpf_insn chk_and_mod[] = {
-				/* [R,W]x mod 0 -> [R,W]x */
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JEQ | BPF_K, insn->src_reg,
-					     0, 1 + (is64 ? 0 : 1), 0),
-				*insn,
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-			};
-			struct bpf_insn chk_and_sdiv[] = {
+			struct bpf_insn *patch = insn_buf;
+
+			if (is_sdiv) {
 				/* [R,W]x sdiv 0 -> 0
 				 * LLONG_MIN sdiv -1 -> LLONG_MIN
 				 * INT_MIN sdiv -1 -> INT_MIN
 				 */
-				BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_ADD | BPF_K, BPF_REG_AX,
-					     0, 0, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JGT | BPF_K, BPF_REG_AX,
-					     0, 4, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JEQ | BPF_K, BPF_REG_AX,
-					     0, 1, 0),
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_MOV | BPF_K, insn->dst_reg,
-					     0, 0, 0),
+				*patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_ADD | BPF_K, BPF_REG_AX,
+							0, 0, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JGT | BPF_K, BPF_REG_AX,
+							0, 4, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JEQ | BPF_K, BPF_REG_AX,
+							0, 1, 0);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_MOV | BPF_K, insn->dst_reg,
+							0, 0, 0);
 				/* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_NEG | BPF_K, insn->dst_reg,
-					     0, 0, 0),
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				*insn,
-			};
-			struct bpf_insn chk_and_smod[] = {
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_NEG | BPF_K, insn->dst_reg,
+							0, 0, 0);
+				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+				*patch++ = *insn;
+				cnt = patch - insn_buf;
+			} else if (is_smod) {
 				/* [R,W]x mod 0 -> [R,W]x */
 				/* [R,W]x mod -1 -> 0 */
-				BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
-				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
-					     BPF_ADD | BPF_K, BPF_REG_AX,
-					     0, 0, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JGT | BPF_K, BPF_REG_AX,
-					     0, 3, 1),
-				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
-					     BPF_JEQ | BPF_K, BPF_REG_AX,
-					     0, 3 + (is64 ? 0 : 1), 1),
-				BPF_MOV32_IMM(insn->dst_reg, 0),
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				*insn,
-				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
-			};
-
-			if (is_sdiv) {
-				patchlet = chk_and_sdiv;
-				cnt = ARRAY_SIZE(chk_and_sdiv);
-			} else if (is_smod) {
-				patchlet = chk_and_smod;
-				cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0);
+				*patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+							BPF_ADD | BPF_K, BPF_REG_AX,
+							0, 0, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JGT | BPF_K, BPF_REG_AX,
+							0, 3, 1);
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JEQ | BPF_K, BPF_REG_AX,
+							0, 3 + (is64 ? 0 : 1), 1);
+				*patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
+				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+				*patch++ = *insn;
+
+				if (!is64) {
+					*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+					*patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+				}
+				cnt = patch - insn_buf;
+			} else if (isdiv) {
+				/* [R,W]x div 0 -> 0 */
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JNE | BPF_K, insn->src_reg,
+							0, 2, 0);
+				*patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg);
+				*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+				*patch++ = *insn;
+				cnt = patch - insn_buf;
 			} else {
-				patchlet = isdiv ? chk_and_div : chk_and_mod;
-				cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
-					      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+				/* [R,W]x mod 0 -> [R,W]x */
+				*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+							BPF_JEQ | BPF_K, insn->src_reg,
+							0, 1 + (is64 ? 0 : 1), 0);
+				*patch++ = *insn;
+
+				if (!is64) {
+					*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+					*patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
+				}
+				cnt = patch - insn_buf;
 			}
 
-			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
 
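
Note: the subtlest part of the hunk above is the length arithmetic it retires. `cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0)` silently chopped the trailing jump-plus-zero-extend pair off the template in the 64-bit case; the new code emits that tail only under an explicit `if (!is64)`, so the count is always just `patch - insn_buf`. A standalone sketch of the same before/after logic; the helper name and opcode values are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct insn { int code; };

    /* new style: emit the 32-bit-only tail explicitly and let the
     * count fall out of pointer subtraction (the old style instead
     * took ARRAY_SIZE(template) - (is64 ? 2 : 0))
     */
    static long build_patch(struct insn *buf, bool is64, int orig_code)
    {
            struct insn *patch = buf;

            *patch++ = (struct insn){ .code = orig_code }; /* original op */
            if (!is64) {
                    /* 32-bit only: skip-jump + truncating register move */
                    *patch++ = (struct insn){ .code = 0x05 }; /* JA-ish */
                    *patch++ = (struct insn){ .code = 0xbc }; /* MOV32-ish */
            }
            return patch - buf;
    }

    int main(void)
    {
            struct insn buf[8];

            printf("64-bit patch: %ld insns\n", build_patch(buf, true, 0x9f));
            printf("32-bit patch: %ld insns\n", build_patch(buf, false, 0x9c));
            return 0;
    }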
@@ -22103,7 +22097,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		if (BPF_CLASS(insn->code) == BPF_LDX &&
 		    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
 		     BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
-			struct bpf_insn *patch = &insn_buf[0];
+			struct bpf_insn *patch = insn_buf;
 			u64 uaddress_limit = bpf_arch_uaddress_limit();
 
 			if (!uaddress_limit)
@@ -22154,7 +22148,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
 			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
-			struct bpf_insn *patch = &insn_buf[0];
+			struct bpf_insn *patch = insn_buf;
 			bool issrc, isneg, isimm;
 			u32 off_reg;
 