@@ -3663,7 +3663,7 @@ static int mark_irq_flag_read(struct bpf_verifier_env *env, struct bpf_reg_state
  * code only. It returns TRUE if the source or destination register operates
  * on 64-bit, otherwise return FALSE.
  */
-static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
+static bool is_reg64(struct bpf_insn *insn,
                      u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
 {
         u8 code, class, op;
@@ -3774,14 +3774,14 @@ static int insn_def_regno(const struct bpf_insn *insn)
 }

 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
-static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
+static bool insn_has_def32(struct bpf_insn *insn)
 {
         int dst_reg = insn_def_regno(insn);

         if (dst_reg == -1)
                 return false;

-        return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
+        return !is_reg64(insn, dst_reg, NULL, DST_OP);
 }

 static void mark_insn_zext(struct bpf_verifier_env *env,
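For context, the predicate these two hunks slim down answers one question: does an instruction define a full 64-bit register, or only its low 32-bit sub-register (which the verifier may later need to zero-extend explicitly)? The sketch below is a hedged userspace approximation of that classification, not the verifier's real opcode table: it only handles the common ALU/ALU64 and LDX cases via the UAPI <linux/bpf.h> header, and the helper name writes_full_reg64() is made up for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>          /* struct bpf_insn, BPF_CLASS(), BPF_ALU64, ... */

/* Simplified stand-in: does this insn define a full 64-bit destination?
 * The real is_reg64() also considers BPF_END, 32-bit jumps, call
 * conventions, etc.; only the common ALU/ALU64 and LDX cases are shown.
 */
static bool writes_full_reg64(const struct bpf_insn *insn)
{
        __u8 class = BPF_CLASS(insn->code);

        if (class == BPF_ALU64)                 /* 64-bit ALU always writes 64 bits */
                return true;
        if (class == BPF_ALU)                   /* 32-bit ALU writes a sub-register */
                return false;
        if (class == BPF_LDX)                   /* only BPF_DW loads fill 64 bits */
                return BPF_SIZE(insn->code) == BPF_DW;
        return true;                            /* conservative default */
}

int main(void)
{
        struct bpf_insn insn32 = { .code = BPF_ALU   | BPF_MOV | BPF_K, .dst_reg = 1 };
        struct bpf_insn insn64 = { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 1 };

        printf("32-bit mov defines a 32-bit value: %d\n", !writes_full_reg64(&insn32));
        printf("64-bit mov defines a 32-bit value: %d\n", !writes_full_reg64(&insn64));
        return 0;
}

Dropping the unused env argument, as the diff does, changes nothing about this logic; it only tightens the helpers' signatures.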
@@ -3812,7 +3812,7 @@ static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *r
         mark_reg_scratched(env, regno);

         reg = &regs[regno];
-        rw64 = is_reg64(env, insn, regno, reg, t);
+        rw64 = is_reg64(insn, regno, reg, t);
         if (t == SRC_OP) {
                 /* check whether register used as source operand can be read */
                 if (reg->type == NOT_INIT) {
@@ -20699,35 +20699,32 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
  * [0, off) and [off, end) to new locations, so the patched range stays zero
  */
 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
-                                 struct bpf_insn_aux_data *new_data,
                                  struct bpf_prog *new_prog, u32 off, u32 cnt)
 {
-        struct bpf_insn_aux_data *old_data = env->insn_aux_data;
+        struct bpf_insn_aux_data *data = env->insn_aux_data;
         struct bpf_insn *insn = new_prog->insnsi;
-        u32 old_seen = old_data[off].seen;
+        u32 old_seen = data[off].seen;
         u32 prog_len;
         int i;

         /* aux info at OFF always needs adjustment, no matter fast path
          * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
          * original insn at old prog.
          */
-        old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
+        data[off].zext_dst = insn_has_def32(insn + off + cnt - 1);

         if (cnt == 1)
                 return;
         prog_len = new_prog->len;

-        memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
-        memcpy(new_data + off + cnt - 1, old_data + off,
-               sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+        memmove(data + off + cnt - 1, data + off,
+                sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+        memset(data + off, 0, sizeof(struct bpf_insn_aux_data) * (cnt - 1));
         for (i = off; i < off + cnt - 1; i++) {
                 /* Expand insni[off]'s seen count to the patched range. */
-                new_data[i].seen = old_seen;
-                new_data[i].zext_dst = insn_has_def32(env, insn + i);
+                data[i].seen = old_seen;
+                data[i].zext_dst = insn_has_def32(insn + i);
         }
-        env->insn_aux_data = new_data;
-        vfree(old_data);
 }

 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
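The core of this hunk is replacing the old copy-into-a-fresh-array scheme (two memcpy() calls plus vfree() of the old buffer) with an in-place shift: memmove() slides the aux entries after the patch point toward the end of the already-grown array, and memset() clears the slots opened up for the newly inserted instructions. Below is a hedged userspace sketch of that pattern with an ordinary heap array; struct aux and shift_aux_in_place() are stand-ins, not kernel code, and the buffer is assumed to have been grown to the patched length beforehand (as bpf_patch_insn_data() now does with vrealloc()).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct aux { unsigned int seen; int zext_dst; };

/* Shift per-insn metadata in place when one insn at OFF is patched into CNT:
 * entries [off, old_len) move to [off + cnt - 1, old_len + cnt - 1), and the
 * (cnt - 1) slots exposed at [off, off + cnt - 1) are zeroed, mirroring the
 * memmove() + memset() in adjust_insn_aux_data().
 */
static void shift_aux_in_place(struct aux *aux, size_t old_len,
                               size_t off, size_t cnt)
{
        memmove(aux + off + cnt - 1, aux + off,
                sizeof(*aux) * (old_len - off));
        memset(aux + off, 0, sizeof(*aux) * (cnt - 1));
}

int main(void)
{
        size_t old_len = 4, off = 1, cnt = 3;          /* patch 1 insn into 3 */
        size_t new_len = old_len + cnt - 1;
        struct aux *aux = calloc(new_len, sizeof(*aux));

        for (size_t i = 0; i < old_len; i++)
                aux[i].seen = 100 + i;                 /* fake per-insn data */

        shift_aux_in_place(aux, old_len, off, cnt);

        for (size_t i = 0; i < new_len; i++)
                printf("aux[%zu].seen = %u\n", i, aux[i].seen);
        free(aux);
        return 0;
}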
@@ -20765,10 +20762,14 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
         struct bpf_insn_aux_data *new_data = NULL;

         if (len > 1) {
-                new_data = vzalloc(array_size(env->prog->len + len - 1,
-                                              sizeof(struct bpf_insn_aux_data)));
+                new_data = vrealloc(env->insn_aux_data,
+                                    array_size(env->prog->len + len - 1,
+                                               sizeof(struct bpf_insn_aux_data)),
+                                    GFP_KERNEL_ACCOUNT | __GFP_ZERO);
                 if (!new_data)
                         return NULL;
+
+                env->insn_aux_data = new_data;
         }

         new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
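This is the allocation side of the same change: instead of vzalloc()-ing a second aux array, the verifier now grows env->insn_aux_data in place with vrealloc(), passing __GFP_ZERO so the grown region comes back zeroed, and immediately reassigns env->insn_aux_data. A rough userspace analogue is sketched below; realloc() does not zero new memory, so the stand-in grow_zeroed() (a made-up name) clears the tail by hand.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct aux { unsigned int seen; int zext_dst; };

/* Userspace stand-in for "vrealloc(ptr, new_size, GFP | __GFP_ZERO)":
 * grow the array and zero the newly added tail. realloc() leaves the
 * tail uninitialized, so memset() it explicitly.
 */
static struct aux *grow_zeroed(struct aux *aux, size_t old_n, size_t new_n)
{
        struct aux *tmp = realloc(aux, new_n * sizeof(*aux));

        if (!tmp)
                return NULL;            /* caller still owns the old buffer */
        memset(tmp + old_n, 0, (new_n - old_n) * sizeof(*aux));
        return tmp;
}

int main(void)
{
        size_t prog_len = 4, patch_len = 3;
        size_t new_len = prog_len + patch_len - 1;
        struct aux *aux = calloc(prog_len, sizeof(*aux));
        struct aux *bigger = grow_zeroed(aux, prog_len, new_len);

        if (!bigger) {
                free(aux);
                return 1;
        }
        aux = bigger;                   /* like env->insn_aux_data = new_data */
        printf("grew aux array from %zu to %zu entries\n", prog_len, new_len);
        free(aux);
        return 0;
}

As in the kernel path, a failed grow leaves the caller holding the original buffer.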
@@ -20780,7 +20781,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
                 vfree(new_data);
                 return NULL;
         }
-        adjust_insn_aux_data(env, new_data, new_prog, off, len);
+        adjust_insn_aux_data(env, new_prog, off, len);
         adjust_subprog_starts(env, off, len);
         adjust_poke_descs(new_prog, off, len);
         return new_prog;
@@ -21131,7 +21132,7 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
                          * BPF_STX + SRC_OP, so it is safe to pass NULL
                          * here.
                          */
-                        if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
+                        if (is_reg64(&insn, load_reg, NULL, DST_OP)) {
                                 if (class == BPF_LD &&
                                     BPF_MODE(code) == BPF_IMM)
                                         i++;