Skip to content

Commit baaebe0

Browse files
eddyz87 authored and Alexei Starovoitov committed
Revert "bpf: use common instruction history across all states"
This reverts commit 96a30e4. Next patches in the series modify propagate_precision() to allow arbitrary starting state. Precision propagation requires access to jump history, and arbitrary states represent history not belonging to `env->cur_state`. Signed-off-by: Eduard Zingerman <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 517b088 commit baaebe0

File tree

2 files changed

+64
-64
lines changed

2 files changed

+64
-64
lines changed

include/linux/bpf_verifier.h

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,7 @@ struct bpf_func_state {
344344

345345
#define MAX_CALL_FRAMES 8
346346

347-
/* instruction history flags, used in bpf_insn_hist_entry.flags field */
347+
/* instruction history flags, used in bpf_jmp_history_entry.flags field */
348348
enum {
349349
/* instruction references stack slot through PTR_TO_STACK register;
350350
* we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
@@ -366,7 +366,7 @@ enum {
366366
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
367367
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
368368

369-
struct bpf_insn_hist_entry {
369+
struct bpf_jmp_history_entry {
370370
u32 idx;
371371
/* insn idx can't be bigger than 1 million */
372372
u32 prev_idx : 20;
@@ -459,14 +459,13 @@ struct bpf_verifier_state {
459459
* See get_loop_entry() for more information.
460460
*/
461461
struct bpf_verifier_state *loop_entry;
462-
/* Sub-range of env->insn_hist[] corresponding to this state's
463-
* instruction history.
464-
* Backtracking is using it to go from last to first.
465-
* For most states instruction history is short, 0-3 instructions.
462+
/* jmp history recorded from first to last.
463+
* backtracking is using it to go from last to first.
464+
* For most states jmp_history_cnt is [0-3].
466465
* For loops can go up to ~40.
467466
*/
468-
u32 insn_hist_start;
469-
u32 insn_hist_end;
467+
struct bpf_jmp_history_entry *jmp_history;
468+
u32 jmp_history_cnt;
470469
u32 dfs_depth;
471470
u32 callback_unroll_depth;
472471
u32 may_goto_depth;
@@ -776,9 +775,7 @@ struct bpf_verifier_env {
776775
int cur_postorder;
777776
} cfg;
778777
struct backtrack_state bt;
779-
struct bpf_insn_hist_entry *insn_hist;
780-
struct bpf_insn_hist_entry *cur_hist_ent;
781-
u32 insn_hist_cap;
778+
struct bpf_jmp_history_entry *cur_hist_ent;
782779
u32 pass_cnt; /* number of times do_check() was called */
783780
u32 subprog_cnt;
784781
/* number of instructions analyzed by the verifier */

kernel/bpf/verifier.c

Lines changed: 56 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -1660,6 +1660,13 @@ static void free_func_state(struct bpf_func_state *state)
16601660
kfree(state);
16611661
}
16621662

1663+
static void clear_jmp_history(struct bpf_verifier_state *state)
1664+
{
1665+
kfree(state->jmp_history);
1666+
state->jmp_history = NULL;
1667+
state->jmp_history_cnt = 0;
1668+
}
1669+
16631670
static void free_verifier_state(struct bpf_verifier_state *state,
16641671
bool free_self)
16651672
{
@@ -1670,6 +1677,7 @@ static void free_verifier_state(struct bpf_verifier_state *state,
16701677
state->frame[i] = NULL;
16711678
}
16721679
kfree(state->refs);
1680+
clear_jmp_history(state);
16731681
if (free_self)
16741682
kfree(state);
16751683
}
@@ -1734,6 +1742,13 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
17341742
struct bpf_func_state *dst;
17351743
int i, err;
17361744

1745+
dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1746+
src->jmp_history_cnt, sizeof(*dst_state->jmp_history),
1747+
GFP_USER);
1748+
if (!dst_state->jmp_history)
1749+
return -ENOMEM;
1750+
dst_state->jmp_history_cnt = src->jmp_history_cnt;
1751+
17371752
/* if dst has more stack frames then src frame, free them, this is also
17381753
* necessary in case of exceptional exits using bpf_throw.
17391754
*/
@@ -1751,8 +1766,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
17511766
dst_state->parent = src->parent;
17521767
dst_state->first_insn_idx = src->first_insn_idx;
17531768
dst_state->last_insn_idx = src->last_insn_idx;
1754-
dst_state->insn_hist_start = src->insn_hist_start;
1755-
dst_state->insn_hist_end = src->insn_hist_end;
17561769
dst_state->dfs_depth = src->dfs_depth;
17571770
dst_state->callback_unroll_depth = src->callback_unroll_depth;
17581771
dst_state->used_as_loop_entry = src->used_as_loop_entry;
@@ -2820,14 +2833,9 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
28202833
* The caller state doesn't matter.
28212834
* This is async callback. It starts in a fresh stack.
28222835
* Initialize it similar to do_check_common().
2823-
* But we do need to make sure to not clobber insn_hist, so we keep
2824-
* chaining insn_hist_start/insn_hist_end indices as for a normal
2825-
* child state.
28262836
*/
28272837
elem->st.branches = 1;
28282838
elem->st.in_sleepable = is_sleepable;
2829-
elem->st.insn_hist_start = env->cur_state->insn_hist_end;
2830-
elem->st.insn_hist_end = elem->st.insn_hist_start;
28312839
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
28322840
if (!frame)
28332841
goto err;
@@ -3856,10 +3864,11 @@ static void linked_regs_unpack(u64 val, struct linked_regs *s)
38563864
}
38573865

38583866
/* for any branch, call, exit record the history of jmps in the given state */
3859-
static int push_insn_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
3860-
int insn_flags, u64 linked_regs)
3867+
static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
3868+
int insn_flags, u64 linked_regs)
38613869
{
3862-
struct bpf_insn_hist_entry *p;
3870+
u32 cnt = cur->jmp_history_cnt;
3871+
struct bpf_jmp_history_entry *p;
38633872
size_t alloc_size;
38643873

38653874
/* combine instruction flags if we already recorded this instruction */
@@ -3879,32 +3888,29 @@ static int push_insn_history(struct bpf_verifier_env *env, struct bpf_verifier_s
38793888
return 0;
38803889
}
38813890

3882-
if (cur->insn_hist_end + 1 > env->insn_hist_cap) {
3883-
alloc_size = size_mul(cur->insn_hist_end + 1, sizeof(*p));
3884-
p = kvrealloc(env->insn_hist, alloc_size, GFP_USER);
3885-
if (!p)
3886-
return -ENOMEM;
3887-
env->insn_hist = p;
3888-
env->insn_hist_cap = alloc_size / sizeof(*p);
3889-
}
3891+
cnt++;
3892+
alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
3893+
p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
3894+
if (!p)
3895+
return -ENOMEM;
3896+
cur->jmp_history = p;
38903897

3891-
p = &env->insn_hist[cur->insn_hist_end];
3898+
p = &cur->jmp_history[cnt - 1];
38923899
p->idx = env->insn_idx;
38933900
p->prev_idx = env->prev_insn_idx;
38943901
p->flags = insn_flags;
38953902
p->linked_regs = linked_regs;
3896-
3897-
cur->insn_hist_end++;
3903+
cur->jmp_history_cnt = cnt;
38983904
env->cur_hist_ent = p;
38993905

39003906
return 0;
39013907
}
39023908

3903-
static struct bpf_insn_hist_entry *get_insn_hist_entry(struct bpf_verifier_env *env,
3904-
u32 hist_start, u32 hist_end, int insn_idx)
3909+
static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
3910+
u32 hist_end, int insn_idx)
39053911
{
3906-
if (hist_end > hist_start && env->insn_hist[hist_end - 1].idx == insn_idx)
3907-
return &env->insn_hist[hist_end - 1];
3912+
if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx)
3913+
return &st->jmp_history[hist_end - 1];
39083914
return NULL;
39093915
}
39103916

@@ -3921,26 +3927,25 @@ static struct bpf_insn_hist_entry *get_insn_hist_entry(struct bpf_verifier_env *
39213927
* history entry recording a jump from last instruction of parent state and
39223928
* first instruction of given state.
39233929
*/
3924-
static int get_prev_insn_idx(const struct bpf_verifier_env *env,
3925-
struct bpf_verifier_state *st,
3926-
int insn_idx, u32 hist_start, u32 *hist_endp)
3930+
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
3931+
u32 *history)
39273932
{
3928-
u32 hist_end = *hist_endp;
3929-
u32 cnt = hist_end - hist_start;
3933+
u32 cnt = *history;
39303934

3931-
if (insn_idx == st->first_insn_idx) {
3935+
if (i == st->first_insn_idx) {
39323936
if (cnt == 0)
39333937
return -ENOENT;
3934-
if (cnt == 1 && env->insn_hist[hist_start].idx == insn_idx)
3938+
if (cnt == 1 && st->jmp_history[0].idx == i)
39353939
return -ENOENT;
39363940
}
39373941

3938-
if (cnt && env->insn_hist[hist_end - 1].idx == insn_idx) {
3939-
(*hist_endp)--;
3940-
return env->insn_hist[hist_end - 1].prev_idx;
3942+
if (cnt && st->jmp_history[cnt - 1].idx == i) {
3943+
i = st->jmp_history[cnt - 1].prev_idx;
3944+
(*history)--;
39413945
} else {
3942-
return insn_idx - 1;
3946+
i--;
39433947
}
3948+
return i;
39443949
}
39453950

39463951
static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
@@ -4121,7 +4126,7 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
41214126
/* If any register R in hist->linked_regs is marked as precise in bt,
41224127
* do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
41234128
*/
4124-
static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_insn_hist_entry *hist)
4129+
static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist)
41254130
{
41264131
struct linked_regs linked_regs;
41274132
bool some_precise = false;
@@ -4166,7 +4171,7 @@ static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
41664171
* - *was* processed previously during backtracking.
41674172
*/
41684173
static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
4169-
struct bpf_insn_hist_entry *hist, struct backtrack_state *bt)
4174+
struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
41704175
{
41714176
struct bpf_insn *insn = env->prog->insnsi + idx;
41724177
u8 class = BPF_CLASS(insn->code);
@@ -4584,7 +4589,7 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_
45844589
* SCALARS, as well as any other registers and slots that contribute to
45854590
* a tracked state of given registers/stack slots, depending on specific BPF
45864591
* assembly instructions (see backtrack_insns() for exact instruction handling
4587-
* logic). This backtracking relies on recorded insn_hist and is able to
4592+
* logic). This backtracking relies on recorded jmp_history and is able to
45884593
* traverse entire chain of parent states. This process ends only when all the
45894594
* necessary registers/slots and their transitive dependencies are marked as
45904595
* precise.
@@ -4701,9 +4706,8 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
47014706

47024707
for (;;) {
47034708
DECLARE_BITMAP(mask, 64);
4704-
u32 hist_start = st->insn_hist_start;
4705-
u32 hist_end = st->insn_hist_end;
4706-
struct bpf_insn_hist_entry *hist;
4709+
u32 history = st->jmp_history_cnt;
4710+
struct bpf_jmp_history_entry *hist;
47074711

47084712
if (env->log.level & BPF_LOG_LEVEL2) {
47094713
verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n",
@@ -4741,7 +4745,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
47414745
err = 0;
47424746
skip_first = false;
47434747
} else {
4744-
hist = get_insn_hist_entry(env, hist_start, hist_end, i);
4748+
hist = get_jmp_hist_entry(st, history, i);
47454749
err = backtrack_insn(env, i, subseq_idx, hist, bt);
47464750
}
47474751
if (err == -ENOTSUPP) {
@@ -4758,7 +4762,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
47584762
*/
47594763
return 0;
47604764
subseq_idx = i;
4761-
i = get_prev_insn_idx(env, st, i, hist_start, &hist_end);
4765+
i = get_prev_insn_idx(st, i, &history);
47624766
if (i == -ENOENT)
47634767
break;
47644768
if (i >= env->prog->len) {
@@ -5122,7 +5126,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
51225126
}
51235127

51245128
if (insn_flags)
5125-
return push_insn_history(env, env->cur_state, insn_flags, 0);
5129+
return push_jmp_history(env, env->cur_state, insn_flags, 0);
51265130
return 0;
51275131
}
51285132

@@ -5429,7 +5433,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
54295433
insn_flags = 0; /* we are not restoring spilled register */
54305434
}
54315435
if (insn_flags)
5432-
return push_insn_history(env, env->cur_state, insn_flags, 0);
5436+
return push_jmp_history(env, env->cur_state, insn_flags, 0);
54335437
return 0;
54345438
}
54355439

@@ -16496,7 +16500,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
1649616500
}
1649716501

1649816502
if (insn_flags) {
16499-
err = push_insn_history(env, this_branch, insn_flags, 0);
16503+
err = push_jmp_history(env, this_branch, insn_flags, 0);
1650016504
if (err)
1650116505
return err;
1650216506
}
@@ -16554,7 +16558,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
1655416558
if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
1655516559
collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
1655616560
if (linked_regs.cnt > 1) {
16557-
err = push_insn_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
16561+
err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
1655816562
if (err)
1655916563
return err;
1656016564
}
@@ -19052,7 +19056,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
1905219056

1905319057
force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) ||
1905419058
/* Avoid accumulating infinitely long jmp history */
19055-
cur->insn_hist_end - cur->insn_hist_start > 40;
19059+
cur->jmp_history_cnt > 40;
1905619060

1905719061
/* bpf progs typically have pruning point every 4 instructions
1905819062
* http://vger.kernel.org/bpfconf2019.html#session-1
@@ -19251,7 +19255,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
1925119255
* the current state.
1925219256
*/
1925319257
if (is_jmp_point(env, env->insn_idx))
19254-
err = err ? : push_insn_history(env, cur, 0, 0);
19258+
err = err ? : push_jmp_history(env, cur, 0, 0);
1925519259
err = err ? : propagate_precision(env, &sl->state);
1925619260
if (err)
1925719261
return err;
@@ -19333,8 +19337,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
1933319337

1933419338
cur->parent = new;
1933519339
cur->first_insn_idx = insn_idx;
19336-
cur->insn_hist_start = cur->insn_hist_end;
1933719340
cur->dfs_depth = new->dfs_depth + 1;
19341+
clear_jmp_history(cur);
1933819342
list_add(&new_sl->node, head);
1933919343

1934019344
/* connect new state to parentage chain. Current frame needs all
@@ -19704,7 +19708,7 @@ static int do_check(struct bpf_verifier_env *env)
1970419708
}
1970519709

1970619710
if (is_jmp_point(env, env->insn_idx)) {
19707-
err = push_insn_history(env, state, 0, 0);
19711+
err = push_jmp_history(env, state, 0, 0);
1970819712
if (err)
1970919713
return err;
1971019714
}
@@ -24291,7 +24295,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
2429124295
if (!is_priv)
2429224296
mutex_unlock(&bpf_verifier_lock);
2429324297
vfree(env->insn_aux_data);
24294-
kvfree(env->insn_hist);
2429524298
err_free_env:
2429624299
kvfree(env->cfg.insn_postorder);
2429724300
kvfree(env);

0 commit comments

Comments (0)