
Commit 5c51b4d

sidchintamaneni authored and Kernel Patches Daemon committed
bpf: Create call sites table to stub instructions during runtime

Create call sites tables and store the JIT indexes of RET_NULL calls so they can be poked later with dummy functions. In addition to the JIT indexes, metadata about helpers/kfuncs/loops is stored. Later this could be extended to the remaining potentially long-running iterator helpers/kfuncs.

Signed-off-by: Raj Sahu <[email protected]>
Signed-off-by: Siddharth Chintamaneni <[email protected]>
1 parent d88f2f2 commit 5c51b4d

File tree

4 files changed: +137 -13 lines changed
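The diff below populates call-site bookkeeping structures introduced earlier in this series. For orientation, a minimal sketch of those structures, reconstructed only from the fields this patch touches (the actual definitions are not part of this commit and may differ):

struct call_aux_states {
	u32 call_bpf_insn_idx;		/* index of the call in the BPF insn stream */
	u32 jit_call_idx;		/* offset of the call site in the jited image */
	bool is_helper_kfunc;		/* RET_NULL helper or kfunc call */
	bool is_bpf_loop;		/* non-inlined bpf_loop() helper call */
	bool is_bpf_loop_cb_inline;	/* call to an inlined bpf_loop callback */
};

struct bpf_term_patch_call_sites {
	u32 call_sites_cnt;
	struct call_aux_states *call_states;
};

struct bpf_term_aux_states {
	struct bpf_prog *prog;
	struct bpf_term_patch_call_sites *patch_call_sites;
};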

arch/x86/net/bpf_jit_comp.c

Lines changed: 9 additions & 0 deletions
@@ -3733,6 +3733,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}

 	if (!image || !prog->is_func || extra_pass) {
+
+		if (addrs) {
+			struct bpf_term_patch_call_sites *patch_call_sites = prog->term_states->patch_call_sites;
+			for (int i = 0; i < patch_call_sites->call_sites_cnt; i++) {
+				struct call_aux_states *call_states = patch_call_sites->call_states + i;
+				call_states->jit_call_idx = addrs[call_states->call_bpf_insn_idx];
+			}
+		}
+
 		if (image)
 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
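Here addrs[] is the per-BPF-instruction offset table the x86 JIT already maintains, so addrs[call_bpf_insn_idx] resolves each recorded call to its position in the jited image. The poking itself is not in this commit; a hedged sketch of what a follow-up termination pass might do with the stored index, using the existing bpf_arch_text_poke() helper (patch_one_call_site(), old_addr, and dummy_fn are made-up names, and the exact byte offset of the emitted call is glossed over):

/* Sketch only: this commit merely records jit_call_idx. */
static void patch_one_call_site(struct bpf_prog *prog,
				struct call_aux_states *cs,
				void *old_addr, void *dummy_fn)
{
	/* assumes jit_call_idx addresses the emitted call instruction */
	u8 *ip = (u8 *)prog->bpf_func + cs->jit_call_idx;

	/* redirect the recorded call site to the dummy function */
	bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, dummy_fn);
}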

include/linux/bpf_verifier.h

Lines changed: 1 addition & 0 deletions
@@ -677,6 +677,7 @@ struct bpf_subprog_info {
 	bool is_cb: 1;
 	bool is_async_cb: 1;
 	bool is_exception_cb: 1;
+	bool is_bpf_loop_cb_non_inline: 1;
 	bool args_cached: 1;
 	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
 	bool keep_fastcall_stack: 1;

kernel/bpf/core.c

Lines changed: 4 additions & 1 deletion
@@ -136,6 +136,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
 	fp->term_states = term_states;
 	fp->term_states->patch_call_sites = patch_call_sites;
 	fp->term_states->patch_call_sites->call_sites_cnt = 0;
+	fp->term_states->patch_call_sites->call_states = NULL;
 	fp->term_states->prog = fp;

 #ifdef CONFIG_CGROUP_BPF
@@ -314,8 +315,10 @@ void __bpf_prog_free(struct bpf_prog *fp)
 		kfree(fp->aux);
 	}
 	if (fp->term_states) {
-		if (fp->term_states->patch_call_sites)
+		if (fp->term_states->patch_call_sites) {
+			vfree(fp->term_states->patch_call_sites->call_states);
 			kfree(fp->term_states->patch_call_sites);
+		}
 		kfree(fp->term_states);
 	}
 	free_percpu(fp->stats);

kernel/bpf/verifier.c

Lines changed: 123 additions & 12 deletions
@@ -3491,6 +3491,7 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
 	 * logic. 'subprog_cnt' should not be increased.
 	 */
 	subprog[env->subprog_cnt].start = insn_cnt;
+	subprog[env->subprog_cnt].is_bpf_loop_cb_non_inline = false;

 	if (env->log.level & BPF_LOG_LEVEL2)
 		for (i = 0; i < env->subprog_cnt; i++)
@@ -11319,19 +11320,30 @@ static bool loop_flag_is_zero(struct bpf_verifier_env *env)
 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
 {
 	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
+	struct bpf_subprog_info *prev_info, *info = subprog_info(env, subprogno);

 	if (!state->initialized) {
 		state->initialized = 1;
 		state->fit_for_inline = loop_flag_is_zero(env);
 		state->callback_subprogno = subprogno;
+		if (!state->fit_for_inline)
+			info->is_bpf_loop_cb_non_inline = 1;
 		return;
 	}

-	if (!state->fit_for_inline)
+	if (!state->fit_for_inline) {
+		info->is_bpf_loop_cb_non_inline = 1;
 		return;
+	}

 	state->fit_for_inline = (loop_flag_is_zero(env) &&
 				 state->callback_subprogno == subprogno);
+
+	if (state->callback_subprogno != subprogno) {
+		info->is_bpf_loop_cb_non_inline = 1;
+		prev_info = subprog_info(env, state->callback_subprogno);
+		prev_info->is_bpf_loop_cb_non_inline = 1;
+	}
 }

 /* Returns whether or not the given map type can potentially elide
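A callback stays fit for inlining only while every bpf_loop() call has a provably zero flags argument and all calls share one callback subprog; any other pattern now marks the callback is_bpf_loop_cb_non_inline. An illustrative BPF-side program whose callback would be marked (names are made up; this is not from the patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static long cb(__u64 index, void *ctx)
{
	return 0;	/* keep looping */
}

/* zero at runtime, but not a constant the verifier can prove */
volatile __u64 loop_flags;

SEC("tc")
int not_inlined(struct __sk_buff *skb)
{
	/* loop_flag_is_zero() fails for a non-constant flags value,
	 * so this call is not inlined and cb keeps its real call site.
	 */
	bpf_loop(16, cb, NULL, loop_flags);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";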
@@ -21120,6 +21132,9 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 	int i, patch_len, delta = 0, len = env->prog->len;
 	struct bpf_insn *insns = env->prog->insnsi;
 	struct bpf_prog *new_prog;
+	struct bpf_term_aux_states *term_states = env->prog->term_states;
+	u32 call_sites_cnt = term_states->patch_call_sites->call_sites_cnt;
+	struct call_aux_states *call_states = term_states->patch_call_sites->call_states;
 	bool rnd_hi32;

 	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
@@ -21205,6 +21220,15 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 		insns = new_prog->insnsi;
 		aux = env->insn_aux_data;
 		delta += patch_len - 1;
+
+		/* Adjust call instruction offsets
+		 * w.r.t. adj_idx
+		 */
+		for (int iter = 0; iter < call_sites_cnt; iter++) {
+			if (call_states[iter].call_bpf_insn_idx < adj_idx)
+				continue;
+			call_states[iter].call_bpf_insn_idx += patch_len - 1;
+		}
 	}

 	return 0;
@@ -21597,6 +21621,26 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
 		func[i]->aux->poke_tab = prog->aux->poke_tab;
 		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
+		func[i]->aux->is_bpf_loop_cb_non_inline = env->subprog_info[i].is_bpf_loop_cb_non_inline;

+		if (prog->term_states->patch_call_sites->call_sites_cnt != 0) {
+			int call_sites_cnt = 0;
+			struct call_aux_states *func_call_states;
+			func_call_states = vzalloc(sizeof(*func_call_states) * len);
+			if (!func_call_states)
+				goto out_free;
+			for (int iter = 0; iter < prog->term_states->patch_call_sites->call_sites_cnt; iter++) {
+				struct call_aux_states call_states = prog->term_states->patch_call_sites->call_states[iter];
+				if (call_states.call_bpf_insn_idx >= subprog_start
+				    && call_states.call_bpf_insn_idx < subprog_end) {
+					func_call_states[call_sites_cnt] = call_states;
+					func_call_states[call_sites_cnt].call_bpf_insn_idx -= subprog_start;
+					call_sites_cnt++;
+				}
+			}
+			func[i]->term_states->patch_call_sites->call_sites_cnt = call_sites_cnt;
+			func[i]->term_states->patch_call_sites->call_states = func_call_states;
+		}

 		for (j = 0; j < prog->aux->size_poke_tab; j++) {
 			struct bpf_jit_poke_descriptor *poke;
@@ -21886,15 +21930,21 @@ static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
 }

 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
-			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
+			    struct bpf_insn *insn_buf, int insn_idx, int *cnt, int *kfunc_btf_id)
 {
 	const struct bpf_kfunc_desc *desc;
+	struct bpf_kfunc_call_arg_meta meta;
+	int err;

 	if (!insn->imm) {
 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
 		return -EINVAL;
 	}

+	err = fetch_kfunc_meta(env, insn, &meta, NULL);
+	if (err)
+		return err;
+
 	*cnt = 0;

 	/* insn->imm has the btf func_id. Replace it with an offset relative to
@@ -21908,8 +21958,11 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EFAULT;
 	}

-	if (!bpf_jit_supports_far_kfunc_call())
+	if (!bpf_jit_supports_far_kfunc_call()) {
+		if (meta.kfunc_flags & KF_RET_NULL)
+			*kfunc_btf_id = insn->imm;
 		insn->imm = BPF_CALL_IMM(desc->addr);
+	}
 	if (insn->off)
 		return 0;
 	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
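KF_RET_NULL flags kfuncs whose return value may be NULL, which is what makes them safe targets for a NULL-returning stub; the hunk above captures their BTF id when the JIT lacks far kfunc calls. For context, this is how kfuncs advertise the flag today (existing registration pattern from the kernel tree, shown for illustration, not part of this patch):

BTF_KFUNCS_START(generic_btf_ids)
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_KFUNCS_END(generic_btf_ids)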
@@ -22019,6 +22072,13 @@ static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch,
 	return 0;
 }

+static bool is_bpf_loop_call(struct bpf_insn *insn)
+{
+	return insn->code == (BPF_JMP | BPF_CALL) &&
+	       insn->src_reg == 0 &&
+	       insn->imm == BPF_FUNC_loop;
+}
+
 /* Do various post-verification rewrites in a single program pass.
  * These rewrites simplify JIT and interpreter implementations.
  */
@@ -22039,6 +22099,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 	struct bpf_subprog_info *subprogs = env->subprog_info;
 	u16 stack_depth = subprogs[cur_subprog].stack_depth;
 	u16 stack_depth_extra = 0;
+	u32 call_sites_cnt = 0;
+	struct call_aux_states *call_states;
+
+	call_states = vzalloc(sizeof(*call_states) * prog->len);
+	if (!call_states)
+		return -ENOMEM;

 	if (env->seen_exception && !env->exception_callback_subprog) {
 		struct bpf_insn *patch = insn_buf;
@@ -22368,11 +22434,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		if (insn->src_reg == BPF_PSEUDO_CALL)
 			goto next_insn;
 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
-			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
+			int kfunc_btf_id = 0;
+			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt, &kfunc_btf_id);
 			if (ret)
 				return ret;
 			if (cnt == 0)
-				goto next_insn;
+				goto store_call_indices;

 			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 			if (!new_prog)
@@ -22381,6 +22448,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			delta += cnt - 1;
 			env->prog = prog = new_prog;
 			insn = new_prog->insnsi + i + delta;
+store_call_indices:
+			if (kfunc_btf_id != 0) {
+				call_states[call_sites_cnt].call_bpf_insn_idx = i + delta;
+				call_states[call_sites_cnt].is_helper_kfunc = 1;
+				call_sites_cnt++;
+			}
 			goto next_insn;
 		}

@@ -22859,6 +22932,15 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 				func_id_name(insn->imm), insn->imm);
 			return -EFAULT;
 		}
+
+		if ((fn->ret_type & PTR_MAYBE_NULL) || is_bpf_loop_call(insn)) {
+			call_states[call_sites_cnt].call_bpf_insn_idx = i + delta;
+			if (is_bpf_loop_call(insn))
+				call_states[call_sites_cnt].is_bpf_loop = 1;
+			else
+				call_states[call_sites_cnt].is_helper_kfunc = 1;
+			call_sites_cnt++;
+		}
 		insn->imm = fn->func - __bpf_call_base;
 next_insn:
 		if (subprogs[cur_subprog + 1].start == i + delta + 1) {
@@ -22879,6 +22961,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		insn++;
 	}

+	env->prog->term_states->patch_call_sites->call_sites_cnt = call_sites_cnt;
+	env->prog->term_states->patch_call_sites->call_states = call_states;
 	env->prog->aux->stack_depth = subprogs[0].stack_depth;
 	for (i = 0; i < env->subprog_cnt; i++) {
 		int delta = bpf_jit_supports_timed_may_goto() ? 2 : 1;
@@ -23014,17 +23098,12 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
 	call_insn_offset = position + 12;
 	callback_offset = callback_start - call_insn_offset - 1;
 	new_prog->insnsi[call_insn_offset].imm = callback_offset;
+	/* Marking offset field to identify loop cb */
+	new_prog->insnsi[call_insn_offset].off = 0x1;

 	return new_prog;
 }

-static bool is_bpf_loop_call(struct bpf_insn *insn)
-{
-	return insn->code == (BPF_JMP | BPF_CALL) &&
-	       insn->src_reg == 0 &&
-	       insn->imm == BPF_FUNC_loop;
-}
-
 /* For all sub-programs in the program (including main) check
  * insn_aux_data to see if there are bpf_loop calls that require
  * inlining. If such calls are found the calls are replaced with a
@@ -24584,6 +24663,35 @@ static int compute_scc(struct bpf_verifier_env *env)
 	return err;
 }

+static int fix_call_sites(struct bpf_verifier_env *env)
+{
+	int err = 0, i, subprog;
+	struct bpf_insn *insn;
+	struct bpf_prog *prog = env->prog;
+	struct bpf_term_aux_states *term_states = env->prog->term_states;
+	u32 *call_sites_cnt = &term_states->patch_call_sites->call_sites_cnt;
+	struct call_aux_states *call_states = term_states->patch_call_sites->call_states;
+
+	if (!env->subprog_cnt)
+		return 0;
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
+			continue;
+
+		subprog = find_subprog(env, i + insn->imm + 1);
+		if (subprog < 0)
+			return -EFAULT;
+
+		if (insn->off == 0x1) {
+			call_states[*call_sites_cnt].call_bpf_insn_idx = i;
+			call_states[*call_sites_cnt].is_bpf_loop_cb_inline = 1;
+			*call_sites_cnt = *call_sites_cnt + 1;
+			prog->insnsi[i].off = 0x0; /* Removing the marker */
+		}
+	}
+	return err;
+}
+
 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 {
 	u64 start_time = ktime_get_ns();
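fix_call_sites() runs after inline_bpf_loop() has tagged inlined callback calls with off = 0x1; it records those sites in the table and clears the marker so later stages see clean instructions. At that point the table is complete. A hedged sketch of a consumer walking the finished table (dump_call_sites() is hypothetical; only the field names come from this patch):

static void dump_call_sites(struct bpf_prog *prog)
{
	struct bpf_term_patch_call_sites *pcs = prog->term_states->patch_call_sites;

	for (u32 i = 0; i < pcs->call_sites_cnt; i++) {
		struct call_aux_states *cs = &pcs->call_states[i];

		pr_debug("insn %u -> jit %u (helper/kfunc=%d loop=%d inlined-cb=%d)\n",
			 cs->call_bpf_insn_idx, cs->jit_call_idx,
			 cs->is_helper_kfunc, cs->is_bpf_loop,
			 cs->is_bpf_loop_cb_inline);
	}
}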
@@ -24769,6 +24877,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 			: false;
 	}

+	if (ret == 0)
+		ret = fix_call_sites(env);
+
 	if (ret == 0)
 		ret = fixup_call_args(env);
