@@ -3491,6 +3491,7 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
34913491 * logic. 'subprog_cnt' should not be increased.
34923492 */
34933493 subprog[env->subprog_cnt].start = insn_cnt;
3494+ subprog[env->subprog_cnt].is_bpf_loop_cb_non_inline = false;
34943495
34953496 if (env->log.level & BPF_LOG_LEVEL2)
34963497 for (i = 0; i < env->subprog_cnt; i++)
@@ -11319,19 +11320,30 @@ static bool loop_flag_is_zero(struct bpf_verifier_env *env)
1131911320static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
1132011321{
1132111322 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
11323+ struct bpf_subprog_info *prev_info, *info = subprog_info(env, subprogno);
1132211324
1132311325 if (!state->initialized) {
1132411326 state->initialized = 1;
1132511327 state->fit_for_inline = loop_flag_is_zero(env);
1132611328 state->callback_subprogno = subprogno;
11329+ if (!state->fit_for_inline)
11330+ info->is_bpf_loop_cb_non_inline = 1;
1132711331 return;
1132811332 }
1132911333
11330- if (!state->fit_for_inline)
11334+ if (!state->fit_for_inline) {
11335+ info->is_bpf_loop_cb_non_inline = 1;
1133111336 return;
11337+ }
1133211338
1133311339 state->fit_for_inline = (loop_flag_is_zero(env) &&
1133411340 state->callback_subprogno == subprogno);
11341+
11342+ if (state->callback_subprogno != subprogno) {
11343+ info->is_bpf_loop_cb_non_inline = 1;
11344+ prev_info = subprog_info(env, state->callback_subprogno);
11345+ prev_info->is_bpf_loop_cb_non_inline = 1;
11346+ }
1133511347}
1133611348
1133711349/* Returns whether or not the given map type can potentially elide
@@ -21120,6 +21132,9 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
2112021132 int i, patch_len, delta = 0, len = env->prog->len;
2112121133 struct bpf_insn *insns = env->prog->insnsi;
2112221134 struct bpf_prog *new_prog;
21135+ struct bpf_term_aux_states *term_states = env->prog->term_states;
21136+ u32 call_sites_cnt = term_states->patch_call_sites->call_sites_cnt;
21137+ struct call_aux_states *call_states = term_states->patch_call_sites->call_states;
2112321138 bool rnd_hi32;
2112421139
2112521140 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
@@ -21205,6 +21220,15 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
2120521220 insns = new_prog->insnsi;
2120621221 aux = env->insn_aux_data;
2120721222 delta += patch_len - 1;
21223+
21224+		/* Adjust call instruction offsets
21225+		 * w.r.t. adj_idx
21226+ */
21227+ for (int iter = 0; iter < call_sites_cnt; iter++) {
21228+ if (call_states[iter].call_bpf_insn_idx < adj_idx)
21229+ continue;
21230+ call_states[iter].call_bpf_insn_idx += patch_len - 1;
21231+ }
2120821232 }
2120921233
2121021234 return 0;
@@ -21597,6 +21621,26 @@ static int jit_subprogs(struct bpf_verifier_env *env)
2159721621 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
2159821622 func[i]->aux->poke_tab = prog->aux->poke_tab;
2159921623 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
21624+ func[i]->aux->is_bpf_loop_cb_non_inline = env->subprog_info[i].is_bpf_loop_cb_non_inline;
21625+
21626+ if (prog->term_states->patch_call_sites->call_sites_cnt != 0) {
21627+ int call_sites_cnt = 0;
21628+ struct call_aux_states *func_call_states;
21629+ func_call_states = vzalloc(sizeof(*func_call_states) * len);
21630+ if (!func_call_states)
21631+ goto out_free;
21632+ for (int iter = 0; iter < prog->term_states->patch_call_sites->call_sites_cnt; iter++) {
21633+ struct call_aux_states call_states = prog->term_states->patch_call_sites->call_states[iter];
21634+ if (call_states.call_bpf_insn_idx >= subprog_start
21635+ && call_states.call_bpf_insn_idx < subprog_end) {
21636+ func_call_states[call_sites_cnt] = call_states;
21637+ func_call_states[call_sites_cnt].call_bpf_insn_idx -= subprog_start;
21638+ call_sites_cnt++;
21639+ }
21640+ }
21641+ func[i]->term_states->patch_call_sites->call_sites_cnt = call_sites_cnt;
21642+ func[i]->term_states->patch_call_sites->call_states = func_call_states;
21643+ }
2160021644
2160121645 for (j = 0; j < prog->aux->size_poke_tab; j++) {
2160221646 struct bpf_jit_poke_descriptor *poke;
@@ -21886,15 +21930,21 @@ static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
2188621930}
2188721931
2188821932static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
21889- struct bpf_insn *insn_buf, int insn_idx, int *cnt)
21933+ struct bpf_insn *insn_buf, int insn_idx, int *cnt, int *kfunc_btf_id )
2189021934{
2189121935 const struct bpf_kfunc_desc *desc;
21936+ struct bpf_kfunc_call_arg_meta meta;
21937+ int err;
2189221938
2189321939 if (!insn->imm) {
2189421940 verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
2189521941 return -EINVAL;
2189621942 }
2189721943
21944+ err = fetch_kfunc_meta(env, insn, &meta, NULL);
21945+ if (err)
21946+ return err;
21947+
2189821948 *cnt = 0;
2189921949
2190021950 /* insn->imm has the btf func_id. Replace it with an offset relative to
@@ -21908,8 +21958,11 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2190821958 return -EFAULT;
2190921959 }
2191021960
21911- if (!bpf_jit_supports_far_kfunc_call())
21961+ if (!bpf_jit_supports_far_kfunc_call()) {
21962+ if (meta.kfunc_flags & KF_RET_NULL)
21963+ *kfunc_btf_id = insn->imm;
2191221964 insn->imm = BPF_CALL_IMM(desc->addr);
21965+ }
2191321966 if (insn->off)
2191421967 return 0;
2191521968 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
@@ -22019,6 +22072,13 @@ static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *pat
2201922072 return 0;
2202022073}
2202122074
22075+ static bool is_bpf_loop_call(struct bpf_insn *insn)
22076+ {
22077+ return insn->code == (BPF_JMP | BPF_CALL) &&
22078+ insn->src_reg == 0 &&
22079+ insn->imm == BPF_FUNC_loop;
22080+ }
22081+
2202222082/* Do various post-verification rewrites in a single program pass.
2202322083 * These rewrites simplify JIT and interpreter implementations.
2202422084 */
@@ -22039,6 +22099,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2203922099 struct bpf_subprog_info *subprogs = env->subprog_info;
2204022100 u16 stack_depth = subprogs[cur_subprog].stack_depth;
2204122101 u16 stack_depth_extra = 0;
22102+ u32 call_sites_cnt = 0;
22103+ struct call_aux_states *call_states;
22104+
22105+ call_states = vzalloc(sizeof(*call_states) * prog->len);
22106+ if (!call_states)
22107+ return -ENOMEM;
2204222108
2204322109 if (env->seen_exception && !env->exception_callback_subprog) {
2204422110 struct bpf_insn *patch = insn_buf;
@@ -22368,11 +22434,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2236822434 if (insn->src_reg == BPF_PSEUDO_CALL)
2236922435 goto next_insn;
2237022436 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
22371- ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
22437+ int kfunc_btf_id = 0;
22438+ ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt, &kfunc_btf_id);
2237222439 if (ret)
2237322440 return ret;
2237422441 if (cnt == 0)
22375- goto next_insn ;
22442+ goto store_call_indices ;
2237622443
2237722444 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
2237822445 if (!new_prog)
@@ -22381,6 +22448,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2238122448 delta += cnt - 1;
2238222449 env->prog = prog = new_prog;
2238322450 insn = new_prog->insnsi + i + delta;
22451+ store_call_indices:
22452+ if (kfunc_btf_id != 0) {
22453+ call_states[call_sites_cnt].call_bpf_insn_idx = i + delta;
22454+ call_states[call_sites_cnt].is_helper_kfunc = 1;
22455+ call_sites_cnt++;
22456+ }
2238422457 goto next_insn;
2238522458 }
2238622459
@@ -22859,6 +22932,15 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2285922932 func_id_name(insn->imm), insn->imm);
2286022933 return -EFAULT;
2286122934 }
22935+
22936+ if ((fn->ret_type & PTR_MAYBE_NULL) || is_bpf_loop_call(insn)) {
22937+ call_states[call_sites_cnt].call_bpf_insn_idx = i + delta;
22938+ if (is_bpf_loop_call(insn))
22939+ call_states[call_sites_cnt].is_bpf_loop = 1;
22940+ else
22941+ call_states[call_sites_cnt].is_helper_kfunc = 1;
22942+ call_sites_cnt++;
22943+ }
2286222944 insn->imm = fn->func - __bpf_call_base;
2286322945next_insn:
2286422946 if (subprogs[cur_subprog + 1].start == i + delta + 1) {
@@ -22879,6 +22961,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2287922961 insn++;
2288022962 }
2288122963
22964+ env->prog->term_states->patch_call_sites->call_sites_cnt = call_sites_cnt;
22965+ env->prog->term_states->patch_call_sites->call_states = call_states;
2288222966 env->prog->aux->stack_depth = subprogs[0].stack_depth;
2288322967 for (i = 0; i < env->subprog_cnt; i++) {
2288422968 int delta = bpf_jit_supports_timed_may_goto() ? 2 : 1;
@@ -23014,17 +23098,12 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
2301423098 call_insn_offset = position + 12;
2301523099 callback_offset = callback_start - call_insn_offset - 1;
2301623100 new_prog->insnsi[call_insn_offset].imm = callback_offset;
23101+ /* Marking offset field to identify loop cb */
23102+ new_prog->insnsi[call_insn_offset].off = 0x1;
2301723103
2301823104 return new_prog;
2301923105}
2302023106
23021- static bool is_bpf_loop_call(struct bpf_insn *insn)
23022- {
23023- return insn->code == (BPF_JMP | BPF_CALL) &&
23024- insn->src_reg == 0 &&
23025- insn->imm == BPF_FUNC_loop;
23026- }
23027-
2302823107/* For all sub-programs in the program (including main) check
2302923108 * insn_aux_data to see if there are bpf_loop calls that require
2303023109 * inlining. If such calls are found the calls are replaced with a
@@ -24584,6 +24663,35 @@ static int compute_scc(struct bpf_verifier_env *env)
2458424663 return err;
2458524664}
2458624665
24666+ static int fix_call_sites(struct bpf_verifier_env *env)
24667+ {
24668+ int err = 0, i, subprog;
24669+ struct bpf_insn *insn;
24670+ struct bpf_prog *prog = env->prog;
24671+ struct bpf_term_aux_states *term_states = env->prog->term_states;
24672+ u32 *call_sites_cnt = &term_states->patch_call_sites->call_sites_cnt;
24673+ struct call_aux_states *call_states = term_states->patch_call_sites->call_states;
24674+
24675+ if (!env->subprog_cnt)
24676+ return 0;
24677+ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
24678+ if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
24679+ continue;
24680+
24681+ subprog = find_subprog(env, i + insn->imm + 1);
24682+ if (subprog < 0)
24683+ return -EFAULT;
24684+
24685+ if (insn->off == 0x1) {
24686+ call_states[*call_sites_cnt].call_bpf_insn_idx = i;
24687+ call_states[*call_sites_cnt].is_bpf_loop_cb_inline = 1;
24688+ *call_sites_cnt = *call_sites_cnt + 1;
24689+ prog->insnsi[i].off = 0x0; /* Removing the marker */
24690+ }
24691+ }
24692+ return err;
24693+ }
24694+
2458724695int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
2458824696{
2458924697 u64 start_time = ktime_get_ns();
@@ -24769,6 +24877,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
2476924877 : false;
2477024878 }
2477124879
24880+ if (ret == 0)
24881+ ret = fix_call_sites(env);
24882+
2477224883 if (ret == 0)
2477324884 ret = fixup_call_args(env);
2477424885
0 commit comments