diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ab83089c3d8fe..06f4bd6c67555 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -2788,6 +2788,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
 	void *image, *tmp;
 	int ret;
 
+	if (tlinks[BPF_TRAMP_SESSION].nr_links)
+		return -EOPNOTSUPP;
+
 	/* image doesn't need to be in module memory range, so we can
 	 * use kvmalloc.
 	 */
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index cbe53d0b7fb06..ad596341658a4 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1739,6 +1739,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
 	void *image, *tmp;
 	struct jit_ctx ctx;
 
+	if (tlinks[BPF_TRAMP_SESSION].nr_links)
+		return -EOPNOTSUPP;
+
 	size = ro_image_end - ro_image;
 	image = kvmalloc(size, GFP_KERNEL);
 	if (!image)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 88ad5ba7b87fd..bcc0ce09f6fae 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1017,6 +1017,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	void *rw_image, *tmp;
 	int ret;
 
+	if (tlinks[BPF_TRAMP_SESSION].nr_links)
+		return -EOPNOTSUPP;
+
 	/*
 	 * rw_image doesn't need to be in module memory range, so we can
 	 * use kvmalloc.
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 45cbc7c6fe490..55b0284bf1770 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1286,6 +1286,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
 	struct rv_jit_context ctx;
 	u32 size = ro_image_end - ro_image;
 
+	if (tlinks[BPF_TRAMP_SESSION].nr_links)
+		return -EOPNOTSUPP;
+
 	image = kvmalloc(size, GFP_KERNEL);
 	if (!image)
 		return -ENOMEM;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index cf461d76e9da3..3f25bf55b1500 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2924,6 +2924,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 	struct bpf_tramp_jit tjit;
 	int ret;
 
+	if (tlinks[BPF_TRAMP_SESSION].nr_links)
+		return -EOPNOTSUPP;
+
 	/* Compute offsets, check whether the code fits. */
 	memset(&tjit, 0, sizeof(tjit));
 	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d4c93d9e73e40..2fffc530c88cc 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2940,7 +2940,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog,
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_tramp_link *l, int stack_size,
-			   int run_ctx_off, bool save_ret,
+			   int run_ctx_off, bool save_ret, int ret_off,
 			   void *image, void *rw_image)
 {
 	u8 *prog = *pprog;
@@ -3005,7 +3005,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
 	 */
 	if (save_ret)
-		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
 
 	/* replace 2 nops with JE insn, since jmp target is known */
 	jmp_insn[0] = X86_JE;
@@ -3055,7 +3055,7 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 		      struct bpf_tramp_links *tl, int stack_size,
-		      int run_ctx_off, bool save_ret,
+		      int run_ctx_off, bool save_ret, int ret_off,
 		      void *image, void *rw_image)
 {
 	int i;
@@ -3063,7 +3063,8 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 
 	for (i = 0; i < tl->nr_links; i++) {
 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
-				    run_ctx_off, save_ret, image, rw_image))
+				    run_ctx_off, save_ret, ret_off, image,
+				    rw_image))
 			return -EINVAL;
 	}
 	*pprog = prog;
@@ -3072,7 +3073,7 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 
 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 			      struct bpf_tramp_links *tl, int stack_size,
-			      int run_ctx_off, u8 **branches,
+			      int run_ctx_off, int ret_off, u8 **branches,
 			      void *image, void *rw_image)
 {
 	u8 *prog = *pprog;
@@ -3082,18 +3083,18 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 	 * Set this to 0 to avoid confusing the program.
 	 */
 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
-	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
 	for (i = 0; i < tl->nr_links; i++) {
 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
-				    image, rw_image))
+				    ret_off, image, rw_image))
 			return -EINVAL;
 
-		/* mod_ret prog stored return value into [rbp - 8]. Emit:
-		 * if (*(u64 *)(rbp - 8) != 0)
+		/* mod_ret prog stored return value into [rbp - ret_off]. Emit:
+		 * if (*(u64 *)(rbp - ret_off) != 0)
 		 *	goto do_fexit;
 		 */
-		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
-		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
+		/* cmp QWORD PTR [rbp - ret_off], 0x0 */
+		EMIT4(0x48, 0x83, 0x7d, -ret_off); EMIT1(0x00);
 
 		/* Save the location of the branch and Generate 6 nops
 		 * (4 bytes for an offset and 2 bytes for the jump) These nops
@@ -3108,6 +3109,148 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 	return 0;
 }
 
+static int invoke_bpf_session_entry(const struct btf_func_model *m, u8 **pprog,
+				    struct bpf_tramp_links *tl, int stack_size,
+				    int run_ctx_off, int ret_off, int sflags_off,
+				    int cookies_off, void *image, void *rw_image)
+{
+	int i, j = 0, cur_cookie_off;
+	u64 session_flags;
+	u8 *prog = *pprog;
+	u8 *jmp_insn;
+
+	/* clear the session flags:
+	 * xor rax, rax
+	 * mov QWORD PTR [rbp - sflags_off], rax
+	 */
+	EMIT3(0x48, 0x31, 0xC0);
+	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -sflags_off);
+
+	/*
+	 * clear the return value to make sure bpf_get_func_ret() always
+	 * gets 0 on entry:
+	 * mov QWORD PTR [rbp - ret_off], rax
+	 */
+	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
+
+	/* clear all the cookies in the cookie array */
+	for (i = 0; i < tl->nr_links; i++) {
+		if (tl->links[i]->link.prog->call_session_cookie) {
+			cur_cookie_off = -cookies_off + j * 8;
+			/* mov QWORD PTR [rbp + cur_cookie_off], rax */
+			emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
+				 cur_cookie_off);
+			j++;
+		}
+	}
+
+	j = 0;
+	for (i = 0; i < tl->nr_links; i++) {
+		if (tl->links[i]->link.prog->call_session_cookie) {
+			cur_cookie_off = -cookies_off + j * 8;
+			/*
+			 * save the cookie address to rbp - sflags_off + 8:
+			 * lea rax, [rbp + cur_cookie_off]
+			 * mov QWORD PTR [rbp - sflags_off + 8], rax
+			 */
+			if (!is_imm8(cur_cookie_off))
+				EMIT3_off32(0x48, 0x8D, 0x85, cur_cookie_off);
+			else
+				EMIT4(0x48, 0x8D, 0x45, cur_cookie_off);
+			emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -sflags_off + 8);
+			j++;
+		}
+		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
+				    ret_off, image, rw_image))
+			return -EINVAL;
+
+		/* session prog stored return value into [rbp - ret_off]. Emit:
+		 * if (*(u64 *)(rbp - ret_off) != 0) {
+		 *	*(u64 *)(rbp - sflags_off) |= (1 << (i + 1));
+		 *	*(u64 *)(rbp - ret_off) = 0;
+		 * }
+		 */
+		/* cmp QWORD PTR [rbp - ret_off], 0x0 */
+		EMIT4(0x48, 0x83, 0x7d, -ret_off); EMIT1(0x00);
+
+		/* emit 2 nops that will be replaced with JE insn */
+		jmp_insn = prog;
+		emit_nops(&prog, 2);
+
+		session_flags = (1ULL << (i + 1));
+		/* mov rax, $session_flags */
+		emit_mov_imm64(&prog, BPF_REG_0, session_flags >> 32, (u32) session_flags);
+		/* or QWORD PTR [rbp - sflags_off], rax */
+		EMIT2(0x48, 0x09);
+		emit_insn_suffix(&prog, BPF_REG_FP, BPF_REG_0, -sflags_off);
+
+		/* mov QWORD PTR [rbp - ret_off], 0x0 */
+		EMIT4(0x48, 0xC7, 0x45, -ret_off); EMIT4(0x00, 0x00, 0x00, 0x00);
+
+		jmp_insn[0] = X86_JE;
+		jmp_insn[1] = prog - jmp_insn - 2;
+	}
+
+	*pprog = prog;
+	return 0;
+}
+
+static int invoke_bpf_session_exit(const struct btf_func_model *m, u8 **pprog,
+				   struct bpf_tramp_links *tl, int stack_size,
+				   int run_ctx_off, int ret_off, int sflags_off,
+				   int cookies_off, void *image, void *rw_image)
+{
+	int i, j = 0, cur_cookie_off;
+	u64 session_flags;
+	u8 *prog = *pprog;
+	u8 *jmp_insn;
+
+	/*
+	 * set the is_exit bit in the session flags:
+	 * mov rax, 1
+	 * or QWORD PTR [rbp - sflags_off], rax
+	 */
+	emit_mov_imm32(&prog, false, BPF_REG_0, 1);
+	EMIT2(0x48, 0x09);
+	emit_insn_suffix(&prog, BPF_REG_FP, BPF_REG_0, -sflags_off);
+
+	for (i = 0; i < tl->nr_links; i++) {
+		if (tl->links[i]->link.prog->call_session_cookie) {
+			cur_cookie_off = -cookies_off + j * 8;
+			/*
+			 * save the cookie address to rbp - sflags_off + 8:
+			 * lea rax, [rbp + cur_cookie_off]
+			 * mov QWORD PTR [rbp - sflags_off + 8], rax
+			 */
+			if (!is_imm8(cur_cookie_off))
+				EMIT3_off32(0x48, 0x8D, 0x85, cur_cookie_off);
+			else
+				EMIT4(0x48, 0x8D, 0x45, cur_cookie_off);
+			emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -sflags_off + 8);
+			j++;
+		}
+		/* check if (1 << (i + 1)) is set in the session flags, and
+		 * skip the exit invocation of the program if it is.
+		 */
+		session_flags = 1ULL << (i + 1);
+		/* mov rax, $session_flags */
+		emit_mov_imm64(&prog, BPF_REG_0, session_flags >> 32, (u32) session_flags);
+		/* test QWORD PTR [rbp - sflags_off], rax */
+		EMIT2(0x48, 0x85);
+		emit_insn_suffix(&prog, BPF_REG_FP, BPF_REG_0, -sflags_off);
+
+		/* emit 2 nops that will be replaced with JNE insn */
+		jmp_insn = prog;
+		emit_nops(&prog, 2);
+
+		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, false,
+				    ret_off, image, rw_image))
+			return -EINVAL;
+
+		jmp_insn[0] = X86_JNE;
+		jmp_insn[1] = prog - jmp_insn - 2;
+	}
+
+	*pprog = prog;
+	return 0;
+}
+
 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
 #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack)			\
 	__LOAD_TCC_PTR(-round_up(stack, 8) - 8)
@@ -3179,8 +3322,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 			     void *func_addr)
 {
 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
-	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
+	int ret_off, regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off,
+	    rbx_off, sflags_off = 0, cookies_off;
 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+	struct bpf_tramp_links *session = &tlinks[BPF_TRAMP_SESSION];
 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
 	void *orig_call = func_addr;
@@ -3213,7 +3358,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 * RBP + 8          [ return address  ]
 	 * RBP + 0          [ RBP             ]
 	 *
-	 * RBP - 8          [ return value    ] BPF_TRAMP_F_CALL_ORIG or
+	 *                  [ cookie ptr      ] tracing session
+	 * RBP - sflags_off [ session flags   ] tracing session
+	 *
+	 * RBP - ret_off    [ return value    ] BPF_TRAMP_F_CALL_ORIG or
 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
 	 *
 	 * [ reg_argN ]     always
@@ -3228,6 +3376,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 *
 	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
 	 *
+	 *                  [ session cookieN ]
+	 *                  [ ...             ]
+	 * RBP - cookies_off [ session cookie1 ] tracing session
+	 *
 	 * [ stack_argN ]   BPF_TRAMP_F_CALL_ORIG
 	 * [ ...        ]
 	 * [ stack_arg2 ]
@@ -3235,10 +3387,17 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 * RSP              [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
 	 */
 
+	/* room for session flags and cookie ptr */
+	if (session->nr_links) {
+		stack_size += 8 + 8;
+		sflags_off = stack_size;
+	}
+
 	/* room for return value of orig_call or fentry prog */
 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
 	if (save_ret)
 		stack_size += 8;
+	ret_off = stack_size;
 
 	stack_size += nr_regs * 8;
 	regs_off = stack_size;
@@ -3258,6 +3417,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
 	run_ctx_off = stack_size;
 
+	if (session->nr_links) {
+		for (i = 0; i < session->nr_links; i++) {
+			if (session->links[i]->link.prog->call_session_cookie)
+				stack_size += 8;
+		}
+	}
+	cookies_off = stack_size;
+
 	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
 		/* the space that used to pass arguments on-stack */
 		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
@@ -3341,7 +3508,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	if (fentry->nr_links) {
 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
-			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
+			       flags & BPF_TRAMP_F_RET_FENTRY_RET, ret_off,
+			       image, rw_image))
+			return -EINVAL;
+	}
+
+	if (session->nr_links) {
+		if (invoke_bpf_session_entry(m, &prog, session, regs_off,
+					     run_ctx_off, ret_off, sflags_off,
+					     cookies_off, image, rw_image))
 			return -EINVAL;
 	}
 
@@ -3352,7 +3527,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 			return -ENOMEM;
 
 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
-				       run_ctx_off, branches, image, rw_image)) {
+				       run_ctx_off, ret_off, branches,
+				       image, rw_image)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -3380,7 +3556,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 			}
 		}
 		/* remember return value in a stack for bpf prog to access */
-		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
 		im->ip_after_call = image + (prog - (u8 *)rw_image);
 		emit_nops(&prog, X86_PATCH_SIZE);
 	}
@@ -3403,7 +3579,16 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
 	if (fexit->nr_links) {
 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
-			       false, image, rw_image)) {
+			       false, ret_off, image, rw_image)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
+	if (session->nr_links) {
+		if (invoke_bpf_session_exit(m, &prog, session, regs_off,
+					    run_ctx_off, ret_off, sflags_off,
+					    cookies_off, image, rw_image)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
 	}
@@ -3433,7 +3618,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
 	/* restore return value of orig_call or fentry prog back into RAX */
 	if (save_ret)
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -ret_off);
 	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
 
 	EMIT1(0xC9); /* leave */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a47d67db3be5a..b88e8e68a0d5d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1276,6 +1276,7 @@ enum bpf_tramp_prog_type {
 	BPF_TRAMP_FENTRY,
 	BPF_TRAMP_FEXIT,
 	BPF_TRAMP_MODIFY_RETURN,
+	BPF_TRAMP_SESSION,
 	BPF_TRAMP_MAX,
 	BPF_TRAMP_REPLACE, /* more than MAX */
 };
@@ -1743,6 +1744,7 @@ struct bpf_prog {
 				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
 				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
 				call_get_func_ip:1, /* Do we call get_func_ip() */
+				call_session_cookie:1, /* Do we call bpf_fsession_cookie() */
 				tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
 				sleepable:1;	/* BPF program is sleepable */
 	enum bpf_prog_type	type;		/* Type of BPF program */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 1d73f165394d0..74903917a5a5c 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1133,6 +1133,7 @@ enum bpf_attach_type {
 	BPF_NETKIT_PEER,
 	BPF_TRACE_KPROBE_SESSION,
 	BPF_TRACE_UPROBE_SESSION,
+	BPF_TRACE_SESSION,
 	__MAX_BPF_ATTACH_TYPE
 };
 
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 0de8fc8a0e0b3..2c1c3e0caff89 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6107,6 +6107,7 @@ static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
 	case BPF_MODIFY_RETURN:
+	case BPF_TRACE_SESSION:
 		/* allow u64* as ctx */
 		if (btf_is_int(t) && t->size == 8)
 			return 0;
@@ -6704,6 +6705,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 		fallthrough;
 	case BPF_LSM_CGROUP:
 	case BPF_TRACE_FEXIT:
+	case BPF_TRACE_SESSION:
 		/* When LSM programs are attached to void LSM hooks
 		 * they use FEXIT trampolines and when attached to
 		 * int LSM hooks, they use MODIFY_RETURN trampolines.
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 8a129746bd6cc..cb483701fe393 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3564,6 +3564,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 	case BPF_PROG_TYPE_TRACING:
 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
+		    prog->expected_attach_type != BPF_TRACE_SESSION &&
 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
 			err = -EINVAL;
 			goto out_put_prog;
@@ -4337,6 +4338,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
 	case BPF_TRACE_RAW_TP:
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
+	case BPF_TRACE_SESSION:
 	case BPF_MODIFY_RETURN:
 		return BPF_PROG_TYPE_TRACING;
 	case BPF_LSM_MAC:
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 5949095e51c3d..f6d4dea3461ef 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -111,7 +111,7 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
 
 	return (ptype == BPF_PROG_TYPE_TRACING &&
 		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
-		 eatype == BPF_MODIFY_RETURN)) ||
+		 eatype == BPF_MODIFY_RETURN || eatype == BPF_TRACE_SESSION)) ||
 	       (ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
 }
 
@@ -418,6 +418,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
 
 	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
+	    tlinks[BPF_TRAMP_SESSION].nr_links ||
 	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
 		/* NOTE: BPF_TRAMP_F_RESTORE_REGS and BPF_TRAMP_F_SKIP_FRAME
 		 * should not be set together.
@@ -515,6 +516,8 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
 		return BPF_TRAMP_MODIFY_RETURN;
 	case BPF_TRACE_FEXIT:
 		return BPF_TRAMP_FEXIT;
+	case BPF_TRACE_SESSION:
+		return BPF_TRAMP_SESSION;
 	case BPF_LSM_MAC:
 		if (!prog->aux->attach_func_proto->type)
 			/* The function returns void, we cannot modify its
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 542e23fb19c7b..da76189327035 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12290,6 +12290,8 @@ enum special_kfunc_type {
 	KF___bpf_trap,
 	KF_bpf_task_work_schedule_signal,
 	KF_bpf_task_work_schedule_resume,
+	KF_bpf_tracing_is_exit,
+	KF_bpf_fsession_cookie,
 };
 
 BTF_ID_LIST(special_kfunc_list)
@@ -12364,6 +12366,8 @@ BTF_ID(func, bpf_dynptr_file_discard)
 BTF_ID(func, __bpf_trap)
 BTF_ID(func, bpf_task_work_schedule_signal)
 BTF_ID(func, bpf_task_work_schedule_resume)
+BTF_ID(func, bpf_tracing_is_exit)
+BTF_ID(func, bpf_fsession_cookie)
 
 static bool is_task_work_add_kfunc(u32 func_id)
 {
@@ -12418,7 +12422,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
 	struct bpf_reg_state *reg = &regs[regno];
 	bool arg_mem_size = false;
 
-	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
+	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+	    meta->func_id == special_kfunc_list[KF_bpf_tracing_is_exit] ||
+	    meta->func_id == special_kfunc_list[KF_bpf_fsession_cookie])
 		return KF_ARG_PTR_TO_CTX;
 
 	/* In this function, we verify the kfunc's BTF as per the argument type,
@@ -13916,7 +13922,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		}
 	}
 
-	if (meta.func_id == special_kfunc_list[KF_bpf_session_cookie]) {
+	if (meta.func_id == special_kfunc_list[KF_bpf_session_cookie] ||
+	    meta.func_id == special_kfunc_list[KF_bpf_fsession_cookie]) {
 		meta.r0_size = sizeof(u64);
 		meta.r0_rdonly = false;
 	}
@@ -14203,6 +14210,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			return err;
 	}
 
+	if (meta.func_id == special_kfunc_list[KF_bpf_fsession_cookie])
+		env->prog->call_session_cookie = true;
+
 	return 0;
 }
 
@@ -17291,6 +17301,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
 		break;
 	case BPF_TRACE_RAW_TP:
 	case BPF_MODIFY_RETURN:
+	case BPF_TRACE_SESSION:
 		return 0;
 	case BPF_TRACE_ITER:
 		break;
@@ -22040,6 +22051,25 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
 		*cnt = 1;
+	} else if (desc->func_id == special_kfunc_list[KF_bpf_tracing_is_exit]) {
+		/* Load nr_args from ctx - 8 */
+		insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+		/* r0 = nr_args + 1, the index of the session flags */
+		insn_buf[1] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1);
+		insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
+		insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
+		insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+		insn_buf[5] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1);
+		*cnt = 6;
+	} else if (desc->func_id == special_kfunc_list[KF_bpf_fsession_cookie]) {
+		/* Load nr_args from ctx - 8 */
+		insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+		/* r0 = nr_args + 2, the index of the session cookie pointer */
+		insn_buf[1] = BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2);
+		insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
+		insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
+		insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+		*cnt = 5;
 	}
 
 	if (env->insn_aux_data[insn_idx].arg_prog) {
@@ -22783,6 +22813,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		if (prog_type == BPF_PROG_TYPE_TRACING &&
 		    insn->imm == BPF_FUNC_get_func_ret) {
 			if (eatype == BPF_TRACE_FEXIT ||
+			    eatype == BPF_TRACE_SESSION ||
 			    eatype == BPF_MODIFY_RETURN) {
 				/* Load nr_args from ctx - 8 */
 				insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
@@ -23724,7 +23755,8 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
 		    prog_extension &&
 		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
-		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
+		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT ||
+		     tgt_prog->expected_attach_type == BPF_TRACE_SESSION)) {
 			/* Program extensions can extend all program types
 			 * except fentry/fexit. The reason is the following.
 			 * The fentry/fexit programs are used for performance
@@ -23739,7 +23771,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 			 * beyond reasonable stack size. Hence extending fentry
 			 * is not allowed.
 			 */
-			bpf_log(log, "Cannot extend fentry/fexit\n");
+			bpf_log(log, "Cannot extend fentry/fexit/session\n");
 			return -EINVAL;
 		}
 	} else {
@@ -23823,6 +23855,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 	case BPF_LSM_CGROUP:
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
+	case BPF_TRACE_SESSION:
 		if (!btf_type_is_func(t)) {
 			bpf_log(log, "attach_btf_id %u is not a function\n",
 				btf_id);
@@ -23989,6 +24022,7 @@ static bool can_be_sleepable(struct bpf_prog *prog)
 		case BPF_TRACE_FEXIT:
 		case BPF_MODIFY_RETURN:
 		case BPF_TRACE_ITER:
+		case BPF_TRACE_SESSION:
 			return true;
 		default:
 			return false;
@@ -24070,9 +24104,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 			tgt_info.tgt_name);
 		return -EINVAL;
 	} else if ((prog->expected_attach_type == BPF_TRACE_FEXIT ||
+		    prog->expected_attach_type == BPF_TRACE_SESSION ||
 		    prog->expected_attach_type == BPF_MODIFY_RETURN) &&
 		   btf_id_set_contains(&noreturn_deny, btf_id)) {
-		verbose(env, "Attaching fexit/fmod_ret to __noreturn function '%s' is rejected.\n",
+		verbose(env, "Attaching fexit/session/fmod_ret to __noreturn function '%s' is rejected.\n",
 			tgt_info.tgt_name);
 		return -EINVAL;
 	}
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index a795f7afbf3de..6d561dcd711cd 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3356,12 +3356,65 @@ static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
 	.filter = bpf_kprobe_multi_filter,
 };
 
-static int __init bpf_kprobe_multi_kfuncs_init(void)
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc bool bpf_tracing_is_exit(void *ctx)
 {
-	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+	/* This helper call is inlined by the verifier. */
+	u64 nr_args = ((u64 *)ctx)[-1];
+
+	/*
+	 * ctx[nr_args + 1] is the session flags, and the last bit is
+	 * is_exit.
+	 */
+	return ((u64 *)ctx)[nr_args + 1] & 1;
+}
+
+__bpf_kfunc u64 *bpf_fsession_cookie(void *ctx)
+{
+	/* This helper call is inlined by the verifier. */
+	u64 nr_args = ((u64 *)ctx)[-1];
+
+	/* ctx[nr_args + 2] is the session cookie address */
+	return (u64 *)((u64 *)ctx)[nr_args + 2];
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(tracing_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_tracing_is_exit, KF_FASTCALL)
+BTF_ID_FLAGS(func, bpf_fsession_cookie, KF_FASTCALL)
+BTF_KFUNCS_END(tracing_kfunc_set_ids)
+
+static int bpf_tracing_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+	if (!btf_id_set8_contains(&tracing_kfunc_set_ids, kfunc_id))
+		return 0;
+
+	if (prog->type != BPF_PROG_TYPE_TRACING ||
+	    prog->expected_attach_type != BPF_TRACE_SESSION)
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct btf_kfunc_id_set bpf_tracing_kfunc_set = {
+	.owner = THIS_MODULE,
+	.set = &tracing_kfunc_set_ids,
+	.filter = bpf_tracing_filter,
+};
+
+static int __init bpf_trace_kfuncs_init(void)
+{
+	int err = 0;
+
+	err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+	err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_tracing_kfunc_set);
+
+	return err;
 }
 
-late_initcall(bpf_kprobe_multi_kfuncs_init);
+late_initcall(bpf_trace_kfuncs_init);
 
 typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 655efac6f1334..ddec08b696de7 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -685,6 +685,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
 	switch (prog->expected_attach_type) {
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
+	case BPF_TRACE_SESSION:
 		if (bpf_fentry_test1(1) != 2 ||
 		    bpf_fentry_test2(2, 3) != 5 ||
 		    bpf_fentry_test3(4, 5, 6) != 15 ||
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index d3fbaf89a698d..8da8834aa1342 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -365,6 +365,7 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
 		return true;
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
+	case BPF_TRACE_SESSION:
 		return !!strncmp(prog->aux->attach_func_name, "bpf_sk_storage",
 				 strlen("bpf_sk_storage"));
 	default:
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index e8daf963ecef4..534be6cfa2be9 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -1191,6 +1191,7 @@ const char *bpf_attach_type_input_str(enum bpf_attach_type t)
 	case BPF_TRACE_FENTRY:			return "fentry";
 	case BPF_TRACE_FEXIT:			return "fexit";
 	case BPF_MODIFY_RETURN:			return "mod_ret";
+	case BPF_TRACE_SESSION:			return "fsession";
 	case BPF_SK_REUSEPORT_SELECT:		return "sk_skb_reuseport_select";
 	case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:	return "sk_skb_reuseport_select_or_migrate";
 	default:	return libbpf_bpf_attach_type_str(t);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 1d73f165394d0..74903917a5a5c 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1133,6 +1133,7 @@ enum bpf_attach_type {
 	BPF_NETKIT_PEER,
 	BPF_TRACE_KPROBE_SESSION,
 	BPF_TRACE_UPROBE_SESSION,
+	BPF_TRACE_SESSION,
 	__MAX_BPF_ATTACH_TYPE
 };
 
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index b66f5fbfbbb29..1205ad75f740b 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -794,6 +794,7 @@ int bpf_link_create(int prog_fd, int target_fd,
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
 	case BPF_MODIFY_RETURN:
+	case BPF_TRACE_SESSION:
 	case BPF_LSM_MAC:
 		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
 		if (!OPTS_ZEROED(opts, tracing))
@@ -917,6 +918,7 @@ int bpf_link_create(int prog_fd, int target_fd,
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
 	case BPF_MODIFY_RETURN:
+	case BPF_TRACE_SESSION:
 		return bpf_raw_tracepoint_open(NULL, prog_fd);
 	default:
 		return libbpf_err(err);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index fbe74686c97da..dbed713e4a1c5 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -115,6 +115,7 @@ static const char * const attach_type_name[] = {
 	[BPF_TRACE_FENTRY]		= "trace_fentry",
 	[BPF_TRACE_FEXIT]		= "trace_fexit",
 	[BPF_MODIFY_RETURN]		= "modify_return",
+	[BPF_TRACE_SESSION]		= "trace_session",
 	[BPF_LSM_MAC]			= "lsm_mac",
 	[BPF_LSM_CGROUP]		= "lsm_cgroup",
 	[BPF_SK_LOOKUP]			= "sk_lookup",
@@ -9607,6 +9608,8 @@ static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("fentry.s+",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
 	SEC_DEF("fmod_ret.s+",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
 	SEC_DEF("fexit.s+",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+	SEC_DEF("fsession+",		TRACING, BPF_TRACE_SESSION, SEC_ATTACH_BTF, attach_trace),
+	SEC_DEF("fsession.s+",		TRACING, BPF_TRACE_SESSION, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
 	SEC_DEF("freplace+",		EXT, 0, SEC_ATTACH_BTF, attach_trace),
 	SEC_DEF("lsm+",			LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
 	SEC_DEF("lsm.s+",		LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
diff --git a/tools/testing/selftests/bpf/prog_tests/fsession_test.c b/tools/testing/selftests/bpf/prog_tests/fsession_test.c
new file mode 100644
index 0000000000000..d70bdb683691e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fsession_test.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 ChinaTelecom */
+#include <test_progs.h>
+#include "fsession_test.skel.h"
+
+static int check_result(struct fsession_test *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd;
+
+	/* Trigger test function calls */
+	prog_fd = bpf_program__fd(skel->progs.test1);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return err;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return topts.retval;
+
+	for (int i = 0; i < sizeof(*skel->bss) / sizeof(__u64); i++) {
+		if (!ASSERT_EQ(((__u64 *)skel->bss)[i], 1, "test_result"))
+			return -EINVAL;
+	}
+
+	/* some fields go to the "data" section, not "bss" */
+	for (int i = 0; i < sizeof(*skel->data) / sizeof(__u64); i++) {
+		if (!ASSERT_EQ(((__u64 *)skel->data)[i], 1, "test_result"))
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static void test_fsession_basic(void)
+{
+	struct fsession_test *skel = NULL;
+	int err;
+
+	skel = fsession_test__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "fsession_test__open_and_load"))
+		goto cleanup;
+
+	err = fsession_test__attach(skel);
+	if (!ASSERT_OK(err, "fsession_attach"))
+		goto cleanup;
+
+	check_result(skel);
+cleanup:
+	fsession_test__destroy(skel);
+}
+
+static void test_fsession_reattach(void)
+{
+	struct fsession_test *skel = NULL;
+	int err;
+
+	skel = fsession_test__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "fsession_test__open_and_load"))
+		goto cleanup;
+
+	/* First attach */
+	err = fsession_test__attach(skel);
+	if (!ASSERT_OK(err, "fsession_first_attach"))
+		goto cleanup;
+
+	if (check_result(skel))
+		goto cleanup;
+
+	/* Detach */
+	fsession_test__detach(skel);
+
+	/* Reset counters */
+	memset(skel->bss, 0, sizeof(*skel->bss));
+
+	/* Second attach */
+	err = fsession_test__attach(skel);
+	if (!ASSERT_OK(err, "fsession_second_attach"))
+		goto cleanup;
+
+	if (check_result(skel))
+		goto cleanup;
+
+cleanup:
+	fsession_test__destroy(skel);
+}
+
+void test_fsession_test(void)
+{
+#if !defined(__x86_64__)
+	test__skip();
+	return;
+#endif
+	if (test__start_subtest("fsession_basic"))
+		test_fsession_basic();
+	if (test__start_subtest("fsession_reattach"))
+		test_fsession_reattach();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
index 10e231965589e..58b02552507d1 100644
--- a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
@@ -73,7 +73,7 @@ static void test_tracing_deny(void)
 static void test_fexit_noreturns(void)
 {
 	test_tracing_fail_prog("fexit_noreturns",
-			       "Attaching fexit/fmod_ret to __noreturn function 'do_exit' is rejected.");
+			       "Attaching fexit/session/fmod_ret to __noreturn function 'do_exit' is rejected.");
 }
 
 void test_tracing_failure(void)
diff --git a/tools/testing/selftests/bpf/progs/fsession_test.c b/tools/testing/selftests/bpf/progs/fsession_test.c
new file mode 100644
index 0000000000000..f78348e541a48
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fsession_test.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 ChinaTelecom */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+extern const void bpf_fentry_test1 __ksym;
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_entry_result = 0;
+__u64 test1_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test1, int a, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		/* This is entry */
+		test1_entry_result = a == 1 && ret == 0;
+		/* Return 0 to allow exit to be called */
+		return 0;
+	}
+
+	/* This is exit */
+	test1_exit_result = a == 1 && ret == 2;
+	return 0;
+}
+
+__u64 test2_entry_result = 0;
+__u64 test2_exit_result = 1;
+
+SEC("fsession/bpf_fentry_test2")
+int BPF_PROG(test2, int a, __u64 b, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		/* This is entry */
+		test2_entry_result = a == 2 && b == 3 && ret == 0;
+		/* Return non-zero value to block exit call */
+		return 1;
+	}
+
+	/* This is exit - should not be called due to blocking */
+	test2_exit_result = 0;
+	return 0;
+}
+
+__u64 test3_entry_result = 0;
+__u64 test3_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test3")
+int BPF_PROG(test3, char a, int b, __u64 c, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		test3_entry_result = a == 4 && b == 5 && c == 6 && ret == 0;
+		return 0;
+	}
+
+	test3_exit_result = a == 4 && b == 5 && c == 6 && ret == 15;
+	return 0;
+}
+
+__u64 test4_entry_result = 0;
+__u64 test4_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test4")
+int BPF_PROG(test4, void *a, char b, int c, __u64 d, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		test4_entry_result = a == (void *)7 && b == 8 && c == 9 && d == 10 && ret == 0;
+		return 0;
+	}
+
+	test4_exit_result = a == (void *)7 && b == 8 && c == 9 && d == 10 && ret == 34;
+	return 0;
+}
+
+__u64 test5_entry_result = 0;
+__u64 test5_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test5")
+int BPF_PROG(test5, __u64 a, void *b, short c, int d, __u64 e, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		test5_entry_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
+			e == 15 && ret == 0;
+		return 0;
+	}
+
+	test5_exit_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
+		e == 15 && ret == 65;
+	return 0;
+}
+
+__u64 test6_entry_result = 0;
+__u64 test6_exit_result = 1;
+
+SEC("fsession/bpf_fentry_test6")
+int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		test6_entry_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
+			e == (void *)20 && f == 21 && ret == 0;
+		return 1;
+	}
+
+	/* This is exit - should not be called due to blocking */
+	test6_exit_result = 0;
+	return 0;
+}
+
+__u64 test7_entry_result = 0;
+__u64 test7_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		if (!arg)
+			test7_entry_result = ret == 0;
+		return 0;
+	}
+
+	if (!arg)
+		test7_exit_result = 1;
+	return 0;
+}
+
+__u64 test8_entry_result = 0;
+__u64 test8_exit_result = 1;
+
+/*
+ * test1, test8 and test9 hook the same target to verify the "ret" is always
+ * 0 on entry.
+ */
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test8, int a, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		test8_entry_result = a == 1 && ret == 0;
+		return -21;
+	}
+
+	/* This is exit - should not be called due to blocking */
+	test8_exit_result = 0;
+	return 0;
+}
+
+__u64 test9_entry_result = 0;
+__u64 test9_exit_result = 1;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test9, int a, int ret)
+{
+	bool is_exit = bpf_tracing_is_exit(ctx);
+
+	if (!is_exit) {
+		test9_entry_result = a == 1 && ret == 0;
+		return -22;
+	}
+
+	test9_exit_result = 0;
+	return 0;
+}
+
+__u64 test10_entry_result = 0;
+__u64 test10_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test10, int a)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	if (bpf_tracing_is_exit(ctx))
+		test10_exit_result = (const void *) addr == &bpf_fentry_test1;
+	else
+		test10_entry_result = (const void *) addr == &bpf_fentry_test1;
+	return 0;
+}
+
+__u64 test11_entry_ok = 0;
+__u64 test11_exit_ok = 0;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test11, int a)
+{
+	__u64 *cookie = bpf_fsession_cookie(ctx);
+
+	if (!bpf_tracing_is_exit(ctx)) {
+		if (cookie) {
+			*cookie = 0xAAAABBBBCCCCDDDDull;
+			test11_entry_ok = *cookie == 0xAAAABBBBCCCCDDDDull;
+		}
+		return 0;
+	}
+
+	if (cookie)
+		test11_exit_ok = *cookie == 0xAAAABBBBCCCCDDDDull;
+	return 0;
+}
+
+__u64 test12_entry_ok = 0;
+__u64 test12_exit_ok = 0;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test12, int a)
+{
+	__u64 *cookie = bpf_fsession_cookie(ctx);
+
+	if (!bpf_tracing_is_exit(ctx)) {
+		if (cookie) {
+			*cookie = 0x1111222233334444ull;
+			test12_entry_ok = *cookie == 0x1111222233334444ull;
+		}
+		return 0;
+	}
+
+	if (cookie)
+		test12_exit_ok = *cookie == 0x1111222233334444ull;
+	return 0;
+}
+
+__u64 test13_entry_result = 0;
+__u64 test13_exit_result = 0;
+
+SEC("fsession/bpf_fentry_test1")
+int BPF_PROG(test13, int a, int ret)
+{
+	__u64 *cookie = bpf_fsession_cookie(ctx);
+
+	if (!bpf_tracing_is_exit(ctx)) {
+		test13_entry_result = a == 1 && ret == 0;
+		*cookie = 0x123456ULL;
+		return 0;
+	}
+
+	test13_exit_result = a == 1 && ret == 2 && *cookie == 0x123456ULL;
+	return 0;
+}
+
+__u64 test14_result = 0;
+
+SEC("fexit/bpf_fentry_test1")
+int BPF_PROG(test14, int a, int ret)
+{
+	test14_result = a == 1 && ret == 2;
+	return 0;
+}
+
+__u64 test15_result = 0;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test15, int a)
+{
+	test15_result = a == 1;
+	return 0;
+}
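
Editorial sketch, not part of the patch: the control flow that the x86 JIT above emits for one session program, written as C-like pseudocode. Names are illustrative, error paths are omitted, and the real code operates on stack slots rather than local variables:

	/* What the trampoline does around orig_call for session prog i. */
	u64 ret = 0;			/* [rbp - ret_off], cleared on entry */
	u64 sflags = 0;			/* [rbp - sflags_off] */
	u64 cookie = 0;			/* [rbp - cookies_off + j * 8] */
	u64 *cookie_ptr = &cookie;	/* [rbp - sflags_off + 8] */

	r = run_prog(session[i]);	/* entry call: bpf_tracing_is_exit() == false */
	if (r != 0) {
		sflags |= 1ULL << (i + 1);	/* skip this prog's exit call later */
		ret = 0;			/* keep bpf_get_func_ret() at 0 on entry */
	}

	ret = orig_call(args);		/* BPF_TRAMP_F_CALL_ORIG */

	sflags |= 1;			/* bit 0: is_exit */
	if (!(sflags & (1ULL << (i + 1))))
		run_prog(session[i]);	/* exit call: same prog, same cookie slot */

	return ret;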
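The two kfuncs never execute as calls; fixup_kfunc_call() above inlines them as fixed offsets off the program's ctx, which points at the saved arguments. The sketch below mirrors the kfunc bodies from kernel/trace/bpf_trace.c; the "sketch_" function names are made up:

	/*
	 * Layout as seen from a session prog's ctx, for a target with
	 * nr_args arguments:
	 *
	 *   ((u64 *)ctx)[-1]            nr_args, stored by the trampoline
	 *   ((u64 *)ctx)[0..nr_args-1]  the function arguments
	 *   ((u64 *)ctx)[nr_args]       return value slot (rbp - ret_off)
	 *   ((u64 *)ctx)[nr_args + 1]   session flags (rbp - sflags_off)
	 *   ((u64 *)ctx)[nr_args + 2]   address of this prog's cookie slot
	 *                               (rbp - sflags_off + 8)
	 */
	static bool sketch_tracing_is_exit(void *ctx)
	{
		u64 nr_args = ((u64 *)ctx)[-1];

		return ((u64 *)ctx)[nr_args + 1] & 1;	/* bit 0 is is_exit */
	}

	static u64 *sketch_fsession_cookie(void *ctx)
	{
		u64 nr_args = ((u64 *)ctx)[-1];

		return (u64 *)((u64 *)ctx)[nr_args + 2];
	}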
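From userspace, nothing beyond the new "fsession"/"fsession.s" section names should be needed; auto-attach goes through the existing attach_trace() path registered in section_defs above. A minimal, hypothetical loader (the object file and program names are invented for illustration; only public libbpf APIs are used):

	#include <bpf/libbpf.h>

	int main(void)
	{
		struct bpf_object *obj;
		struct bpf_program *prog;
		struct bpf_link *link;

		obj = bpf_object__open_file("fsession_demo.bpf.o", NULL);
		if (!obj || bpf_object__load(obj))
			return 1;

		/* defined as SEC("fsession/bpf_fentry_test1") in the .bpf.o */
		prog = bpf_object__find_program_by_name(obj, "demo");
		link = prog ? bpf_program__attach(prog) : NULL;
		if (!link)
			return 1;

		/* ... run workload; entry and exit both invoke "demo" ... */

		bpf_link__destroy(link);
		bpf_object__close(obj);
		return 0;
	}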