Skip to content

Commit 6677ab1

Browse files
image-dragon (Kernel Patches Daemon)
authored and committed
bpf: add tracing session support
The tracing session is similar to the kprobe session. It allows attaching a single BPF program to both the entry and the exit of the target functions. When a non-zero value is returned by the fentry, the fexit will be skipped, which mirrors the kprobe session behavior. Signed-off-by: Menglong Dong <[email protected]> Co-developed-by: Leon Hwang <[email protected]> Signed-off-by: Leon Hwang <[email protected]>
1 parent 583dec7 commit 6677ab1

File tree

16 files changed

+41
-5
lines changed

16 files changed

+41
-5
lines changed

arch/arm64/net/bpf_jit_comp.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2788,6 +2788,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
27882788
void *image, *tmp;
27892789
int ret;
27902790

2791+
if (tlinks[BPF_TRAMP_SESSION].nr_links)
2792+
return -EOPNOTSUPP;
2793+
27912794
/* image doesn't need to be in module memory range, so we can
27922795
* use kvmalloc.
27932796
*/

arch/loongarch/net/bpf_jit.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1739,6 +1739,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
17391739
void *image, *tmp;
17401740
struct jit_ctx ctx;
17411741

1742+
if (tlinks[BPF_TRAMP_SESSION].nr_links)
1743+
return -EOPNOTSUPP;
1744+
17421745
size = ro_image_end - ro_image;
17431746
image = kvmalloc(size, GFP_KERNEL);
17441747
if (!image)

arch/powerpc/net/bpf_jit_comp.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1017,6 +1017,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
10171017
void *rw_image, *tmp;
10181018
int ret;
10191019

1020+
if (tlinks[BPF_TRAMP_SESSION].nr_links)
1021+
return -EOPNOTSUPP;
1022+
10201023
/*
10211024
* rw_image doesn't need to be in module memory range, so we can
10221025
* use kvmalloc.

arch/riscv/net/bpf_jit_comp64.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1286,6 +1286,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
12861286
struct rv_jit_context ctx;
12871287
u32 size = ro_image_end - ro_image;
12881288

1289+
if (tlinks[BPF_TRAMP_SESSION].nr_links)
1290+
return -EOPNOTSUPP;
1291+
12891292
image = kvmalloc(size, GFP_KERNEL);
12901293
if (!image)
12911294
return -ENOMEM;

arch/s390/net/bpf_jit_comp.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2924,6 +2924,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
29242924
struct bpf_tramp_jit tjit;
29252925
int ret;
29262926

2927+
if (tlinks[BPF_TRAMP_SESSION].nr_links)
2928+
return -EOPNOTSUPP;
2929+
29272930
/* Compute offsets, check whether the code fits. */
29282931
memset(&tjit, 0, sizeof(tjit));
29292932
ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,

arch/x86/net/bpf_jit_comp.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3478,6 +3478,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
34783478
int ret;
34793479
u32 size = image_end - image;
34803480

3481+
if (tlinks[BPF_TRAMP_SESSION].nr_links)
3482+
return -EOPNOTSUPP;
3483+
34813484
/* rw_image doesn't need to be in module memory range, so we can
34823485
* use kvmalloc.
34833486
*/

include/linux/bpf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1276,6 +1276,7 @@ enum bpf_tramp_prog_type {
12761276
BPF_TRAMP_FENTRY,
12771277
BPF_TRAMP_FEXIT,
12781278
BPF_TRAMP_MODIFY_RETURN,
1279+
BPF_TRAMP_SESSION,
12791280
BPF_TRAMP_MAX,
12801281
BPF_TRAMP_REPLACE, /* more than MAX */
12811282
};

include/uapi/linux/bpf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1133,6 +1133,7 @@ enum bpf_attach_type {
11331133
BPF_NETKIT_PEER,
11341134
BPF_TRACE_KPROBE_SESSION,
11351135
BPF_TRACE_UPROBE_SESSION,
1136+
BPF_TRACE_SESSION,
11361137
__MAX_BPF_ATTACH_TYPE
11371138
};
11381139

kernel/bpf/btf.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6107,6 +6107,7 @@ static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct
61076107
case BPF_TRACE_FENTRY:
61086108
case BPF_TRACE_FEXIT:
61096109
case BPF_MODIFY_RETURN:
6110+
case BPF_TRACE_SESSION:
61106111
/* allow u64* as ctx */
61116112
if (btf_is_int(t) && t->size == 8)
61126113
return 0;
@@ -6704,6 +6705,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
67046705
fallthrough;
67056706
case BPF_LSM_CGROUP:
67066707
case BPF_TRACE_FEXIT:
6708+
case BPF_TRACE_SESSION:
67076709
/* When LSM programs are attached to void LSM hooks
67086710
* they use FEXIT trampolines and when attached to
67096711
* int LSM hooks, they use MODIFY_RETURN trampolines.

kernel/bpf/syscall.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3564,6 +3564,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
35643564
case BPF_PROG_TYPE_TRACING:
35653565
if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
35663566
prog->expected_attach_type != BPF_TRACE_FEXIT &&
3567+
prog->expected_attach_type != BPF_TRACE_SESSION &&
35673568
prog->expected_attach_type != BPF_MODIFY_RETURN) {
35683569
err = -EINVAL;
35693570
goto out_put_prog;
@@ -4337,6 +4338,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
43374338
case BPF_TRACE_RAW_TP:
43384339
case BPF_TRACE_FENTRY:
43394340
case BPF_TRACE_FEXIT:
4341+
case BPF_TRACE_SESSION:
43404342
case BPF_MODIFY_RETURN:
43414343
return BPF_PROG_TYPE_TRACING;
43424344
case BPF_LSM_MAC:

0 commit comments

Comments
 (0)