2 changes: 1 addition & 1 deletion arch/x86/include/asm/ftrace.h
@@ -57,7 +57,7 @@ arch_ftrace_get_regs(struct ftrace_regs *fregs)
}

#define arch_ftrace_partial_regs(regs) do { \
-	regs->flags &= ~X86_EFLAGS_FIXED; \
+	regs->flags |= X86_EFLAGS_FIXED; \
regs->cs = __KERNEL_CS; \
} while (0)

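X86_EFLAGS_FIXED is bit 1 of EFLAGS (0x2), which the architecture defines as always set, so a synthesized flags image should have it ORed in rather than masked out. A likely consumer, not spelled out in the diff, is perf's x86 callchain code: perf_hw_regs() in arch/x86/events/core.c keys off this bit to decide whether a pt_regs snapshot can seed the ORC unwinder directly. Below is a standalone userspace sketch of the bit logic only; the constant value is copied from arch/x86/include/uapi/asm/processor-flags.h.

```c
/* Standalone illustration (userspace build), not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define X86_EFLAGS_FIXED 0x00000002UL	/* reserved bit 1, always reads as 1 */

int main(void)
{
	uint64_t flags = 0x246;	/* a typical live EFLAGS value */

	uint64_t old_way = flags & ~X86_EFLAGS_FIXED;	/* previous macro: clears the always-set bit */
	uint64_t new_way = flags | X86_EFLAGS_FIXED;	/* this change: keeps it set */

	printf("old: %#lx  new: %#lx\n",
	       (unsigned long)old_way, (unsigned long)new_way);
	return 0;
}
```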
5 changes: 4 additions & 1 deletion arch/x86/kernel/ftrace_64.S
@@ -364,6 +364,9 @@ SYM_CODE_START(return_to_handler)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR

+	/* Store original rsp for pt_regs.sp value. */
+	movq %rsp, %rdi
+
/* Restore return_to_handler value that got eaten by previous ret instruction. */
subq $8, %rsp
UNWIND_HINT_FUNC
@@ -374,7 +377,7 @@ SYM_CODE_START(return_to_handler)
movq %rax, RAX(%rsp)
movq %rdx, RDX(%rsp)
movq %rbp, RBP(%rsp)
-	movq %rsp, RSP(%rsp)
+	movq %rdi, RSP(%rsp)
movq %rsp, %rdi

call ftrace_return_to_handler
4 changes: 4 additions & 0 deletions tools/testing/selftests/bpf/bench.c
@@ -265,6 +265,7 @@ static const struct argp_option opts[] = {
{ "verbose", 'v', NULL, 0, "Verbose debug output"},
{ "affinity", 'a', NULL, 0, "Set consumer/producer thread affinity"},
{ "quiet", 'q', NULL, 0, "Be more quiet"},
{ "stacktrace", 's', NULL, 0, "Get stack trace"},
{ "prod-affinity", ARG_PROD_AFFINITY_SET, "CPUSET", 0,
"Set of CPUs for producer threads; implies --affinity"},
{ "cons-affinity", ARG_CONS_AFFINITY_SET, "CPUSET", 0,
@@ -350,6 +351,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
case 'q':
env.quiet = true;
break;
case 's':
env.stacktrace = true;
break;
case ARG_PROD_AFFINITY_SET:
env.affinity = true;
if (parse_num_list(arg, &env.prod_cpus.cpus,
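The new -s switch follows the same argp pattern as the existing options: an entry in the option table with a single-character key and a matching case in parse_arg() that flips a boolean in env. A minimal self-contained sketch of that pattern is below; the names are illustrative, not the benchmark's own.

```c
/* Minimal standalone argp sketch (glibc argp.h). */
#include <argp.h>
#include <stdbool.h>
#include <stdio.h>

static struct {
	bool stacktrace;
} env;

static const struct argp_option opts[] = {
	{ "stacktrace", 's', NULL, 0, "Get stack trace" },
	{},
};

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	switch (key) {
	case 's':
		env.stacktrace = true;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}

static const struct argp argp = { .options = opts, .parser = parse_arg };

int main(int argc, char **argv)
{
	if (argp_parse(&argp, argc, argv, 0, NULL, NULL))
		return 1;
	printf("stacktrace: %d\n", env.stacktrace);
	return 0;
}
```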
1 change: 1 addition & 0 deletions tools/testing/selftests/bpf/bench.h
@@ -26,6 +26,7 @@ struct env {
bool list;
bool affinity;
bool quiet;
bool stacktrace;
int consumer_cnt;
int producer_cnt;
int nr_cpus;
1 change: 1 addition & 0 deletions tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -146,6 +146,7 @@ static void setup_ctx(void)
bpf_program__set_autoload(ctx.skel->progs.trigger_driver, true);

ctx.skel->rodata->batch_iters = args.batch_iters;
ctx.skel->rodata->stacktrace = env.stacktrace;
}

static void load_ctx(void)
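The rodata assignment works because setup_ctx() runs after the skeleton is opened and before load_ctx() loads it: volatile const globals live in the .rodata map, which libbpf freezes at load time, so the verifier treats stacktrace as a known constant and can prune the disabled branch in trigger_bench.c further down. A minimal sketch of that open, set, load ordering, assuming the usual bpftool-generated skeleton API for trigger_bench:

```c
#include <stdbool.h>
#include "trigger_bench.skel.h"	/* bpftool-generated skeleton (assumed) */

static struct trigger_bench *open_and_load(bool stacktrace)
{
	struct trigger_bench *skel;

	skel = trigger_bench__open();	/* open phase: .rodata still writable */
	if (!skel)
		return NULL;

	/* Must happen before load: at load time libbpf freezes .rodata and the
	 * verifier specializes the programs against its contents, so the
	 * `if (!stacktrace)` check in trigger_bench.c becomes a dead branch
	 * when the flag is false. */
	skel->rodata->stacktrace = stacktrace;

	if (trigger_bench__load(skel)) {
		trigger_bench__destroy(skel);
		return NULL;
	}
	return skel;
}
```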
120 changes: 115 additions & 5 deletions tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -74,11 +74,20 @@ static void test_stacktrace_ips_kprobe_multi(bool retprobe)

load_kallsyms();

-	check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
-			     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
-			     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
-			     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
-			     ksym_get_addr("bpf_testmod_test_read"));
+	if (retprobe) {
+		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+				     ksym_get_addr("bpf_testmod_test_read"));
+	} else {
+		check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
+				     ksym_get_addr("bpf_testmod_stacktrace_test"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+				     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+				     ksym_get_addr("bpf_testmod_test_read"));
+	}

cleanup:
stacktrace_ips__destroy(skel);
@@ -128,6 +137,99 @@ static void test_stacktrace_ips_raw_tp(void)
stacktrace_ips__destroy(skel);
}

static void test_stacktrace_ips_kprobe(bool retprobe)
{
LIBBPF_OPTS(bpf_kprobe_opts, opts,
.retprobe = retprobe
);
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct stacktrace_ips *skel;

skel = stacktrace_ips__open_and_load();
if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
return;

if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
test__skip();
goto cleanup;
}

skel->links.kprobe_test = bpf_program__attach_kprobe_opts(
skel->progs.kprobe_test,
"bpf_testmod_stacktrace_test", &opts);
if (!ASSERT_OK_PTR(skel->links.kprobe_test, "bpf_program__attach_kprobe_opts"))
goto cleanup;

trigger_module_test_read(1);

load_kallsyms();

if (retprobe) {
check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
ksym_get_addr("bpf_testmod_stacktrace_test_3"),
ksym_get_addr("bpf_testmod_stacktrace_test_2"),
ksym_get_addr("bpf_testmod_stacktrace_test_1"),
ksym_get_addr("bpf_testmod_test_read"));
} else {
check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
ksym_get_addr("bpf_testmod_stacktrace_test"),
ksym_get_addr("bpf_testmod_stacktrace_test_3"),
ksym_get_addr("bpf_testmod_stacktrace_test_2"),
ksym_get_addr("bpf_testmod_stacktrace_test_1"),
ksym_get_addr("bpf_testmod_test_read"));
}

cleanup:
stacktrace_ips__destroy(skel);
}

static void test_stacktrace_ips_trampoline(bool retprobe)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct stacktrace_ips *skel;

skel = stacktrace_ips__open_and_load();
if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
return;

if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
test__skip();
goto cleanup;
}

if (retprobe) {
skel->links.fexit_test = bpf_program__attach_trace(skel->progs.fexit_test);
if (!ASSERT_OK_PTR(skel->links.fexit_test, "bpf_program__attach_trace"))
goto cleanup;
} else {
skel->links.fentry_test = bpf_program__attach_trace(skel->progs.fentry_test);
if (!ASSERT_OK_PTR(skel->links.fentry_test, "bpf_program__attach_trace"))
goto cleanup;
}

trigger_module_test_read(1);

load_kallsyms();

if (retprobe) {
check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
ksym_get_addr("bpf_testmod_stacktrace_test_3"),
ksym_get_addr("bpf_testmod_stacktrace_test_2"),
ksym_get_addr("bpf_testmod_stacktrace_test_1"),
ksym_get_addr("bpf_testmod_test_read"));
} else {
check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 5,
ksym_get_addr("bpf_testmod_stacktrace_test"),
ksym_get_addr("bpf_testmod_stacktrace_test_3"),
ksym_get_addr("bpf_testmod_stacktrace_test_2"),
ksym_get_addr("bpf_testmod_stacktrace_test_1"),
ksym_get_addr("bpf_testmod_test_read"));
}

cleanup:
stacktrace_ips__destroy(skel);
}

static void __test_stacktrace_ips(void)
{
if (test__start_subtest("kprobe_multi"))
@@ -136,6 +238,14 @@ static void __test_stacktrace_ips(void)
test_stacktrace_ips_kprobe_multi(true);
if (test__start_subtest("raw_tp"))
test_stacktrace_ips_raw_tp();
if (test__start_subtest("kprobe"))
test_stacktrace_ips_kprobe(false);
if (test__start_subtest("kretprobe"))
test_stacktrace_ips_kprobe(true);
if (test__start_subtest("fentry"))
test_stacktrace_ips_trampoline(false);
if (test__start_subtest("fexit"))
test_stacktrace_ips_trampoline(true);
}
#else
static void __test_stacktrace_ips(void)
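check_stacktrace_ips() is defined earlier in this file and is not part of the diff; conceptually it looks up the id returned by bpf_get_stackid() in the BPF_MAP_TYPE_STACK_TRACE map and compares the recorded instruction pointers against the expected ksym addresses, innermost frame first. A hedged sketch of that lookup is below (simplified; the real helper may normalize return addresses before comparing).

```c
#include <stdbool.h>
#include <bpf/bpf.h>

#define MAX_DEPTH 64	/* illustrative; the map's value size governs the real bound */

/* Sketch: fetch the ips recorded under `stack_key` and check that the first
 * `num` entries match the expected addresses, newest frame first. */
static bool stack_matches(int stackmap_fd, __u32 stack_key,
			  const __u64 *expected, int num)
{
	__u64 ips[MAX_DEPTH] = {0};
	int i;

	if (bpf_map_lookup_elem(stackmap_fd, &stack_key, ips))
		return false;

	for (i = 0; i < num; i++) {
		if (ips[i] != expected[i])
			return false;
	}
	return true;
}
```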
27 changes: 27 additions & 0 deletions tools/testing/selftests/bpf/progs/stacktrace_ips.c
@@ -31,6 +31,13 @@ int unused(void)

__u32 stack_key;

SEC("kprobe")
int kprobe_test(struct pt_regs *ctx)
{
stack_key = bpf_get_stackid(ctx, &stackmap, 0);
return 0;
}

SEC("kprobe.multi")
int kprobe_multi_test(struct pt_regs *ctx)
{
@@ -46,4 +53,24 @@ int rawtp_test(void *ctx)
return 0;
}

SEC("fentry/bpf_testmod_stacktrace_test")
int fentry_test(struct pt_regs *ctx)
{
/*
* Skip 2 bpf_program/trampoline stack entries:
* - bpf_prog_bd1f7a949f55fb03_fentry_test
* - bpf_trampoline_182536277701
*/
stack_key = bpf_get_stackid(ctx, &stackmap, 2);
return 0;
}

SEC("fexit/bpf_testmod_stacktrace_test")
int fexit_test(struct pt_regs *ctx)
{
/* Skip 2 bpf_program/trampoline stack entries, check fentry_test. */
stack_key = bpf_get_stackid(ctx, &stackmap, 2);
return 0;
}

char _license[] SEC("license") = "GPL";
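The third bpf_get_stackid() argument is a flags word whose low 8 bits (BPF_F_SKIP_FIELD_MASK) give the number of innermost frames to drop, so passing 2 in the fentry/fexit programs skips the BPF program and trampoline frames and the stored trace starts at the traced kernel function. A minimal sketch with the skip count spelled out, reusing the same attach target; the rest of the program is illustrative.

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch: bpf_get_stackid() with an explicit skip count. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 16384);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 127 * sizeof(__u64));	/* PERF_MAX_STACK_DEPTH entries */
} stackmap SEC(".maps");

__u32 stack_key;

SEC("fentry/bpf_testmod_stacktrace_test")
int sketch_fentry(void *ctx)
{
	/* Low 8 bits of flags = frames to skip (BPF_F_SKIP_FIELD_MASK);
	 * 2 drops the bpf_prog_* and bpf_trampoline_* entries. */
	__u64 flags = 2;

	stack_key = bpf_get_stackid(ctx, &stackmap, flags);
	return 0;
}

char _license[] SEC("license") = "GPL";
```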
46 changes: 37 additions & 9 deletions tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -25,6 +25,34 @@ static __always_inline void inc_counter(void)
__sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
}

volatile const int stacktrace;

typedef __u64 stack_trace_t[128];

struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, stack_trace_t);
} stack_heap SEC(".maps");

static __always_inline void do_stacktrace(void *ctx)
{
if (!stacktrace)
return;

__u64 *ptr = bpf_map_lookup_elem(&stack_heap, &(__u32){0});

if (ptr)
bpf_get_stack(ctx, ptr, sizeof(stack_trace_t), 0);
}

static __always_inline void handle(void *ctx)
{
inc_counter();
do_stacktrace(ctx);
}

SEC("?uprobe")
int bench_trigger_uprobe(void *ctx)
{
@@ -81,21 +109,21 @@ int trigger_driver_kfunc(void *ctx)
SEC("?kprobe/bpf_get_numa_node_id")
int bench_trigger_kprobe(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}

SEC("?kretprobe/bpf_get_numa_node_id")
int bench_trigger_kretprobe(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}

SEC("?kprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kprobe_multi(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}

@@ -108,7 +136,7 @@ int bench_kprobe_multi_empty(void *ctx)
SEC("?kretprobe.multi/bpf_get_numa_node_id")
int bench_trigger_kretprobe_multi(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}

@@ -121,34 +149,34 @@ int bench_kretprobe_multi_empty(void *ctx)
SEC("?fentry/bpf_get_numa_node_id")
int bench_trigger_fentry(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}

SEC("?fexit/bpf_get_numa_node_id")
int bench_trigger_fexit(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}

SEC("?fmod_ret/bpf_modify_return_test_tp")
int bench_trigger_fmodret(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return -22;
}

SEC("?tp/bpf_test_run/bpf_trigger_tp")
int bench_trigger_tp(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}

SEC("?raw_tp/bpf_trigger_tp")
int bench_trigger_rawtp(void *ctx)
{
-	inc_counter();
+	handle(ctx);
return 0;
}
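do_stacktrace() stages the trace in a single-entry BPF_MAP_TYPE_PERCPU_ARRAY rather than in a local variable because stack_trace_t is 128 * 8 = 1024 bytes, which does not fit in the 512-byte BPF program stack; the map entry acts as per-CPU scratch memory, and &(__u32){0} is just a compound literal for its only key. A condensed, self-contained sketch of the pattern:

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch of the per-CPU scratch buffer pattern used by do_stacktrace(). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

typedef __u64 big_buf_t[128];	/* 1024 bytes: too large for the 512-byte BPF stack */

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, big_buf_t);
} scratch SEC(".maps");

SEC("kprobe/bpf_get_numa_node_id")
int use_scratch(void *ctx)
{
	__u32 zero = 0;
	__u64 *buf = bpf_map_lookup_elem(&scratch, &zero);

	if (!buf)	/* verifier requires the NULL check */
		return 0;

	/* The buffer lives in the map's per-CPU storage, not on the stack. */
	bpf_get_stack(ctx, buf, sizeof(big_buf_t), 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
```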