Skip to content

Commit c6ae18e

Browse files
mykyta5 authored and Alexei Starovoitov committed
selftests/bpf: add bpf task work stress tests
Add stress tests for BPF task-work scheduling kfuncs. The tests spawn multiple threads that concurrently schedule task_work callbacks against the same and different map values to exercise the kfuncs under high contention. Verify callbacks are reliably enqueued and executed with no drops. Signed-off-by: Mykyta Yatsenko <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 39fd74d commit c6ae18e

File tree

2 files changed

+203
-0
lines changed

2 files changed

+203
-0
lines changed
Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
3+
#include <test_progs.h>
4+
#include <string.h>
5+
#include <stdio.h>
6+
#include "task_work_stress.skel.h"
7+
#include <linux/bpf.h>
8+
#include <linux/perf_event.h>
9+
#include <sys/syscall.h>
10+
#include <time.h>
11+
#include <stdlib.h>
12+
#include <stdatomic.h>
13+
14+
/* Shared state handed to each runner() thread. */
struct test_data {
	int prog_fd;     /* fd of the BPF program to test-run in a loop */
	atomic_int exit; /* set to 1 to ask the thread to stop */
};
18+
19+
void *runner(void *test_data)
20+
{
21+
struct test_data *td = test_data;
22+
int err = 0;
23+
LIBBPF_OPTS(bpf_test_run_opts, opts);
24+
25+
while (!err && !atomic_load(&td->exit))
26+
err = bpf_prog_test_run_opts(td->prog_fd, &opts);
27+
28+
return NULL;
29+
}
30+
31+
/* Parse a base-10, non-negative int from @s.
 * Returns @def when @s is NULL, empty, malformed, negative, or does
 * not fit in an int (the original code assigned strtol()'s long result
 * straight to int, silently truncating values above INT_MAX).
 */
static int parse_nonneg_int(const char *s, int def)
{
	char *end;
	long val;

	if (!s || !*s)
		return def;
	errno = 0;
	val = strtol(s, &end, 10);
	if (errno || *end || val < 0 || val > INT_MAX)
		return def;
	return (int)val;
}

/* Read environment variable @str as a non-negative int, falling back
 * to @def on any parse failure or if the variable is unset/empty.
 */
static int get_env_int(const char *str, int def)
{
	return parse_nonneg_int(getenv(str), def);
}
45+
46+
/*
 * Core stress test: hammer the task-work scheduling kfunc from many
 * concurrent threads, optionally racing a thread that deletes map
 * elements (which cancels pending callbacks), then validate the
 * counters exported by the BPF program.
 */
static void task_work_run(bool enable_delete)
{
	struct task_work_stress *skel;
	struct bpf_program *scheduler, *deleter;
	int nthreads = 16;
	/* Runtime is tunable via env so CI can stretch the stress window */
	int test_time_s = get_env_int("BPF_TASK_WORK_TEST_TIME", 1);
	pthread_t tid[nthreads], tid_del;
	bool started[nthreads], started_del = false;
	/* td_del starts with exit=1 so the deleter thread is a no-op
	 * unless enable_delete flips it to 0 below.
	 */
	struct test_data td_sched = { .exit = 0 }, td_del = { .exit = 1 };
	int i, err;

	skel = task_work_stress__open();
	if (!ASSERT_OK_PTR(skel, "task_work__open"))
		return;

	/* Both progs are autoload-disabled in the object; enable them here */
	scheduler = bpf_object__find_program_by_name(skel->obj, "schedule_task_work");
	bpf_program__set_autoload(scheduler, true);

	deleter = bpf_object__find_program_by_name(skel->obj, "delete_task_work");
	bpf_program__set_autoload(deleter, true);

	err = task_work_stress__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	for (i = 0; i < nthreads; ++i)
		started[i] = false;

	/* Spawn the scheduler threads; all share one td_sched */
	td_sched.prog_fd = bpf_program__fd(scheduler);
	for (i = 0; i < nthreads; ++i) {
		if (pthread_create(&tid[i], NULL, runner, &td_sched) != 0) {
			fprintf(stderr, "could not start thread");
			goto cancel;
		}
		started[i] = true;
	}

	if (enable_delete)
		atomic_store(&td_del.exit, 0);

	/* Deleter thread is always created; with exit=1 it returns at once */
	td_del.prog_fd = bpf_program__fd(deleter);
	if (pthread_create(&tid_del, NULL, runner, &td_del) != 0) {
		fprintf(stderr, "could not start thread");
		goto cancel;
	}
	started_del = true;

	/* Run stress test for some time */
	sleep(test_time_s);

cancel:
	/* Signal every thread to stop, then reap only those that started */
	atomic_store(&td_sched.exit, 1);
	atomic_store(&td_del.exit, 1);
	for (i = 0; i < nthreads; ++i) {
		if (started[i])
			pthread_join(tid[i], NULL);
	}

	if (started_del)
		pthread_join(tid_del, NULL);

	ASSERT_GT(skel->bss->callback_scheduled, 0, "work scheduled");
	/* Some scheduling attempts should have failed due to contention */
	ASSERT_GT(skel->bss->schedule_error, 0, "schedule error");

	if (enable_delete) {
		/* If delete thread is enabled, it has cancelled some callbacks */
		ASSERT_GT(skel->bss->delete_success, 0, "delete success");
		ASSERT_LT(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
	} else {
		/* Without delete thread number of scheduled callbacks is the same as fired */
		ASSERT_EQ(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
	}

cleanup:
	task_work_stress__destroy(skel);
}
123+
124+
/* Test entry point: run the stress scenario once without and once with
 * the concurrent delete thread enabled.
 */
void test_task_work_stress(void)
{
	static const struct {
		const char *name;
		bool enable_delete;
	} subtests[] = {
		{ "no_delete", false },
		{ "with_delete", true },
	};
	size_t i;

	for (i = 0; i < sizeof(subtests) / sizeof(subtests[0]); i++) {
		if (test__start_subtest(subtests[i].name))
			task_work_run(subtests[i].enable_delete);
	}
}
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
3+
4+
#include <vmlinux.h>
5+
#include <string.h>
6+
#include <stdbool.h>
7+
#include <bpf/bpf_helpers.h>
8+
#include <bpf/bpf_tracing.h>
9+
#include "bpf_misc.h"
10+
11+
/* Number of hash-map slots the concurrent threads contend over. */
#define ENTRIES 128

char _license[] SEC("license") = "GPL";

/* Counters read from userspace (skel->bss) to validate the run. */
__u64 callback_scheduled = 0; /* successful bpf_task_work_schedule_signal() calls */
__u64 callback_success = 0;   /* callbacks that actually fired */
__u64 schedule_error = 0;     /* scheduling attempts that failed (e.g. contention) */
__u64 delete_success = 0;     /* map elements removed by delete_task_work */

/* Map value: kernel-managed task-work state plus a counter field. */
struct elem {
	__u32 count;
	struct bpf_task_work tw;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, ENTRIES);
	__type(key, int);
	__type(value, struct elem);
} hmap SEC(".maps");
32+
33+
/* Task-work callback: runs once for each scheduled work item that was
 * not cancelled; just counts the invocation.
 */
static int process_work(struct bpf_map *map, void *key, void *value)
{
	__sync_fetch_and_add(&callback_success, 1);
	return 0;
}
38+
39+
/* Scheduler program, test-run concurrently from many userspace threads.
 * Picks a pseudo-random map slot, creates it if missing, then tries to
 * schedule a task_work callback against it, counting success/failure.
 */
SEC("syscall")
int schedule_task_work(void *ctx)
{
	struct elem empty_work = {.count = 0};
	struct elem *work;
	int key = 0, err;

	/* Spread contention across ENTRIES slots */
	key = bpf_ktime_get_ns() % ENTRIES;
	work = bpf_map_lookup_elem(&hmap, &key);
	if (!work) {
		/* Racy insert is fine: BPF_NOEXIST may fail if another
		 * thread won the race; the re-lookup covers both cases.
		 */
		bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
		work = bpf_map_lookup_elem(&hmap, &key);
		if (!work)
			/* Element may have been concurrently deleted */
			return 0;
	}
	err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
					    process_work, NULL);
	if (err)
		/* Expected under contention: work already pending/busy */
		__sync_fetch_and_add(&schedule_error, 1);
	else
		__sync_fetch_and_add(&callback_scheduled, 1);
	return 0;
}
62+
63+
/* Deleter program: removes a random map element, which also cancels any
 * task_work pending on it, exercising the schedule-vs-delete race.
 */
SEC("syscall")
int delete_task_work(void *ctx)
{
	int key = 0, err;

	key = bpf_get_prandom_u32() % ENTRIES;
	err = bpf_map_delete_elem(&hmap, &key);
	if (!err)
		__sync_fetch_and_add(&delete_success, 1);
	return 0;
}

0 commit comments

Comments
 (0)