Skip to content

Commit 9904a47

Browse files
committed
selftests/bpf: BPF task work scheduling tests
Introducing selftests that check the BPF task work scheduling mechanism. Validate that the verifier does not accept incorrect calls to the bpf_task_work_schedule kfunc. Signed-off-by: Mykyta Yatsenko <[email protected]>
1 parent 95ceb3b commit 9904a47

File tree

3 files changed

+355
-0
lines changed

3 files changed

+355
-0
lines changed
Lines changed: 149 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,149 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
3+
#include <test_progs.h>
4+
#include <string.h>
5+
#include <stdio.h>
6+
#include "task_work.skel.h"
7+
#include "task_work_fail.skel.h"
8+
#include <linux/bpf.h>
9+
#include <linux/perf_event.h>
10+
#include <sys/syscall.h>
11+
#include <time.h>
12+
13+
/*
 * Open a sampling perf event of the given @type/@config, counting only
 * the process @pid (any CPU). Returns the event fd or -1 with errno set.
 */
static int perf_event_open(__u32 type, __u64 config, int pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	attr.size = sizeof(attr);
	attr.sample_period = 100000;

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}
24+
25+
/* Map value layout shared with the BPF side: a payload buffer plus the
 * bpf_task_work context the kernel uses to track the scheduled callback.
 * Must stay in sync with struct elem in progs/task_work.c.
 */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};
29+
30+
static int verify_map(struct bpf_map *map, const char *expected_data)
31+
{
32+
int err;
33+
struct elem value;
34+
int processed_values = 0;
35+
int k, sz;
36+
37+
sz = bpf_map__max_entries(map);
38+
for (k = 0; k < sz; ++k) {
39+
err = bpf_map__lookup_elem(map, &k, sizeof(int), &value, sizeof(struct elem), 0);
40+
if (err)
41+
continue;
42+
if (!ASSERT_EQ(strcmp(expected_data, value.data), 0, "map data")) {
43+
fprintf(stderr, "expected '%s', found '%s' in %s map", expected_data,
44+
value.data, bpf_map__name(map));
45+
return 2;
46+
}
47+
processed_values++;
48+
}
49+
50+
return processed_values == 0;
51+
}
52+
53+
static void task_work_run(const char *prog_name, const char *map_name)
54+
{
55+
struct task_work *skel;
56+
struct bpf_program *prog;
57+
struct bpf_map *map;
58+
struct bpf_link *link;
59+
int err, pe_fd = 0, pid, status, pipefd[2];
60+
char user_string[] = "hello world";
61+
62+
if (!ASSERT_NEQ(pipe(pipefd), -1, "pipe"))
63+
return;
64+
65+
pid = fork();
66+
if (pid == 0) {
67+
__u64 num = 1;
68+
int i;
69+
char buf;
70+
71+
close(pipefd[1]);
72+
read(pipefd[0], &buf, sizeof(buf));
73+
close(pipefd[0]);
74+
75+
for (i = 0; i < 10000; ++i)
76+
num *= time(0) % 7;
77+
(void)num;
78+
exit(0);
79+
}
80+
skel = task_work__open();
81+
if (!ASSERT_OK_PTR(skel, "task_work__open"))
82+
return;
83+
84+
bpf_object__for_each_program(prog, skel->obj) {
85+
bpf_program__set_autoload(prog, false);
86+
}
87+
88+
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
89+
if (!ASSERT_OK_PTR(prog, "prog_name"))
90+
goto cleanup;
91+
bpf_program__set_autoload(prog, true);
92+
bpf_program__set_type(prog, BPF_PROG_TYPE_PERF_EVENT);
93+
skel->bss->user_ptr = (char *)user_string;
94+
95+
err = task_work__load(skel);
96+
if (!ASSERT_OK(err, "skel_load"))
97+
goto cleanup;
98+
99+
pe_fd = perf_event_open(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, pid);
100+
if (pe_fd == -1 && (errno == ENOENT || errno == EOPNOTSUPP)) {
101+
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
102+
test__skip();
103+
goto cleanup;
104+
}
105+
if (!ASSERT_NEQ(pe_fd, -1, "pe_fd")) {
106+
fprintf(stderr, "perf_event_open errno: %d, pid: %d\n", errno, pid);
107+
goto cleanup;
108+
}
109+
110+
link = bpf_program__attach_perf_event(prog, pe_fd);
111+
if (!ASSERT_OK_PTR(link, "attach_perf_event"))
112+
goto cleanup;
113+
114+
close(pipefd[0]);
115+
write(pipefd[1], user_string, 1);
116+
close(pipefd[1]);
117+
/* Wait to collect some samples */
118+
waitpid(pid, &status, 0);
119+
pid = 0;
120+
map = bpf_object__find_map_by_name(skel->obj, map_name);
121+
if (!ASSERT_OK_PTR(map, "find map_name"))
122+
goto cleanup;
123+
if (!ASSERT_OK(verify_map(map, user_string), "verify map"))
124+
goto cleanup;
125+
cleanup:
126+
if (pe_fd >= 0)
127+
close(pe_fd);
128+
task_work__destroy(skel);
129+
if (pid) {
130+
close(pipefd[0]);
131+
write(pipefd[1], user_string, 1);
132+
close(pipefd[1]);
133+
waitpid(pid, &status, 0);
134+
}
135+
}
136+
137+
void test_task_work(void)
138+
{
139+
if (test__start_subtest("test_task_work_hash_map"))
140+
task_work_run("oncpu_hash_map", "hmap");
141+
142+
if (test__start_subtest("test_task_work_array_map"))
143+
task_work_run("oncpu_array_map", "arrmap");
144+
145+
if (test__start_subtest("test_task_work_lru_map"))
146+
task_work_run("oncpu_lru_map", "lrumap");
147+
148+
RUN_TESTS(task_work_fail);
149+
}
Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
3+
4+
#include <vmlinux.h>
5+
#include <string.h>
6+
#include <stdbool.h>
7+
#include <bpf/bpf_helpers.h>
8+
#include <bpf/bpf_tracing.h>
9+
#include "bpf_misc.h"
10+
#include "errno.h"
11+
12+
char _license[] SEC("license") = "GPL";

/* Userspace string to copy from; written by the test before loading. */
const void *user_ptr = NULL;

/* Map value: destination buffer plus the bpf_task_work scheduling context. */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};

/* Same value layout in three map types so each can be exercised with the
 * bpf_task_work_schedule_*() kfuncs.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} arrmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} lrumap SEC(".maps");
42+
43+
/* Task-work callback: copies the userspace string at user_ptr into the map
 * value it was scheduled on.
 */
static void process_work(struct bpf_map *map, void *key, void *value)
{
	struct elem *work = value;

	bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
}

/* All test maps are single-entry; everything uses key 0. */
int key = 0;
51+
52+
SEC("perf_event")
int oncpu_hash_map(struct pt_regs *args)
{
	struct elem empty_work = { .data = { 0 } };
	struct elem *work;
	struct task_struct *task;
	int err;

	task = bpf_get_current_task_btf();
	/* Insert-once: if the entry already exists, a previous sample already
	 * scheduled the work for this key.
	 */
	err = bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
	if (err)
		return 0;
	work = bpf_map_lookup_elem(&hmap, &key);
	if (!work)
		return 0;

	/* Defer process_work() via task work (resume-flavored kfunc). */
	bpf_task_work_schedule_resume(task, &work->tw, (struct bpf_map *)&hmap, process_work, NULL);
	return 0;
}
71+
72+
SEC("perf_event")
int oncpu_array_map(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	/* Array slots always exist, so no insert step is needed here. */
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	/* Signal-flavored scheduling variant, same callback. */
	bpf_task_work_schedule_signal(task, &work->tw, (struct bpf_map *)&arrmap, process_work,
				      NULL);
	return 0;
}
86+
87+
SEC("perf_event")
int oncpu_lru_map(struct pt_regs *args)
{
	struct elem empty_work = { .data = { 0 } };
	struct elem *work;
	struct task_struct *task;
	int err;

	task = bpf_get_current_task_btf();
	/* Entry already present: an earlier sample scheduled the work. */
	work = bpf_map_lookup_elem(&lrumap, &key);
	if (work)
		return 0;
	err = bpf_map_update_elem(&lrumap, &key, &empty_work, BPF_NOEXIST);
	if (err)
		return 0;
	/* Re-lookup; bail if the entry vanished (presumably LRU eviction) or
	 * its payload is already non-empty.
	 */
	work = bpf_map_lookup_elem(&lrumap, &key);
	if (!work || work->data[0])
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, (struct bpf_map *)&lrumap, process_work,
				      NULL);
	return 0;
}
Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
3+
4+
#include <vmlinux.h>
5+
#include <string.h>
6+
#include <stdbool.h>
7+
#include <bpf/bpf_helpers.h>
8+
#include <bpf/bpf_tracing.h>
9+
#include "bpf_misc.h"
10+
11+
char _license[] SEC("license") = "GPL";

/* Userspace string source for process_work(); set by the loader. */
const void *user_ptr = NULL;

/* Map value: payload buffer plus the bpf_task_work scheduling context. */
struct elem {
	char data[128];
	struct bpf_task_work tw;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} arrmap SEC(".maps");
34+
35+
/* Task-work callback used by every (intentionally invalid) scheduling call
 * below; copies the userspace string into the map value.
 */
static void process_work(struct bpf_map *map, void *key, void *value)
{
	struct elem *work = value;

	bpf_copy_from_user_str(work->data, sizeof(work->data), (const void *)user_ptr, 0);
}

/* Single-entry maps; everything uses key 0. */
int key = 0;
43+
44+
/* Negative test: the task_work value belongs to arrmap but hmap is passed
 * as the map argument — the verifier must reject the mismatch.
 */
SEC("perf_event")
__failure __msg("doesn't match map pointer in R3")
int mismatch_map(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, (struct bpf_map *)&hmap,
				      process_work, NULL);
	return 0;
}
59+
60+
/* Negative test: bpf_task_work on the BPF stack instead of inside a map
 * value — the verifier must reject it.
 */
SEC("perf_event")
__failure __msg("arg#1 doesn't point to a map value")
int no_map_task_work(struct pt_regs *args)
{
	struct task_struct *task;
	struct bpf_task_work tw;

	task = bpf_get_current_task_btf();
	bpf_task_work_schedule_resume(task, &tw, (struct bpf_map *)&hmap,
				      process_work, NULL);
	return 0;
}
72+
73+
/* Negative test: NULL bpf_task_work pointer — the verifier must reject a
 * possibly-NULL trusted argument.
 */
SEC("perf_event")
__failure __msg("Possibly NULL pointer passed to trusted arg1")
int task_work_null(struct pt_regs *args)
{
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	bpf_task_work_schedule_resume(task, NULL, (struct bpf_map *)&hmap,
				      process_work, NULL);
	return 0;
}
84+
85+
/* Negative test: NULL map pointer — the verifier must reject a
 * possibly-NULL trusted argument.
 */
SEC("perf_event")
__failure __msg("Possibly NULL pointer passed to trusted arg2")
int map_null(struct pt_regs *args)
{
	struct elem *work;
	struct task_struct *task;

	task = bpf_get_current_task_btf();
	work = bpf_map_lookup_elem(&arrmap, &key);
	if (!work)
		return 0;
	bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL);
	return 0;
}

0 commit comments

Comments
 (0)