Commit 0e92a91

image-dragon authored and Kernel Patches Daemon committed
selftests/bpf: test map deadlock caused by NMI
In this test, map updates and deletions happen in both NMI context and user context, in order to detect possible deadlocks. For now, only the LRU map is exercised; more map types can be added in the future.

Signed-off-by: Menglong Dong <[email protected]>
1 parent 3ab326d commit 0e92a91

File tree

2 files changed: +188 -0 lines changed

Lines changed: 136 additions & 0 deletions
@@ -0,0 +1,136 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include "map_deadlock.skel.h"

static int perf_open_all_cpus(struct perf_event_attr *attr, int fds[], int max_cpus)
{
	int n = 0;

	for (int cpu = 0; cpu < max_cpus; cpu++) {
		int fd = syscall(__NR_perf_event_open, attr, -1 /* pid: all */, cpu,
				 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
		if (fd < 0)
			continue;
		fds[cpu] = fd;
		n++;
	}
	return n;
}

struct thread_arg {
	int map_fd;
	bool *stop;
};

static void *user_update_thread(void *argp)
{
	struct thread_arg *arg = argp;
	u32 key = 0;
	u64 val = 1;

	while (!*arg->stop) {
		key++;
		val++;
		bpf_map_update_elem(arg->map_fd, &key, &val, BPF_ANY);
		if ((key & 0x7) == 0)
			bpf_map_delete_elem(arg->map_fd, &key);
	}
	return NULL;
}

static void test_map(const char *map_name, int map_index)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.size = sizeof(struct perf_event_attr),
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_period = 1000000,
		.freq = 0,
		.disabled = 0,
		.wakeup_events = 1,
	};
	int map_fd, nfd = 0, max_cpus, err;
	struct bpf_link **links = NULL;
	struct map_deadlock *skel;
	struct bpf_program *prog;
	struct thread_arg targ;
	bool stop = false;
	int *fds = NULL;
	pthread_t thr;

	skel = map_deadlock__open();
	if (!ASSERT_OK_PTR(skel, "map_deadlock__open"))
		return;
	skel->rodata->map_index = map_index;
	err = map_deadlock__load(skel);
	if (!ASSERT_OK(err, "map_deadlock__load"))
		goto out;

	prog = skel->progs.on_perf;
	map_fd = bpf_object__find_map_fd_by_name(skel->obj, map_name);
	if (!ASSERT_GE(map_fd, 0, map_name))
		goto out;

	max_cpus = libbpf_num_possible_cpus();
	if (!ASSERT_GT(max_cpus, 0, "num cpus"))
		goto out;

	links = calloc(max_cpus, sizeof(*links));
	if (!ASSERT_OK_PTR(links, "alloc links"))
		goto out;
	fds = calloc(max_cpus, sizeof(*fds));
	if (!ASSERT_OK_PTR(fds, "alloc fds"))
		goto out;
	for (int i = 0; i < max_cpus; i++)
		fds[i] = -1;

	nfd = perf_open_all_cpus(&attr, fds, max_cpus);
	if (!ASSERT_GT(nfd, 0, "perf fds"))
		goto out;

	for (int cpu = 0; cpu < max_cpus; cpu++) {
		if (fds[cpu] < 0)
			continue;
		links[cpu] = bpf_program__attach_perf_event(prog, fds[cpu]);
		if (!ASSERT_OK_PTR(links[cpu], "attach perf"))
			goto out;
	}

	targ.map_fd = map_fd;
	targ.stop = &stop;
	err = pthread_create(&thr, NULL, user_update_thread, &targ);
	if (!ASSERT_OK(err, "create thr"))
		goto out;

	/* 1 second should be enough to trigger the deadlock */
	sleep(1);
	stop = true;
	(void)pthread_join(thr, NULL);
	/* TODO: read dmesg to check the deadlock? */
out:
	if (links) {
		for (int cpu = 0; cpu < max_cpus; cpu++) {
			if (links[cpu])
				bpf_link__destroy(links[cpu]);
		}
	}
	if (fds) {
		for (int cpu = 0; cpu < max_cpus; cpu++) {
			if (fds[cpu] >= 0)
				close(fds[cpu]);
		}
	}
	free(links);
	free(fds);
	map_deadlock__destroy(skel);
}

void test_map_deadlock(void)
{
	if (test__start_subtest("lru"))
		test_map("lru_map", 0);
}
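
The TODO in the teardown path above leaves deadlock detection to reading dmesg by hand. A minimal sketch of how that check could be automated by scanning /dev/kmsg after the stress run, assuming a lockdep-enabled kernel; the helper name and the match strings are assumptions, not part of this patch:

/* Sketch only: scan the kernel log for a lockdep/deadlock splat.
 * Needs <fcntl.h>, <string.h>, <unistd.h> if not already pulled in.
 * A real test would open the fd before starting the stress run so that
 * only records emitted during the run are scanned.
 */
static bool kmsg_has_splat(void)
{
	char buf[8192];
	bool hit = false;
	ssize_t n;
	int fd;

	fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return false;

	/* /dev/kmsg returns one record per read() and EAGAIN once drained */
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		if (strstr(buf, "deadlock") || strstr(buf, "lockdep")) {
			hit = true;
			break;
		}
	}
	close(fd);
	return hit;
}

test_map() could then call this after pthread_join() and fail with something like ASSERT_FALSE(kmsg_has_splat(), "deadlock splat"), assuming the usual test_progs assertion macros.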
Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

struct lru_map {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 1024);
	__type(key, u32);
	__type(value, u64);
} lru_map SEC(".maps");

struct map_list {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	__array(values, struct lru_map);
} map_list SEC(".maps") = {
	.values = { [0] = &lru_map },
};

const volatile int map_index;

static __always_inline void do_update_delete(void *map)
{
	u64 ts = bpf_ktime_get_ns();
	u32 key = (u32)(ts >> 12);
	u64 val = ts;

	if ((ts & 1) == 0)
		bpf_map_update_elem(map, &key, &val, BPF_ANY);
	else
		bpf_map_delete_elem(map, &key);
}

SEC("perf_event")
int on_perf(struct bpf_perf_event_data *ctx)
{
	int key = map_index;
	void *target_map;

	target_map = bpf_map_lookup_elem(&map_list, &key);
	if (!target_map)
		return 0;

	for (int i = 0; i < 4; i++)
		do_update_delete(target_map);
	return 0;
}
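
The commit message leaves room for more map types. One hedged sketch of how a plain hash map might be added on the BPF side: since all inner maps of an ARRAY_OF_MAPS must share the template's map type, it cannot simply be appended to map_list, so the easiest route is a dedicated perf_event program. The map and program names below are hypothetical additions, not part of this patch:

/* Hypothetical future addition: a plain hash map exercised by its own
 * perf_event program, because the map_list template above is tied to
 * BPF_MAP_TYPE_LRU_HASH.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, u32);
	__type(value, u64);
} hash_map SEC(".maps");

SEC("perf_event")
int on_perf_hash(struct bpf_perf_event_data *ctx)
{
	/* same NMI-context update/delete churn, against the hash map */
	for (int i = 0; i < 4; i++)
		do_update_delete(&hash_map);
	return 0;
}

The user-space test would then attach skel->progs.on_perf_hash and look up "hash_map" in a new subtest, for example by letting test_map() take the program to attach as a parameter.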
