Commit ac4d838

aspsk authored and Alexei Starovoitov committed
selftests/bpf: add C-level selftests for indirect jumps
Add C-level selftests for indirect jumps to validate LLVM and libbpf
functionality. The tests are intentionally disabled: they are meant to be
run locally by developers and will not make the CI red.

Signed-off-by: Anton Protopopov <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent ccbdb48 commit ac4d838
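
Note: the BPF-side progs/ file added by this commit is not shown in this section. For orientation only, here is a hypothetical, minimal sketch (not the commit's actual code) of the kind of program the tests below exercise: a dense switch over a test-run input that a gotox-capable clang may lower into a jump table plus an indirect jump, producing the outputs expected by check_one_switch() (inputs 0..5 map to 2, 3, 4, 5, 7, 19; anything else to 19).

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch only, not the file added by this commit. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u64 ret_user;

SEC("syscall")
int one_switch_sketch(__u64 *ctx)
{
	/* A dense switch like this is what LLVM can compile into a jump
	 * table plus an indirect jump (gotox) on BPF targets that
	 * support it.
	 */
	switch (*ctx) {
	case 0: ret_user = 2; break;
	case 1: ret_user = 3; break;
	case 2: ret_user = 4; break;
	case 3: ret_user = 5; break;
	case 4: ret_user = 7; break;
	default: ret_user = 19; break;
	}
	return 0;
}

char _license[] SEC("license") = "GPL";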

File tree

3 files changed: +743 -1 lines changed

tools/testing/selftests/bpf/Makefile

Lines changed: 3 additions & 1 deletion
@@ -453,7 +453,9 @@ BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
 	     -I$(abspath $(OUTPUT)/../usr/include) \
 	     -std=gnu11 \
 	     -fno-strict-aliasing \
-	     -Wno-compare-distinct-pointer-types
+	     -Wno-compare-distinct-pointer-types \
+	     -Wno-initializer-overrides \
+	     #
 	     # TODO: enable me -Wsign-compare

 CLANG_CFLAGS = $(CLANG_SYS_INCLUDES)
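
The new -Wno-initializer-overrides entry silences clang's warning about designated initializers that override earlier ones. A plausible trigger (an assumption here, since the progs/ file is not part of this section) is a lookup table filled with a default via a GNU range designator and then overridden for specific slots, as in this stand-alone illustration:

/* Stand-alone example of the pattern that trips -Winitializer-overrides
 * in clang: a range designator initializes every element, and later
 * designators override some of them.
 */
#include <stdio.h>

static const int lookup[8] = {
	[0 ... 7] = -1,		/* default for every slot */
	[2] = 100,		/* overrides slot 2: the warning fires here */
	[5] = 200,		/* overrides slot 5 */
};

int main(void)
{
	printf("%d %d %d\n", lookup[2], lookup[5], lookup[7]);	/* 100 200 -1 */
	return 0;
}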
tools/testing/selftests/bpf/prog_tests/bpf_gotox.c

Lines changed: 292 additions & 0 deletions

@@ -0,0 +1,292 @@
// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <linux/tcp.h>

#include <sys/syscall.h>
#include <bpf/bpf.h>

#include "bpf_gotox.skel.h"

static void __test_run(struct bpf_program *prog, void *ctx_in, size_t ctx_size_in)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.ctx_in = ctx_in,
		.ctx_size_in = ctx_size_in,
	);
	int err, prog_fd;

	prog_fd = bpf_program__fd(prog);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run_opts err");
}

static void __subtest(struct bpf_gotox *skel, void (*check)(struct bpf_gotox *))
{
	if (skel->data->skip)
		test__skip();
	else
		check(skel);
}

static void check_simple(struct bpf_gotox *skel,
			 struct bpf_program *prog,
			 __u64 ctx_in,
			 __u64 expected)
{
	skel->bss->ret_user = 0;

	__test_run(prog, &ctx_in, sizeof(ctx_in));

	if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
		return;
}

static void check_simple_fentry(struct bpf_gotox *skel,
				struct bpf_program *prog,
				__u64 ctx_in,
				__u64 expected)
{
	skel->bss->in_user = ctx_in;
	skel->bss->ret_user = 0;

	/* trigger */
	usleep(1);

	if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
		return;
}

/* validate that for two loads of the same jump table libbpf generates only one map */
static void check_one_map_two_jumps(struct bpf_gotox *skel)
{
	struct bpf_prog_info prog_info;
	struct bpf_map_info map_info;
	__u32 len;
	__u32 map_ids[16];
	int prog_fd, map_fd;
	int ret;
	int i;
	bool seen = false;

	memset(&prog_info, 0, sizeof(prog_info));
	prog_info.map_ids = (long)map_ids;
	prog_info.nr_map_ids = ARRAY_SIZE(map_ids);
	prog_fd = bpf_program__fd(skel->progs.one_map_two_jumps);
	if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd(one_map_two_jumps)"))
		return;

	len = sizeof(prog_info);
	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &len);
	if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(prog_fd)"))
		return;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
			return;

		len = sizeof(map_info);
		memset(&map_info, 0, len);
		ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &len);
		if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(map_fd)")) {
			close(map_fd);
			return;
		}

		if (map_info.type == BPF_MAP_TYPE_INSN_ARRAY) {
			if (!ASSERT_EQ(seen, false, "more than one INSN_ARRAY map")) {
				close(map_fd);
				return;
			}
			seen = true;
		}
		close(map_fd);
	}

	ASSERT_EQ(seen, true, "no INSN_ARRAY map");
}

static void check_one_switch(struct bpf_gotox *skel)
{
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
	int i;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.one_switch, in[i], out[i]);
}

static void check_one_switch_non_zero_sec_off(struct bpf_gotox *skel)
{
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
	int i;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.one_switch_non_zero_sec_off, in[i], out[i]);
}

static void check_two_switches(struct bpf_gotox *skel)
{
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {103, 104, 107, 205, 115, 1019, 1019};
	int i;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.two_switches, in[i], out[i]);
}

static void check_big_jump_table(struct bpf_gotox *skel)
{
	__u64 in[] = {0, 11, 27, 31, 22, 45, 99};
	__u64 out[] = {2, 3, 4, 5, 19, 19, 19};
	int i;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.big_jump_table, in[i], out[i]);
}

static void check_one_jump_two_maps(struct bpf_gotox *skel)
{
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {12, 15, 7, 15, 12, 15, 15};
	int i;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.one_jump_two_maps, in[i], out[i]);
}

static void check_static_global(struct bpf_gotox *skel)
{
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
	int i;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.use_static_global1, in[i], out[i]);
	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.use_static_global2, in[i], out[i]);
}

static void check_nonstatic_global(struct bpf_gotox *skel)
{
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
	int i;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.use_nonstatic_global1, in[i], out[i]);

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple(skel, skel->progs.use_nonstatic_global2, in[i], out[i]);
}

static void check_other_sec(struct bpf_gotox *skel)
{
	struct bpf_link *link;
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
	int i;

	link = bpf_program__attach(skel->progs.simple_test_other_sec);
	if (!ASSERT_OK_PTR(link, "link"))
		return;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple_fentry(skel, skel->progs.simple_test_other_sec, in[i], out[i]);

	bpf_link__destroy(link);
}

static void check_static_global_other_sec(struct bpf_gotox *skel)
{
	struct bpf_link *link;
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
	int i;

	link = bpf_program__attach(skel->progs.use_static_global_other_sec);
	if (!ASSERT_OK_PTR(link, "link"))
		return;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple_fentry(skel, skel->progs.use_static_global_other_sec, in[i], out[i]);

	bpf_link__destroy(link);
}

static void check_nonstatic_global_other_sec(struct bpf_gotox *skel)
{
	struct bpf_link *link;
	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
	int i;

	link = bpf_program__attach(skel->progs.use_nonstatic_global_other_sec);
	if (!ASSERT_OK_PTR(link, "link"))
		return;

	for (i = 0; i < ARRAY_SIZE(in); i++)
		check_simple_fentry(skel, skel->progs.use_nonstatic_global_other_sec, in[i], out[i]);

	bpf_link__destroy(link);
}

void test_bpf_gotox(void)
{
	struct bpf_gotox *skel;
	int ret;

	skel = bpf_gotox__open();
	if (!ASSERT_NEQ(skel, NULL, "bpf_gotox__open"))
		return;

	ret = bpf_gotox__load(skel);
	if (!ASSERT_OK(ret, "bpf_gotox__load"))
		return;

	skel->bss->pid = getpid();

	if (test__start_subtest("one-switch"))
		__subtest(skel, check_one_switch);

	if (test__start_subtest("one-switch-non-zero-sec-offset"))
		__subtest(skel, check_one_switch_non_zero_sec_off);

	if (test__start_subtest("two-switches"))
		__subtest(skel, check_two_switches);

	if (test__start_subtest("big-jump-table"))
		__subtest(skel, check_big_jump_table);

	if (test__start_subtest("static-global"))
		__subtest(skel, check_static_global);

	if (test__start_subtest("nonstatic-global"))
		__subtest(skel, check_nonstatic_global);

	if (test__start_subtest("other-sec"))
		__subtest(skel, check_other_sec);

	if (test__start_subtest("static-global-other-sec"))
		__subtest(skel, check_static_global_other_sec);

	if (test__start_subtest("nonstatic-global-other-sec"))
		__subtest(skel, check_nonstatic_global_other_sec);

	if (test__start_subtest("one-jump-two-maps"))
		__subtest(skel, check_one_jump_two_maps);

	if (test__start_subtest("one-map-two-jumps"))
		__subtest(skel, check_one_map_two_jumps);

	bpf_gotox__destroy(skel);
}
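
The user-space test above reads skip, ret_user, in_user, and pid through the bpf_gotox skeleton; the BPF object defining them is not shown in this section. A minimal sketch of how such globals could be declared on the BPF side, with every specific here an assumption rather than the commit's actual code:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical BPF-side declarations (not the commit's progs/ file),
 * matching only the fields the skeleton accesses above.
 */
#include <linux/types.h>
#include <bpf/bpf_helpers.h>

int pid;		/* .bss: set by test_bpf_gotox() to filter triggers by PID */
int skip = 1;		/* .data: non-zero keeps every __subtest() skipped (assumed default) */
__u64 in_user;		/* .bss: input for the usleep(1)-triggered subtests */
__u64 ret_user;		/* .bss: result verified by check_simple*() */

char _license[] SEC("license") = "GPL";

With an arrangement like this, a developer could clear skip (or have the object clear it once the toolchain and kernel support indirect jumps) and run just these subtests locally with ./test_progs -t bpf_gotox.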
