|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | + |
| 3 | +#include <test_progs.h> |
| 4 | + |
| 5 | +#include <linux/if_ether.h> |
| 6 | +#include <linux/in.h> |
| 7 | +#include <linux/ip.h> |
| 8 | +#include <linux/ipv6.h> |
| 9 | +#include <linux/in6.h> |
| 10 | +#include <linux/udp.h> |
| 11 | +#include <linux/tcp.h> |
| 12 | + |
| 13 | +#include <sys/syscall.h> |
| 14 | +#include <bpf/bpf.h> |
| 15 | + |
| 16 | +#include "bpf_gotox.skel.h" |
| 17 | + |
| 18 | +#if 0 |
| 19 | +static void __test_run(struct bpf_program *prog, void *ctx_in, size_t ctx_size_in) |
| 20 | +{ |
| 21 | + LIBBPF_OPTS(bpf_test_run_opts, topts, |
| 22 | + .ctx_in = ctx_in, |
| 23 | + .ctx_size_in = ctx_size_in, |
| 24 | + ); |
| 25 | + int err, prog_fd; |
| 26 | + |
| 27 | + prog_fd = bpf_program__fd(prog); |
| 28 | + err = bpf_prog_test_run_opts(prog_fd, &topts); |
| 29 | + ASSERT_OK(err, "test_run_opts err"); |
| 30 | +} |
| 31 | + |
| 32 | +static void check_simple(struct bpf_gotox *skel, |
| 33 | + struct bpf_program *prog, |
| 34 | + __u64 ctx_in, |
| 35 | + __u64 expected) |
| 36 | +{ |
| 37 | + skel->bss->ret_user = 0; |
| 38 | + |
| 39 | + __test_run(prog, &ctx_in, sizeof(ctx_in)); |
| 40 | + |
| 41 | + if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user")) |
| 42 | + return; |
| 43 | +} |
| 44 | + |
| 45 | +static void check_simple_fentry(struct bpf_gotox *skel, |
| 46 | + struct bpf_program *prog, |
| 47 | + __u64 ctx_in, |
| 48 | + __u64 expected) |
| 49 | +{ |
| 50 | + skel->bss->in_user = ctx_in; |
| 51 | + skel->bss->ret_user = 0; |
| 52 | + |
| 53 | + /* trigger */ |
| 54 | + usleep(1); |
| 55 | + |
| 56 | + if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user")) |
| 57 | + return; |
| 58 | +} |
| 59 | + |
| 60 | +/* validate that for two loads of the same jump table libbpf generates only one map */ |
| 61 | +static void check_one_map_two_jumps(struct bpf_gotox *skel) |
| 62 | +{ |
| 63 | + struct bpf_prog_info prog_info; |
| 64 | + struct bpf_map_info map_info; |
| 65 | + __u32 len; |
| 66 | + __u32 map_ids[16]; |
| 67 | + int prog_fd, map_fd; |
| 68 | + int ret; |
| 69 | + int i; |
| 70 | + bool seen = false; |
| 71 | + |
| 72 | + memset(&prog_info, 0, sizeof(prog_info)); |
| 73 | + prog_info.map_ids = (long)map_ids; |
| 74 | + prog_info.nr_map_ids = ARRAY_SIZE(map_ids); |
| 75 | + prog_fd = bpf_program__fd(skel->progs.one_map_two_jumps); |
| 76 | + if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd(one_map_two_jumps)")) |
| 77 | + return; |
| 78 | + |
| 79 | + len = sizeof(prog_info); |
| 80 | + ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &len); |
| 81 | + if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(prog_fd)")) |
| 82 | + return; |
| 83 | + |
| 84 | + for (i = 0; i < prog_info.nr_map_ids; i++) { |
| 85 | + map_fd = bpf_map_get_fd_by_id(map_ids[i]); |
| 86 | + if (!ASSERT_GE(map_fd, 0, "bpf_program__fd(one_map_two_jumps)")) |
| 87 | + return; |
| 88 | + |
| 89 | + len = sizeof(map_info); |
| 90 | + memset(&map_info, 0, len); |
| 91 | + ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &len); |
| 92 | + if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(map_fd)")) { |
| 93 | + close(map_fd); |
| 94 | + return; |
| 95 | + } |
| 96 | + |
| 97 | + if (map_info.type == BPF_MAP_TYPE_INSN_ARRAY) { |
| 98 | + if (!ASSERT_EQ(seen, false, "more than one INSN_ARRAY map")) { |
| 99 | + close(map_fd); |
| 100 | + return; |
| 101 | + } |
| 102 | + seen = true; |
| 103 | + } |
| 104 | + close(map_fd); |
| 105 | + } |
| 106 | + |
| 107 | + ASSERT_EQ(seen, true, "no INSN_ARRAY map"); |
| 108 | +} |
| 109 | + |
| 110 | +static void check_one_switch(struct bpf_gotox *skel) |
| 111 | +{ |
| 112 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 113 | + __u64 out[] = {2, 3, 4, 5, 7, 19, 19}; |
| 114 | + int i; |
| 115 | + |
| 116 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 117 | + check_simple(skel, skel->progs.one_switch, in[i], out[i]); |
| 118 | +} |
| 119 | + |
| 120 | +static void check_one_switch_non_zero_sec_off(struct bpf_gotox *skel) |
| 121 | +{ |
| 122 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 123 | + __u64 out[] = {2, 3, 4, 5, 7, 19, 19}; |
| 124 | + int i; |
| 125 | + |
| 126 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 127 | + check_simple(skel, skel->progs.one_switch_non_zero_sec_off, in[i], out[i]); |
| 128 | +} |
| 129 | + |
| 130 | +static void check_two_switches(struct bpf_gotox *skel) |
| 131 | +{ |
| 132 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 133 | + __u64 out[] = {103, 104, 107, 205, 115, 1019, 1019}; |
| 134 | + int i; |
| 135 | + |
| 136 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 137 | + check_simple(skel, skel->progs.two_switches, in[i], out[i]); |
| 138 | +} |
| 139 | + |
| 140 | +static void check_big_jump_table(struct bpf_gotox *skel) |
| 141 | +{ |
| 142 | + __u64 in[] = {0, 11, 27, 31, 22, 45, 99}; |
| 143 | + __u64 out[] = {2, 3, 4, 5, 19, 19, 19}; |
| 144 | + int i; |
| 145 | + |
| 146 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 147 | + check_simple(skel, skel->progs.big_jump_table, in[i], out[i]); |
| 148 | +} |
| 149 | + |
| 150 | +static void check_one_jump_two_maps(struct bpf_gotox *skel) |
| 151 | +{ |
| 152 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 153 | + __u64 out[] = {12, 15, 7 , 15, 12, 15, 15}; |
| 154 | + int i; |
| 155 | + |
| 156 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 157 | + check_simple(skel, skel->progs.one_jump_two_maps, in[i], out[i]); |
| 158 | +} |
| 159 | + |
| 160 | +static void check_static_global(struct bpf_gotox *skel) |
| 161 | +{ |
| 162 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 163 | + __u64 out[] = {2, 3, 4, 5, 7, 19, 19}; |
| 164 | + int i; |
| 165 | + |
| 166 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 167 | + check_simple(skel, skel->progs.use_static_global1, in[i], out[i]); |
| 168 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 169 | + check_simple(skel, skel->progs.use_static_global2, in[i], out[i]); |
| 170 | +} |
| 171 | + |
| 172 | +static void check_nonstatic_global(struct bpf_gotox *skel) |
| 173 | +{ |
| 174 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 175 | + __u64 out[] = {2, 3, 4, 5, 7, 19, 19}; |
| 176 | + int i; |
| 177 | + |
| 178 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 179 | + check_simple(skel, skel->progs.use_nonstatic_global1, in[i], out[i]); |
| 180 | + |
| 181 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 182 | + check_simple(skel, skel->progs.use_nonstatic_global2, in[i], out[i]); |
| 183 | +} |
| 184 | + |
| 185 | +static void check_other_sec(struct bpf_gotox *skel) |
| 186 | +{ |
| 187 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 188 | + __u64 out[] = {2, 3, 4, 5, 7, 19, 19}; |
| 189 | + int i; |
| 190 | + |
| 191 | + bpf_program__attach(skel->progs.simple_test_other_sec); |
| 192 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 193 | + check_simple_fentry(skel, skel->progs.simple_test_other_sec, in[i], out[i]); |
| 194 | +} |
| 195 | + |
| 196 | +static void check_static_global_other_sec(struct bpf_gotox *skel) |
| 197 | +{ |
| 198 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 199 | + __u64 out[] = {2, 3, 4, 5, 7, 19, 19}; |
| 200 | + int i; |
| 201 | + |
| 202 | + bpf_program__attach(skel->progs.use_static_global_other_sec); |
| 203 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 204 | + check_simple_fentry(skel, skel->progs.use_static_global_other_sec, in[i], out[i]); |
| 205 | +} |
| 206 | + |
| 207 | +static void check_nonstatic_global_other_sec(struct bpf_gotox *skel) |
| 208 | +{ |
| 209 | + __u64 in[] = {0, 1, 2, 3, 4, 5, 77}; |
| 210 | + __u64 out[] = {2, 3, 4, 5, 7, 19, 19}; |
| 211 | + int i; |
| 212 | + |
| 213 | + bpf_program__attach(skel->progs.use_nonstatic_global_other_sec); |
| 214 | + for (i = 0; i < ARRAY_SIZE(in); i++) |
| 215 | + check_simple_fentry(skel, skel->progs.use_nonstatic_global_other_sec, in[i], out[i]); |
| 216 | +} |
| 217 | + |
| 218 | +static void __test_bpf_gotox(void) |
| 219 | +{ |
| 220 | + struct bpf_gotox *skel; |
| 221 | + int ret; |
| 222 | + |
| 223 | + skel = bpf_gotox__open(); |
| 224 | + if (!ASSERT_NEQ(skel, NULL, "bpf_gotox__open")) |
| 225 | + return; |
| 226 | + |
| 227 | + ret = bpf_gotox__load(skel); |
| 228 | + if (!ASSERT_OK(ret, "bpf_gotox__load")) |
| 229 | + return; |
| 230 | + |
| 231 | + if (test__start_subtest("one-switch")) |
| 232 | + check_one_switch(skel); |
| 233 | + |
| 234 | + if (test__start_subtest("one-switch-non-zero-sec-offset")) |
| 235 | + check_one_switch_non_zero_sec_off(skel); |
| 236 | + |
| 237 | + if (test__start_subtest("two-switches")) |
| 238 | + check_two_switches(skel); |
| 239 | + |
| 240 | + if (test__start_subtest("big-jump-table")) |
| 241 | + check_big_jump_table(skel); |
| 242 | + |
| 243 | + if (test__start_subtest("static-global")) |
| 244 | + check_static_global(skel); |
| 245 | + |
| 246 | + if (test__start_subtest("nonstatic-global")) |
| 247 | + check_nonstatic_global(skel); |
| 248 | + |
| 249 | + if (test__start_subtest("other-sec")) |
| 250 | + check_other_sec(skel); |
| 251 | + |
| 252 | + if (test__start_subtest("static-global-other-sec")) |
| 253 | + check_static_global_other_sec(skel); |
| 254 | + |
| 255 | + if (test__start_subtest("nonstatic-global-other-sec")) |
| 256 | + check_nonstatic_global_other_sec(skel); |
| 257 | + |
| 258 | + if (test__start_subtest("one-jump-two-maps")) |
| 259 | + check_one_jump_two_maps(skel); |
| 260 | + |
| 261 | + if (test__start_subtest("one-map-two-jumps")) |
| 262 | + check_one_map_two_jumps(skel); |
| 263 | + |
| 264 | + bpf_gotox__destroy(skel); |
| 265 | +} |
| 266 | +#else |
/* Stub used while the real tests above are compiled out with #if 0 —
 * presumably pending compiler/kernel gotox support; TODO confirm and
 * drop the guard.  Reports the whole test as skipped.
 */
static void __test_bpf_gotox(void)
{
	test__skip();
}
| 271 | +#endif |
| 272 | + |
/* Entry point discovered by the test_progs harness. */
void test_bpf_gotox(void)
{
	__test_bpf_gotox();
}
0 commit comments