Skip to content

Commit a7c1eb9

Browse files
anakryiko authored and gregkh committed
selftests/bpf: make test_align selftest more robust
[ Upstream commit 4f999b7 ] test_align selftest relies on BPF verifier log emitting register states for specific instructions in expected format. Unfortunately, BPF verifier precision backtracking log interferes with such expectations. And instructions on which precision propagation happens sometimes don't output full expected register states. This does indeed look like something to be improved in BPF verifier, but is beyond the scope of this patch set. So to make test_align a bit more robust, inject a few dummy R4 = R5 instructions which capture desired state of R5 and won't have precision tracking logs on them. This fixes tests until we can improve BPF verifier output in the presence of precision tracking. Signed-off-by: Andrii Nakryiko <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]> Signed-off-by: Eduard Zingerman <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 4c8f30a commit a7c1eb9

File tree

1 file changed

+24
-14
lines changed
  • tools/testing/selftests/bpf/prog_tests

1 file changed

+24
-14
lines changed

tools/testing/selftests/bpf/prog_tests/align.c

Lines changed: 24 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
#include <test_progs.h>
33

44
#define MAX_INSNS 512
5-
#define MAX_MATCHES 16
5+
#define MAX_MATCHES 24
66

77
struct bpf_reg_match {
88
unsigned int line;
@@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = {
267267
*/
268268
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
269269
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
270+
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
270271
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
271272
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
272273
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
@@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = {
280281
BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
281282
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
282283
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
284+
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
283285
BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
284286
BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
285287
BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
@@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = {
311313
{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
312314
{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
313315
/* Variable offset is added to R5 packet pointer,
314-
* resulting in auxiliary alignment of 4.
316+
* resulting in auxiliary alignment of 4. To avoid BPF
317+
* verifier's precision backtracking logging
318+
* interfering we also have a no-op R4 = R5
319+
* instruction to validate R5 state. We also check
320+
* that R4 is what it should be in such case.
315321
*/
316-
{17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
322+
{18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
323+
{18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
317324
/* Constant offset is added to R5, resulting in
318325
* reg->off of 14.
319326
*/
320-
{18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
327+
{19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
321328
/* At the time the word size load is performed from R5,
322329
* its total fixed offset is NET_IP_ALIGN + reg->off
323330
* (14) which is 16. Then the variable offset is 4-byte
324331
* aligned, so the total offset is 4-byte aligned and
325332
* meets the load's requirements.
326333
*/
327-
{23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
328-
{23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
334+
{24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
335+
{24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
329336
/* Constant offset is added to R5 packet pointer,
330337
* resulting in reg->off value of 14.
331338
*/
332-
{25, "R5_w=pkt(off=14,r=8"},
339+
{26, "R5_w=pkt(off=14,r=8"},
333340
/* Variable offset is added to R5, resulting in a
334-
* variable offset of (4n).
341+
* variable offset of (4n). See comment for insn #18
342+
* for R4 = R5 trick.
335343
*/
336-
{26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
344+
{28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
345+
{28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
337346
/* Constant is added to R5 again, setting reg->off to 18. */
338-
{27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
347+
{29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
339348
/* And once more we add a variable; resulting var_off
340349
* is still (4n), fixed offset is not changed.
341350
* Also, we create a new reg->id.
342351
*/
343-
{28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
352+
{31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
353+
{31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
344354
/* At the time the word size load is performed from R5,
345355
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
346356
* which is 20. Then the variable offset is (4n), so
347357
* the total offset is 4-byte aligned and meets the
348358
* load's requirements.
349359
*/
350-
{33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
351-
{33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
360+
{35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
361+
{35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
352362
},
353363
},
354364
{
@@ -681,6 +691,6 @@ void test_align(void)
681691
if (!test__start_subtest(test->descr))
682692
continue;
683693

684-
CHECK_FAIL(do_test_single(test));
694+
ASSERT_OK(do_test_single(test), test->descr);
685695
}
686696
}

0 commit comments

Comments (0)