Commit 2fb7618

kkdwivedi authored and Alexei Starovoitov committed
bpf, x86: Add x86 JIT support for timed may_goto
Implement the arch_bpf_timed_may_goto function using inline assembly to have control over which registers are spilled, and use our special protocol of using BPF_REG_AX as an argument into the function, and as the return value when going back.

Emit call depth accounting for the call made from this stub, and ensure we don't have naked returns (when rethunk mitigations are enabled) by falling back to the RET macro (instead of retq). After popping all saved registers, the return address into the BPF program should be on top of the stack.

Since the JIT support is now enabled, ensure selftests which are checking the produced may_goto sequences do not break by adjusting them. Make sure we still test the old may_goto sequence on other architectures, while testing the new sequence on x86_64.

Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent: e723608 · commit: 2fb7618
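For context, may_goto is the instruction behind the can_loop macro in the selftests' bpf_experimental.h, and with this commit the x86-64 JIT transparently emits the timed variant for it. A minimal BPF-side sketch of a may_goto-bounded loop (program name and loop bound are illustrative, not from this commit):

/* Sketch: a loop bounded by may_goto via can_loop. Assumes the usual
 * selftest includes; can_loop expands to a may_goto instruction, which
 * the verifier rewrites into the count/timestamp sequence asserted in
 * the selftests below. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char _license[] SEC("license") = "GPL";

SEC("raw_tp")
int timed_loop(void *ctx)
{
	int i;

	/* Runs until the iteration budget and time slice are both spent. */
	for (i = 0; i < (1 << 20) && can_loop; i++)
		;
	return 0;
}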

5 files changed, +130 -13 lines

arch/x86/net/Makefile

Lines changed: 1 addition & 1 deletion
@@ -6,5 +6,5 @@
 ifeq ($(CONFIG_X86_32),y)
 obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o
 else
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o
 endif

arch/x86/net/bpf_jit_comp.c

Lines changed: 5 additions & 0 deletions
@@ -3791,3 +3791,8 @@ u64 bpf_arch_uaddress_limit(void)
 {
 	return 0;
 }
+
+bool bpf_jit_supports_timed_may_goto(void)
+{
+	return true;
+}
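The strong definition above overrides the generic default in the BPF core, which, following the usual bpf_jit_supports_* pattern, is declared __weak and returns false, so only architectures that ship a timed may_goto stub opt in. A sketch of that assumed default:

/* kernel/bpf/core.c (assumed location): weak default, overridden by the
 * x86-64 JIT definition above. */
bool __weak bpf_jit_supports_timed_may_goto(void)
{
	return false;
}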

arch/x86/net/bpf_timed_may_goto.S

Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/export.h>
+#include <linux/linkage.h>
+#include <asm/nospec-branch.h>
+
+	.code64
+	.section .text, "ax"
+
+SYM_FUNC_START(arch_bpf_timed_may_goto)
+	ANNOTATE_NOENDBR
+
+	/* Save r0-r5. */
+	pushq %rax
+	pushq %rdi
+	pushq %rsi
+	pushq %rdx
+	pushq %rcx
+	pushq %r8
+
+	/*
+	 * r10 passes us stack depth, load the pointer to count and timestamp as
+	 * first argument to the call below.
+	 */
+	leaq (%rbp, %r10, 1), %rdi
+
+	/* Emit call depth accounting for call below. */
+	CALL_DEPTH_ACCOUNT
+	call bpf_check_timed_may_goto
+
+	/* BPF_REG_AX=r10 will be stored into count, so move return value to it. */
+	movq %rax, %r10
+
+	/* Restore r5-r0. */
+	popq %r8
+	popq %rcx
+	popq %rdx
+	popq %rsi
+	popq %rdi
+	popq %rax
+
+	RET
+SYM_FUNC_END(arch_bpf_timed_may_goto)
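The stub adds the (negative) stack offset passed in BPF_REG_AX to the frame pointer to form a pointer to the count/timestamp pair, calls the C helper, and hands the helper's return value back to the BPF program in BPF_REG_AX. The helper itself lives in the BPF core, not in this commit; the following is only a plausible sketch of its contract, reconstructed from the protocol above (struct layout, constant names, and the exact time check are assumptions):

/* Sketch, not the actual kernel implementation: the callee receives a
 * pointer to the two bookkeeping slots on the BPF stack and returns the
 * refreshed iteration budget in rax, which the stub moves into r10
 * (BPF_REG_AX) for the JIT-emitted sequence to store back. */
struct bpf_timed_may_goto {
	u64 count;	/* remaining iterations, lower stack slot */
	u64 timestamp;	/* start of the current time slice */
};

u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
{
	u64 now = ktime_get_mono_fast_ns();	/* NMI-safe monotonic clock */

	/* First expiry of the budget: arm the timestamp and refill. */
	if (!p->timestamp) {
		p->timestamp = now;
		return BPF_MAX_TIMED_LOOPS;	/* assumed constant name */
	}
	/* Time slice exhausted: return 0 so the next may_goto check in the
	 * JIT-emitted sequence takes the exit branch. */
	if (now - p->timestamp >= BPF_TIMED_MAY_GOTO_NS)	/* assumed */
		return 0;
	/* Still within the slice: grant another batch of iterations. */
	return BPF_MAX_TIMED_LOOPS;
}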

tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c

Lines changed: 48 additions & 10 deletions
@@ -620,23 +620,61 @@ __naked void helper_call_does_not_prevent_bpf_fastcall(void)
 
 SEC("raw_tp")
 __arch_x86_64
+__log_level(4) __msg("stack depth 24")
+/* may_goto counter at -24 */
+__xlated("0: *(u64 *)(r10 -24) =")
+/* may_goto timestamp at -16 */
+__xlated("1: *(u64 *)(r10 -16) =")
+__xlated("2: r1 = 1")
+__xlated("...")
+__xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
+/* may_goto expansion starts */
+__xlated("6: r11 = *(u64 *)(r10 -24)")
+__xlated("7: if r11 == 0x0 goto pc+6")
+__xlated("8: r11 -= 1")
+__xlated("9: if r11 != 0x0 goto pc+2")
+__xlated("10: r11 = -24")
+__xlated("11: call unknown")
+__xlated("12: *(u64 *)(r10 -24) = r11")
+/* may_goto expansion ends */
+__xlated("13: *(u64 *)(r10 -8) = r1")
+__xlated("14: exit")
+__success
+__naked void may_goto_interaction_x86_64(void)
+{
+	asm volatile (
+	"r1 = 1;"
+	"*(u64 *)(r10 - 16) = r1;"
+	"call %[bpf_get_smp_processor_id];"
+	"r1 = *(u64 *)(r10 - 16);"
+	".8byte %[may_goto];"
+	/* just touch some stack at -8 */
+	"*(u64 *)(r10 - 8) = r1;"
+	"exit;"
+	:
+	: __imm(bpf_get_smp_processor_id),
+	  __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, +1 /* offset */, 0))
+	: __clobber_all);
+}
+
+SEC("raw_tp")
+__arch_arm64
 __log_level(4) __msg("stack depth 16")
 /* may_goto counter at -16 */
 __xlated("0: *(u64 *)(r10 -16) =")
 __xlated("1: r1 = 1")
-__xlated("...")
-__xlated("3: r0 = &(void __percpu *)(r0)")
-__xlated("...")
+__xlated("2: call bpf_get_smp_processor_id")
 /* may_goto expansion starts */
-__xlated("5: r11 = *(u64 *)(r10 -16)")
-__xlated("6: if r11 == 0x0 goto pc+3")
-__xlated("7: r11 -= 1")
-__xlated("8: *(u64 *)(r10 -16) = r11")
+__xlated("3: r11 = *(u64 *)(r10 -16)")
+__xlated("4: if r11 == 0x0 goto pc+3")
+__xlated("5: r11 -= 1")
+__xlated("6: *(u64 *)(r10 -16) = r11")
 /* may_goto expansion ends */
-__xlated("9: *(u64 *)(r10 -8) = r1")
-__xlated("10: exit")
+__xlated("7: *(u64 *)(r10 -8) = r1")
+__xlated("8: exit")
 __success
-__naked void may_goto_interaction(void)
+__naked void may_goto_interaction_arm64(void)
 {
 	asm volatile (
 	"r1 = 1;"

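These programs check how the bpf_fastcall rewrite (which removes the r10-16 spill/fill around bpf_get_smp_processor_id, visible as the missing store in the xlated output) interacts with the may_goto expansion. After that rewrite only the u64 at r10-8 remains live, and the timed state is appended below it: timestamp at -16, counter at -24, giving the asserted stack depth of 24. A small standalone check of that arithmetic (slot names taken from the comments in the test):

#include <assert.h>

int main(void)
{
	int prog_stack = 8;			/* surviving u64 at r10-8 */
	int timestamp_off = -(prog_stack + 8);	/* next slot down */
	int count_off = -(prog_stack + 16);	/* slot below that */

	assert(timestamp_off == -16);	/* matches __xlated("1: ...") */
	assert(count_off == -24);	/* matches __xlated("0: ...") */
	assert(-count_off == 24);	/* matches __msg("stack depth 24") */
	return 0;
}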
tools/testing/selftests/bpf/progs/verifier_may_goto_1.c

Lines changed: 32 additions & 2 deletions
@@ -69,8 +69,38 @@ __naked void may_goto_batch_1(void)
 }
 
 SEC("raw_tp")
-__description("may_goto batch with offsets 2/0")
+__description("may_goto batch with offsets 2/0 - x86_64")
 __arch_x86_64
+__xlated("0: *(u64 *)(r10 -16) = 65535")
+__xlated("1: *(u64 *)(r10 -8) = 0")
+__xlated("2: r11 = *(u64 *)(r10 -16)")
+__xlated("3: if r11 == 0x0 goto pc+6")
+__xlated("4: r11 -= 1")
+__xlated("5: if r11 != 0x0 goto pc+2")
+__xlated("6: r11 = -16")
+__xlated("7: call unknown")
+__xlated("8: *(u64 *)(r10 -16) = r11")
+__xlated("9: r0 = 1")
+__xlated("10: r0 = 2")
+__xlated("11: exit")
+__success
+__naked void may_goto_batch_2_x86_64(void)
+{
+	asm volatile (
+	".8byte %[may_goto1];"
+	".8byte %[may_goto3];"
+	"r0 = 1;"
+	"r0 = 2;"
+	"exit;"
+	:
+	: __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
+	  __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
+	: __clobber_all);
+}
+
+SEC("raw_tp")
+__description("may_goto batch with offsets 2/0 - arm64")
+__arch_arm64
 __xlated("0: *(u64 *)(r10 -8) = 8388608")
 __xlated("1: r11 = *(u64 *)(r10 -8)")
 __xlated("2: if r11 == 0x0 goto pc+3")
@@ -80,7 +110,7 @@ __xlated("5: r0 = 1")
 __xlated("6: r0 = 2")
 __xlated("7: exit")
 __success
-__naked void may_goto_batch_2(void)
+__naked void may_goto_batch_2_arm64(void)
 {
 	asm volatile (
 	".8byte %[may_goto1];"

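The two initializers asserted above are the loop budgets: the arm64 (untimed) sequence starts its counter at 8388608, the kernel's BPF_MAX_LOOPS (8 * 1024 * 1024), while the x86_64 (timed) sequence starts at 65535 and additionally zeroes a timestamp slot, since elapsed time rather than raw count is the real bound there. A quick sanity check of those values (the timed constant's name is an assumption):

#include <assert.h>

#define BPF_MAX_LOOPS		(8 * 1024 * 1024)	/* untimed budget */
#define BPF_MAX_TIMED_LOOPS	0xffff			/* assumed name */

int main(void)
{
	assert(BPF_MAX_LOOPS == 8388608);	/* arm64 initializer above */
	assert(BPF_MAX_TIMED_LOOPS == 65535);	/* x86_64 initializer above */
	return 0;
}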