
Commit d0f6392

puranjaymohan authored and Kernel Patches Daemon committed
bpf, arm64: Add JIT support for timed may_goto
When the verifier sees a timed may_goto instruction, it emits a call to arch_bpf_timed_may_goto() with a stack offset in BPF_REG_AX (arm64: x9) and expects a count value to be returned in the same register. The verifier doesn't save or restore any registers before emitting this call.

arch_bpf_timed_may_goto() therefore has to act as a trampoline that calls bpf_check_timed_may_goto() with the AAPCS64 calling convention. To support this custom calling convention, implement arch_bpf_timed_may_goto() in assembly: save and restore the BPF caller-saved registers, call bpf_check_timed_may_goto() with the arm64 calling convention (first argument and return value both in x0), then put the result back into BPF_REG_AX before returning.

Signed-off-by: Puranjay Mohan <[email protected]>
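For orientation, the register contract can be sketched in C. This is an illustrative sketch only, not code from this commit: the struct layout and the helper prototype are assumed from the generic timed may_goto support in kernel/bpf/core.c, and timed_may_goto_logic() is a hypothetical name for what the assembly trampoline computes.

	#include <linux/types.h>

	/* Assumed layout of the per-loop state the verifier keeps on the
	 * BPF stack; a pointer to this pair is what
	 * bpf_check_timed_may_goto() receives.
	 */
	struct bpf_timed_may_goto {
		u64 count;	/* remaining loop budget */
		u64 timestamp;	/* start time for the timed cutoff */
	};

	/* Generic helper, plain AAPCS64: argument and result both in x0. */
	u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p);

	/* What the trampoline computes, logically. It cannot be written in
	 * C: the offset arrives in x9 (BPF_REG_AX), the base is the BPF
	 * frame pointer x25, the result must go back out in x9, and BPF
	 * R0-R5 (x7, x0-x4) must be preserved across the call.
	 */
	static inline u64 timed_may_goto_logic(u64 bpf_fp, u64 stack_off)
	{
		struct bpf_timed_may_goto *p;

		p = (struct bpf_timed_may_goto *)(bpf_fp + stack_off);
		return bpf_check_timed_may_goto(p);
	}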
1 parent: 601ea2d

File tree: 3 files changed, +53 -2 lines changed

arch/arm64/net/Makefile

Lines changed: 1 addition & 1 deletion

@@ -2,4 +2,4 @@
 #
 # ARM64 networking code
 #
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o

arch/arm64/net/bpf_jit_comp.c

Lines changed: 12 additions & 1 deletion

@@ -1558,7 +1558,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (ret < 0)
 			return ret;
 		emit_call(func_addr, ctx);
-		emit(A64_MOV(1, r0, A64_R(0)), ctx);
+		/*
+		 * Call to arch_bpf_timed_may_goto() is emitted by the
+		 * verifier and called with custom calling convention with
+		 * first argument and return value in BPF_REG_AX (x9).
+		 */
+		if (func_addr != (u64)arch_bpf_timed_may_goto)
+			emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
 	}
 	/* tail call */
@@ -3038,6 +3044,11 @@ bool bpf_jit_bypass_spec_v4(void)
 	return true;
 }
 
+bool bpf_jit_supports_timed_may_goto(void)
+{
+	return true;
+}
+
 bool bpf_jit_inlines_helper_call(s32 imm)
 {
 	switch (imm) {

arch/arm64/net/bpf_timed_may_goto.S

Lines changed: 40 additions & 0 deletions

@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Puranjay Mohan <[email protected]> */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(arch_bpf_timed_may_goto)
+	/* Allocate stack space and emit frame record */
+	stp	x29, x30, [sp, #-64]!
+	mov	x29, sp
+
+	/* Save BPF registers R0 - R5 (x7, x0-x4) */
+	stp	x7, x0, [sp, #16]
+	stp	x1, x2, [sp, #32]
+	stp	x3, x4, [sp, #48]
+
+	/*
+	 * Stack depth was passed in BPF_REG_AX (x9), add it to the BPF_FP
+	 * (x25) to get the pointer to count and timestamp and pass it as the
+	 * first argument in x0.
+	 *
+	 * Before generating the call to arch_bpf_timed_may_goto, the verifier
+	 * generates a load instruction using FP, i.e. REG_AX = *(u64 *)(FP -
+	 * stack_off_cnt), so BPF_REG_FP (x25) is always set up by the arm64
+	 * jit in this case.
+	 */
+	add	x0, x9, x25
+	bl	bpf_check_timed_may_goto
+	/* BPF_REG_AX(x9) will be stored into count, so move return value to it. */
+	mov	x9, x0
+
+	/* Restore BPF registers R0 - R5 (x7, x0-x4) */
+	ldp	x7, x0, [sp, #16]
+	ldp	x1, x2, [sp, #32]
+	ldp	x3, x4, [sp, #48]
+
+	/* Restore FP and LR */
+	ldp	x29, x30, [sp], #64
+
+	ret
+SYM_FUNC_END(arch_bpf_timed_may_goto)
