
Commit fd868f1

lukenels authored and willdeacon committed
bpf, arm64: Optimize ADD,SUB,JMP BPF_K using arm64 add/sub immediates
The current code for BPF_{ADD,SUB} BPF_K loads the BPF immediate to a temporary register before performing the addition/subtraction. Similarly, BPF_JMP BPF_K cases load the immediate to a temporary register before comparison.

This patch introduces optimizations that use arm64 immediate add, sub, cmn, or cmp instructions when the BPF immediate fits. If the immediate does not fit, it falls back to using a temporary register.

Example of generated code for BPF_ALU64_IMM(BPF_ADD, R0, 2):

without optimization:

  24: mov x10, #0x2
  28: add x7, x7, x10

with optimization:

  24: add x7, x7, #0x2

The code could use A64_{ADD,SUB}_I directly and check if it returns AARCH64_BREAK_FAULT, similar to how logical immediates are handled. However, aarch64_insn_gen_add_sub_imm from insn.c prints error messages when the immediate does not fit, and it's simpler to check if the immediate fits ahead of time.

Co-developed-by: Xi Wang <[email protected]>
Signed-off-by: Xi Wang <[email protected]>
Signed-off-by: Luke Nelson <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
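A negative immediate whose magnitude fits is handled by emitting the complementary instruction instead; e.g. BPF_ALU64_IMM(BPF_ADD, R0, -2) can be compiled to (same R0 -> x7 mapping as above, offset illustrative):

  24: sub x7, x7, #0x2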
1 parent fd49591 commit fd868f1

File tree: 2 files changed (+38, -6 lines)


arch/arm64/net/bpf_jit.h

Lines changed: 8 additions & 0 deletions
@@ -100,6 +100,14 @@
 /* Rd = Rn OP imm12 */
 #define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
 #define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
+#define A64_ADDS_I(sf, Rd, Rn, imm12) \
+	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
+#define A64_SUBS_I(sf, Rd, Rn, imm12) \
+	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
+/* Rn + imm12; set condition flags */
+#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
+/* Rn - imm12; set condition flags */
+#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
 /* Rd = Rn */
 #define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)

arch/arm64/net/bpf_jit_comp.c

Lines changed: 30 additions & 6 deletions
@@ -167,6 +167,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 	return to - from;
 }
 
+static bool is_addsub_imm(u32 imm)
+{
+	/* Either imm12 or shifted imm12. */
+	return !(imm & ~0xfff) || !(imm & ~0xfff000);
+}
+
 /* Stack must be multiples of 16B */
 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
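The new is_addsub_imm() helper decides whether an immediate can be encoded directly in an arm64 add/sub instruction: either as a plain 12-bit value or as a 12-bit value shifted left by 12. As a quick illustration, here is a minimal user-space sketch (a hypothetical test harness, not part of the patch) that mirrors the predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors is_addsub_imm() from the patch, with u32 spelled as uint32_t. */
static bool is_addsub_imm(uint32_t imm)
{
	/* Either imm12 or shifted imm12. */
	return !(imm & ~0xfff) || !(imm & ~0xfff000);
}

int main(void)
{
	const uint32_t samples[] = {
		0x2,       /* fits: plain imm12 */
		0xfff,     /* fits: largest plain imm12 */
		0x1000,    /* fits: 0x1 shifted left by 12 */
		0xfff000,  /* fits: largest shifted imm12 */
		0x1001,    /* no fit: needs bits in both fields */
		0x1000000, /* no fit: bit above the shifted field */
	};

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%-8x -> %s\n", samples[i],
		       is_addsub_imm(samples[i]) ? "immediate form" : "tmp register");
	return 0;
}

With these samples, 0x2, 0xfff, 0x1000, and 0xfff000 take the immediate form; 0x1001 and 0x1000000 fall back to the temporary register.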

@@ -479,13 +485,25 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	/* dst = dst OP imm */
 	case BPF_ALU | BPF_ADD | BPF_K:
 	case BPF_ALU64 | BPF_ADD | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_ADD(is64, dst, dst, tmp), ctx);
+		if (is_addsub_imm(imm)) {
+			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
+		} else if (is_addsub_imm(-imm)) {
+			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
+		} else {
+			emit_a64_mov_i(is64, tmp, imm, ctx);
+			emit(A64_ADD(is64, dst, dst, tmp), ctx);
+		}
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K:
 	case BPF_ALU64 | BPF_SUB | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+		if (is_addsub_imm(imm)) {
+			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
+		} else if (is_addsub_imm(-imm)) {
+			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
+		} else {
+			emit_a64_mov_i(is64, tmp, imm, ctx);
+			emit(A64_SUB(is64, dst, dst, tmp), ctx);
+		}
 		break;
 	case BPF_ALU | BPF_AND | BPF_K:
 	case BPF_ALU64 | BPF_AND | BPF_K:
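The shifted-imm12 form is covered by the same test; e.g. a hypothetical BPF_ALU64_IMM(BPF_SUB, R0, 0x3000) passes is_addsub_imm() (0x3000 == 0x3 << 12) and would be emitted as a single instruction rather than a mov plus register subtract (same R0 -> x7 mapping as in the commit message):

  sub x7, x7, #0x3000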
@@ -639,8 +657,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_JMP32 | BPF_JSLT | BPF_K:
 	case BPF_JMP32 | BPF_JSGE | BPF_K:
 	case BPF_JMP32 | BPF_JSLE | BPF_K:
-		emit_a64_mov_i(is64, tmp, imm, ctx);
-		emit(A64_CMP(is64, dst, tmp), ctx);
+		if (is_addsub_imm(imm)) {
+			emit(A64_CMP_I(is64, dst, imm), ctx);
+		} else if (is_addsub_imm(-imm)) {
+			emit(A64_CMN_I(is64, dst, -imm), ctx);
+		} else {
+			emit_a64_mov_i(is64, tmp, imm, ctx);
+			emit(A64_CMP(is64, dst, tmp), ctx);
+		}
 		goto emit_cond_jmp;
 	case BPF_JMP | BPF_JSET | BPF_K:
 	case BPF_JMP32 | BPF_JSET | BPF_K:
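The compare cases follow the same pattern: a hypothetical BPF_JMP_IMM(BPF_JEQ, R0, 2, off) would now compile to a direct compare followed by the conditional branch,

  cmp x7, #0x2
  b.eq <target>

instead of the earlier mov x10, #0x2; cmp x7, x10 sequence. A negative immediate whose magnitude fits, such as -2, would use cmn instead: cmn x7, #0x2 sets flags for x7 + 2, i.e. it compares x7 against -2.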
