Commit 8fe5c37

James Morse authored and gregkh committed
arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs
commit 0dfefc2ea2f29ced2416017d7e5b1253a54c2735 upstream.

A malicious BPF program may manipulate the branch history to influence
what the hardware speculates will happen next.

On exit from a BPF program, emit the BHB mitigation sequence. This is
only applied for 'classic' cBPF programs that are loaded by seccomp.

Signed-off-by: James Morse <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 4977712 commit 8fe5c37
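
For context, the "classic" cBPF programs this patch targets reach the arm64 JIT as seccomp filters. A minimal sketch of loading one through the standard prctl() seccomp ABI; the always-allow filter body is illustrative only:

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* A trivial classic-BPF (cBPF) seccomp filter: allow every syscall */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Required to install a filter without CAP_SYS_ADMIN */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		perror("PR_SET_NO_NEW_PRIVS");
		return 1;
	}
	/* The kernel JITs this cBPF program; with this patch, its JITed
	 * epilogue carries the BHB mitigation sequence on affected CPUs. */
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("PR_SET_SECCOMP");
		return 1;
	}

	puts("seccomp cBPF filter installed");
	return 0;
}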

3 files changed, 52 insertions(+), 5 deletions(-)

arch/arm64/include/asm/spectre.h

Lines changed: 1 addition & 0 deletions
@@ -96,6 +96,7 @@ enum mitigation_state arm64_get_meltdown_state(void);
 
 enum mitigation_state arm64_get_spectre_bhb_state(void);
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+extern bool __nospectre_bhb;
 u8 get_spectre_bhb_loop_value(void);
 bool is_spectre_bhb_fw_mitigated(void);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

arch/arm64/kernel/proton-pack.c

Lines changed: 1 addition & 1 deletion
@@ -1035,7 +1035,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
 	isb();
 }
 
-static bool __read_mostly __nospectre_bhb;
+bool __read_mostly __nospectre_bhb;
 static int __init parse_spectre_bhb_param(char *str)
 {
 	__nospectre_bhb = true;

arch/arm64/net/bpf_jit_comp.c

Lines changed: 50 additions & 4 deletions
@@ -7,6 +7,7 @@
 
 #define pr_fmt(fmt) "bpf_jit: " fmt
 
+#include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -17,6 +18,7 @@
 #include <asm/asm-extable.h>
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/patching.h>
@@ -653,7 +655,48 @@ static void build_plt(struct jit_ctx *ctx)
 	plt->target = (u64)&dummy_tramp;
 }
 
-static void build_epilogue(struct jit_ctx *ctx)
+/* Clobbers BPF registers 1-4, aka x0-x3 */
+static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
+{
+	const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
+	u8 k = get_spectre_bhb_loop_value();
+
+	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+	    cpu_mitigations_off() || __nospectre_bhb ||
+	    arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
+		return;
+
+	if (supports_clearbhb(SCOPE_SYSTEM)) {
+		emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
+		return;
+	}
+
+	if (k) {
+		emit_a64_mov_i64(r1, k, ctx);
+		emit(A64_B(1), ctx);
+		emit(A64_SUBS_I(true, r1, r1, 1), ctx);
+		emit(A64_B_(A64_COND_NE, -2), ctx);
+		emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
+		emit(aarch64_insn_get_isb_value(), ctx);
+	}
+
+	if (is_spectre_bhb_fw_mitigated()) {
+		emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
+			       ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
+		switch (arm_smccc_1_1_get_conduit()) {
+		case SMCCC_CONDUIT_HVC:
+			emit(aarch64_insn_get_hvc_value(), ctx);
+			break;
+		case SMCCC_CONDUIT_SMC:
+			emit(aarch64_insn_get_smc_value(), ctx);
+			break;
+		default:
+			pr_err_once("Firmware mitigation enabled with unknown conduit\n");
+		}
+	}
+}
+
+static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 {
 	const u8 r0 = bpf2a64[BPF_REG_0];
 	const u8 r6 = bpf2a64[BPF_REG_6];
@@ -675,10 +718,13 @@ static void build_epilogue(struct jit_ctx *ctx)
 	emit(A64_POP(r8, r9, A64_SP), ctx);
 	emit(A64_POP(r6, r7, A64_SP), ctx);
 
+	if (was_classic)
+		build_bhb_mitigation(ctx);
+
 	/* Restore FP/LR registers */
 	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
 
-	/* Set return value */
+	/* Move the return value from bpf:r0 (aka x7) to x0 */
 	emit(A64_MOV(1, A64_R(0), r0), ctx);
 
 	/* Authenticate lr */
@@ -1527,7 +1573,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 
 	ctx.epilogue_offset = ctx.idx;
-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);
 
 	extable_align = __alignof__(struct exception_table_entry);
@@ -1563,7 +1609,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		goto out_off;
 	}
 
-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);
 
 	/* 3. Extra pass to validate JITed code. */
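
Which variant build_bhb_mitigation() emits (the CLEARBHB hint, the branchy loop, or the firmware call) follows the system-wide Spectre-BHB state probed via get_spectre_bhb_loop_value() and is_spectre_bhb_fw_mitigated(). A minimal sketch for observing that state from user space, assuming the arm64 convention of folding Spectre-BHB status into the spectre_v2 vulnerabilities file; the example output string is illustrative:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* e.g. "Mitigation: CSV2, BHB" on a mitigated arm64 system */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}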
