7
7
8
8
#define pr_fmt (fmt ) "bpf_jit: " fmt
9
9
10
+ #include <linux/arm-smccc.h>
10
11
#include <linux/bitfield.h>
11
12
#include <linux/bpf.h>
12
13
#include <linux/filter.h>
17
18
#include <asm/asm-extable.h>
18
19
#include <asm/byteorder.h>
19
20
#include <asm/cacheflush.h>
21
+ #include <asm/cpufeature.h>
20
22
#include <asm/debug-monitors.h>
21
23
#include <asm/insn.h>
22
24
#include <asm/text-patching.h>
@@ -939,7 +941,48 @@ static void build_plt(struct jit_ctx *ctx)
939
941
plt -> target = (u64 )& dummy_tramp ;
940
942
}
941
943
942
- static void build_epilogue (struct jit_ctx * ctx )
944
/*
 * Emit the Spectre-BHB (branch history injection) mitigation sequence into
 * the program being JITed.  Called from the epilogue for classic (cBPF)
 * programs, before the return to the caller, so that a malicious program
 * cannot leave poisoned branch history behind.
 *
 * Clobbers BPF registers 1-4, aka x0-x3.
 */
static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
{
	const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
	u8 k = get_spectre_bhb_loop_value();

	/*
	 * Nothing to emit if the mitigation is compiled out, disabled on
	 * the command line (mitigations=off / nospectre_bhb), or if
	 * Spectre-v2 is left unmitigated anyway (SPECTRE_VULNERABLE), in
	 * which case clearing branch history would gain nothing.
	 */
	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    cpu_mitigations_off() || __nospectre_bhb ||
	    arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
		return;

	/*
	 * Preferred mitigation: a single CLRBHB hint instruction, when all
	 * CPUs in the system support it.
	 */
	if (supports_clearbhb(SCOPE_SYSTEM)) {
		emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
		return;
	}

	/*
	 * Loop mitigation: execute k taken branches to overwrite the
	 * branch history, then synchronize with DSB ISH + ISB.  The
	 * emitted sequence is:
	 *
	 *	mov	r1, #k
	 *   1:	b	2f		// taken branch each iteration
	 *   2:	subs	r1, r1, #1
	 *	b.ne	1b		// offset -2: back to the 'b'
	 *	dsb	ish
	 *	isb
	 */
	if (k) {
		emit_a64_mov_i64(r1, k, ctx);
		emit(A64_B(1), ctx);
		emit(A64_SUBS_I(true, r1, r1, 1), ctx);
		emit(A64_B_(A64_COND_NE, -2), ctx);
		emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
		emit(aarch64_insn_get_isb_value(), ctx);
	}

	/*
	 * Firmware mitigation: call ARM_SMCCC_ARCH_WORKAROUND_3 through
	 * the SMCCC conduit.  The function ID is materialized in w0 (r1
	 * is x0, see above) with an ORR against the zero register.  This
	 * may be emitted in addition to the loop above — presumably for
	 * systems where firmware completes the mitigation; matches the
	 * non-JIT kernel behaviour.
	 */
	if (is_spectre_bhb_fw_mitigated()) {
		emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
			       ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
		switch (arm_smccc_1_1_get_conduit()) {
		case SMCCC_CONDUIT_HVC:
			emit(aarch64_insn_get_hvc_value(), ctx);
			break;
		case SMCCC_CONDUIT_SMC:
			emit(aarch64_insn_get_smc_value(), ctx);
			break;
		default:
			/* Mitigation requested but no way to reach firmware. */
			pr_err_once("Firmware mitigation enabled with unknown conduit\n");
		}
	}
}
984
+
985
+ static void build_epilogue (struct jit_ctx * ctx , bool was_classic )
943
986
{
944
987
const u8 r0 = bpf2a64 [BPF_REG_0 ];
945
988
const u8 ptr = bpf2a64 [TCCNT_PTR ];
@@ -952,10 +995,13 @@ static void build_epilogue(struct jit_ctx *ctx)
952
995
953
996
emit (A64_POP (A64_ZR , ptr , A64_SP ), ctx );
954
997
998
+ if (was_classic )
999
+ build_bhb_mitigation (ctx );
1000
+
955
1001
/* Restore FP/LR registers */
956
1002
emit (A64_POP (A64_FP , A64_LR , A64_SP ), ctx );
957
1003
958
- /* Set return value */
1004
+ /* Move the return value from bpf:r0 (aka x7) to x0 */
959
1005
emit (A64_MOV (1 , A64_R (0 ), r0 ), ctx );
960
1006
961
1007
/* Authenticate lr */
@@ -1898,7 +1944,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1898
1944
}
1899
1945
1900
1946
ctx .epilogue_offset = ctx .idx ;
1901
- build_epilogue (& ctx );
1947
+ build_epilogue (& ctx , was_classic );
1902
1948
build_plt (& ctx );
1903
1949
1904
1950
extable_align = __alignof__(struct exception_table_entry );
@@ -1961,7 +2007,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1961
2007
goto out_free_hdr ;
1962
2008
}
1963
2009
1964
- build_epilogue (& ctx );
2010
+ build_epilogue (& ctx , was_classic );
1965
2011
build_plt (& ctx );
1966
2012
1967
2013
/* Extra pass to validate JITed code. */
0 commit comments