Commit 03c68a0

luisgerhorst authored and Alexei Starovoitov committed
bpf, arm64, powerpc: Add bpf_jit_bypass_spec_v1/v4()
JITs can set bpf_jit_bypass_spec_v1/v4() if they want the verifier to skip
analysis/patching for the respective vulnerability. For v4, this will reduce
the number of barriers the verifier inserts. For v1, it allows more programs
to be accepted.

The primary motivation for this is to not regress unpriv BPF's performance on
ARM64 in a future commit where BPF_NOSPEC is also used against Spectre v1.

This has the user-visible change that v1-induced rejections on non-vulnerable
PowerPC CPUs are avoided.

For now, this does not change the semantics of BPF_NOSPEC. It is still a
v4-only barrier and must not be implemented if bypass_spec_v4 is always true
for the arch. Changing it to a v1 AND v4-barrier is done in a future commit.

As an alternative to bypass_spec_v1/v4, one could introduce NOSPEC_V1 AND
NOSPEC_V4 instructions and allow backends to skip their lowering as suggested
by commit f5e81d1 ("bpf: Introduce BPF nospec instruction for mitigating
Spectre v4"). Adding bpf_jit_bypass_spec_v1/v4() was found to be preferable
for the following reasons:

* bypass_spec_v1/v4 benefits non-vulnerable CPUs: Always performing the same
  analysis (not taking into account whether the current CPU is vulnerable)
  needlessly restricts users of CPUs that are not vulnerable. The only use
  case for this would be portability-testing, but this can later be added
  easily when needed by allowing users to force bypass_spec_v1/v4 to false.

* Portability is still acceptable: Directly disabling the analysis instead of
  skipping the lowering of BPF_NOSPEC(_V1/V4) might allow programs on
  non-vulnerable CPUs to be accepted while the program will be rejected on
  vulnerable CPUs. With the fallback to speculation barriers for Spectre v1
  implemented in a future commit, this will only affect programs that do
  variable stack-accesses or are very complex.

For PowerPC, the SEC_FTR checking in bpf_jit_bypass_spec_v4() is based on the
check that was previously located in the BPF_NOSPEC case.

For LoongArch, it would likely be safe to set both bpf_jit_bypass_spec_v1()
and _v4() according to commit a6f6a95f2580 ("LoongArch, bpf: Fix jit to skip
speculation barrier opcode"). This is omitted here as I am unable to do any
testing for LoongArch.

Hari's ack concerns the PowerPC part only.

Signed-off-by: Luis Gerhorst <[email protected]>
Acked-by: Hari Bathini <[email protected]>
Cc: Henriette Herzog <[email protected]>
Cc: Maximilian Ott <[email protected]>
Cc: Milan Stephan <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Parent: 6b84d78
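
To see how the pieces in the diffs below fit together, here is a minimal,
self-contained user-space model of the resulting decision logic. This is an
illustrative sketch, not kernel code: cpu_mitigations_off() and
token_capable_perfmon() are stand-ins for the kernel's real helpers (the
latter for bpf_token_capable(token, CAP_PERFMON)), and the arm64-style v4
override is just one example configuration.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers (assumptions of this sketch only). */
static bool cpu_mitigations_off(void) { return false; }
static bool token_capable_perfmon(void) { return false; }

/* Weak defaults (kernel/bpf/core.c): keep the verifier's Spectre v1/v4
 * mitigations enabled unless an arch JIT overrides them. */
static bool bpf_jit_bypass_spec_v1(void) { return false; }

/* Modeling the arm64 override: the firmware SSB mitigation already covers
 * all kernel code, so verifier-inserted v4 barriers are unnecessary. */
static bool bpf_jit_bypass_spec_v4(void) { return true; }

/* Mirrors the updated wrappers in include/linux/bpf.h: the JIT hook, a
 * global mitigations=off boot setting, or CAP_PERFMON each suffice to
 * skip the respective analysis/patching. */
static bool bpf_bypass_spec_v1(void)
{
	return bpf_jit_bypass_spec_v1() ||
	       cpu_mitigations_off() ||
	       token_capable_perfmon();
}

static bool bpf_bypass_spec_v4(void)
{
	return bpf_jit_bypass_spec_v4() ||
	       cpu_mitigations_off() ||
	       token_capable_perfmon();
}

int main(void)
{
	printf("skip Spectre v1 analysis: %d\n", bpf_bypass_spec_v1()); /* 0 */
	printf("skip Spectre v4 patching: %d\n", bpf_bypass_spec_v4()); /* 1 */
	return 0;
}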

4 files changed: 53 additions, 15 deletions

arch/arm64/net/bpf_jit_comp.c
Lines changed: 12 additions & 9 deletions

@@ -1632,15 +1632,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 
 	/* speculation barrier */
 	case BPF_ST | BPF_NOSPEC:
-		/*
-		 * Nothing required here.
-		 *
-		 * In case of arm64, we rely on the firmware mitigation of
-		 * Speculative Store Bypass as controlled via the ssbd kernel
-		 * parameter. Whenever the mitigation is enabled, it works
-		 * for all of the kernel code with no need to provide any
-		 * additional instructions.
-		 */
+		/* See bpf_jit_bypass_spec_v4() */
		break;
 
 	/* ST: *(size *)(dst + off) = imm */
@@ -2911,6 +2903,17 @@ bool bpf_jit_supports_percpu_insn(void)
 	return true;
 }
 
+bool bpf_jit_bypass_spec_v4(void)
+{
+	/* In case of arm64, we rely on the firmware mitigation of Speculative
+	 * Store Bypass as controlled via the ssbd kernel parameter. Whenever
+	 * the mitigation is enabled, it works for all of the kernel code with
+	 * no need to provide any additional instructions. Therefore, skip
+	 * inserting nospec insns against Spectre v4.
+	 */
+	return true;
+}
+
 bool bpf_jit_inlines_helper_call(s32 imm)
 {
 	switch (imm) {

arch/powerpc/net/bpf_jit_comp64.c
Lines changed: 17 additions & 4 deletions

@@ -370,6 +370,23 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	return 0;
 }
 
+bool bpf_jit_bypass_spec_v1(void)
+{
+#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
+	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR));
+#else
+	return true;
+#endif
+}
+
+bool bpf_jit_bypass_spec_v4(void)
+{
+	return !(security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_STF_BARRIER) &&
+		 stf_barrier_type_get() != STF_BARRIER_NONE);
+}
+
 /*
  * We spill into the redzone always, even if the bpf program has its own stackframe.
  * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
@@ -791,10 +808,6 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
 		 * BPF_ST NOSPEC (speculation barrier)
 		 */
 		case BPF_ST | BPF_NOSPEC:
-			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
-			    !security_ftr_enabled(SEC_FTR_STF_BARRIER))
-				break;
-
 			switch (stf_barrier) {
 			case STF_BARRIER_EIEIO:
 				EMIT(PPC_RAW_EIEIO() | 0x02000000);

include/linux/bpf.h
Lines changed: 9 additions & 2 deletions

@@ -2288,6 +2288,9 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
 	return ret;
 }
 
+bool bpf_jit_bypass_spec_v1(void);
+bool bpf_jit_bypass_spec_v4(void);
+
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
 extern struct mutex bpf_stats_enabled_mutex;
@@ -2475,12 +2478,16 @@ static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
 
 static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
 {
-	return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+	return bpf_jit_bypass_spec_v1() ||
+	       cpu_mitigations_off() ||
+	       bpf_token_capable(token, CAP_PERFMON);
 }
 
 static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
 {
-	return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+	return bpf_jit_bypass_spec_v4() ||
+	       cpu_mitigations_off() ||
+	       bpf_token_capable(token, CAP_PERFMON);
 }
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
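
For context on how these wrappers are consumed: the verifier samples them once
at program load in kernel/bpf/verifier.c and caches the results in its env
flags, roughly as follows (paraphrased from the existing verifier code, not
part of this diff):

	env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
	env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);

Since the wrappers now also consult the JIT hooks, an arch returning true from
bpf_jit_bypass_spec_v4() suppresses BPF_NOSPEC insertion even for unprivileged
programs, which is exactly the "fewer barriers" effect described in the commit
message.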

kernel/bpf/core.c
Lines changed: 15 additions & 0 deletions

@@ -3034,6 +3034,21 @@ bool __weak bpf_jit_needs_zext(void)
 	return false;
 }
 
+/* By default, enable the verifier's mitigations against Spectre v1 and v4 for
+ * all archs. The value returned must not change at runtime as there is
+ * currently no support for reloading programs that were loaded without
+ * mitigations.
+ */
+bool __weak bpf_jit_bypass_spec_v1(void)
+{
+	return false;
+}
+
+bool __weak bpf_jit_bypass_spec_v4(void)
+{
+	return false;
+}
+
 /* Return true if the JIT inlines the call to the helper corresponding to
  * the imm.
  *
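
As a follow-up illustration of the commit message's LoongArch remark: a
hypothetical, untested override for arch/loongarch/net/bpf_jit.c, in line with
commit a6f6a95f2580 ("LoongArch, bpf: Fix jit to skip speculation barrier
opcode"), might look like the sketch below. It is deliberately not part of
this commit, since the author could not test on LoongArch.

bool bpf_jit_bypass_spec_v1(void)
{
	/* Untested assumption: per the commit message, likely safe because
	 * a6f6a95f2580 already skips the speculation barrier opcode. */
	return true;
}

bool bpf_jit_bypass_spec_v4(void)
{
	/* Untested assumption: BPF_NOSPEC is not lowered on LoongArch. */
	return true;
}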
