Skip to content

Commit c704cf2

Browse files
committed
Merge branch 'for-next/alternatives' into for-next/core
* for-next/alternatives: Alternatives (code patching) improvements
  arm64: fix the build with binutils 2.27
  arm64: avoid BUILD_BUG_ON() in alternative-macros
  arm64: alternatives: add shared NOP callback
  arm64: alternatives: add alternative_has_feature_*()
  arm64: alternatives: have callbacks take a cap
  arm64: alternatives: make alt_region const
  arm64: alternatives: hoist print out of __apply_alternatives()
  arm64: alternatives: proton-pack: prepare for cap changes
  arm64: alternatives: kvm: prepare for cap changes
  arm64: cpufeature: make cpus_have_cap() noinstr-safe
2 parents c397623 + ba00c2a commit c704cf2

File tree

13 files changed

+146
-95
lines changed

13 files changed

+146
-95
lines changed

arch/arm64/include/asm/alternative-macros.h

Lines changed: 60 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,22 @@
22
#ifndef __ASM_ALTERNATIVE_MACROS_H
33
#define __ASM_ALTERNATIVE_MACROS_H
44

5+
#include <linux/bits.h>
6+
#include <linux/const.h>
7+
58
#include <asm/cpucaps.h>
69
#include <asm/insn-def.h>
710

8-
#define ARM64_CB_PATCH ARM64_NCAPS
11+
/*
12+
* Binutils 2.27.0 can't handle a 'UL' suffix on constants, so for the assembly
13+
* macros below we must use `(1 << ARM64_CB_SHIFT)`.
14+
*/
15+
#define ARM64_CB_SHIFT 15
16+
#define ARM64_CB_BIT BIT(ARM64_CB_SHIFT)
17+
18+
#if ARM64_NCAPS >= ARM64_CB_BIT
19+
#error "cpucaps have overflown ARM64_CB_BIT"
20+
#endif
921

1022
#ifndef __ASSEMBLY__
1123

@@ -73,16 +85,16 @@
7385
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
7486
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
7587

76-
#define ALTERNATIVE_CB(oldinstr, cb) \
77-
__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
88+
#define ALTERNATIVE_CB(oldinstr, feature, cb) \
89+
__ALTERNATIVE_CFG_CB(oldinstr, (1 << ARM64_CB_SHIFT) | (feature), 1, cb)
7890
#else
7991

8092
#include <asm/assembler.h>
8193

8294
.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
8395
.word \orig_offset - .
8496
.word \alt_offset - .
85-
.hword \feature
97+
.hword (\feature)
8698
.byte \orig_len
8799
.byte \alt_len
88100
.endm
@@ -141,10 +153,10 @@
141153
661:
142154
.endm
143155

144-
.macro alternative_cb cb
156+
.macro alternative_cb cap, cb
145157
.set .Lasm_alt_mode, 0
146158
.pushsection .altinstructions, "a"
147-
altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
159+
altinstruction_entry 661f, \cb, (1 << ARM64_CB_SHIFT) | \cap, 662f-661f, 0
148160
.popsection
149161
661:
150162
.endm
@@ -207,4 +219,46 @@ alternative_endif
207219
#define ALTERNATIVE(oldinstr, newinstr, ...) \
208220
_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
209221

222+
#ifndef __ASSEMBLY__
223+
224+
#include <linux/types.h>
225+
226+
static __always_inline bool
227+
alternative_has_feature_likely(unsigned long feature)
228+
{
229+
compiletime_assert(feature < ARM64_NCAPS,
230+
"feature must be < ARM64_NCAPS");
231+
232+
asm_volatile_goto(
233+
ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops)
234+
:
235+
: [feature] "i" (feature)
236+
:
237+
: l_no);
238+
239+
return true;
240+
l_no:
241+
return false;
242+
}
243+
244+
static __always_inline bool
245+
alternative_has_feature_unlikely(unsigned long feature)
246+
{
247+
compiletime_assert(feature < ARM64_NCAPS,
248+
"feature must be < ARM64_NCAPS");
249+
250+
asm_volatile_goto(
251+
ALTERNATIVE("nop", "b %l[l_yes]", %[feature])
252+
:
253+
: [feature] "i" (feature)
254+
:
255+
: l_yes);
256+
257+
return false;
258+
l_yes:
259+
return true;
260+
}
261+
262+
#endif /* __ASSEMBLY__ */
263+
210264
#endif /* __ASM_ALTERNATIVE_MACROS_H */

arch/arm64/include/asm/assembler.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -293,7 +293,7 @@ alternative_endif
293293
alternative_if_not ARM64_KVM_PROTECTED_MODE
294294
ASM_BUG()
295295
alternative_else_nop_endif
296-
alternative_cb kvm_compute_final_ctr_el0
296+
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
297297
movz \reg, #0
298298
movk \reg, #0, lsl #16
299299
movk \reg, #0, lsl #32
@@ -877,7 +877,7 @@ alternative_endif
877877

878878
.macro __mitigate_spectre_bhb_loop tmp
879879
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
880-
alternative_cb spectre_bhb_patch_loop_iter
880+
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
881881
mov \tmp, #32 // Patched to correct the immediate
882882
alternative_cb_end
883883
.Lspectre_bhb_loop\@:
@@ -890,7 +890,7 @@ alternative_cb_end
890890

891891
.macro mitigate_spectre_bhb_loop tmp
892892
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
893-
alternative_cb spectre_bhb_patch_loop_mitigation_enable
893+
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
894894
b .L_spectre_bhb_loop_done\@ // Patched to NOP
895895
alternative_cb_end
896896
__mitigate_spectre_bhb_loop \tmp
@@ -904,7 +904,7 @@ alternative_cb_end
904904
stp x0, x1, [sp, #-16]!
905905
stp x2, x3, [sp, #-16]!
906906
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
907-
alternative_cb smccc_patch_fw_mitigation_conduit
907+
alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
908908
nop // Patched to SMC/HVC #0
909909
alternative_cb_end
910910
ldp x2, x3, [sp], #16
@@ -914,7 +914,7 @@ alternative_cb_end
914914

915915
.macro mitigate_spectre_bhb_clear_insn
916916
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
917-
alternative_cb spectre_bhb_patch_clearbhb
917+
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
918918
/* Patched to NOP when not supported */
919919
clearbhb
920920
isb

arch/arm64/include/asm/cpufeature.h

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#ifndef __ASM_CPUFEATURE_H
77
#define __ASM_CPUFEATURE_H
88

9+
#include <asm/alternative-macros.h>
910
#include <asm/cpucaps.h>
1011
#include <asm/cputype.h>
1112
#include <asm/hwcap.h>
@@ -419,12 +420,8 @@ static __always_inline bool is_hyp_code(void)
419420
}
420421

421422
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
422-
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
423-
extern struct static_key_false arm64_const_caps_ready;
424423

425-
/* ARM64 CAPS + alternative_cb */
426-
#define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
427-
extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
424+
extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);
428425

429426
#define for_each_available_cap(cap) \
430427
for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
@@ -440,19 +437,19 @@ unsigned long cpu_get_elf_hwcap2(void);
440437

441438
static __always_inline bool system_capabilities_finalized(void)
442439
{
443-
return static_branch_likely(&arm64_const_caps_ready);
440+
return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
444441
}
445442

446443
/*
447444
* Test for a capability with a runtime check.
448445
*
449446
* Before the capability is detected, this returns false.
450447
*/
451-
static inline bool cpus_have_cap(unsigned int num)
448+
static __always_inline bool cpus_have_cap(unsigned int num)
452449
{
453450
if (num >= ARM64_NCAPS)
454451
return false;
455-
return test_bit(num, cpu_hwcaps);
452+
return arch_test_bit(num, cpu_hwcaps);
456453
}
457454

458455
/*
@@ -467,7 +464,7 @@ static __always_inline bool __cpus_have_const_cap(int num)
467464
{
468465
if (num >= ARM64_NCAPS)
469466
return false;
470-
return static_branch_unlikely(&cpu_hwcap_keys[num]);
467+
return alternative_has_feature_unlikely(num);
471468
}
472469

473470
/*

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@
6363
* specific registers encoded in the instructions).
6464
*/
6565
.macro kern_hyp_va reg
66-
alternative_cb kvm_update_va_mask
66+
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
6767
and \reg, \reg, #1 /* mask with va_mask */
6868
ror \reg, \reg, #1 /* rotate to the first tag bit */
6969
add \reg, \reg, #0 /* insert the low 12 bits of the tag */
@@ -97,7 +97,7 @@ alternative_cb_end
9797
hyp_pa \reg, \tmp
9898

9999
/* Load kimage_voffset. */
100-
alternative_cb kvm_get_kimage_voffset
100+
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
101101
movz \tmp, #0
102102
movk \tmp, #0, lsl #16
103103
movk \tmp, #0, lsl #32
@@ -131,6 +131,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
131131
"add %0, %0, #0\n"
132132
"add %0, %0, #0, lsl 12\n"
133133
"ror %0, %0, #63\n",
134+
ARM64_ALWAYS_SYSTEM,
134135
kvm_update_va_mask)
135136
: "+r" (v));
136137
return v;

arch/arm64/include/asm/lse.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,13 @@
1313
#include <linux/jump_label.h>
1414
#include <linux/stringify.h>
1515
#include <asm/alternative.h>
16+
#include <asm/alternative-macros.h>
1617
#include <asm/atomic_lse.h>
1718
#include <asm/cpucaps.h>
1819

19-
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
20-
2120
static __always_inline bool system_uses_lse_atomics(void)
2221
{
23-
return static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
22+
return alternative_has_feature_likely(ARM64_HAS_LSE_ATOMICS);
2423
}
2524

2625
#define __lse_ll_sc_body(op, ...) \

0 commit comments

Comments
 (0)