
Commit 0829a07

Merge branch 'for-next/asm-annotations' into for-next/core
* for-next/asm-annotations:
  : Modernise arm64 assembly annotations
  arm64: head: Convert install_el2_stub to SYM_INNER_LABEL
  arm64: Mark call_smc_arch_workaround_1 as __maybe_unused
  arm64: entry-ftrace.S: Fix missing argument for CONFIG_FUNCTION_GRAPH_TRACER=y
  arm64: vdso32: Convert to modern assembler annotations
  arm64: vdso: Convert to modern assembler annotations
  arm64: sdei: Annotate SDEI entry points using new style annotations
  arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations
  arm64: kvm: Modernize annotation for __bp_harden_hyp_vecs
  arm64: kvm: Annotate assembly using modern annoations
  arm64: kernel: Convert to modern annotations for assembly data
  arm64: head: Annotate stext and preserve_boot_args as code
  arm64: head.S: Convert to modern annotations for assembly functions
  arm64: ftrace: Modernise annotation of return_to_handler
  arm64: ftrace: Correct annotation of ftrace_caller assembly
  arm64: entry-ftrace.S: Convert to modern annotations for assembly functions
  arm64: entry: Additional annotation conversions for entry.S
  arm64: entry: Annotate ret_from_fork as code
  arm64: entry: Annotate vector table and handlers as code
  arm64: crypto: Modernize names for AES function macros
  arm64: crypto: Modernize some extra assembly annotations
2 parents da12d27 + d4abd29, commit 0829a07

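For context, every conversion in this branch moves from the legacy ENTRY()/ENDPROC() annotations to the SYM_*() macros from <linux/linkage.h>, which state a symbol's linkage and type explicitly. A minimal before/after sketch of the idiom (example_func and example_helper are illustrative names, not symbols touched by this merge):

	/* old style: global linkage implied, symbol type left to the assembler */
	ENTRY(example_func)
		ret
	ENDPROC(example_func)

	/* new style: explicitly a global function with begin/end markers */
	SYM_FUNC_START(example_func)
		ret
	SYM_FUNC_END(example_func)

	/* file-local helpers use the _LOCAL variant instead of a bare label */
	SYM_FUNC_START_LOCAL(example_helper)
		ret
	SYM_FUNC_END(example_helper)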
17 files changed (+212, -207 lines)

arch/arm64/crypto/aes-ce.S (2 additions, 2 deletions)

@@ -9,8 +9,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>

-#define AES_ENTRY(func)		SYM_FUNC_START(ce_ ## func)
-#define AES_ENDPROC(func)	SYM_FUNC_END(ce_ ## func)
+#define AES_FUNC_START(func)	SYM_FUNC_START(ce_ ## func)
+#define AES_FUNC_END(func)	SYM_FUNC_END(ce_ ## func)

 	.arch		armv8-a+crypto

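A note on the macro rename above: the shared mode code in aes-modes.S (next file) is pulled into both the Crypto Extensions and plain-NEON builds, each of which defines these macros with its own prefix first. So, for example, AES_FUNC_START(aes_ecb_encrypt) expands to SYM_FUNC_START(ce_aes_ecb_encrypt) under this file's definition and to SYM_FUNC_START(neon_aes_ecb_encrypt) under aes-neon.S's, keeping the two backends' symbols distinct.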
arch/arm64/crypto/aes-modes.S (24 additions, 24 deletions)

@@ -51,7 +51,7 @@ SYM_FUNC_END(aes_decrypt_block5x)
  *		   int blocks)
  */

-AES_ENTRY(aes_ecb_encrypt)
+AES_FUNC_START(aes_ecb_encrypt)
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

@@ -79,10 +79,10 @@ ST5(	st1	{v4.16b}, [x0], #16	)
 .Lecbencout:
 	ldp	x29, x30, [sp], #16
 	ret
-AES_ENDPROC(aes_ecb_encrypt)
+AES_FUNC_END(aes_ecb_encrypt)


-AES_ENTRY(aes_ecb_decrypt)
+AES_FUNC_START(aes_ecb_decrypt)
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

@@ -110,7 +110,7 @@ ST5(	st1	{v4.16b}, [x0], #16	)
 .Lecbdecout:
 	ldp	x29, x30, [sp], #16
 	ret
-AES_ENDPROC(aes_ecb_decrypt)
+AES_FUNC_END(aes_ecb_decrypt)


 /*
@@ -126,7 +126,7 @@ AES_ENDPROC(aes_ecb_decrypt)
  *		   u32 const rk2[]);
  */

-AES_ENTRY(aes_essiv_cbc_encrypt)
+AES_FUNC_START(aes_essiv_cbc_encrypt)
 	ld1	{v4.16b}, [x5]			/* get iv */

 	mov	w8, #14				/* AES-256: 14 rounds */
@@ -135,7 +135,7 @@ AES_ENTRY(aes_essiv_cbc_encrypt)
 	enc_switch_key	w3, x2, x6
 	b	.Lcbcencloop4x

-AES_ENTRY(aes_cbc_encrypt)
+AES_FUNC_START(aes_cbc_encrypt)
 	ld1	{v4.16b}, [x5]			/* get iv */
 	enc_prepare	w3, x2, x6

@@ -167,10 +167,10 @@ AES_ENTRY(aes_cbc_encrypt)
 .Lcbcencout:
 	st1	{v4.16b}, [x5]			/* return iv */
 	ret
-AES_ENDPROC(aes_cbc_encrypt)
-AES_ENDPROC(aes_essiv_cbc_encrypt)
+AES_FUNC_END(aes_cbc_encrypt)
+AES_FUNC_END(aes_essiv_cbc_encrypt)

-AES_ENTRY(aes_essiv_cbc_decrypt)
+AES_FUNC_START(aes_essiv_cbc_decrypt)
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

@@ -181,7 +181,7 @@ AES_ENTRY(aes_essiv_cbc_decrypt)
 	encrypt_block	cbciv, w8, x6, x7, w9
 	b	.Lessivcbcdecstart

-AES_ENTRY(aes_cbc_decrypt)
+AES_FUNC_START(aes_cbc_decrypt)
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

@@ -238,8 +238,8 @@ ST5(	st1	{v4.16b}, [x0], #16	)
 	st1	{cbciv.16b}, [x5]		/* return iv */
 	ldp	x29, x30, [sp], #16
 	ret
-AES_ENDPROC(aes_cbc_decrypt)
-AES_ENDPROC(aes_essiv_cbc_decrypt)
+AES_FUNC_END(aes_cbc_decrypt)
+AES_FUNC_END(aes_essiv_cbc_decrypt)


 /*
@@ -249,7 +249,7 @@ AES_ENDPROC(aes_essiv_cbc_decrypt)
  *		       int rounds, int bytes, u8 const iv[])
  */

-AES_ENTRY(aes_cbc_cts_encrypt)
+AES_FUNC_START(aes_cbc_cts_encrypt)
 	adr_l	x8, .Lcts_permute_table
 	sub	x4, x4, #16
 	add	x9, x8, #32
@@ -276,9 +276,9 @@ AES_ENTRY(aes_cbc_cts_encrypt)
 	st1	{v0.16b}, [x4]			/* overlapping stores */
 	st1	{v1.16b}, [x0]
 	ret
-AES_ENDPROC(aes_cbc_cts_encrypt)
+AES_FUNC_END(aes_cbc_cts_encrypt)

-AES_ENTRY(aes_cbc_cts_decrypt)
+AES_FUNC_START(aes_cbc_cts_decrypt)
 	adr_l	x8, .Lcts_permute_table
 	sub	x4, x4, #16
 	add	x9, x8, #32
@@ -305,7 +305,7 @@ AES_ENTRY(aes_cbc_cts_decrypt)
 	st1	{v2.16b}, [x4]			/* overlapping stores */
 	st1	{v0.16b}, [x0]
 	ret
-AES_ENDPROC(aes_cbc_cts_decrypt)
+AES_FUNC_END(aes_cbc_cts_decrypt)

 	.section	".rodata", "a"
 	.align		6
@@ -324,7 +324,7 @@ AES_ENDPROC(aes_cbc_cts_decrypt)
  *		   int blocks, u8 ctr[])
  */

-AES_ENTRY(aes_ctr_encrypt)
+AES_FUNC_START(aes_ctr_encrypt)
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

@@ -409,7 +409,7 @@ ST5(	st1	{v4.16b}, [x0], #16	)
 	rev	x7, x7
 	ins	vctr.d[0], x7
 	b	.Lctrcarrydone
-AES_ENDPROC(aes_ctr_encrypt)
+AES_FUNC_END(aes_ctr_encrypt)


 /*
@@ -433,7 +433,7 @@ AES_ENDPROC(aes_ctr_encrypt)
 	uzp1	xtsmask.4s, xtsmask.4s, \tmp\().4s
 	.endm

-AES_ENTRY(aes_xts_encrypt)
+AES_FUNC_START(aes_xts_encrypt)
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

@@ -518,9 +518,9 @@ AES_ENTRY(aes_xts_encrypt)
 	st1	{v2.16b}, [x4]			/* overlapping stores */
 	mov	w4, wzr
 	b	.Lxtsencctsout
-AES_ENDPROC(aes_xts_encrypt)
+AES_FUNC_END(aes_xts_encrypt)

-AES_ENTRY(aes_xts_decrypt)
+AES_FUNC_START(aes_xts_decrypt)
 	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

@@ -612,13 +612,13 @@ AES_ENTRY(aes_xts_decrypt)
 	st1	{v2.16b}, [x4]			/* overlapping stores */
 	mov	w4, wzr
 	b	.Lxtsdecctsout
-AES_ENDPROC(aes_xts_decrypt)
+AES_FUNC_END(aes_xts_decrypt)

 /*
  * aes_mac_update(u8 const in[], u32 const rk[], int rounds,
  *		  int blocks, u8 dg[], int enc_before, int enc_after)
  */
-AES_ENTRY(aes_mac_update)
+AES_FUNC_START(aes_mac_update)
 	frame_push	6

 	mov	x19, x0
@@ -676,4 +676,4 @@ AES_ENTRY(aes_mac_update)
 	ld1	{v0.16b}, [x23]			/* get dg */
 	enc_prepare	w21, x20, x0
 	b	.Lmacloop4x
-AES_ENDPROC(aes_mac_update)
+AES_FUNC_END(aes_mac_update)

arch/arm64/crypto/aes-neon.S (2 additions, 2 deletions)

@@ -8,8 +8,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>

-#define AES_ENTRY(func)		SYM_FUNC_START(neon_ ## func)
-#define AES_ENDPROC(func)	SYM_FUNC_END(neon_ ## func)
+#define AES_FUNC_START(func)	SYM_FUNC_START(neon_ ## func)
+#define AES_FUNC_END(func)	SYM_FUNC_END(neon_ ## func)

 	xtsmask		.req	v7
 	cbciv		.req	v7

arch/arm64/crypto/ghash-ce-core.S (8 additions, 8 deletions)

@@ -587,20 +587,20 @@ CPU_LE(	rev	w8, w8	)
  *			  struct ghash_key const *k, u64 dg[], u8 ctr[],
  *			  int rounds, u8 tag)
  */
-ENTRY(pmull_gcm_encrypt)
+SYM_FUNC_START(pmull_gcm_encrypt)
 	pmull_gcm_do_crypt	1
-ENDPROC(pmull_gcm_encrypt)
+SYM_FUNC_END(pmull_gcm_encrypt)

 /*
  * void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
  *			  struct ghash_key const *k, u64 dg[], u8 ctr[],
  *			  int rounds, u8 tag)
  */
-ENTRY(pmull_gcm_decrypt)
+SYM_FUNC_START(pmull_gcm_decrypt)
 	pmull_gcm_do_crypt	0
-ENDPROC(pmull_gcm_decrypt)
+SYM_FUNC_END(pmull_gcm_decrypt)

-pmull_gcm_ghash_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x)
 	movi	MASK.16b, #0xe1
 	shl	MASK.2d, MASK.2d, #57

@@ -681,9 +681,9 @@ pmull_gcm_ghash_4x:
 	eor	XL.16b, XL.16b, T2.16b

 	ret
-ENDPROC(pmull_gcm_ghash_4x)
+SYM_FUNC_END(pmull_gcm_ghash_4x)

-pmull_gcm_enc_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x)
 	ld1	{KS0.16b}, [x5]			// load upper counter
 	sub	w10, w8, #4
 	sub	w11, w8, #3
@@ -746,7 +746,7 @@ pmull_gcm_enc_4x:
 	eor	INP3.16b, INP3.16b, KS3.16b

 	ret
-ENDPROC(pmull_gcm_enc_4x)
+SYM_FUNC_END(pmull_gcm_enc_4x)

 	.section	".rodata", "a"
 	.align		6

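The ghash-ce-core.S hunks show both annotation flavours: pmull_gcm_encrypt and pmull_gcm_decrypt are entry points called from C and get SYM_FUNC_START, while pmull_gcm_ghash_4x and pmull_gcm_enc_4x are only reached from within this file, so SYM_FUNC_START_LOCAL replaces their bare labels. Roughly, and much simplified from <linux/linkage.h> (the real macros also cooperate with SYM_FUNC_END to emit symbol type and size):

	SYM_FUNC_START(name)        ->  .globl name ; .align 4 ; name:
	SYM_FUNC_START_LOCAL(name)  ->  .align 4 ; name:        /* no .globl */

Besides documenting intent, the explicit end marker gives these routines proper symbol-size metadata, which bare labels like the old pmull_gcm_ghash_4x: never had.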
arch/arm64/include/asm/kvm_asm.h (4 additions, 0 deletions)

@@ -36,6 +36,8 @@
  */
 #define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

+#define __SMCCC_WORKAROUND_1_SMC_SZ 36
+
 #ifndef __ASSEMBLY__

 #include <linux/mm.h>
@@ -75,6 +77,8 @@ extern void __vgic_v3_init_lrs(void);

 extern u32 __kvm_get_mdcr_el2(void);

+extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
+
 /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
 #define __hyp_this_cpu_ptr(sym)						\
 	({								\

arch/arm64/include/asm/kvm_mmu.h (4 additions, 5 deletions)

@@ -480,7 +480,7 @@ static inline void *kvm_get_hyp_vector(void)
 	int slot = -1;

 	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
-		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}

@@ -509,14 +509,13 @@ static inline int kvm_map_vectors(void)
 	 * HBP + HEL2 -> use hardened vertors and use exec mapping
 	 */
 	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
 		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
 	}

 	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
-		unsigned long size = (__bp_harden_hyp_vecs_end -
-				      __bp_harden_hyp_vecs_start);
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

 		/*
 		 * Always allocate a spare vector slot, as we don't

arch/arm64/include/asm/mmu.h (3 additions, 1 deletion)

@@ -13,6 +13,7 @@
 #define TTBR_ASID_MASK	(UL(0xffff) << 48)

 #define BP_HARDEN_EL2_SLOTS 4
+#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)

 #ifndef __ASSEMBLY__

@@ -45,7 +46,8 @@ struct bp_hardening_data {

 #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
      defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+extern char __bp_harden_hyp_vecs[];
 extern atomic_t arm64_el2_vector_last_slot;
 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */

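The new constant makes the vector-area size a compile-time fact shared between C and assembly: with BP_HARDEN_EL2_SLOTS = 4 slots of SZ_2K bytes each (matching the slot * SZ_2K indexing in __copy_hyp_vect_bpi below), __BP_HARDEN_HYP_VECS_SZ works out to 4 * 2048 bytes = 8 KiB. That replaces the run-time subtraction of the __bp_harden_hyp_vecs_start/_end linker symbols seen in kvm_mmu.h above.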
arch/arm64/kernel/cpu_errata.c (8 additions, 10 deletions)

@@ -11,6 +11,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>

 static bool __maybe_unused
@@ -113,13 +114,10 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

 #ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
-
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
 {
-	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
 	int i;

 	for (i = 0; i < SZ_2K; i += 0x80)
@@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 	raw_spin_unlock(&bp_lock);
 }
 #else
-#define __smccc_workaround_1_smc_start		NULL
-#define __smccc_workaround_1_smc_end		NULL
-
 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 				    const char *hyp_vecs_start,
 				    const char *hyp_vecs_end)
@@ -176,7 +171,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,

 #include <linux/arm-smccc.h>

-static void call_smc_arch_workaround_1(void)
+static void __maybe_unused call_smc_arch_workaround_1(void)
 {
 	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
@@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void)
 		smccc_end = NULL;
 		break;

+#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
 	case SMCCC_CONDUIT_SMC:
 		cb = call_smc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_smc_start;
-		smccc_end = __smccc_workaround_1_smc_end;
+		smccc_start = __smccc_workaround_1_smc;
+		smccc_end = __smccc_workaround_1_smc +
+			__SMCCC_WORKAROUND_1_SMC_SZ;
 		break;
+#endif

 	default:
 		return -1;

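Both symbol conversions in this merge follow the same idiom: a pair of start/end linker symbols becomes a single extern char array whose length is a named constant visible to both C and assembly. A minimal sketch of the pattern, using the names introduced in the kvm_asm.h hunk above (the copy_workaround helper is illustrative, not from this diff):

	#include <linux/string.h>	/* memcpy */

	#define __SMCCC_WORKAROUND_1_SMC_SZ	36

	/* one sized symbol instead of separate _start[]/_end[] externs */
	extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

	static void copy_workaround(void *dst)
	{
		/* the end address is derived, not a second linker symbol */
		memcpy(dst, __smccc_workaround_1_smc,
		       __SMCCC_WORKAROUND_1_SMC_SZ);
	}

The sized declaration also gives the C side real array-bounds information, where the old paired externs were just two opaque addresses.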