Skip to content

Commit c50cb04

Browse files
David Brazdil authored and Marc Zyngier committed
KVM: arm64: Remove __hyp_text macro, use build rules instead
With nVHE code now fully separated from the rest of the kernel, the effects of the __hyp_text macro (which had to be applied on all nVHE code) can be achieved with build rules instead. The macro used to: (a) move code to .hyp.text ELF section, now done by renaming .text using `objcopy`, and (b) `notrace` and `__noscs` would negate effects of CC_FLAGS_FTRACE and CC_FLAGS_SCS, respectivelly, now those flags are erased from KBUILD_CFLAGS (same way as in EFI stub). Note that by removing __hyp_text from code shared with VHE, all VHE code is now compiled into .text and without `notrace` and `__noscs`. Use of '.pushsection .hyp.text' removed from assembly files as this is now also covered by the build rules. For MAINTAINERS: if needed to re-run, uses of macro were removed with the following command. Formatting was fixed up manually. find arch/arm64/kvm/hyp -type f -name '*.c' -o -name '*.h' \ -exec sed -i 's/ __hyp_text//g' {} + Signed-off-by: David Brazdil <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent c04dd45 commit c50cb04

File tree

17 files changed

+132
-147
lines changed

17 files changed

+132
-147
lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -516,7 +516,7 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
516516
* Skip an instruction which has been emulated at hyp while most guest sysregs
517517
* are live.
518518
*/
519-
static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
519+
static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
520520
{
521521
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
522522
vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,6 @@
1212
#include <asm/alternative.h>
1313
#include <asm/sysreg.h>
1414

15-
#define __hyp_text __section(.hyp.text) notrace __noscs
16-
1715
#define read_sysreg_elx(r,nvh,vh) \
1816
({ \
1917
u64 reg; \

arch/arm64/kvm/hyp/aarch32.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ static const unsigned short cc_map[16] = {
4444
/*
4545
* Check if a trapped instruction should have been executed or not.
4646
*/
47-
bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
47+
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
4848
{
4949
unsigned long cpsr;
5050
u32 cpsr_cond;
@@ -93,7 +93,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
9393
*
9494
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
9595
*/
96-
static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
96+
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
9797
{
9898
unsigned long itbits, cond;
9999
unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
123123
* kvm_skip_instr - skip a trapped instruction and proceed to the next
124124
* @vcpu: The vcpu pointer
125125
*/
126-
void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
126+
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
127127
{
128128
u32 pc = *vcpu_pc(vcpu);
129129
bool is_thumb;

arch/arm64/kvm/hyp/entry.S

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
#define CPU_SP_EL0_OFFSET (CPU_XREG_OFFSET(30) + 8)
2222

2323
.text
24-
.pushsection .hyp.text, "ax"
2524

2625
/*
2726
* We treat x18 as callee-saved as the host may use it as a platform

arch/arm64/kvm/hyp/fpsimd.S

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99
#include <asm/fpsimdmacros.h>
1010

1111
.text
12-
.pushsection .hyp.text, "ax"
1312

1413
SYM_FUNC_START(__fpsimd_save_state)
1514
fpsimd_save x0, 1

arch/arm64/kvm/hyp/hyp-entry.S

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616
#include <asm/mmu.h>
1717

1818
.text
19-
.pushsection .hyp.text, "ax"
2019

2120
.macro do_el2_call
2221
/*

arch/arm64/kvm/hyp/include/hyp/debug-sr.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -88,9 +88,9 @@
8888
default: write_debug(ptr[0], reg, 0); \
8989
}
9090

91-
static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
92-
struct kvm_guest_debug_arch *dbg,
93-
struct kvm_cpu_context *ctxt)
91+
static inline void __debug_save_state(struct kvm_vcpu *vcpu,
92+
struct kvm_guest_debug_arch *dbg,
93+
struct kvm_cpu_context *ctxt)
9494
{
9595
u64 aa64dfr0;
9696
int brps, wrps;
@@ -107,9 +107,9 @@ static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
107107
ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
108108
}
109109

110-
static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
111-
struct kvm_guest_debug_arch *dbg,
112-
struct kvm_cpu_context *ctxt)
110+
static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
111+
struct kvm_guest_debug_arch *dbg,
112+
struct kvm_cpu_context *ctxt)
113113
{
114114
u64 aa64dfr0;
115115
int brps, wrps;
@@ -127,7 +127,7 @@ static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
127127
write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
128128
}
129129

130-
static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
130+
static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
131131
{
132132
struct kvm_cpu_context *host_ctxt;
133133
struct kvm_cpu_context *guest_ctxt;
@@ -146,7 +146,7 @@ static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vc
146146
__debug_restore_state(vcpu, guest_dbg, guest_ctxt);
147147
}
148148

149-
static inline void __hyp_text __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
149+
static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
150150
{
151151
struct kvm_cpu_context *host_ctxt;
152152
struct kvm_cpu_context *guest_ctxt;

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
extern const char __hyp_panic_string[];
3131

3232
/* Check whether the FP regs were dirtied while in the host-side run loop: */
33-
static inline bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
33+
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
3434
{
3535
/*
3636
* When the system doesn't support FP/SIMD, we cannot rely on
@@ -48,15 +48,15 @@ static inline bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
4848
}
4949

5050
/* Save the 32-bit only FPSIMD system register state */
51-
static inline void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
51+
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
5252
{
5353
if (!vcpu_el1_is_32bit(vcpu))
5454
return;
5555

5656
vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
5757
}
5858

59-
static inline void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
59+
static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
6060
{
6161
/*
6262
* We are about to set CPTR_EL2.TFP to trap all floating point
@@ -73,7 +73,7 @@ static inline void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
7373
}
7474
}
7575

76-
static inline void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
76+
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
7777
{
7878
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
7979
write_sysreg(1 << 15, hstr_el2);
@@ -89,13 +89,13 @@ static inline void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
8989
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
9090
}
9191

92-
static inline void __hyp_text __deactivate_traps_common(void)
92+
static inline void __deactivate_traps_common(void)
9393
{
9494
write_sysreg(0, hstr_el2);
9595
write_sysreg(0, pmuserenr_el0);
9696
}
9797

98-
static inline void __hyp_text ___activate_traps(struct kvm_vcpu *vcpu)
98+
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
9999
{
100100
u64 hcr = vcpu->arch.hcr_el2;
101101

@@ -108,7 +108,7 @@ static inline void __hyp_text ___activate_traps(struct kvm_vcpu *vcpu)
108108
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
109109
}
110110

111-
static inline void __hyp_text ___deactivate_traps(struct kvm_vcpu *vcpu)
111+
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
112112
{
113113
/*
114114
* If we pended a virtual abort, preserve it until it gets
@@ -122,12 +122,12 @@ static inline void __hyp_text ___deactivate_traps(struct kvm_vcpu *vcpu)
122122
}
123123
}
124124

125-
static inline void __hyp_text __activate_vm(struct kvm *kvm)
125+
static inline void __activate_vm(struct kvm *kvm)
126126
{
127127
__load_guest_stage2(kvm);
128128
}
129129

130-
static inline bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
130+
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
131131
{
132132
u64 par, tmp;
133133

@@ -156,7 +156,7 @@ static inline bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
156156
return true;
157157
}
158158

159-
static inline bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
159+
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
160160
{
161161
u8 ec;
162162
u64 esr;
@@ -196,7 +196,7 @@ static inline bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
196196
}
197197

198198
/* Check for an FPSIMD/SVE trap and handle as appropriate */
199-
static inline bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
199+
static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
200200
{
201201
bool vhe, sve_guest, sve_host;
202202
u8 hsr_ec;
@@ -283,7 +283,7 @@ static inline bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
283283
return true;
284284
}
285285

286-
static inline bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
286+
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
287287
{
288288
u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
289289
int rt = kvm_vcpu_sys_get_rt(vcpu);
@@ -338,7 +338,7 @@ static inline bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
338338
return true;
339339
}
340340

341-
static inline bool __hyp_text esr_is_ptrauth_trap(u32 esr)
341+
static inline bool esr_is_ptrauth_trap(u32 esr)
342342
{
343343
u32 ec = ESR_ELx_EC(esr);
344344

@@ -371,7 +371,7 @@ static inline bool __hyp_text esr_is_ptrauth_trap(u32 esr)
371371
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
372372
})
373373

374-
static inline bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
374+
static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
375375
{
376376
struct kvm_cpu_context *ctxt;
377377
u64 val;
@@ -401,7 +401,7 @@ static inline bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
401401
* the guest, false when we should restore the host state and return to the
402402
* main run loop.
403403
*/
404-
static inline bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
404+
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
405405
{
406406
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
407407
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -473,15 +473,15 @@ static inline bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_
473473
return false;
474474
}
475475

476-
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
476+
static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
477477
{
478478
if (!cpus_have_final_cap(ARM64_SSBD))
479479
return false;
480480

481481
return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
482482
}
483483

484-
static inline void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
484+
static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
485485
{
486486
#ifdef CONFIG_ARM64_SSBD
487487
/*
@@ -494,7 +494,7 @@ static inline void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu
494494
#endif
495495
}
496496

497-
static inline void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
497+
static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
498498
{
499499
#ifdef CONFIG_ARM64_SSBD
500500
/*

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -15,18 +15,18 @@
1515
#include <asm/kvm_emulate.h>
1616
#include <asm/kvm_hyp.h>
1717

18-
static inline void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
18+
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
1919
{
2020
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
2121
}
2222

23-
static inline void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
23+
static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
2424
{
2525
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
2626
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
2727
}
2828

29-
static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
29+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
3030
{
3131
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
3232
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
@@ -51,7 +51,7 @@ static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ct
5151
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
5252
}
5353

54-
static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
54+
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
5555
{
5656
ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
5757
ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
@@ -60,18 +60,18 @@ static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_cont
6060
ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
6161
}
6262

63-
static inline void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
63+
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
6464
{
6565
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
6666
}
6767

68-
static inline void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
68+
static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
6969
{
7070
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
7171
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
7272
}
7373

74-
static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
74+
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
7575
{
7676
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
7777
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
@@ -130,7 +130,7 @@ static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context
130130
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
131131
}
132132

133-
static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
133+
static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
134134
{
135135
u64 pstate = ctxt->gp_regs.regs.pstate;
136136
u64 mode = pstate & PSR_AA32_MODE_MASK;
@@ -156,7 +156,7 @@ static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_c
156156
write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
157157
}
158158

159-
static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
159+
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
160160
{
161161
u64 *spsr, *sysreg;
162162

@@ -178,7 +178,7 @@ static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
178178
sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
179179
}
180180

181-
static inline void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
181+
static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
182182
{
183183
u64 *spsr, *sysreg;
184184

arch/arm64/kvm/hyp/nvhe/Makefile

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,13 @@ $(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
2121
$(call if_changed,hypcopy)
2222

2323
quiet_cmd_hypcopy = HYPCOPY $@
24-
cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
24+
cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
25+
--rename-section=.text=.hyp.text \
26+
$< $@
27+
28+
# Remove ftrace and Shadow Call Stack CFLAGS.
29+
# This is equivalent to the 'notrace' and '__noscs' annotations.
30+
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
2531

2632
# KVM nVHE code is run at a different exception code with a different map, so
2733
# compiler instrumentation that inserts callbacks or checks into the code may

0 commit comments

Comments
 (0)