Skip to content

Commit fa5e404

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/misc-6.14 into kvmarm-master/next
* kvm-arm64/misc-6.14:

  Misc KVM/arm64 changes for 6.14:

  - Don't expose AArch32 EL0 capability when NV is enabled

  - Update documentation to reflect the full gamut of kvm-arm.mode behaviours

  - Use the hypervisor VA bit width when dumping stacktraces

  - Decouple the hypervisor stack size from PAGE_SIZE, at least on the surface...

  - Make use of str_enabled_disabled() when advertising GICv4.1 support

  - Explicitly handle BRBE traps as UNDEFINED

  KVM: arm64: Explicitly handle BRBE traps as UNDEFINED
  KVM: arm64: vgic: Use str_enabled_disabled() in vgic_v3_probe()
  arm64: kvm: Introduce nvhe stack size constants
  KVM: arm64: Fix nVHE stacktrace VA bits mask
  Documentation: Update the behaviour of "kvm-arm.mode"
  KVM: arm64: nv: Advertise the lack of AArch32 EL0 support

Signed-off-by: Marc Zyngier <[email protected]>
2 parents 3643b33 + a7f1fa5 commit fa5e404

File tree

13 files changed

+68
-41
lines changed

13 files changed

+68
-41
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2748,17 +2748,21 @@
27482748
nvhe: Standard nVHE-based mode, without support for
27492749
protected guests.
27502750

2751-
protected: nVHE-based mode with support for guests whose
2752-
state is kept private from the host.
2751+
protected: Mode with support for guests whose state is
2752+
kept private from the host, using VHE or
2753+
nVHE depending on HW support.
27532754

27542755
nested: VHE-based mode with support for nested
2755-
virtualization. Requires at least ARMv8.3
2756-
hardware.
2756+
virtualization. Requires at least ARMv8.4
2757+
hardware (with FEAT_NV2).
27572758

27582759
Defaults to VHE/nVHE based on hardware support. Setting
27592760
mode to "protected" will disable kexec and hibernation
2760-
for the host. "nested" is experimental and should be
2761-
used with extreme caution.
2761+
for the host. To force nVHE on VHE hardware, add
2762+
"arm64_sw.hvhe=0 id_aa64mmfr1.vh=0" to the
2763+
command-line.
2764+
"nested" is experimental and should be used with
2765+
extreme caution.
27622766

27632767
kvm-arm.vgic_v3_group0_trap=
27642768
[KVM,ARM,EARLY] Trap guest accesses to GICv3 group-0

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -139,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
139139

140140
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
141141

142+
extern u32 __hyp_va_bits;
143+
142144
/*
143145
* We currently support using a VM-specified IPA size. For backward
144146
* compatibility, the default IPA size is fixed to 40bits.

arch/arm64/include/asm/memory.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,13 +145,16 @@
145145

146146
#define OVERFLOW_STACK_SIZE SZ_4K
147147

148+
#define NVHE_STACK_SHIFT PAGE_SHIFT
149+
#define NVHE_STACK_SIZE (UL(1) << NVHE_STACK_SHIFT)
150+
148151
/*
149152
* With the minimum frame size of [x29, x30], exactly half the combined
150153
* sizes of the hyp and overflow stacks is the maximum size needed to
151154
* save the unwinded stacktrace; plus an additional entry to delimit the
152155
* end.
153156
*/
154-
#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
157+
#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
155158

156159
/*
157160
* Alignment of kernel segments (e.g. .text, .data).

arch/arm64/include/asm/stacktrace/nvhe.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
4747

4848
DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
4949
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
50-
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
50+
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
5151

5252
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
5353

arch/arm64/kvm/arm.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR
6161

6262
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
6363

64-
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
64+
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
6565
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
6666

6767
DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -2329,7 +2329,7 @@ static void __init teardown_hyp_mode(void)
23292329

23302330
free_hyp_pgds();
23312331
for_each_possible_cpu(cpu) {
2332-
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
2332+
free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
23332333
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
23342334

23352335
if (free_sve) {
@@ -2517,15 +2517,15 @@ static int __init init_hyp_mode(void)
25172517
* Allocate stack pages for Hypervisor-mode
25182518
*/
25192519
for_each_possible_cpu(cpu) {
2520-
unsigned long stack_page;
2520+
unsigned long stack_base;
25212521

2522-
stack_page = __get_free_page(GFP_KERNEL);
2523-
if (!stack_page) {
2522+
stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
2523+
if (!stack_base) {
25242524
err = -ENOMEM;
25252525
goto out_err;
25262526
}
25272527

2528-
per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
2528+
per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
25292529
}
25302530

25312531
/*
@@ -2594,9 +2594,9 @@ static int __init init_hyp_mode(void)
25942594
*/
25952595
for_each_possible_cpu(cpu) {
25962596
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2597-
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
2597+
char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
25982598

2599-
err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
2599+
err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
26002600
if (err) {
26012601
kvm_err("Cannot map hyp stack\n");
26022602
goto out_err;
@@ -2608,7 +2608,7 @@ static int __init init_hyp_mode(void)
26082608
* __hyp_pa() won't do the right thing there, since the stack
26092609
* has been mapped in the flexible private VA space.
26102610
*/
2611-
params->stack_pa = __pa(stack_page);
2611+
params->stack_pa = __pa(stack_base);
26122612
}
26132613

26142614
for_each_possible_cpu(cpu) {

arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc)
188188

189189
/*
190190
* Test whether the SP has overflowed, without corrupting a GPR.
191-
* nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
191+
* nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
192192
* of SP should always be 1.
193193
*/
194194
add sp, sp, x0 // sp' = sp + x0
195195
sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
196-
tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
196+
tbz x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
197197
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
198198
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
199199

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -360,10 +360,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
360360

361361
prev_base = __io_map_base;
362362
/*
363-
* Efficient stack verification using the PAGE_SHIFT bit implies
363+
* Efficient stack verification using the NVHE_STACK_SHIFT bit implies
364364
* an alignment of our allocation on the order of the size.
365365
*/
366-
size = PAGE_SIZE * 2;
366+
size = NVHE_STACK_SIZE * 2;
367367
addr = ALIGN(__io_map_base, size);
368368

369369
ret = __pkvm_alloc_private_va_range(addr, size);
@@ -373,12 +373,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
373373
* at the higher address and leave the lower guard page
374374
* unbacked.
375375
*
376-
* Any valid stack address now has the PAGE_SHIFT bit as 1
376+
* Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
377377
* and addresses corresponding to the guard page have the
378-
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
378+
* NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
379379
*/
380-
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
381-
PAGE_SIZE, phys, PAGE_HYP);
380+
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
381+
NVHE_STACK_SIZE, phys, PAGE_HYP);
382382
if (ret)
383383
__io_map_base = prev_base;
384384
}

arch/arm64/kvm/hyp/nvhe/stacktrace.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
2828
struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
2929
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
3030

31-
stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
31+
stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
3232
stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
3333
stacktrace_info->fp = fp;
3434
stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
5454
{
5555
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
5656
unsigned long high = params->stack_hyp_va;
57-
unsigned long low = high - PAGE_SIZE;
57+
unsigned long low = high - NVHE_STACK_SIZE;
5858

5959
return (struct stack_info) {
6060
.low = low,

arch/arm64/kvm/mmu.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@ static unsigned long __ro_after_init hyp_idmap_start;
3030
static unsigned long __ro_after_init hyp_idmap_end;
3131
static phys_addr_t __ro_after_init hyp_idmap_vector;
3232

33+
u32 __ro_after_init __hyp_va_bits;
34+
3335
static unsigned long __ro_after_init io_map_base;
3436

3537
#define KVM_PGT_FN(fn) (!is_protected_kvm_enabled() ? fn : p ## fn)
@@ -715,10 +717,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
715717

716718
mutex_lock(&kvm_hyp_pgd_mutex);
717719
/*
718-
* Efficient stack verification using the PAGE_SHIFT bit implies
720+
* Efficient stack verification using the NVHE_STACK_SHIFT bit implies
719721
* an alignment of our allocation on the order of the size.
720722
*/
721-
size = PAGE_SIZE * 2;
723+
size = NVHE_STACK_SIZE * 2;
722724
base = ALIGN_DOWN(io_map_base - size, size);
723725

724726
ret = __hyp_alloc_private_va_range(base);
@@ -735,12 +737,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
735737
* at the higher address and leave the lower guard page
736738
* unbacked.
737739
*
738-
* Any valid stack address now has the PAGE_SHIFT bit as 1
740+
* Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
739741
* and addresses corresponding to the guard page have the
740-
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
742+
* NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
741743
*/
742-
ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
743-
PAGE_HYP);
744+
ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
745+
phys_addr, PAGE_HYP);
744746
if (ret)
745747
kvm_err("Cannot map hyp stack\n");
746748

@@ -2085,6 +2087,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
20852087
goto out_destroy_pgtable;
20862088

20872089
io_map_base = hyp_idmap_start;
2090+
__hyp_va_bits = *hyp_va_bits;
20882091
return 0;
20892092

20902093
out_destroy_pgtable:

arch/arm64/kvm/nested.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -830,8 +830,10 @@ static void limit_nv_id_regs(struct kvm *kvm)
830830
NV_FTR(PFR0, RAS) |
831831
NV_FTR(PFR0, EL3) |
832832
NV_FTR(PFR0, EL2) |
833-
NV_FTR(PFR0, EL1));
834-
/* 64bit EL1/EL2/EL3 only */
833+
NV_FTR(PFR0, EL1) |
834+
NV_FTR(PFR0, EL0));
835+
/* 64bit only at any EL */
836+
val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001);
835837
val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
836838
val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
837839
val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);

0 commit comments

Comments (0)