Skip to content

Commit 38f9e4b

Browse files
Kalesh Singh authored and Marc Zyngier committed
arm64: kvm: Introduce nvhe stack size constants
Refactor nvhe stack code to use NVHE_STACK_SIZE/SHIFT constants, instead of directly using PAGE_SIZE/SHIFT. This makes the code a bit easier to read, without introducing any functional changes. Cc: Marc Zyngier <[email protected]> Cc: Mark Brown <[email protected]> Cc: Mark Rutland <[email protected]> Cc: Will Deacon <[email protected]> Signed-off-by: Kalesh Singh <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Marc Zyngier <[email protected]>
1 parent 6834403 commit 38f9e4b

File tree

8 files changed

+33
-30
lines changed

8 files changed

+33
-30
lines changed

arch/arm64/include/asm/memory.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,13 +145,16 @@
145145

146146
#define OVERFLOW_STACK_SIZE SZ_4K
147147

148+
#define NVHE_STACK_SHIFT PAGE_SHIFT
149+
#define NVHE_STACK_SIZE (UL(1) << NVHE_STACK_SHIFT)
150+
148151
/*
149152
* With the minimum frame size of [x29, x30], exactly half the combined
150153
* sizes of the hyp and overflow stacks is the maximum size needed to
151154
* save the unwinded stacktrace; plus an additional entry to delimit the
152155
* end.
153156
*/
154-
#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
157+
#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
155158

156159
/*
157160
* Alignment of kernel segments (e.g. .text, .data).

arch/arm64/include/asm/stacktrace/nvhe.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
4747

4848
DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
4949
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
50-
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
50+
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
5151

5252
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
5353

arch/arm64/kvm/arm.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR
6161

6262
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
6363

64-
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
64+
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
6565
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
6666

6767
DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -2339,7 +2339,7 @@ static void __init teardown_hyp_mode(void)
23392339

23402340
free_hyp_pgds();
23412341
for_each_possible_cpu(cpu) {
2342-
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
2342+
free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
23432343
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
23442344

23452345
if (free_sve) {
@@ -2527,15 +2527,15 @@ static int __init init_hyp_mode(void)
25272527
* Allocate stack pages for Hypervisor-mode
25282528
*/
25292529
for_each_possible_cpu(cpu) {
2530-
unsigned long stack_page;
2530+
unsigned long stack_base;
25312531

2532-
stack_page = __get_free_page(GFP_KERNEL);
2533-
if (!stack_page) {
2532+
stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
2533+
if (!stack_base) {
25342534
err = -ENOMEM;
25352535
goto out_err;
25362536
}
25372537

2538-
per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
2538+
per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
25392539
}
25402540

25412541
/*
@@ -2604,9 +2604,9 @@ static int __init init_hyp_mode(void)
26042604
*/
26052605
for_each_possible_cpu(cpu) {
26062606
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2607-
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
2607+
char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
26082608

2609-
err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
2609+
err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
26102610
if (err) {
26112611
kvm_err("Cannot map hyp stack\n");
26122612
goto out_err;
@@ -2618,7 +2618,7 @@ static int __init init_hyp_mode(void)
26182618
* __hyp_pa() won't do the right thing there, since the stack
26192619
* has been mapped in the flexible private VA space.
26202620
*/
2621-
params->stack_pa = __pa(stack_page);
2621+
params->stack_pa = __pa(stack_base);
26222622
}
26232623

26242624
for_each_possible_cpu(cpu) {

arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc)
188188

189189
/*
190190
* Test whether the SP has overflowed, without corrupting a GPR.
191-
* nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
191+
* nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
192192
* of SP should always be 1.
193193
*/
194194
add sp, sp, x0 // sp' = sp + x0
195195
sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
196-
tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
196+
tbz x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
197197
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
198198
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
199199

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -360,10 +360,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
360360

361361
prev_base = __io_map_base;
362362
/*
363-
* Efficient stack verification using the PAGE_SHIFT bit implies
363+
* Efficient stack verification using the NVHE_STACK_SHIFT bit implies
364364
* an alignment of our allocation on the order of the size.
365365
*/
366-
size = PAGE_SIZE * 2;
366+
size = NVHE_STACK_SIZE * 2;
367367
addr = ALIGN(__io_map_base, size);
368368

369369
ret = __pkvm_alloc_private_va_range(addr, size);
@@ -373,12 +373,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
373373
* at the higher address and leave the lower guard page
374374
* unbacked.
375375
*
376-
* Any valid stack address now has the PAGE_SHIFT bit as 1
376+
* Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
377377
* and addresses corresponding to the guard page have the
378-
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
378+
* NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
379379
*/
380-
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
381-
PAGE_SIZE, phys, PAGE_HYP);
380+
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
381+
NVHE_STACK_SIZE, phys, PAGE_HYP);
382382
if (ret)
383383
__io_map_base = prev_base;
384384
}

arch/arm64/kvm/hyp/nvhe/stacktrace.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
2828
struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
2929
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
3030

31-
stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
31+
stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
3232
stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
3333
stacktrace_info->fp = fp;
3434
stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
5454
{
5555
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
5656
unsigned long high = params->stack_hyp_va;
57-
unsigned long low = high - PAGE_SIZE;
57+
unsigned long low = high - NVHE_STACK_SIZE;
5858

5959
return (struct stack_info) {
6060
.low = low,

arch/arm64/kvm/mmu.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -706,10 +706,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
706706

707707
mutex_lock(&kvm_hyp_pgd_mutex);
708708
/*
709-
* Efficient stack verification using the PAGE_SHIFT bit implies
709+
* Efficient stack verification using the NVHE_STACK_SHIFT bit implies
710710
* an alignment of our allocation on the order of the size.
711711
*/
712-
size = PAGE_SIZE * 2;
712+
size = NVHE_STACK_SIZE * 2;
713713
base = ALIGN_DOWN(io_map_base - size, size);
714714

715715
ret = __hyp_alloc_private_va_range(base);
@@ -726,12 +726,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
726726
* at the higher address and leave the lower guard page
727727
* unbacked.
728728
*
729-
* Any valid stack address now has the PAGE_SHIFT bit as 1
729+
* Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
730730
* and addresses corresponding to the guard page have the
731-
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
731+
* NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
732732
*/
733-
ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
734-
PAGE_HYP);
733+
ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
734+
phys_addr, PAGE_HYP);
735735
if (ret)
736736
kvm_err("Cannot map hyp stack\n");
737737

arch/arm64/kvm/stacktrace.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ static struct stack_info stackinfo_get_hyp(void)
5151
struct kvm_nvhe_stacktrace_info *stacktrace_info
5252
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
5353
unsigned long low = (unsigned long)stacktrace_info->stack_base;
54-
unsigned long high = low + PAGE_SIZE;
54+
unsigned long high = low + NVHE_STACK_SIZE;
5555

5656
return (struct stack_info) {
5757
.low = low,
@@ -61,8 +61,8 @@ static struct stack_info stackinfo_get_hyp(void)
6161

6262
static struct stack_info stackinfo_get_hyp_kern_va(void)
6363
{
64-
unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
65-
unsigned long high = low + PAGE_SIZE;
64+
unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
65+
unsigned long high = low + NVHE_STACK_SIZE;
6666

6767
return (struct stack_info) {
6868
.low = low,

0 commit comments

Comments
 (0)