
Commit 9429f4b

willdeacon authored and Marc Zyngier committed
KVM: arm64: Move host EL1 code out of hyp/ directory
kvm/hyp/reserved_mem.c contains host code executing at EL1 and is not linked into the hypervisor object. Move the file into kvm/pkvm.c and rework the headers so that the definitions shared between the host and the hypervisor live in asm/kvm_pkvm.h.

Signed-off-by: Will Deacon <[email protected]>
Tested-by: Fuad Tabba <[email protected]>
Reviewed-by: Fuad Tabba <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent ed4ed15 commit 9429f4b
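For context, the point of the new asm/kvm_pkvm.h is that the memblock declarations and page-table sizing helpers become visible to both the EL2 hypervisor objects and the EL1 host code in kvm/pkvm.c. A minimal sketch of a host-side caller, assuming only what is visible in this diff (the kvm_nvhe_sym() aliasing macro is provided elsewhere in the tree, and example_hyp_s1_estimate() is purely illustrative, not part of this commit):

#include <asm/kvm_pkvm.h>

/* Hypothetical host-side (EL1) helper, not part of this commit. */
static unsigned long example_hyp_s1_estimate(void)
{
	/*
	 * The same inline helper that sizes the hyp stage-1 tables can
	 * now be called from host code; before this change it lived in
	 * nvhe/mm.h and was only visible to the EL2 object.
	 */
	return hyp_s1_pgtable_pages();
}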

File tree

8 files changed: +80 / -63 lines changed

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: Quentin Perret <[email protected]>
+ */
+#ifndef __ARM64_KVM_PKVM_H__
+#define __ARM64_KVM_PKVM_H__
+
+#include <linux/memblock.h>
+#include <asm/kvm_pgtable.h>
+
+#define HYP_MEMBLOCK_REGIONS 128
+
+extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
+extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
+
+static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
+{
+	unsigned long total = 0, i;
+
+	/* Provision the worst case scenario */
+	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
+		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
+		total += nr_pages;
+	}
+
+	return total;
+}
+
+static inline unsigned long __hyp_pgtable_total_pages(void)
+{
+	unsigned long res = 0, i;
+
+	/* Cover all of memory with page-granularity */
+	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
+		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
+	}
+
+	return res;
+}
+
+static inline unsigned long hyp_s1_pgtable_pages(void)
+{
+	unsigned long res;
+
+	res = __hyp_pgtable_total_pages();
+
+	/* Allow 1 GiB for private mappings */
+	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+	return res;
+}
+
+static inline unsigned long host_s2_pgtable_pages(void)
+{
+	unsigned long res;
+
+	/*
+	 * Include an extra 16 pages to safely upper-bound the worst case of
+	 * concatenated pgds.
+	 */
+	res = __hyp_pgtable_total_pages() + 16;
+
+	/* Allow 1 GiB for MMIO mappings */
+	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+	return res;
+}
+
+#endif /* __ARM64_KVM_PKVM_H__ */
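As a quick sanity check of the provisioning math above (assuming 4 KiB pages, PTRS_PER_PTE == 512 and KVM_PGTABLE_MAX_LEVELS == 4, the common configuration but not spelled out in this diff), the 1 GiB allowance used by hyp_s1_pgtable_pages() and host_s2_pgtable_pages() works out to 515 table pages:

	nr_pages = SZ_1G >> PAGE_SHIFT = 262144
	level 0: DIV_ROUND_UP(262144, 512) = 512  ->  total = 512
	level 1: DIV_ROUND_UP(512, 512)    = 1    ->  total = 513
	level 2: DIV_ROUND_UP(1, 512)      = 1    ->  total = 514
	level 3: DIV_ROUND_UP(1, 512)      = 1    ->  total = 515

Each iteration charges one table page per PTRS_PER_PTE entries at the level below, so the sum upper-bounds a fully populated multi-level table for the range.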

arch/arm64/kvm/Makefile

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
 	 inject_fault.o va_layout.o handle_exit.o \
 	 guest.o debug.o reset.o sys_regs.o \
-	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
+	 vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
 	 arch_timer.o trng.o\
 	 vgic/vgic.o vgic/vgic-init.o \
 	 vgic/vgic-irqfd.o vgic/vgic-v2.o \

arch/arm64/kvm/hyp/Makefile

Lines changed: 1 addition & 1 deletion
@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
 		    -DDISABLE_BRANCH_PROFILING \
 		    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o

arch/arm64/kvm/hyp/include/nvhe/mm.h

Lines changed: 0 additions & 57 deletions
@@ -10,9 +10,6 @@
 #include <nvhe/memory.h>
 #include <nvhe/spinlock.h>
 
-#define HYP_MEMBLOCK_REGIONS 128
-extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
-extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
 extern struct kvm_pgtable pkvm_pgtable;
 extern hyp_spinlock_t pkvm_pgd_lock;
 extern struct hyp_pool hpool;
@@ -39,58 +36,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
 	*end = ALIGN(*end, PAGE_SIZE);
 }
 
-static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
-{
-	unsigned long total = 0, i;
-
-	/* Provision the worst case scenario */
-	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
-		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
-		total += nr_pages;
-	}
-
-	return total;
-}
-
-static inline unsigned long __hyp_pgtable_total_pages(void)
-{
-	unsigned long res = 0, i;
-
-	/* Cover all of memory with page-granularity */
-	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
-		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
-		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
-	}
-
-	return res;
-}
-
-static inline unsigned long hyp_s1_pgtable_pages(void)
-{
-	unsigned long res;
-
-	res = __hyp_pgtable_total_pages();
-
-	/* Allow 1 GiB for private mappings */
-	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
-	return res;
-}
-
-static inline unsigned long host_s2_pgtable_pages(void)
-{
-	unsigned long res;
-
-	/*
-	 * Include an extra 16 pages to safely upper-bound the worst case of
-	 * concatenated pgds.
-	 */
-	res = __hyp_pgtable_total_pages() + 16;
-
-	/* Allow 1 GiB for MMIO mappings */
-	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
-	return res;
-}
-
 #endif /* __KVM_HYP_MM_H */

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/stage2_pgtable.h>
 
 #include <hyp/fault.h>

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 1 addition & 0 deletions
@@ -8,6 +8,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/spectre.h>
 
 #include <nvhe/early_alloc.h>

arch/arm64/kvm/hyp/nvhe/setup.c

Lines changed: 1 addition & 0 deletions
@@ -8,6 +8,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 
 #include <nvhe/early_alloc.h>
 #include <nvhe/fixed_config.h>

arch/arm64/kvm/hyp/reserved_mem.c renamed to arch/arm64/kvm/pkvm.c

Lines changed: 4 additions & 4 deletions
@@ -8,10 +8,9 @@
 #include <linux/memblock.h>
 #include <linux/sort.h>
 
-#include <asm/kvm_host.h>
+#include <asm/kvm_pkvm.h>
 
-#include <nvhe/memory.h>
-#include <nvhe/mm.h>
+#include "hyp_constants.h"
 
 static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
 static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
@@ -82,7 +81,8 @@ void __init kvm_hyp_reserve(void)
 	do {
 		prev = nr_pages;
 		nr_pages = hyp_mem_pages + prev;
-		nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
+		nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
+					PAGE_SIZE);
 		nr_pages += __hyp_pgtable_max_pages(nr_pages);
 	} while (nr_pages != prev);
 	hyp_mem_pages += nr_pages;
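The do/while loop above iterates to a fixed point: pages reserved for struct hyp_page metadata may themselves need extra page-table pages, which in turn need more metadata, so the estimate is recomputed until it stops growing. STRUCT_HYP_PAGE_SIZE stands in for sizeof(struct hyp_page), which the host can no longer take directly now that it does not include the nvhe headers; it is presumably provided by the generated "hyp_constants.h" included above (the generation itself is not shown in this diff). A standalone sketch of the same convergence, using made-up numbers rather than the real hyp_mem_pages:

/* Illustration only: mimics the fixed-point sizing loop, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define STRUCT_HYP_PAGE_SIZE	16UL	/* assumed sizeof(struct hyp_page) */
#define PTRS_PER_PTE		512UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	for (i = 0; i < 4; i++) {	/* KVM_PGTABLE_MAX_LEVELS */
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}
	return total;
}

int main(void)
{
	unsigned long hyp_mem_pages = 100000;	/* made-up starting estimate */
	unsigned long prev, nr_pages = 0;

	do {
		prev = nr_pages;
		nr_pages = hyp_mem_pages + prev;
		nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE, PAGE_SIZE);
		nr_pages += pgtable_max_pages(nr_pages);
		printf("vmemmap + pgtable overhead: %lu pages\n", nr_pages);
	} while (nr_pages != prev);

	return 0;
}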
