Skip to content

Commit cfa7299

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/pkvm-vcpu-state into kvmarm-master/next
* kvm-arm64/pkvm-vcpu-state: (25 commits)
  : .
  : Large drop of pKVM patches from Will Deacon and co, adding
  : a private vm/vcpu state at EL2, managed independently from
  : the EL1 state. From the cover letter:
  :
  : "This is version six of the pKVM EL2 state series, extending the pKVM
  : hypervisor code so that it can dynamically instantiate and manage VM
  : data structures without the host being able to access them directly.
  : These structures consist of a hyp VM, a set of hyp vCPUs and the stage-2
  : page-table for the MMU. The pages used to hold the hypervisor structures
  : are returned to the host when the VM is destroyed."
  : .
  KVM: arm64: Use the pKVM hyp vCPU structure in handle___kvm_vcpu_run()
  KVM: arm64: Don't unnecessarily map host kernel sections at EL2
  KVM: arm64: Explicitly map 'kvm_vgic_global_state' at EL2
  KVM: arm64: Maintain a copy of 'kvm_arm_vmid_bits' at EL2
  KVM: arm64: Unmap 'kvm_arm_hyp_percpu_base' from the host
  KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache
  KVM: arm64: Instantiate guest stage-2 page-tables at EL2
  KVM: arm64: Consolidate stage-2 initialisation into a single function
  KVM: arm64: Add generic hyp_memcache helpers
  KVM: arm64: Provide I-cache invalidation by virtual address at EL2
  KVM: arm64: Initialise hypervisor copies of host symbols unconditionally
  KVM: arm64: Add per-cpu fixmap infrastructure at EL2
  KVM: arm64: Instantiate pKVM hypervisor VM and vCPU structures from EL1
  KVM: arm64: Add infrastructure to create and track pKVM instances at EL2
  KVM: arm64: Rename 'host_kvm' to 'host_mmu'
  KVM: arm64: Add hyp_spinlock_t static initializer
  KVM: arm64: Include asm/kvm_mmu.h in nvhe/mem_protect.h
  KVM: arm64: Add helpers to pin memory shared with the hypervisor at EL2
  KVM: arm64: Prevent the donation of no-map pages
  KVM: arm64: Implement do_donate() helper for donating memory
  ...

Signed-off-by: Marc Zyngier <[email protected]>
2 parents fe8e3f4 + be66e67 commit cfa7299

27 files changed

+1747
-214
lines changed

arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@
135135
* 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
136136
* not known to exist and will break with this configuration.
137137
*
138-
* The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
138+
* The VTCR_EL2 is configured per VM and is initialised in kvm_init_stage2_mmu.
139139
*
140140
* Note that when using 4K pages, we concatenate two first level page tables
141141
* together. With 16K pages, we concatenate 16 first level page tables.

arch/arm64/include/asm/kvm_asm.h

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,9 @@ enum __kvm_host_smccc_func {
7676
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
7777
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
7878
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
79+
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
80+
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
81+
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
7982
};
8083

8184
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
@@ -106,7 +109,7 @@ enum __kvm_host_smccc_func {
106109
#define per_cpu_ptr_nvhe_sym(sym, cpu) \
107110
({ \
108111
unsigned long base, off; \
109-
base = kvm_arm_hyp_percpu_base[cpu]; \
112+
base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; \
110113
off = (unsigned long)&CHOOSE_NVHE_SYM(sym) - \
111114
(unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start); \
112115
base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL; \
@@ -211,7 +214,7 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
211214
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
212215
#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
213216

214-
extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
217+
extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
215218
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
216219
DECLARE_KVM_NVHE_SYM(__per_cpu_end);
217220

arch/arm64/include/asm/kvm_host.h

Lines changed: 70 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,63 @@ u32 __attribute_const__ kvm_target_cpu(void);
7373
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
7474
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
7575

76+
struct kvm_hyp_memcache {
77+
phys_addr_t head;
78+
unsigned long nr_pages;
79+
};
80+
81+
static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
82+
phys_addr_t *p,
83+
phys_addr_t (*to_pa)(void *virt))
84+
{
85+
*p = mc->head;
86+
mc->head = to_pa(p);
87+
mc->nr_pages++;
88+
}
89+
90+
static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
91+
void *(*to_va)(phys_addr_t phys))
92+
{
93+
phys_addr_t *p = to_va(mc->head);
94+
95+
if (!mc->nr_pages)
96+
return NULL;
97+
98+
mc->head = *p;
99+
mc->nr_pages--;
100+
101+
return p;
102+
}
103+
104+
static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
105+
unsigned long min_pages,
106+
void *(*alloc_fn)(void *arg),
107+
phys_addr_t (*to_pa)(void *virt),
108+
void *arg)
109+
{
110+
while (mc->nr_pages < min_pages) {
111+
phys_addr_t *p = alloc_fn(arg);
112+
113+
if (!p)
114+
return -ENOMEM;
115+
push_hyp_memcache(mc, p, to_pa);
116+
}
117+
118+
return 0;
119+
}
120+
121+
static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
122+
void (*free_fn)(void *virt, void *arg),
123+
void *(*to_va)(phys_addr_t phys),
124+
void *arg)
125+
{
126+
while (mc->nr_pages)
127+
free_fn(pop_hyp_memcache(mc, to_va), arg);
128+
}
129+
130+
void free_hyp_memcache(struct kvm_hyp_memcache *mc);
131+
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
132+
76133
struct kvm_vmid {
77134
atomic64_t id;
78135
};
@@ -115,6 +172,13 @@ struct kvm_smccc_features {
115172
unsigned long vendor_hyp_bmap;
116173
};
117174

175+
typedef unsigned int pkvm_handle_t;
176+
177+
struct kvm_protected_vm {
178+
pkvm_handle_t handle;
179+
struct kvm_hyp_memcache teardown_mc;
180+
};
181+
118182
struct kvm_arch {
119183
struct kvm_s2_mmu mmu;
120184

@@ -166,6 +230,12 @@ struct kvm_arch {
166230

167231
/* Hypercall features firmware registers' descriptor */
168232
struct kvm_smccc_features smccc_feat;
233+
234+
/*
235+
* For an untrusted host VM, 'pkvm.handle' is used to lookup
236+
* the associated pKVM instance in the hypervisor.
237+
*/
238+
struct kvm_protected_vm pkvm;
169239
};
170240

171241
struct kvm_vcpu_fault_info {
@@ -915,8 +985,6 @@ int kvm_set_ipa_limit(void);
915985
#define __KVM_HAVE_ARCH_VM_ALLOC
916986
struct kvm *kvm_arch_alloc_vm(void);
917987

918-
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
919-
920988
static inline bool kvm_vm_is_protected(struct kvm *kvm)
921989
{
922990
return false;

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,4 +123,7 @@ extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
123123
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
124124
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
125125

126+
extern unsigned long kvm_nvhe_sym(__icache_flags);
127+
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
128+
126129
#endif /* __ARM64_KVM_HYP_H__ */

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
166166
void free_hyp_pgds(void);
167167

168168
void stage2_unmap_vm(struct kvm *kvm);
169-
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
169+
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
170170
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
171171
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
172172
phys_addr_t pa, unsigned long size, bool writable);

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,8 @@ typedef u64 kvm_pte_t;
4242
#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
4343
#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
4444

45+
#define KVM_PHYS_INVALID (-1ULL)
46+
4547
static inline bool kvm_pte_valid(kvm_pte_t pte)
4648
{
4749
return pte & KVM_PTE_VALID;
@@ -57,6 +59,18 @@ static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
5759
return pa;
5860
}
5961

62+
static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
63+
{
64+
kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
65+
66+
if (PAGE_SHIFT == 16) {
67+
pa &= GENMASK(51, 48);
68+
pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
69+
}
70+
71+
return pte;
72+
}
73+
6074
static inline u64 kvm_granule_shift(u32 level)
6175
{
6276
/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
@@ -381,6 +395,14 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
381395
*/
382396
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
383397

398+
/**
399+
* kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
400+
* @vtcr: Content of the VTCR register.
401+
*
402+
* Return: the size (in bytes) of the stage-2 PGD
403+
*/
404+
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);
405+
384406
/**
385407
* __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
386408
* @pgt: Uninitialised page-table structure to initialise.

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,49 @@
99
#include <linux/memblock.h>
1010
#include <asm/kvm_pgtable.h>
1111

12+
/* Maximum number of VMs that can co-exist under pKVM. */
13+
#define KVM_MAX_PVMS 255
14+
1215
#define HYP_MEMBLOCK_REGIONS 128
1316

17+
int pkvm_init_host_vm(struct kvm *kvm);
18+
int pkvm_create_hyp_vm(struct kvm *kvm);
19+
void pkvm_destroy_hyp_vm(struct kvm *kvm);
20+
1421
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
1522
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
1623

24+
static inline unsigned long
25+
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
26+
{
27+
unsigned long nr_pages = reg->size >> PAGE_SHIFT;
28+
unsigned long start, end;
29+
30+
start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
31+
end = start + nr_pages * vmemmap_entry_size;
32+
start = ALIGN_DOWN(start, PAGE_SIZE);
33+
end = ALIGN(end, PAGE_SIZE);
34+
35+
return end - start;
36+
}
37+
38+
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
39+
{
40+
unsigned long res = 0, i;
41+
42+
for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
43+
res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
44+
vmemmap_entry_size);
45+
}
46+
47+
return res >> PAGE_SHIFT;
48+
}
49+
50+
static inline unsigned long hyp_vm_table_pages(void)
51+
{
52+
return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
53+
}
54+
1755
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
1856
{
1957
unsigned long total = 0, i;

arch/arm64/kernel/image-vars.h

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -71,12 +71,6 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler);
7171
/* Vectors installed by hyp-init on reset HVC. */
7272
KVM_NVHE_ALIAS(__hyp_stub_vectors);
7373

74-
/* Kernel symbol used by icache_is_vpipt(). */
75-
KVM_NVHE_ALIAS(__icache_flags);
76-
77-
/* VMID bits set by the KVM VMID allocator */
78-
KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
79-
8074
/* Static keys which are set if a vGIC trap should be handled in hyp. */
8175
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
8276
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
@@ -92,9 +86,6 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities);
9286
KVM_NVHE_ALIAS(__start___kvm_ex_table);
9387
KVM_NVHE_ALIAS(__stop___kvm_ex_table);
9488

95-
/* Array containing bases of nVHE per-CPU memory regions. */
96-
KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
97-
9889
/* PMU available static key */
9990
#ifdef CONFIG_HW_PERF_EVENTS
10091
KVM_NVHE_ALIAS(kvm_arm_pmu_available);
@@ -111,12 +102,6 @@ KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy);
111102
KVM_NVHE_ALIAS_HYP(__memset, __pi_memset);
112103
#endif
113104

114-
/* Kernel memory sections */
115-
KVM_NVHE_ALIAS(__start_rodata);
116-
KVM_NVHE_ALIAS(__end_rodata);
117-
KVM_NVHE_ALIAS(__bss_start);
118-
KVM_NVHE_ALIAS(__bss_stop);
119-
120105
/* Hyp memory sections */
121106
KVM_NVHE_ALIAS(__hyp_idmap_text_start);
122107
KVM_NVHE_ALIAS(__hyp_idmap_text_end);

0 commit comments

Comments
 (0)