
Commit 5304002

willdeacon authored and Marc Zyngier committed
KVM: arm64: Rename 'host_kvm' to 'host_mmu'
In preparation for introducing VM and vCPU state at EL2, rename the
existing 'struct host_kvm' and its singleton 'host_kvm' instance to
'host_mmu' so as to avoid confusion between the structure tracking the
host stage-2 MMU state and the host instance of a 'struct kvm' for a
protected guest.

Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Tested-by: Vincent Donnefort <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
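For context, a minimal sketch (not part of this patch) of the ambiguity the rename heads off: once VM and vCPU state live at EL2, hypervisor code can end up holding both the host stage-2 MMU state and a protected guest's 'struct kvm', and the old 'host_kvm' name could plausibly refer to either. The helper name and guest parameter below are hypothetical, for illustration only; only 'host_mmu' and the hyp_spin_lock calls come from the patch.

/* Hypothetical EL2 helper, sketched for illustration only. */
static void example_transfer_page(struct kvm *guest_kvm, u64 pfn)
{
        /*
         * After the rename, 'host_mmu' unambiguously names the host
         * stage-2 MMU state, while 'guest_kvm' is a guest's
         * 'struct kvm'; before it, 'host_kvm' could be read as either.
         */
        hyp_spin_lock(&host_mmu.lock);
        /* ... move ownership of pfn out of the host stage-2 ... */
        hyp_spin_unlock(&host_mmu.lock);
}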
1 parent 1c80002 commit 5304002

File tree

2 files changed: +26 −26 lines

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 3 additions & 3 deletions
@@ -44,13 +44,13 @@ static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
 	return prot & PKVM_PAGE_STATE_PROT_MASK;
 }
 
-struct host_kvm {
+struct host_mmu {
 	struct kvm_arch arch;
 	struct kvm_pgtable pgt;
 	struct kvm_pgtable_mm_ops mm_ops;
 	hyp_spinlock_t lock;
 };
-extern struct host_kvm host_kvm;
+extern struct host_mmu host_mmu;
 
 /* This corresponds to page-table locking order */
 enum pkvm_component_id {
@@ -76,7 +76,7 @@ void hyp_unpin_shared_mem(void *from, void *to);
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 23 additions & 23 deletions
@@ -22,18 +22,18 @@
 #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
 
 extern unsigned long hyp_nr_cpus;
-struct host_kvm host_kvm;
+struct host_mmu host_mmu;
 
 static struct hyp_pool host_s2_pool;
 
 static void host_lock_component(void)
 {
-	hyp_spin_lock(&host_kvm.lock);
+	hyp_spin_lock(&host_mmu.lock);
 }
 
 static void host_unlock_component(void)
 {
-	hyp_spin_unlock(&host_kvm.lock);
+	hyp_spin_unlock(&host_mmu.lock);
 }
 
 static void hyp_lock_component(void)
@@ -88,7 +88,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
 	if (ret)
 		return ret;
 
-	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
+	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
 		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
 		.zalloc_page = host_s2_zalloc_page,
 		.phys_to_virt = hyp_phys_to_virt,
@@ -109,53 +109,53 @@ static void prepare_host_vtcr(void)
 	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
 	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
 
-	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+	host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
 					  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
 static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
 
 int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	int ret;
 
 	prepare_host_vtcr();
-	hyp_spin_lock_init(&host_kvm.lock);
-	mmu->arch = &host_kvm.arch;
+	hyp_spin_lock_init(&host_mmu.lock);
+	mmu->arch = &host_mmu.arch;
 
 	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;
 
-	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
-					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
+	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
+					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
 					host_stage2_force_pte_cb);
 	if (ret)
 		return ret;
 
-	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
-	mmu->pgt = &host_kvm.pgt;
+	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
+	mmu->pgt = &host_mmu.pgt;
 	atomic64_set(&mmu->vmid.id, 0);
 
 	return 0;
 }
 
 int __pkvm_prot_finalize(void)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
 	if (params->hcr_el2 & HCR_VM)
 		return -EPERM;
 
 	params->vttbr = kvm_get_vttbr(mmu);
-	params->vtcr = host_kvm.arch.vtcr;
+	params->vtcr = host_mmu.arch.vtcr;
 	params->hcr_el2 |= HCR_VM;
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
 
 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only
@@ -173,7 +173,7 @@ int __pkvm_prot_finalize(void)
 
 static int host_stage2_unmap_dev_all(void)
 {
-	struct kvm_pgtable *pgt = &host_kvm.pgt;
+	struct kvm_pgtable *pgt = &host_mmu.pgt;
 	struct memblock_region *reg;
 	u64 addr = 0;
 	int i, ret;
@@ -258,7 +258,7 @@ static bool range_is_memory(u64 start, u64 end)
 static inline int __host_stage2_idmap(u64 start, u64 end,
 				      enum kvm_pgtable_prot prot)
 {
-	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
+	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
 				      prot, &host_s2_pool);
 }
 
@@ -271,7 +271,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
 #define host_stage2_try(fn, ...)					\
 	({								\
 		int __ret;						\
-		hyp_assert_lock_held(&host_kvm.lock);			\
+		hyp_assert_lock_held(&host_mmu.lock);			\
 		__ret = fn(__VA_ARGS__);				\
 		if (__ret == -ENOMEM) {					\
 			__ret = host_stage2_unmap_dev_all();		\
@@ -294,8 +294,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 	u32 level;
 	int ret;
 
-	hyp_assert_lock_held(&host_kvm.lock);
-	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
+	hyp_assert_lock_held(&host_mmu.lock);
+	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
 	if (ret)
 		return ret;
 
@@ -327,7 +327,7 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
 
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
-	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
 			       addr, size, &host_s2_pool, owner_id);
 }
 
@@ -468,8 +468,8 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 		.get_page_state	= host_get_page_state,
 	};
 
-	hyp_assert_lock_held(&host_kvm.lock);
-	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
+	hyp_assert_lock_held(&host_mmu.lock);
+	return check_page_state_range(&host_mmu.pgt, addr, size, &d);
 }
 
 static int __host_set_page_state_range(u64 addr, u64 size,
