 #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

 extern unsigned long hyp_nr_cpus;
-struct host_kvm host_kvm;
+struct host_mmu host_mmu;

 static struct hyp_pool host_s2_pool;

 static void host_lock_component(void)
 {
-	hyp_spin_lock(&host_kvm.lock);
+	hyp_spin_lock(&host_mmu.lock);
 }

 static void host_unlock_component(void)
 {
-	hyp_spin_unlock(&host_kvm.lock);
+	hyp_spin_unlock(&host_mmu.lock);
 }

 static void hyp_lock_component(void)
@@ -88,7 +88,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
 	if (ret)
 		return ret;

-	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
+	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
 		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
 		.zalloc_page = host_s2_zalloc_page,
 		.phys_to_virt = hyp_phys_to_virt,
@@ -109,53 +109,53 @@ static void prepare_host_vtcr(void)
 	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
 	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

-	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+	host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					  id_aa64mmfr1_el1_sys_val, phys_shift);
 }

 static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

 int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	int ret;

 	prepare_host_vtcr();
-	hyp_spin_lock_init(&host_kvm.lock);
-	mmu->arch = &host_kvm.arch;
+	hyp_spin_lock_init(&host_mmu.lock);
+	mmu->arch = &host_mmu.arch;

 	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;

-	ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
-					&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
+	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
+					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
 	if (ret)
 		return ret;

-	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
-	mmu->pgt = &host_kvm.pgt;
+	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
+	mmu->pgt = &host_mmu.pgt;
 	atomic64_set(&mmu->vmid.id, 0);

 	return 0;
 }

 int __pkvm_prot_finalize(void)
 {
-	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

 	if (params->hcr_el2 & HCR_VM)
 		return -EPERM;

 	params->vttbr = kvm_get_vttbr(mmu);
-	params->vtcr = host_kvm.arch.vtcr;
+	params->vtcr = host_mmu.arch.vtcr;
 	params->hcr_el2 |= HCR_VM;
 	kvm_flush_dcache_to_poc(params, sizeof(*params));

 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

 	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
@@ -173,7 +173,7 @@ int __pkvm_prot_finalize(void)

 static int host_stage2_unmap_dev_all(void)
 {
-	struct kvm_pgtable *pgt = &host_kvm.pgt;
+	struct kvm_pgtable *pgt = &host_mmu.pgt;
 	struct memblock_region *reg;
 	u64 addr = 0;
 	int i, ret;
@@ -258,7 +258,7 @@ static bool range_is_memory(u64 start, u64 end)
 static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
 {
-	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
+	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
				      prot, &host_s2_pool);
 }

@@ -271,7 +271,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
 #define host_stage2_try(fn, ...)				\
	({							\
		int __ret;					\
-		hyp_assert_lock_held(&host_kvm.lock);		\
+		hyp_assert_lock_held(&host_mmu.lock);		\
		__ret = fn(__VA_ARGS__);			\
		if (__ret == -ENOMEM) {				\
			__ret = host_stage2_unmap_dev_all();	\
@@ -294,8 +294,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 	u32 level;
 	int ret;

-	hyp_assert_lock_held(&host_kvm.lock);
-	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
+	hyp_assert_lock_held(&host_mmu.lock);
+	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
 	if (ret)
 		return ret;

@@ -327,7 +327,7 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,

 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
-	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+	return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
			       addr, size, &host_s2_pool, owner_id);
 }

@@ -468,8 +468,8 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 		.get_page_state = host_get_page_state,
 	};

-	hyp_assert_lock_held(&host_kvm.lock);
-	return check_page_state_range(&host_kvm.pgt, addr, size, &d);
+	hyp_assert_lock_held(&host_mmu.lock);
+	return check_page_state_range(&host_mmu.pgt, addr, size, &d);
 }

 static int __host_set_page_state_range(u64 addr, u64 size,
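For context on the rename: every hunk in this diff touches the same global object, so its rough shape can be inferred from the fields accessed above (lock, arch.mmu, arch.vtcr, pgt, mm_ops). The following is a minimal sketch of that layout, reconstructed only from the accesses visible in this commit; it is an assumption, not the authoritative definition in the nVHE headers.

/*
 * Sketch only: field order and exact types are inferred from the accesses
 * made in mem_protect.c above, not copied from the kernel headers.
 */
struct host_mmu {
	struct kvm_arch arch;			/* provides .mmu (stage-2 MMU state) and .vtcr */
	struct kvm_pgtable pgt;			/* host stage-2 identity-map page table */
	struct kvm_pgtable_mm_ops mm_ops;	/* allocator hooks backed by host_s2_pool */
	hyp_spinlock_t lock;			/* taken via host_lock_component() */
};
extern struct host_mmu host_mmu;		/* single hypervisor-wide instance */

Note that "host_kvm" and "host_mmu" are the same length, so wrapped call arguments that were aligned under the old name keep their alignment; that is why some continuation lines appear above as unmodified context rather than as changed lines.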