@@ -101,6 +101,17 @@ void __init kvm_hyp_reserve(void)
101 101 		 hyp_mem_base);
102 102 }
103103
104 +static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
105 +{
106 +	if (host_kvm->arch.pkvm.handle) {
107 +		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
108 +					  host_kvm->arch.pkvm.handle));
109 +	}
110 +
111 +	host_kvm->arch.pkvm.handle = 0;
112 +	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
113 +}
114+
104 115 /*
105 116  * Allocates and donates memory for hypervisor VM structs at EL2.
106 117  *
@@ -181,7 +192,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
181 192 	return 0;
182 193
183 194 destroy_vm:
184     -	pkvm_destroy_hyp_vm(host_kvm);
    195 +	__pkvm_destroy_hyp_vm(host_kvm);
185 196 	return ret;
186 197 free_vm:
187 198 	free_pages_exact(hyp_vm, hyp_vm_sz);
@@ -194,23 +205,19 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
194 205 {
195 206 	int ret = 0;
196 207
197     -	mutex_lock(&host_kvm->lock);
    208 +	mutex_lock(&host_kvm->arch.config_lock);
198 209 	if (!host_kvm->arch.pkvm.handle)
199 210 		ret = __pkvm_create_hyp_vm(host_kvm);
200     -	mutex_unlock(&host_kvm->lock);
    211 +	mutex_unlock(&host_kvm->arch.config_lock);
201 212
202 213 	return ret;
203 214 }
204 215
204215
205 216 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
206 217 {
207     -	if (host_kvm->arch.pkvm.handle) {
208     -		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
209     -					  host_kvm->arch.pkvm.handle));
210     -	}
211     -
212     -	host_kvm->arch.pkvm.handle = 0;
213     -	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
    218 +	mutex_lock(&host_kvm->arch.config_lock);
    219 +	__pkvm_destroy_hyp_vm(host_kvm);
    220 +	mutex_unlock(&host_kvm->arch.config_lock);
214 221 }
215222
216 223 int pkvm_init_host_vm(struct kvm *host_kvm)
0 commit comments