@@ -203,6 +203,9 @@ static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
203
203
{
204
204
int cc ;
205
205
206
+ /* It used the destroy-fast UVC, nothing left to do here */
207
+ if (!leftover -> handle )
208
+ goto done_fast ;
206
209
cc = uv_cmd_nodata (leftover -> handle , UVC_CMD_DESTROY_SEC_CONF , rc , rrc );
207
210
KVM_UV_EVENT (kvm , 3 , "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x" , * rc , * rrc );
208
211
WARN_ONCE (cc , "protvirt destroy leftover vm failed rc %x rrc %x" , * rc , * rrc );
@@ -217,6 +220,7 @@ static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
217
220
free_pages (leftover -> stor_base , get_order (uv_info .guest_base_stor_len ));
218
221
free_pages (leftover -> old_gmap_table , CRST_ALLOC_ORDER );
219
222
vfree (leftover -> stor_var );
223
+ done_fast :
220
224
atomic_dec (& kvm -> mm -> context .protected_count );
221
225
return 0 ;
222
226
}
@@ -250,6 +254,36 @@ static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
250
254
srcu_read_unlock (& kvm -> srcu , srcu_idx );
251
255
}
252
256
257
+ static int kvm_s390_pv_deinit_vm_fast (struct kvm * kvm , u16 * rc , u16 * rrc )
258
+ {
259
+ struct uv_cb_destroy_fast uvcb = {
260
+ .header .cmd = UVC_CMD_DESTROY_SEC_CONF_FAST ,
261
+ .header .len = sizeof (uvcb ),
262
+ .handle = kvm_s390_pv_get_handle (kvm ),
263
+ };
264
+ int cc ;
265
+
266
+ cc = uv_call_sched (0 , (u64 )& uvcb );
267
+ if (rc )
268
+ * rc = uvcb .header .rc ;
269
+ if (rrc )
270
+ * rrc = uvcb .header .rrc ;
271
+ WRITE_ONCE (kvm -> arch .gmap -> guest_handle , 0 );
272
+ KVM_UV_EVENT (kvm , 3 , "PROTVIRT DESTROY VM FAST: rc %x rrc %x" ,
273
+ uvcb .header .rc , uvcb .header .rrc );
274
+ WARN_ONCE (cc , "protvirt destroy vm fast failed handle %llx rc %x rrc %x" ,
275
+ kvm_s390_pv_get_handle (kvm ), uvcb .header .rc , uvcb .header .rrc );
276
+ /* Inteded memory leak on "impossible" error */
277
+ if (!cc )
278
+ kvm_s390_pv_dealloc_vm (kvm );
279
+ return cc ? - EIO : 0 ;
280
+ }
281
+
282
/*
 * Check whether the Ultravisor advertises the destroy-secure-configuration-
 * fast UVC in its list of installed calls.
 */
static inline bool is_destroy_fast_available(void)
{
	return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
}
286
+
253
287
/**
254
288
* kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
255
289
* @kvm: the VM
@@ -271,6 +305,7 @@ static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
271
305
int kvm_s390_pv_set_aside (struct kvm * kvm , u16 * rc , u16 * rrc )
272
306
{
273
307
struct pv_vm_to_be_destroyed * priv ;
308
+ int res = 0 ;
274
309
275
310
lockdep_assert_held (& kvm -> lock );
276
311
/*
@@ -283,14 +318,21 @@ int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
283
318
if (!priv )
284
319
return - ENOMEM ;
285
320
286
- priv -> stor_var = kvm -> arch .pv .stor_var ;
287
- priv -> stor_base = kvm -> arch .pv .stor_base ;
288
- priv -> handle = kvm_s390_pv_get_handle (kvm );
289
- priv -> old_gmap_table = (unsigned long )kvm -> arch .gmap -> table ;
290
- WRITE_ONCE (kvm -> arch .gmap -> guest_handle , 0 );
291
- if (s390_replace_asce (kvm -> arch .gmap )) {
321
+ if (is_destroy_fast_available ()) {
322
+ res = kvm_s390_pv_deinit_vm_fast (kvm , rc , rrc );
323
+ } else {
324
+ priv -> stor_var = kvm -> arch .pv .stor_var ;
325
+ priv -> stor_base = kvm -> arch .pv .stor_base ;
326
+ priv -> handle = kvm_s390_pv_get_handle (kvm );
327
+ priv -> old_gmap_table = (unsigned long )kvm -> arch .gmap -> table ;
328
+ WRITE_ONCE (kvm -> arch .gmap -> guest_handle , 0 );
329
+ if (s390_replace_asce (kvm -> arch .gmap ))
330
+ res = - ENOMEM ;
331
+ }
332
+
333
+ if (res ) {
292
334
kfree (priv );
293
- return - ENOMEM ;
335
+ return res ;
294
336
}
295
337
296
338
kvm_s390_destroy_lower_2g (kvm );
@@ -471,6 +513,7 @@ static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
471
513
{
472
514
struct kvm * kvm = container_of (subscription , struct kvm , arch .pv .mmu_notifier );
473
515
u16 dummy ;
516
+ int r ;
474
517
475
518
/*
476
519
* No locking is needed since this is the last thread of the last user of this
@@ -479,7 +522,9 @@ static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
479
522
* unregistered. This means that if this notifier runs, then the
480
523
* struct kvm is still valid.
481
524
*/
482
- kvm_s390_cpus_from_pv (kvm , & dummy , & dummy );
525
+ r = kvm_s390_cpus_from_pv (kvm , & dummy , & dummy );
526
+ if (!r && is_destroy_fast_available () && kvm_s390_pv_get_handle (kvm ))
527
+ kvm_s390_pv_deinit_vm_fast (kvm , & dummy , & dummy );
483
528
}
484
529
485
530
static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
0 commit comments