@@ -236,24 +236,6 @@ static enum log_mode_t host_log_mode;
 static pthread_t vcpu_thread;
 static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
 
-static void vcpu_kick(void)
-{
-	pthread_kill(vcpu_thread, SIG_IPI);
-}
-
-/*
- * In our test we do signal tricks, let's use a better version of
- * sem_wait to avoid signal interrupts
- */
-static void sem_wait_until(sem_t *sem)
-{
-	int ret;
-
-	do
-		ret = sem_wait(sem);
-	while (ret == -1 && errno == EINTR);
-}
-
 static bool clear_log_supported(void)
 {
 	return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
@@ -292,17 +274,14 @@ static void vcpu_handle_sync_stop(void)
 		/* It means main thread is sleeping waiting */
 		atomic_set(&vcpu_sync_stop_requested, false);
 		sem_post(&sem_vcpu_stop);
-		sem_wait_until(&sem_vcpu_cont);
+		sem_wait(&sem_vcpu_cont);
 	}
 }
 
-static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void default_after_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
 
-	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
-		    "vcpu run failed: errno=%d", err);
-
 	TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
 		    "Invalid guest sync status: exit_reason=%s",
 		    exit_reason_str(run->exit_reason));
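For reference, the stop/continue protocol that the simplified code relies on is sketched below. This is a standalone illustration, not the test itself: the semaphore and flag names mirror the test's globals, but the worker thread and the fake "verify" step are invented here. With the SIG_IPI kick gone, nothing interrupts sem_wait(), which is why the EINTR-retrying sem_wait_until() wrapper can be dropped.

/*
 * Minimal sketch of the sem_vcpu_stop/sem_vcpu_cont handshake.  Names are
 * borrowed from dirty_log_test.c; the scaffolding is illustrative only.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t sem_vcpu_stop, sem_vcpu_cont;
static atomic_bool vcpu_sync_stop_requested;
static atomic_bool host_quit;

/* vCPU-thread side: called at every GUEST_SYNC point. */
static void vcpu_handle_sync_stop(void)
{
	if (atomic_load(&vcpu_sync_stop_requested)) {
		/* Ack the request, tell the host we stopped, wait for release. */
		atomic_store(&vcpu_sync_stop_requested, false);
		sem_post(&sem_vcpu_stop);
		sem_wait(&sem_vcpu_cont);	/* plain sem_wait(): no signals, no EINTR */
	}
}

static void *vcpu_worker(void *arg)
{
	/* Stand-in for the real loop, which runs the vCPU between sync points. */
	while (!atomic_load(&host_quit))
		vcpu_handle_sync_stop();
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);
	pthread_create(&t, NULL, vcpu_worker, NULL);

	/* Host side: request a stop, wait for it, "verify", then release. */
	atomic_store(&vcpu_sync_stop_requested, true);
	sem_wait(&sem_vcpu_stop);
	printf("vCPU parked; verifying dirty bitmap...\n");
	atomic_store(&host_quit, true);
	sem_post(&sem_vcpu_cont);

	pthread_join(t, NULL);
	return 0;
}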
@@ -371,7 +350,6 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
 			    "%u != %u", cur->slot, slot);
 		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
 			    "0x%llx >= 0x%x", cur->offset, num_pages);
-		//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
 		__set_bit_le(cur->offset, bitmap);
 		dirty_ring_last_page = cur->offset;
 		dirty_gfn_set_collected(cur);
@@ -382,13 +360,6 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
 	return count;
 }
 
-static void dirty_ring_wait_vcpu(void)
-{
-	/* This makes sure that hardware PML cache flushed */
-	vcpu_kick();
-	sem_wait_until(&sem_vcpu_stop);
-}
-
 static void dirty_ring_continue_vcpu(void)
 {
 	pr_info("Notifying vcpu to continue\n");
@@ -400,18 +371,6 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 					   uint32_t *ring_buf_idx)
 {
 	uint32_t count = 0, cleared;
-	bool continued_vcpu = false;
-
-	dirty_ring_wait_vcpu();
-
-	if (!dirty_ring_vcpu_ring_full) {
-		/*
-		 * This is not a ring-full event, it's safe to allow
-		 * vcpu to continue
-		 */
-		dirty_ring_continue_vcpu();
-		continued_vcpu = true;
-	}
 
 	/* Only have one vcpu */
 	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
@@ -427,34 +386,28 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
 		    "with collected (%u)", cleared, count);
 
-	if (!continued_vcpu) {
-		TEST_ASSERT(dirty_ring_vcpu_ring_full,
-			    "Didn't continue vcpu even without ring full");
+	if (READ_ONCE(dirty_ring_vcpu_ring_full))
 		dirty_ring_continue_vcpu();
-	}
 
 	pr_info("Iteration %ld collected %u pages\n", iteration, count);
 }
 
-static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
 
 	/* A ucall-sync or ring-full event is allowed */
 	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
 		/* We should allow this to continue */
 		;
-	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
-		   (ret == -1 && err == EINTR)) {
+	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL) {
 		/* Update the flag first before pause */
-		WRITE_ONCE(dirty_ring_vcpu_ring_full,
-			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
+		WRITE_ONCE(dirty_ring_vcpu_ring_full, true);
 		sem_post(&sem_vcpu_stop);
-		pr_info("vcpu stops because %s...\n",
-			dirty_ring_vcpu_ring_full ?
-			"dirty ring is full" : "vcpu is kicked out");
-		sem_wait_until(&sem_vcpu_cont);
+		pr_info("Dirty ring full, waiting for it to be collected\n");
+		sem_wait(&sem_vcpu_cont);
 		pr_info("vcpu continues now.\n");
+		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
 	} else {
 		TEST_ASSERT(false, "Invalid guest sync status: "
 			    "exit_reason=%s",
@@ -473,7 +426,7 @@ struct log_mode {
 				    void *bitmap, uint32_t num_pages,
 				    uint32_t *ring_buf_idx);
 	/* Hook to call when after each vcpu run */
-	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
+	void (*after_vcpu_run)(struct kvm_vcpu *vcpu);
 } log_modes[LOG_MODE_NUM] = {
 	{
 		.name = "dirty-log",
@@ -544,47 +497,24 @@ static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
 }
 
-static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct log_mode *mode = &log_modes[host_log_mode];
 
 	if (mode->after_vcpu_run)
-		mode->after_vcpu_run(vcpu, ret, err);
+		mode->after_vcpu_run(vcpu);
 }
 
 static void *vcpu_worker(void *data)
 {
-	int ret;
 	struct kvm_vcpu *vcpu = data;
 	uint64_t pages_count = 0;
-	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
-						 + sizeof(sigset_t));
-	sigset_t *sigset = (sigset_t *) &sigmask->sigset;
-
-	/*
-	 * SIG_IPI is unblocked atomically while in KVM_RUN.  It causes the
-	 * ioctl to return with -EINTR, but it is still pending and we need
-	 * to accept it with the sigwait.
-	 */
-	sigmask->len = 8;
-	pthread_sigmask(0, NULL, sigset);
-	sigdelset(sigset, SIG_IPI);
-	vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);
-
-	sigemptyset(sigset);
-	sigaddset(sigset, SIG_IPI);
 
 	while (!READ_ONCE(host_quit)) {
-		/* Clear any existing kick signals */
 		pages_count += TEST_PAGES_PER_LOOP;
 		/* Let the guest dirty the random pages */
-		ret = __vcpu_run(vcpu);
-		if (ret == -1 && errno == EINTR) {
-			int sig = -1;
-			sigwait(sigset, &sig);
-			assert(sig == SIG_IPI);
-		}
-		log_mode_after_vcpu_run(vcpu, ret, errno);
+		vcpu_run(vcpu);
+		log_mode_after_vcpu_run(vcpu);
 	}
 
 	pr_info("Dirtied %" PRIu64 " pages\n", pages_count);
@@ -838,7 +768,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		 * we need to stop vcpu when verify data.
 		 */
		atomic_set(&vcpu_sync_stop_requested, true);
-		sem_wait_until(&sem_vcpu_stop);
+		sem_wait(&sem_vcpu_stop);
 		/*
 		 * NOTE: for dirty ring, it's possible that we didn't stop at
 		 * GUEST_SYNC but instead we stopped because ring is full;
@@ -905,7 +835,6 @@ int main(int argc, char *argv[])
 		.interval = TEST_HOST_LOOP_INTERVAL,
 	};
 	int opt, i;
-	sigset_t sigset;
 
 	sem_init(&sem_vcpu_stop, 0, 0);
 	sem_init(&sem_vcpu_cont, 0, 0);
@@ -964,11 +893,6 @@ int main(int argc, char *argv[])
 
 	srandom(time(0));
 
-	/* Ensure that vCPU threads start with SIG_IPI blocked. */
-	sigemptyset(&sigset);
-	sigaddset(&sigset, SIG_IPI);
-	pthread_sigmask(SIG_BLOCK, &sigset, NULL);
-
 	if (host_log_mode_option == LOG_MODE_ALL) {
 		/* Run each log mode */
 		for (i = 0; i < LOG_MODE_NUM; i++) {