
Commit ff0efc7

KVM: selftests: Drop signal/kick from dirty ring testcase
Drop the signal/kick from dirty_log_test's dirty ring handling, as kicking
the vCPU adds marginal value, at the cost of adding significant complexity
to the test.

Asynchronously interrupting the vCPU isn't novel; unless the kernel is
fully tickless, the vCPU will be interrupted by IRQs for any decently
large interval. And exiting to userspace mode in the middle of a sequence
isn't novel either, as the vCPU will do so every time the ring becomes
full.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 67428ee commit ff0efc7
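With the signal/kick gone, the synchronization between the main (collector) thread and the vCPU worker boils down to a plain semaphore handshake gated by the ring-full flag. The following is a minimal, self-contained sketch of that handshake, not the selftest itself; fake_vcpu_run() and collect_ring() are hypothetical stand-ins for KVM_RUN and the real dirty-ring collection, and C11 atomics stand in for READ_ONCE()/WRITE_ONCE().

/*
 * Sketch of the post-patch handshake (assumed shape, not the real test):
 * the vCPU thread parks on a plain sem_wait() when the "ring" fills, and
 * the collector releases it after harvesting.  Build with: gcc -pthread
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t sem_vcpu_stop, sem_vcpu_cont;
static atomic_bool ring_full;
static atomic_bool quit;

/* Stand-in for KVM_RUN: pretend the dirty ring fills on every exit. */
static bool fake_vcpu_run(void)
{
	return true;	/* true == "KVM_EXIT_DIRTY_RING_FULL" */
}

static void *vcpu_worker(void *arg)
{
	(void)arg;

	while (!atomic_load(&quit)) {
		if (fake_vcpu_run()) {
			/* Publish the flag before waking the collector. */
			atomic_store(&ring_full, true);
			sem_post(&sem_vcpu_stop);
			sem_wait(&sem_vcpu_cont);	/* no EINTR loop needed */
			atomic_store(&ring_full, false);
		}
	}
	return NULL;
}

/* Stand-in for dirty_ring_collect_one() + reset. */
static void collect_ring(int iteration)
{
	printf("iteration %d: collected dirty ring\n", iteration);
}

int main(void)
{
	pthread_t vcpu;
	int i;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);
	pthread_create(&vcpu, NULL, vcpu_worker, NULL);

	for (i = 0; i < 3; i++) {
		sem_wait(&sem_vcpu_stop);	/* vCPU parked on ring full */
		collect_ring(i);
		if (atomic_load(&ring_full))
			sem_post(&sem_vcpu_cont);	/* let it run again */
	}

	atomic_store(&quit, true);
	sem_post(&sem_vcpu_cont);	/* in case the vCPU is still parked */
	pthread_join(vcpu, NULL);
	return 0;
}

The sketch deliberately mirrors the patched flow: the flag is published before sem_post(), and the collector only re-releases the vCPU when it actually parked on a ring-full exit.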

File tree

1 file changed (+15, -91 lines)


tools/testing/selftests/kvm/dirty_log_test.c

Lines changed: 15 additions & 91 deletions
@@ -236,24 +236,6 @@ static enum log_mode_t host_log_mode;
 static pthread_t vcpu_thread;
 static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
 
-static void vcpu_kick(void)
-{
-	pthread_kill(vcpu_thread, SIG_IPI);
-}
-
-/*
- * In our test we do signal tricks, let's use a better version of
- * sem_wait to avoid signal interrupts
- */
-static void sem_wait_until(sem_t *sem)
-{
-	int ret;
-
-	do
-		ret = sem_wait(sem);
-	while (ret == -1 && errno == EINTR);
-}
-
 static bool clear_log_supported(void)
 {
 	return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
@@ -292,17 +274,14 @@ static void vcpu_handle_sync_stop(void)
 		/* It means main thread is sleeping waiting */
 		atomic_set(&vcpu_sync_stop_requested, false);
 		sem_post(&sem_vcpu_stop);
-		sem_wait_until(&sem_vcpu_cont);
+		sem_wait(&sem_vcpu_cont);
 	}
 }
 
-static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void default_after_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
 
-	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
-		    "vcpu run failed: errno=%d", err);
-
 	TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
 		    "Invalid guest sync status: exit_reason=%s",
 		    exit_reason_str(run->exit_reason));
@@ -371,7 +350,6 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
 			    "%u != %u", cur->slot, slot);
 		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
 			    "0x%llx >= 0x%x", cur->offset, num_pages);
-		//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
 		__set_bit_le(cur->offset, bitmap);
 		dirty_ring_last_page = cur->offset;
 		dirty_gfn_set_collected(cur);
@@ -382,13 +360,6 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
 	return count;
 }
 
-static void dirty_ring_wait_vcpu(void)
-{
-	/* This makes sure that hardware PML cache flushed */
-	vcpu_kick();
-	sem_wait_until(&sem_vcpu_stop);
-}
-
 static void dirty_ring_continue_vcpu(void)
 {
 	pr_info("Notifying vcpu to continue\n");
@@ -400,18 +371,6 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 					    uint32_t *ring_buf_idx)
 {
 	uint32_t count = 0, cleared;
-	bool continued_vcpu = false;
-
-	dirty_ring_wait_vcpu();
-
-	if (!dirty_ring_vcpu_ring_full) {
-		/*
-		 * This is not a ring-full event, it's safe to allow
-		 * vcpu to continue
-		 */
-		dirty_ring_continue_vcpu();
-		continued_vcpu = true;
-	}
 
 	/* Only have one vcpu */
 	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
@@ -427,34 +386,28 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
 		    "with collected (%u)", cleared, count);
 
-	if (!continued_vcpu) {
-		TEST_ASSERT(dirty_ring_vcpu_ring_full,
-			    "Didn't continue vcpu even without ring full");
+	if (READ_ONCE(dirty_ring_vcpu_ring_full))
 		dirty_ring_continue_vcpu();
-	}
 
 	pr_info("Iteration %ld collected %u pages\n", iteration, count);
 }
 
-static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
 
 	/* A ucall-sync or ring-full event is allowed */
 	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
 		/* We should allow this to continue */
 		;
-	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
-		   (ret == -1 && err == EINTR)) {
+	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL) {
 		/* Update the flag first before pause */
-		WRITE_ONCE(dirty_ring_vcpu_ring_full,
-			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
+		WRITE_ONCE(dirty_ring_vcpu_ring_full, true);
 		sem_post(&sem_vcpu_stop);
-		pr_info("vcpu stops because %s...\n",
-			dirty_ring_vcpu_ring_full ?
-			"dirty ring is full" : "vcpu is kicked out");
-		sem_wait_until(&sem_vcpu_cont);
+		pr_info("Dirty ring full, waiting for it to be collected\n");
+		sem_wait(&sem_vcpu_cont);
 		pr_info("vcpu continues now.\n");
+		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
 	} else {
 		TEST_ASSERT(false, "Invalid guest sync status: "
 			    "exit_reason=%s",
@@ -473,7 +426,7 @@ struct log_mode {
 				     void *bitmap, uint32_t num_pages,
 				     uint32_t *ring_buf_idx);
 	/* Hook to call when after each vcpu run */
-	void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
+	void (*after_vcpu_run)(struct kvm_vcpu *vcpu);
 } log_modes[LOG_MODE_NUM] = {
 	{
 		.name = "dirty-log",
@@ -544,47 +497,24 @@ static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 	mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
 }
 
-static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct log_mode *mode = &log_modes[host_log_mode];
 
 	if (mode->after_vcpu_run)
-		mode->after_vcpu_run(vcpu, ret, err);
+		mode->after_vcpu_run(vcpu);
 }
 
 static void *vcpu_worker(void *data)
 {
-	int ret;
 	struct kvm_vcpu *vcpu = data;
 	uint64_t pages_count = 0;
-	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
-						 + sizeof(sigset_t));
-	sigset_t *sigset = (sigset_t *) &sigmask->sigset;
-
-	/*
-	 * SIG_IPI is unblocked atomically while in KVM_RUN. It causes the
-	 * ioctl to return with -EINTR, but it is still pending and we need
-	 * to accept it with the sigwait.
-	 */
-	sigmask->len = 8;
-	pthread_sigmask(0, NULL, sigset);
-	sigdelset(sigset, SIG_IPI);
-	vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);
-
-	sigemptyset(sigset);
-	sigaddset(sigset, SIG_IPI);
 
 	while (!READ_ONCE(host_quit)) {
-		/* Clear any existing kick signals */
 		pages_count += TEST_PAGES_PER_LOOP;
 		/* Let the guest dirty the random pages */
-		ret = __vcpu_run(vcpu);
-		if (ret == -1 && errno == EINTR) {
-			int sig = -1;
-			sigwait(sigset, &sig);
-			assert(sig == SIG_IPI);
-		}
-		log_mode_after_vcpu_run(vcpu, ret, errno);
+		vcpu_run(vcpu);
+		log_mode_after_vcpu_run(vcpu);
 	}
 
 	pr_info("Dirtied %"PRIu64" pages\n", pages_count);
@@ -838,7 +768,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		 * we need to stop vcpu when verify data.
 		 */
		atomic_set(&vcpu_sync_stop_requested, true);
-		sem_wait_until(&sem_vcpu_stop);
+		sem_wait(&sem_vcpu_stop);
 		/*
 		 * NOTE: for dirty ring, it's possible that we didn't stop at
 		 * GUEST_SYNC but instead we stopped because ring is full;
@@ -905,7 +835,6 @@ int main(int argc, char *argv[])
 		.interval = TEST_HOST_LOOP_INTERVAL,
 	};
 	int opt, i;
-	sigset_t sigset;
 
 	sem_init(&sem_vcpu_stop, 0, 0);
 	sem_init(&sem_vcpu_cont, 0, 0);
@@ -964,11 +893,6 @@ int main(int argc, char *argv[])
 
 	srandom(time(0));
 
-	/* Ensure that vCPU threads start with SIG_IPI blocked. */
-	sigemptyset(&sigset);
-	sigaddset(&sigset, SIG_IPI);
-	pthread_sigmask(SIG_BLOCK, &sigset, NULL);
-
 	if (host_log_mode_option == LOG_MODE_ALL) {
 		/* Run each log mode */
 		for (i = 0; i < LOG_MODE_NUM; i++) {