Commit 9b1feec

KVM: selftests: Continuously reap dirty ring while vCPU is running
Continue collecting entries from the dirty ring for the entire time the
vCPU is running.  Collecting exactly once all but guarantees the vCPU
will encounter a "ring full" event and stop.  While testing ring full is
interesting, stopping and doing nothing is not, especially for larger
intervals, as the test then effectively does nothing for a much longer
time.

To balance continuous collection with letting the guest make forward
progress, chunk the interval waiting into 1ms loops (which also makes
the math dead simple).

To maintain coverage for "ring full", collect entries on subsequent
iterations if and only if the ring has been filled at least once, i.e.
let the ring fill up (if the interval allows), but after that
continuously empty it so that the vCPU can keep running.

Opportunistically drop the unnecessary zero-initialization of "count".

Reviewed-by: Maxim Levitsky <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent f2228aa commit 9b1feec
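
The heart of the change is the reworked wait-and-collect loop in run_test().
As a distilled sketch, condensed from the final hunk below (all identifiers
are the test's own; declarations and surrounding logic are trimmed):

	/* Wait out the interval in 1ms chunks, reaping as we go. */
	for (i = 0; i < p->interval; i++) {
		usleep(1000);

		/* Bitmap harvesting is destructive; collect only on pass 0. */
		if (i && host_log_mode != LOG_MODE_DIRTY_RING)
			continue;

		/* Let the ring fill once, then keep it drained thereafter. */
		if (READ_ONCE(dirty_ring_vcpu_ring_full))
			saw_dirty_ring_full = true;
		if (i && !saw_dirty_ring_full)
			continue;

		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
					     bmap, host_num_pages,
					     &ring_buf_idx);

		/* If the vCPU stopped on a full ring, release it. */
		if (READ_ONCE(dirty_ring_vcpu_ring_full))
			sem_post(&sem_vcpu_cont);
	}

Waiting in 1ms chunks rather than a single usleep(p->interval * 1000) is
what lets the collector observe and resolve a "ring full" event mid-interval.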

1 file changed: +46 −17 lines

tools/testing/selftests/kvm/dirty_log_test.c

@@ -340,8 +340,6 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
 	struct kvm_dirty_gfn *cur;
 	uint32_t count = 0;
 
-	dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
-
 	while (true) {
 		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
 		if (!dirty_gfn_is_dirtied(cur))
@@ -360,17 +358,11 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
 	return count;
 }
 
-static void dirty_ring_continue_vcpu(void)
-{
-	pr_info("Notifying vcpu to continue\n");
-	sem_post(&sem_vcpu_cont);
-}
-
 static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 					   void *bitmap, uint32_t num_pages,
 					   uint32_t *ring_buf_idx)
 {
-	uint32_t count = 0, cleared;
+	uint32_t count, cleared;
 
 	/* Only have one vcpu */
 	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
@@ -385,9 +377,6 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 	 */
 	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
 		    "with collected (%u)", cleared, count);
-
-	if (READ_ONCE(dirty_ring_vcpu_ring_full))
-		dirty_ring_continue_vcpu();
 }
 
 static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
@@ -404,7 +393,6 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
 		sem_post(&sem_vcpu_stop);
 		pr_info("Dirty ring full, waiting for it to be collected\n");
 		sem_wait(&sem_vcpu_cont);
-		pr_info("vcpu continues now.\n");
 		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
 	} else {
 		TEST_ASSERT(false, "Invalid guest sync status: "
@@ -755,11 +743,52 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
 
 	while (iteration < p->iterations) {
+		bool saw_dirty_ring_full = false;
+		unsigned long i;
+
+		dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
+
 		/* Give the vcpu thread some time to dirty some pages */
-		usleep(p->interval * 1000);
-		log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
-					     bmap, host_num_pages,
-					     &ring_buf_idx);
+		for (i = 0; i < p->interval; i++) {
+			usleep(1000);
+
+			/*
+			 * Reap dirty pages while the guest is running so that
+			 * dirty ring full events are resolved, i.e. so that a
+			 * larger interval doesn't always end up with a vCPU
+			 * that's effectively blocked.  Collecting while the
+			 * guest is running also verifies KVM doesn't lose any
+			 * state.
+			 *
+			 * For bitmap modes, KVM overwrites the entire bitmap,
+			 * i.e. collecting the bitmaps is destructive.  Collect
+			 * the bitmap only on the first pass, otherwise this
+			 * test would lose track of dirty pages.
+			 */
+			if (i && host_log_mode != LOG_MODE_DIRTY_RING)
+				continue;
+
+			/*
+			 * For the dirty ring, empty the ring on subsequent
+			 * passes only if the ring was filled at least once,
+			 * to verify KVM's handling of a full ring (emptying
+			 * the ring on every pass would make it unlikely the
+			 * vCPU would ever fill the ring).
+			 */
+			if (READ_ONCE(dirty_ring_vcpu_ring_full))
+				saw_dirty_ring_full = true;
+			if (i && !saw_dirty_ring_full)
+				continue;
+
+			log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
+						     bmap, host_num_pages,
+						     &ring_buf_idx);
+
+			if (READ_ONCE(dirty_ring_vcpu_ring_full)) {
+				pr_info("Dirty ring emptied, restarting vCPU\n");
+				sem_post(&sem_vcpu_cont);
+			}
+		}
 
 		/*
 		 * See vcpu_sync_stop_requested definition for details on why

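For context on the semaphore handshake above: when the guest fills the ring,
the vCPU thread blocks until the main thread empties it.  Condensed from the
dirty_ring_after_vcpu_run() hunk and the new loop body (the initial
WRITE_ONCE(..., true) is an assumption from surrounding code not quoted in
the hunk):

	/* vCPU thread, on a KVM_EXIT_DIRTY_RING_FULL exit: */
	WRITE_ONCE(dirty_ring_vcpu_ring_full, true);	/* assumed; set before pausing */
	sem_post(&sem_vcpu_stop);
	sem_wait(&sem_vcpu_cont);			/* blocked until the ring is reaped */
	WRITE_ONCE(dirty_ring_vcpu_ring_full, false);

	/* Main thread, inside the new collection loop, after reaping: */
	if (READ_ONCE(dirty_ring_vcpu_ring_full))
		sem_post(&sem_vcpu_cont);		/* replaces dirty_ring_continue_vcpu() */

Posting sem_vcpu_cont from inside the per-millisecond loop, rather than once
per interval, is what keeps the vCPU from sitting idle for the remainder of a
long interval.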