Commit 9a91f65 (parent: 0a818b3)
KVM: selftests: Post to sem_vcpu_stop if and only if vcpu_stop is true
When running dirty_log_test using the dirty ring, post to sem_vcpu_stop
only when the main thread has explicitly requested that the vCPU stop.
Synchronizing the vCPU and main thread whenever the dirty ring happens to
be full is unnecessary, as KVM's ABI is to actively prevent the vCPU from
running until the ring is no longer full. I.e. attempting to run the vCPU
will simply result in KVM_EXIT_DIRTY_RING_FULL without ever entering the
guest. And if KVM doesn't exit, e.g. lets the vCPU dirty more pages, then
that's a KVM bug worth finding.

Posting to sem_vcpu_stop on ring full also makes it difficult to get the
test logic right, e.g. it's easy to let the vCPU keep running when it
shouldn't, as a ring full can essentially happen at any given time.

Opportunistically rework the handling of dirty_ring_vcpu_ring_full to
leave it set for the remainder of the iteration in order to simplify the
surrounding logic.

Link: https://lore.kernel.org/r/20250111003004.1235645-12-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
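For context: vcpu_stop and vcpu_handle_sync_stop() come from earlier
patches in this series and are not shown in the diff below. The following
is a minimal sketch of the handshake the subject line refers to,
reconstructed from the commit message rather than copied from the actual
source; the declarations and READ_ONCE() usage are assumptions.

	#include <semaphore.h>
	#include <stdbool.h>

	/* Globals as named in the commit message; these declarations are
	 * part of the sketch, not copied from the test. */
	static sem_t sem_vcpu_stop;
	static sem_t sem_vcpu_cont;
	static bool vcpu_stop;

	/*
	 * Hedged reconstruction of vcpu_handle_sync_stop(): the vCPU
	 * thread parks itself if and only if the main thread has
	 * explicitly requested a stop.  A full dirty ring no longer
	 * triggers this handshake on its own.
	 */
	static void vcpu_handle_sync_stop(void)
	{
		if (READ_ONCE(vcpu_stop)) {
			sem_post(&sem_vcpu_stop);	/* signal: vCPU has stopped */
			sem_wait(&sem_vcpu_cont);	/* block until told to resume */
		}
	}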

1 file changed, 4 insertions(+), 14 deletions(-)

tools/testing/selftests/kvm/dirty_log_test.c

@@ -379,12 +379,8 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
 	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
 		vcpu_handle_sync_stop();
 	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL) {
-		/* Update the flag first before pause */
 		WRITE_ONCE(dirty_ring_vcpu_ring_full, true);
-		sem_post(&sem_vcpu_stop);
-		pr_info("Dirty ring full, waiting for it to be collected\n");
-		sem_wait(&sem_vcpu_cont);
-		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+		vcpu_handle_sync_stop();
 	} else {
 		TEST_ASSERT(false, "Invalid guest sync status: "
 			    "exit_reason=%s",
@@ -743,7 +739,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
 
 	while (iteration < p->iterations) {
-		bool saw_dirty_ring_full = false;
 		unsigned long i;
 
 		dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
@@ -775,19 +770,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
775770
* the ring on every pass would make it unlikely the
776771
* vCPU would ever fill the fing).
777772
*/
778-
if (READ_ONCE(dirty_ring_vcpu_ring_full))
779-
saw_dirty_ring_full = true;
780-
if (i && !saw_dirty_ring_full)
773+
if (i && !READ_ONCE(dirty_ring_vcpu_ring_full))
781774
continue;
782775

783776
log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
784777
bmap, host_num_pages,
785778
&ring_buf_idx);
786-
787-
if (READ_ONCE(dirty_ring_vcpu_ring_full)) {
788-
pr_info("Dirty ring emptied, restarting vCPU\n");
789-
sem_post(&sem_vcpu_cont);
790-
}
791779
}
792780

793781
/*
@@ -829,6 +817,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		WRITE_ONCE(host_quit, true);
 		sync_global_to_guest(vm, iteration);
 
+		WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+
 		sem_post(&sem_vcpu_cont);
 	}
 

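Taken together, the hunks give dirty_ring_vcpu_ring_full a simple
per-iteration lifecycle. The fragment below merely strings the changed
lines together for readability; it is a condensed view of the patched
code, not additional source:

	/* vCPU thread, on KVM_EXIT_DIRTY_RING_FULL: latch the flag for the
	 * remainder of the iteration, then take the common stop path. */
	WRITE_ONCE(dirty_ring_vcpu_ring_full, true);
	vcpu_handle_sync_stop();

	/* Main thread, each collection pass: always collect on the first
	 * pass, then keep collecting while the vCPU reports a full ring. */
	if (i && !READ_ONCE(dirty_ring_vcpu_ring_full))
		continue;
	log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
				     bmap, host_num_pages, &ring_buf_idx);

	/* Main thread, end of iteration: clear the flag before releasing
	 * the vCPU, so the next iteration starts from a clean slate. */
	WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
	sem_post(&sem_vcpu_cont);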