From 2ca76c12c48b7a2792b21a673ca01a6d8fb2e835 Mon Sep 17 00:00:00 2001
From: Anish Moorthy <amoorthy@google.com>
Date: Thu, 15 Feb 2024 23:54:01 +0000
Subject: [PATCH] KVM: selftests: Report per-vcpu demand paging rate from
 demand paging test

Using the overall demand paging rate to measure performance can be
slightly misleading when vCPU accesses are not overlapped. Adding more
vCPUs will (usually) increase the overall demand paging rate even if
performance remains constant or even degrades on a per-vcpu basis. As
such, it makes sense to report both the total and per-vcpu paging
rates.

Signed-off-by: Anish Moorthy <amoorthy@google.com>
Link: https://lore.kernel.org/r/20240215235405.368539-11-amoorthy@google.com
[sean: fix formatting]
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 tools/testing/selftests/kvm/demand_paging_test.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index bf3609f71854..ddacbc6fd1ed 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -133,6 +133,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct uffd_desc **uffd_descs = NULL;
 	struct timespec start;
 	struct timespec ts_diff;
+	double vcpu_paging_rate;
 	struct kvm_vm *vm;
 	int i;
 
@@ -191,11 +192,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 			uffd_stop_demand_paging(uffd_descs[i]);
 	}
 
-	pr_info("Total guest execution time: %ld.%.9lds\n",
+	pr_info("Total guest execution time:\t%ld.%.9lds\n",
 		ts_diff.tv_sec, ts_diff.tv_nsec);
-	pr_info("Overall demand paging rate: %f pgs/sec\n",
-		memstress_args.vcpu_args[0].pages * nr_vcpus /
-		((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC));
+
+	vcpu_paging_rate = memstress_args.vcpu_args[0].pages /
+			   ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC);
+	pr_info("Per-vcpu demand paging rate:\t%f pgs/sec/vcpu\n",
+		vcpu_paging_rate);
+	pr_info("Overall demand paging rate:\t%f pgs/sec\n",
+		vcpu_paging_rate * nr_vcpus);
 
 	memstress_destroy_vm(vm);
 
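
Note (not part of the patch): below is a minimal standalone sketch of the rate
arithmetic the diff introduces, using made-up inputs (4 vCPUs, 262144 pages per
vCPU, a 2.5 s run) to show why the two numbers are reported separately. The
pages_per_vcpu, nr_vcpus, and ts_diff values are hypothetical stand-ins for the
test's real measurements.

	/*
	 * Illustration only. Per-vcpu vs. overall demand paging rate,
	 * computed in the same shape as the patched test code, but with
	 * hypothetical inputs rather than memstress_args.
	 */
	#include <stdio.h>
	#include <time.h>

	#define NSEC_PER_SEC 1000000000L

	int main(void)
	{
		long pages_per_vcpu = 262144;	/* hypothetical pages touched per vCPU */
		int nr_vcpus = 4;		/* hypothetical vCPU count */
		struct timespec ts_diff = { .tv_sec = 2, .tv_nsec = 500000000 };

		double seconds = (double)ts_diff.tv_sec +
				 (double)ts_diff.tv_nsec / NSEC_PER_SEC;

		/* Per-vcpu rate first; the overall rate is just rate * nr_vcpus. */
		double vcpu_paging_rate = pages_per_vcpu / seconds;

		printf("Per-vcpu demand paging rate:\t%f pgs/sec/vcpu\n",
		       vcpu_paging_rate);
		printf("Overall demand paging rate:\t%f pgs/sec\n",
		       vcpu_paging_rate * nr_vcpus);
		return 0;
	}

Doubling nr_vcpus in this sketch doubles the overall rate even if the per-vcpu
rate stays flat (or drops), which is the ambiguity the commit message calls out
and why the test now prints both figures.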