Commit 3c40777

yosrym93 authored and sean-jc committed
KVM: selftests: Extend vmx_tsc_adjust_test to cover SVM
Add SVM L1 code to run the nested guest, and allow the test to run
with SVM as well as VMX.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251021074736.1324328-8-yosry.ahmed@linux.dev
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 91423b0 commit 3c40777
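
For context, the invariant the test exercises: on x86, a direct WRMSR to
IA32_TSC is architecturally mirrored into IA32_TSC_ADJUST, i.e. moving the
TSC by some delta moves TSC_ADJUST by the same delta. The test performs such
a write once from L1 and once from a nested (L2) guest running with a large
TSC offset, and checks that both writes land in L1's TSC_ADJUST. A minimal
toy model of that coupling (illustration only; the struct and function names
below are invented, not selftest code):

#include <stdint.h>

/* Toy model (invented names) of the TSC / TSC_ADJUST coupling. */
struct tsc_model {
	uint64_t tsc;		/* current counter value */
	int64_t tsc_adjust;	/* IA32_TSC_ADJUST */
};

/* WRMSR(IA32_TSC, val): the applied delta also lands in TSC_ADJUST. */
static void model_write_tsc(struct tsc_model *m, uint64_t val)
{
	m->tsc_adjust += (int64_t)(val - m->tsc);
	m->tsc = val;
}

With TSC_ADJUST_VALUE = 1ll << 32, one such write from L1 plus one from L2
should leave TSC_ADJUST at roughly -2 * TSC_ADJUST_VALUE, which is what
check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE) verifies at the end of
l1_guest_code.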

2 files changed: 45 additions, 26 deletions

tools/testing/selftests/kvm/Makefile.kvm

Lines changed: 1 addition & 1 deletion
@@ -92,6 +92,7 @@ TEST_GEN_PROGS_x86 += x86/nested_close_kvm_test
 TEST_GEN_PROGS_x86 += x86/nested_emulation_test
 TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
 TEST_GEN_PROGS_x86 += x86/nested_invalid_cr3_test
+TEST_GEN_PROGS_x86 += x86/nested_tsc_adjust_test
 TEST_GEN_PROGS_x86 += x86/nested_tsc_scaling_test
 TEST_GEN_PROGS_x86 += x86/platform_info_test
 TEST_GEN_PROGS_x86 += x86/pmu_counters_test
@@ -119,7 +120,6 @@ TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state
 TEST_GEN_PROGS_x86 += x86/vmx_msrs_test
 TEST_GEN_PROGS_x86 += x86/vmx_invalid_nested_guest_state
 TEST_GEN_PROGS_x86 += x86/vmx_set_nested_state_test
-TEST_GEN_PROGS_x86 += x86/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86 += x86/apic_bus_clock_test
 TEST_GEN_PROGS_x86 += x86/xapic_ipi_test
 TEST_GEN_PROGS_x86 += x86/xapic_state_test

tools/testing/selftests/kvm/x86/vmx_tsc_adjust_test.c renamed to tools/testing/selftests/kvm/x86/nested_tsc_adjust_test.c

Lines changed: 44 additions & 25 deletions
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * vmx_tsc_adjust_test
- *
  * Copyright (C) 2018, Google LLC.
  *
  * IA32_TSC_ADJUST test
@@ -22,6 +20,7 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #include <string.h>
 #include <sys/ioctl.h>
@@ -35,6 +34,8 @@
 #define TSC_ADJUST_VALUE (1ll << 32)
 #define TSC_OFFSET_VALUE -(1ll << 48)
 
+#define L2_GUEST_STACK_SIZE 64
+
 enum {
 	PORT_ABORT = 0x1000,
 	PORT_REPORT,
@@ -72,32 +73,47 @@ static void l2_guest_code(void)
 	__asm__ __volatile__("vmcall");
 }
 
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_guest_code(void *data)
 {
-#define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-	uint32_t control;
 
+	/* Set TSC from L1 and make sure TSC_ADJUST is updated correctly */
 	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
 	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
 	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
 
-	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
-	GUEST_ASSERT(load_vmcs(vmx_pages));
-
-	/* Prepare the VMCS for L2 execution. */
-	prepare_vmcs(vmx_pages, l2_guest_code,
-		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
-	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
-	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
-	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
-
-	GUEST_ASSERT(!vmlaunch());
-	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	/*
+	 * Run L2 with TSC_OFFSET. L2 will write to TSC, and L1 is not
+	 * intercepting the write so it should update L1's TSC_ADJUST.
+	 */
+	if (this_cpu_has(X86_FEATURE_VMX)) {
+		struct vmx_pages *vmx_pages = data;
+		uint32_t control;
+
+		GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+		GUEST_ASSERT(load_vmcs(vmx_pages));
+
+		prepare_vmcs(vmx_pages, l2_guest_code,
+			     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+		control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
+		control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
+		vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
+		vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
+
+		GUEST_ASSERT(!vmlaunch());
+		GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	} else {
+		struct svm_test_data *svm = data;
+
+		generic_svm_setup(svm, l2_guest_code,
+				  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+		svm->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
+		run_guest(svm->vmcb, svm->vmcb_gpa);
+		GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	}
 
 	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
-
 	GUEST_DONE();
 }
 
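Only the trailing vmcall of l2_guest_code is visible as context in the hunk
above. For orientation, a sketch of what the L2 side of such a test plausibly
looks like, reusing this file's TSC_OFFSET_VALUE / TSC_ADJUST_VALUE constants
and its check_ia32_tsc_adjust helper (an assumption for illustration, not
part of this diff):

/* Sketch (assumption, not from this diff): the L2 half of the test. */
static void l2_guest_code(void)
{
	/* L2 runs with TSC_OFFSET applied; recover L1's view of the TSC. */
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	/*
	 * Move the TSC down by another TSC_ADJUST_VALUE.  L1 does not
	 * intercept the WRMSR, so the delta must surface in TSC_ADJUST.
	 */
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);

	/* Exit to L1. */
	__asm__ __volatile__("vmcall");
}

Note that on AMD, L1 asserts SVM_EXIT_VMMCALL even though L2 executes the
Intel-flavored VMCALL; presumably KVM's handling of the cross-vendor
hypercall instruction makes this work, letting the L2 code stay
vendor-agnostic.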
@@ -109,16 +125,19 @@ static void report(int64_t val)
 
 int main(int argc, char *argv[])
 {
-	vm_vaddr_t vmx_pages_gva;
+	vm_vaddr_t nested_gva;
 	struct kvm_vcpu *vcpu;
 
-	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+		     kvm_cpu_has(X86_FEATURE_SVM));
 
-	vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);
+	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+	if (kvm_cpu_has(X86_FEATURE_VMX))
+		vcpu_alloc_vmx(vm, &nested_gva);
+	else
+		vcpu_alloc_svm(vm, &nested_gva);
 
-	/* Allocate VMX pages and shared descriptors (vmx_pages). */
-	vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	vcpu_args_set(vcpu, 1, vmx_pages_gva);
+	vcpu_args_set(vcpu, 1, nested_gva);
 
 	for (;;) {
 		struct ucall uc;

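To try the renamed test, the usual kselftest flow should work (exact paths
can vary by tree; this assumes the in-tree layout implied by Makefile.kvm
above):

$ make -C tools/testing/selftests/kvm
$ ./tools/testing/selftests/kvm/x86/nested_tsc_adjust_test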