@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * vmx_tsc_adjust_test
- *
  * Copyright (C) 2018, Google LLC.
  *
  * IA32_TSC_ADJUST test
@@ -22,6 +20,7 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #include <string.h>
 #include <sys/ioctl.h>
@@ -35,6 +34,8 @@
 #define TSC_ADJUST_VALUE (1ll << 32)
 #define TSC_OFFSET_VALUE -(1ll << 48)
 
+#define L2_GUEST_STACK_SIZE 64
+
 enum {
 	PORT_ABORT = 0x1000,
 	PORT_REPORT,
@@ -72,32 +73,47 @@ static void l2_guest_code(void)
 	__asm__ __volatile__("vmcall");
 }
 
-static void l1_guest_code(struct vmx_pages *vmx_pages)
+static void l1_guest_code(void *data)
 {
-#define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-	uint32_t control;
 
+	/* Set TSC from L1 and make sure TSC_ADJUST is updated correctly */
 	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
 	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
 	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
 
-	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
-	GUEST_ASSERT(load_vmcs(vmx_pages));
-
-	/* Prepare the VMCS for L2 execution. */
-	prepare_vmcs(vmx_pages, l2_guest_code,
-		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
-	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
-	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
-	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
-
-	GUEST_ASSERT(!vmlaunch());
-	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	/*
+	 * Run L2 with TSC_OFFSET. L2 will write to TSC, and L1 is not
+	 * intercepting the write so it should update L1's TSC_ADJUST.
+	 */
+	if (this_cpu_has(X86_FEATURE_VMX)) {
+		struct vmx_pages *vmx_pages = data;
+		uint32_t control;
+
+		GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+		GUEST_ASSERT(load_vmcs(vmx_pages));
+
+		prepare_vmcs(vmx_pages, l2_guest_code,
+			     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+		control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
+		control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
+		vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
+		vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
+
+		GUEST_ASSERT(!vmlaunch());
+		GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	} else {
+		struct svm_test_data *svm = data;
+
+		generic_svm_setup(svm, l2_guest_code,
+				  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+		svm->vmcb->control.tsc_offset = TSC_OFFSET_VALUE;
+		run_guest(svm->vmcb, svm->vmcb_gpa);
+		GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+	}
 
 	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
-
 	GUEST_DONE();
 }
 
@@ -109,16 +125,19 @@ static void report(int64_t val)
 
 int main(int argc, char *argv[])
 {
-	vm_vaddr_t vmx_pages_gva;
+	vm_vaddr_t nested_gva;
 	struct kvm_vcpu *vcpu;
 
-	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) ||
+		     kvm_cpu_has(X86_FEATURE_SVM));
 
-	vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);
+	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+	if (kvm_cpu_has(X86_FEATURE_VMX))
+		vcpu_alloc_vmx(vm, &nested_gva);
+	else
+		vcpu_alloc_svm(vm, &nested_gva);
 
-	/* Allocate VMX pages and shared descriptors (vmx_pages). */
-	vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	vcpu_args_set(vcpu, 1, vmx_pages_gva);
+	vcpu_args_set(vcpu, 1, nested_gva);
 
 	for (;;) {
 		struct ucall uc;
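
Note on the unchanged helper: check_ia32_tsc_adjust() sits outside these hunks. Architecturally, a WRMSR to IA32_TSC updates IA32_TSC_ADJUST by the same delta, which is why the guest's two non-intercepted TSC writes (one from L1, one from L2 under TSC_OFFSET) are expected to leave roughly -1 * TSC_ADJUST_VALUE and then -2 * TSC_ADJUST_VALUE in the MSR. A minimal sketch of what such a helper could look like, assuming the selftests' GUEST_SYNC/GUEST_ASSERT ucall helpers; the file's actual implementation may differ:

/*
 * Hypothetical sketch, not the file's exact code: read IA32_TSC_ADJUST,
 * report it to the host for logging, and assert it does not exceed the
 * expected bound. The bound is an upper limit rather than an equality
 * because the TSC keeps ticking between the guest's rdtsc() and wrmsr(),
 * so the accumulated adjustment lands slightly below the target value.
 */
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust = rdmsr(MSR_IA32_TSC_ADJUST);

	GUEST_SYNC(adjust);
	GUEST_ASSERT(adjust <= max);
}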
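The last hunk ends inside main()'s event loop. The remainder is untouched by this commit and follows the standard KVM selftest ucall dispatch pattern; a hedged sketch of that pattern using the generic selftest API, not necessarily this file's exact code:

		/* Run the vCPU and dispatch on the ucall it raised. */
		vcpu_run(vcpu);
		TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u",
			    vcpu->run->exit_reason);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			report(uc.args[1]);	/* log the observed TSC_ADJUST */
			break;
		case UCALL_DONE:
			goto done;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

In this pattern a done: label after the loop would be followed by kvm_vm_free(vm) and a normal exit.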