// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025, Google LLC.
 *
 * Test KVM's ability to save and restore nested state when the L1 guest
 * is using 5-level paging and the L2 guest is using 4-level paging.
 *
 * This test would have failed prior to commit 9245fd6b8531 ("KVM: x86:
 * model canonical checks more precisely").
 */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

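/*
 * Bits 63:56 of this value are all 1s, so the address is canonical with
 * 5-level paging (bits 63:57 replicate bit 56), but non-canonical with
 * 4-level paging (bits 63:48 must replicate bit 47, and bits 55:48 here
 * are 0x2b).
 */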
#define LA57_GS_BASE 0xff2bc0311fb00000ull

static void l2_guest_code(void)
{
	/*
	 * Sync with L0 to trigger save/restore.  After resuming, execute
	 * VMCALL to exit back to L1.
	 */
	GUEST_SYNC(1);
	vmcall();
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t guest_cr4;
	vm_paddr_t pml5_pa, pml4_pa;
	uint64_t *pml5;
	uint64_t exit_reason;

	/* Set GS_BASE to a value that is canonical only with LA57. */
	wrmsr(MSR_GS_BASE, LA57_GS_BASE);
	GUEST_ASSERT(rdmsr(MSR_GS_BASE) == LA57_GS_BASE);

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

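	/*
	 * prepare_vmcs() initializes L2's VMCS guest state from L1's
	 * current state, so L2 inherits the LA57-only-canonical GS_BASE
	 * set above.
	 */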
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/*
	 * Set up L2 with a 4-level page table by pointing its CR3 at L1's
	 * first PML4 table and clearing CR4.LA57.  This creates the
	 * CR4.LA57 mismatch between L1 and L2 that exercises the bug.
	 */
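	/* CR3 points at a PML5 table only if L1 is using 5-level paging. */
	GUEST_ASSERT(get_cr4() & X86_CR4_LA57);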
	pml5_pa = get_cr3() & PHYSICAL_PAGE_MASK;
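	/*
	 * The PML5 page is identity mapped by the host (see main()), so
	 * its physical address can be dereferenced directly.
	 */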
	pml5 = (uint64_t *)pml5_pa;
	pml4_pa = pml5[0] & PHYSICAL_PAGE_MASK;
	vmwrite(GUEST_CR3, pml4_pa);

	guest_cr4 = vmreadz(GUEST_CR4);
	guest_cr4 &= ~X86_CR4_LA57;
	vmwrite(GUEST_CR4, guest_cr4);

	GUEST_ASSERT(!vmlaunch());

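	/*
	 * L2's GUEST_SYNC() exits to L0, not L1, so the first exit L1
	 * observes is the VMCALL executed by L2 after the save/restore.
	 */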
	exit_reason = vmreadz(VM_EXIT_REASON);
	GUEST_ASSERT(exit_reason == EXIT_REASON_VMCALL);
}

static void guest_code(struct vmx_pages *vmx_pages)
{
	l1_guest_code(vmx_pages);
	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

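	/*
	 * LA57 is required so that L1 can run with 5-level paging, and
	 * KVM_CAP_NESTED_STATE is required to save/restore while L2 is
	 * active.
	 */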
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_LA57));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/*
	 * L1 needs to read its own PML5 table to set up L2.  Identity map
	 * the PML5 table so that L1 can dereference its physical address.
	 */
	virt_map(vm, vm->pgd, vm->pgd, 1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vcpu, 1, vmx_pages_gva);

	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		TEST_ASSERT(uc.args[1] == stage,
			    "Expected stage %d, got stage %lu", stage, (ulong)uc.args[1]);
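		/*
		 * Stage 1 is signalled from L2, so the save/restore below
		 * runs while L2, with 4-level paging, is active.
		 */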
		if (stage == 1) {
			pr_info("L2 is active; performing save/restore.\n");
			state = vcpu_save_state(vcpu);

			kvm_vm_release(vm);

			/* Restore state in a new VM. */
			vcpu = vm_recreate_with_one_vcpu(vm);
			vcpu_load_state(vcpu, state);
			kvm_x86_state_cleanup(state);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}