@@ -15,6 +15,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/ioctl.h>
+#include <pthread.h>
 
 #include "test_util.h"
 #include "kvm_util.h"
@@ -80,6 +81,86 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
 #define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
 #define INVALID_SYNC_FIELD 0x80000000
 
+/*
+ * Toggle CR4.PAE while KVM is processing SREGS; EFER.LME=1 with CR4.PAE=0 is
+ * illegal, and KVM's MMU heavily relies on vCPU state being valid.
+ */
+static noinline void *race_sregs_cr4(void *arg)
+{
+        struct kvm_run *run = (struct kvm_run *)arg;
+        __u64 *cr4 = &run->s.regs.sregs.cr4;
+        __u64 pae_enabled = *cr4;
+        __u64 pae_disabled = *cr4 & ~X86_CR4_PAE;
+
+        for (;;) {
+                WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
+                WRITE_ONCE(*cr4, pae_enabled);
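+                /*
+                 * Spin so that the KVM_RUN in the main thread has a wide
+                 * window in which to consume SREGS between the two CR4 writes.
+                 */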
+                asm volatile(".rept 512\n\t"
+                             "nop\n\t"
+                             ".endr");
+                WRITE_ONCE(*cr4, pae_disabled);
+
+                pthread_testcancel();
+        }
+
+        return NULL;
+}
+
+static void race_sync_regs(void *racer)
+{
+        const time_t TIMEOUT = 2; /* seconds, roughly */
+        struct kvm_translation tr;
+        struct kvm_vcpu *vcpu;
+        struct kvm_run *run;
+        struct kvm_vm *vm;
+        pthread_t thread;
+        time_t t;
+
+        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+        run = vcpu->run;
+
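+        /* Run the vCPU once so KVM populates run->s.regs.sregs. */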
+        run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
+        vcpu_run(vcpu);
+        run->kvm_valid_regs = 0;
+
+        /*
+         * Selftests run 64-bit guests by default; both EFER.LME and CR4.PAE
+         * should already be set in guest state.
+         */
+        TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
+                    (run->s.regs.sregs.efer & EFER_LME),
+                    "vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
+                    !!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
+                    !!(run->s.regs.sregs.efer & EFER_LME));
+
+        ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);
+
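+        /* Hammer vCPU ioctls for ~TIMEOUT seconds while the racer runs. */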
+        for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
+                __vcpu_run(vcpu);
+
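+                /*
+                 * KVM_TRANSLATE walks the guest page tables, hitting the MMU
+                 * with whatever SREGS value the racer last wrote.
+                 */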
+                if (racer == race_sregs_cr4) {
+                        tr = (struct kvm_translation) { .linear_address = 0 };
+                        __vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);
+                }
+        }
+
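+        /* The racer honors cancellation only at its pthread_testcancel(). */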
+        ASSERT_EQ(pthread_cancel(thread), 0);
+        ASSERT_EQ(pthread_join(thread, NULL), 0);
+
+        kvm_vm_free(vm);
+}
+
 int main(int argc, char *argv[])
 {
         struct kvm_vcpu *vcpu;
@@ -218,5 +299,7 @@ int main(int argc, char *argv[])
 
         kvm_vm_free(vm);
 
+        race_sync_regs(race_sregs_cr4);
+
         return 0;
 }