@@ -158,10 +158,10 @@ bool kvm_is_tdp_enabled(void)
158158
159159void virt_arch_pgd_alloc (struct kvm_vm * vm )
160160{
161- TEST_ASSERT (vm -> mode == VM_MODE_PXXV48_4K , "Attempt to use "
162- "unknown or unsupported guest mode, mode: 0x%x" , vm -> mode );
161+ TEST_ASSERT (vm -> mode == VM_MODE_PXXVYY_4K ,
162+ "Unknown or unsupported guest mode: 0x%x" , vm -> mode );
163163
164- /* If needed, create page map l4 table. */
164+ /* If needed, create the top-level page table. */
165165 if (!vm -> pgd_created ) {
166166 vm -> pgd = vm_alloc_page_table (vm );
167167 vm -> pgd_created = true;
@@ -221,8 +221,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
221221 uint64_t * pte = & vm -> pgd ;
222222 int current_level ;
223223
224- TEST_ASSERT (vm -> mode == VM_MODE_PXXV48_4K ,
225- "Unknown or unsupported guest mode, mode : 0x%x" , vm -> mode );
224+ TEST_ASSERT (vm -> mode == VM_MODE_PXXVYY_4K ,
225+ "Unknown or unsupported guest mode: 0x%x" , vm -> mode );
226226
227227 TEST_ASSERT ((vaddr % pg_size ) == 0 ,
228228 "Virtual address not aligned,\n"
@@ -307,27 +307,28 @@ static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
307307uint64_t * __vm_get_page_table_entry (struct kvm_vm * vm , uint64_t vaddr ,
308308 int * level )
309309{
310+ int va_width = 12 + (vm -> pgtable_levels ) * 9 ;
310311 uint64_t * pte = & vm -> pgd ;
311312 int current_level ;
312313
313314 TEST_ASSERT (!vm -> arch .is_pt_protected ,
314315 "Walking page tables of protected guests is impossible" );
315316
316- TEST_ASSERT (* level >= PG_LEVEL_NONE && * level < PG_LEVEL_NUM ,
317+ TEST_ASSERT (* level >= PG_LEVEL_NONE && * level <= vm -> pgtable_levels ,
317318 "Invalid PG_LEVEL_* '%d'" , * level );
318319
319- TEST_ASSERT (vm -> mode == VM_MODE_PXXV48_4K , "Attempt to use "
320- "unknown or unsupported guest mode, mode: 0x%x" , vm -> mode );
320+ TEST_ASSERT (vm -> mode == VM_MODE_PXXVYY_4K ,
321+ "Unknown or unsupported guest mode: 0x%x" , vm -> mode );
321322 TEST_ASSERT (sparsebit_is_set (vm -> vpages_valid ,
322323 (vaddr >> vm -> page_shift )),
323324 "Invalid virtual address, vaddr: 0x%lx" ,
324325 vaddr );
325326 /*
326- * Based on the mode check above there are 48 bits in the vaddr, so
327- * shift 16 to sign extend the last bit (bit-47),
327+ * Check that the vaddr is canonical, i.e. sign-extended from va_width bits.
328328 */
329- TEST_ASSERT (vaddr == (((int64_t )vaddr << 16 ) >> 16 ),
330- "Canonical check failed. The virtual address is invalid." );
329+ TEST_ASSERT (vaddr ==
330+ (((int64_t )vaddr << (64 - va_width ) >> (64 - va_width ))),
331+ "Canonical check failed. The virtual address is invalid." );
331332
332333 for (current_level = vm -> pgtable_levels ;
333334 current_level > PG_LEVEL_4K ;
@@ -520,7 +521,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
520521{
521522 struct kvm_sregs sregs ;
522523
523- TEST_ASSERT_EQ (vm -> mode , VM_MODE_PXXV48_4K );
524+ TEST_ASSERT (vm -> mode == VM_MODE_PXXVYY_4K ,
525+ "Unknown or unsupported guest mode: 0x%x" , vm -> mode );
524526
525527 /* Set mode specific system register values. */
526528 vcpu_sregs_get (vcpu , & sregs );
@@ -534,6 +536,8 @@ static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
534536 sregs .cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR ;
535537 if (kvm_cpu_has (X86_FEATURE_XSAVE ))
536538 sregs .cr4 |= X86_CR4_OSXSAVE ;
539+ if (vm -> pgtable_levels == 5 )
540+ sregs .cr4 |= X86_CR4_LA57 ;
537541 sregs .efer |= (EFER_LME | EFER_LMA | EFER_NX );
538542
539543 kvm_seg_set_unusable (& sregs .ldt );