@@ -292,6 +292,18 @@ int vmx_wrmsr_pat(struct vcpu *vcpu, uint64_t value)
 	return 0;
 }
 
+static void load_pdptrs(struct vcpu *vcpu)
+{
+	uint64_t guest_cr3 = exec_vmread(VMX_GUEST_CR3);
+	/* TODO: check whether guest cr3 is valid */
+	uint64_t *guest_cr3_hva = (uint64_t *)gpa2hva(vcpu->vm, guest_cr3);
+
+	exec_vmwrite64(VMX_GUEST_PDPTE0_FULL, get_pgentry(guest_cr3_hva + 0UL));
+	exec_vmwrite64(VMX_GUEST_PDPTE1_FULL, get_pgentry(guest_cr3_hva + 1UL));
+	exec_vmwrite64(VMX_GUEST_PDPTE2_FULL, get_pgentry(guest_cr3_hva + 2UL));
+	exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, get_pgentry(guest_cr3_hva + 3UL));
+}
+
 static bool is_cr0_write_valid(struct vcpu *vcpu, uint64_t cr0)
 {
 	/* Shouldn't set always off bit */
@@ -348,7 +360,8 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 {
 	uint64_t cr0_vmx;
 	uint32_t entry_ctrls;
-	bool paging_enabled = is_paging_enabled(vcpu);
+	bool old_paging_enabled = is_paging_enabled(vcpu);
+	uint64_t cr0_changed_bits = vcpu_get_cr0(vcpu) ^ cr0;
 
 	if (!is_cr0_write_valid(vcpu, cr0)) {
 		pr_dbg("Invalid cr0 write operation from guest");
@@ -360,37 +373,41 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 	 * When loading a control register, reserved bit should always set
 	 * to the value previously read.
 	 */
-	cr0 = (cr0 & ~CR0_RESERVED_MASK) |
-		(vcpu_get_cr0(vcpu) & CR0_RESERVED_MASK);
-
-	if (((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) &&
-			!paging_enabled && ((cr0 & CR0_PG) != 0UL)) {
-		/* Enable long mode */
-		pr_dbg("VMM: Enable long mode");
-		entry_ctrls = exec_vmread32(VMX_ENTRY_CONTROLS);
-		entry_ctrls |= VMX_ENTRY_CTLS_IA32E_MODE;
-		exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);
-
-		vcpu_set_efer(vcpu,
-			vcpu_get_efer(vcpu) | MSR_IA32_EFER_LMA_BIT);
-	} else if (((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) &&
-			paging_enabled && ((cr0 & CR0_PG) == 0UL)) {
-		/* Disable long mode */
-		pr_dbg("VMM: Disable long mode");
-		entry_ctrls = exec_vmread32(VMX_ENTRY_CONTROLS);
-		entry_ctrls &= ~VMX_ENTRY_CTLS_IA32E_MODE;
-		exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);
-
-		vcpu_set_efer(vcpu,
-			vcpu_get_efer(vcpu) & ~MSR_IA32_EFER_LMA_BIT);
-	} else {
-		/* CR0.PG unchanged. */
+	cr0 &= ~CR0_RESERVED_MASK;
+
+	if (!old_paging_enabled && ((cr0 & CR0_PG) != 0UL)) {
+		if ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) {
+			/* Enable long mode */
+			pr_dbg("VMM: Enable long mode");
+			entry_ctrls = exec_vmread32(VMX_ENTRY_CONTROLS);
+			entry_ctrls |= VMX_ENTRY_CTLS_IA32E_MODE;
+			exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);
+
+			vcpu_set_efer(vcpu,
+				vcpu_get_efer(vcpu) | MSR_IA32_EFER_LMA_BIT);
+		} else if (is_pae(vcpu)) {
+			/* paging enabled with PAE: load PDPTEs into the VMCS */
+			load_pdptrs(vcpu);
+		} else {
+		}
+	} else if (old_paging_enabled && ((cr0 & CR0_PG) == 0UL)) {
+		if ((vcpu_get_efer(vcpu) & MSR_IA32_EFER_LME_BIT) != 0UL) {
+			/* Disable long mode */
+			pr_dbg("VMM: Disable long mode");
+			entry_ctrls = exec_vmread32(VMX_ENTRY_CONTROLS);
+			entry_ctrls &= ~VMX_ENTRY_CTLS_IA32E_MODE;
+			exec_vmwrite32(VMX_ENTRY_CONTROLS, entry_ctrls);
+
+			vcpu_set_efer(vcpu,
+				vcpu_get_efer(vcpu) & ~MSR_IA32_EFER_LMA_BIT);
+		} else {
+		}
 	}
 
-	/* If CR0.CD or CR0.NW get changed */
-	if (((vcpu_get_cr0(vcpu) ^ cr0) & (CR0_CD | CR0_NW)) != 0UL) {
-		/* No action if only CR0.NW is changed */
-		if (((vcpu_get_cr0(vcpu) ^ cr0) & CR0_CD) != 0UL) {
+	/* If CR0.CD or CR0.NW changed */
+	if ((cr0_changed_bits & (CR0_CD | CR0_NW)) != 0UL) {
+		/* No action if only CR0.NW changed */
+		if ((cr0_changed_bits & CR0_CD) != 0UL) {
 			if ((cr0 & CR0_CD) != 0UL) {
 				/*
 				 * When the guest requests to set CR0.CD, we don't allow
@@ -409,6 +426,10 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 		}
 	}
 
+	if ((cr0_changed_bits & (CR0_PG | CR0_WP)) != 0UL) {
+		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
+	}
+
 	/* CR0 has no always off bits, except the always on bits, and reserved
 	 * bits, allow to set according to guest.
 	 */
@@ -426,7 +447,7 @@ void vmx_write_cr0(struct vcpu *vcpu, uint64_t cr0)
 			cr0, cr0_vmx);
 }
 
-static bool is_cr4_write_valid(uint64_t cr4)
+static bool is_cr4_write_valid(struct vcpu *vcpu, uint64_t cr4)
 {
 	/* Check if guest try to set fixed to 0 bits or reserved bits */
 	if ((cr4 & cr4_always_off_mask) != 0U)
@@ -440,6 +461,12 @@ static bool is_cr4_write_valid(uint64_t cr4)
 	if ((cr4 & CR4_PCIDE) != 0UL)
 		return false;
 
+	if (is_long_mode(vcpu)) {
+		if ((cr4 & CR4_PAE) == 0UL) {
+			return false;
+		}
+	}
+
 	return true;
 }
 
@@ -481,13 +508,24 @@ static bool is_cr4_write_valid(uint64_t cr4)
 void vmx_write_cr4(struct vcpu *vcpu, uint64_t cr4)
 {
 	uint64_t cr4_vmx;
+	uint64_t old_cr4 = vcpu_get_cr4(vcpu);
 
-	if (!is_cr4_write_valid(cr4)) {
+	if (!is_cr4_write_valid(vcpu, cr4)) {
 		pr_dbg("Invalid cr4 write operation from guest");
 		vcpu_inject_gp(vcpu, 0U);
 		return;
 	}
 
+	if (((cr4 ^ old_cr4) & (CR4_PGE | CR4_PSE | CR4_PAE |
+			CR4_SMEP | CR4_SMAP | CR4_PKE)) != 0UL) {
+		if (((cr4 & CR4_PAE) != 0UL) && is_paging_enabled(vcpu) &&
+				(is_long_mode(vcpu))) {
+			load_pdptrs(vcpu);
+		}
+
+		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
+	}
+
 	/* Aways off bits and reserved bits has been filtered above */
 	cr4_vmx = cr4_always_on_mask | cr4;
 	exec_vmwrite(VMX_GUEST_CR4, cr4_vmx & 0xFFFFFFFFUL);
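
Note on the TODO in load_pdptrs(): guest CR3 is dereferenced without any validation. Below is a minimal sketch of the kind of check the TODO calls for; it is not part of this commit. The name load_pdptrs_checked() and the macro PDPTE_RSVD_MASK (a mask of the PDPTE bits that must be zero under PAE paging) are hypothetical, and the sketch assumes gpa2hva() returns NULL for a guest address that is not backed by memory. The helpers exec_vmread(), gpa2hva(), get_pgentry() and exec_vmwrite64() are reused exactly as in the patch.

/*
 * Sketch only: validate guest CR3 and the four PAE PDPTEs before
 * loading them into the VMCS.  Returns false if the PDPT cannot be
 * read or a present PDPTE has reserved bits set.
 */
static bool load_pdptrs_checked(struct vcpu *vcpu)
{
	uint64_t guest_cr3 = exec_vmread(VMX_GUEST_CR3);
	/* under PAE paging, CR3 points to a 32-byte aligned PDPT */
	uint64_t pdpt_gpa = guest_cr3 & ~0x1FUL;
	uint64_t *pdpt_hva = (uint64_t *)gpa2hva(vcpu->vm, pdpt_gpa);
	uint32_t i;

	if (pdpt_hva == NULL) {
		/* CR3 does not point into guest memory */
		return false;
	}

	for (i = 0U; i < 4U; i++) {
		uint64_t pdpte = get_pgentry(pdpt_hva + i);

		/* a present PDPTE (bit 0 set) with reserved bits set is invalid */
		if (((pdpte & 1UL) != 0UL) &&
				((pdpte & PDPTE_RSVD_MASK) != 0UL)) {
			return false;
		}
	}

	exec_vmwrite64(VMX_GUEST_PDPTE0_FULL, get_pgentry(pdpt_hva + 0UL));
	exec_vmwrite64(VMX_GUEST_PDPTE1_FULL, get_pgentry(pdpt_hva + 1UL));
	exec_vmwrite64(VMX_GUEST_PDPTE2_FULL, get_pgentry(pdpt_hva + 2UL));
	exec_vmwrite64(VMX_GUEST_PDPTE3_FULL, get_pgentry(pdpt_hva + 3UL));

	return true;
}

A caller such as vmx_write_cr0() or vmx_write_cr4() could then inject #GP(0) to the guest when this returns false, which is the architectural behavior when a MOV to CR0/CR4 loads invalid PDPTEs in PAE mode.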