Skip to content

Commit 476c9bd

Browse files
suomilewis and bonzini
authored and committed
KVM: x86: Prepare MSR bitmaps for userspace tracked MSRs
Prepare vmx and svm for a subsequent change that ensures the MSR permission bitmap is set to allow an MSR that userspace is tracking to force a vmx_vmexit in the guest.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
[agraf: rebase, adapt SVM scheme to nested changes that came in between]
Signed-off-by: Alexander Graf <graf@amazon.com>
Message-Id: <20200925143422.21718-5-graf@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 51de815 commit 476c9bd

File tree

4 files changed

+77
-70
lines changed

4 files changed

+77
-70
lines changed

arch/x86/kvm/svm/svm.c

Lines changed: 35 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -564,7 +564,7 @@ static bool valid_msr_intercept(u32 index)
564564
return false;
565565
}
566566

567-
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
567+
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
568568
{
569569
u8 bit_write;
570570
unsigned long tmp;
@@ -583,7 +583,7 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
583583
return !!test_bit(bit_write, &tmp);
584584
}
585585

586-
static void set_msr_interception(u32 *msrpm, unsigned msr,
586+
static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
587587
int read, int write)
588588
{
589589
u8 bit_read, bit_write;
@@ -609,24 +609,29 @@ static void set_msr_interception(u32 *msrpm, unsigned msr,
609609
msrpm[offset] = tmp;
610610
}
611611

612-
static u32 *svm_vcpu_init_msrpm(void)
612+
static u32 *svm_vcpu_alloc_msrpm(void)
613613
{
614-
int i;
615-
u32 *msrpm;
616614
struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
615+
u32 *msrpm;
617616

618617
if (!pages)
619618
return NULL;
620619

621620
msrpm = page_address(pages);
622621
memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
623622

623+
return msrpm;
624+
}
625+
626+
static void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
627+
{
628+
int i;
629+
624630
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
625631
if (!direct_access_msrs[i].always)
626632
continue;
627-
set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
633+
set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
628634
}
629-
return msrpm;
630635
}
631636

632637
static void svm_vcpu_free_msrpm(u32 *msrpm)
@@ -677,26 +682,26 @@ static void init_msrpm_offsets(void)
677682
}
678683
}
679684

680-
static void svm_enable_lbrv(struct vcpu_svm *svm)
685+
static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
681686
{
682-
u32 *msrpm = svm->msrpm;
687+
struct vcpu_svm *svm = to_svm(vcpu);
683688

684689
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
685-
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
686-
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
687-
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
688-
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
690+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
691+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
692+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
693+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
689694
}
690695

691-
static void svm_disable_lbrv(struct vcpu_svm *svm)
696+
static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
692697
{
693-
u32 *msrpm = svm->msrpm;
698+
struct vcpu_svm *svm = to_svm(vcpu);
694699

695700
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
696-
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
697-
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
698-
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
699-
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
701+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
702+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
703+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
704+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
700705
}
701706

702707
void disable_nmi_singlestep(struct vcpu_svm *svm)
@@ -1230,14 +1235,19 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
12301235

12311236
svm->nested.hsave = page_address(hsave_page);
12321237

1233-
svm->msrpm = svm_vcpu_init_msrpm();
1238+
svm->msrpm = svm_vcpu_alloc_msrpm();
12341239
if (!svm->msrpm)
12351240
goto error_free_hsave_page;
12361241

1237-
svm->nested.msrpm = svm_vcpu_init_msrpm();
1242+
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
1243+
1244+
svm->nested.msrpm = svm_vcpu_alloc_msrpm();
12381245
if (!svm->nested.msrpm)
12391246
goto error_free_msrpm;
12401247

1248+
/* We only need the L1 pass-through MSR state, so leave vcpu as NULL */
1249+
svm_vcpu_init_msrpm(vcpu, svm->nested.msrpm);
1250+
12411251
svm->vmcb = page_address(vmcb_page);
12421252
svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT);
12431253
svm->asid_generation = 0;
@@ -2574,7 +2584,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
25742584
* We update the L1 MSR bit as well since it will end up
25752585
* touching the MSR anyway now.
25762586
*/
2577-
set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2587+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
25782588
break;
25792589
case MSR_IA32_PRED_CMD:
25802590
if (!msr->host_initiated &&
@@ -2589,7 +2599,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
25892599
break;
25902600

25912601
wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
2592-
set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
2602+
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
25932603
break;
25942604
case MSR_AMD64_VIRT_SPEC_CTRL:
25952605
if (!msr->host_initiated &&
@@ -2653,9 +2663,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
26532663
svm->vmcb->save.dbgctl = data;
26542664
vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
26552665
if (data & (1ULL<<0))
2656-
svm_enable_lbrv(svm);
2666+
svm_enable_lbrv(vcpu);
26572667
else
2658-
svm_disable_lbrv(svm);
2668+
svm_disable_lbrv(vcpu);
26592669
break;
26602670
case MSR_VM_HSAVE_PA:
26612671
svm->nested.hsave_msr = data;

arch/x86/kvm/vmx/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4776,7 +4776,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
47764776

47774777
if (vmx_pt_mode_is_host_guest()) {
47784778
vmx->pt_desc.guest.ctl = 0;
4779-
pt_update_intercept_for_msr(vmx);
4779+
pt_update_intercept_for_msr(vcpu);
47804780
}
47814781

47824782
return 0;

arch/x86/kvm/vmx/vmx.c

Lines changed: 40 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -338,7 +338,7 @@ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
338338
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
339339

340340
static u32 vmx_segment_access_rights(struct kvm_segment *var);
341-
static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
341+
static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
342342
u32 msr, int type);
343343

344344
void vmx_vmexit(void);
@@ -1980,7 +1980,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19801980
* in the merging. We update the vmcs01 here for L1 as well
19811981
* since it will end up touching the MSR anyway now.
19821982
*/
1983-
vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
1983+
vmx_disable_intercept_for_msr(vcpu,
19841984
MSR_IA32_SPEC_CTRL,
19851985
MSR_TYPE_RW);
19861986
break;
@@ -2016,8 +2016,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
20162016
* vmcs02.msr_bitmap here since it gets completely overwritten
20172017
* in the merging.
20182018
*/
2019-
vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
2020-
MSR_TYPE_W);
2019+
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W);
20212020
break;
20222021
case MSR_IA32_CR_PAT:
20232022
if (!kvm_pat_valid(data))
@@ -2067,7 +2066,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
20672066
return 1;
20682067
vmcs_write64(GUEST_IA32_RTIT_CTL, data);
20692068
vmx->pt_desc.guest.ctl = data;
2070-
pt_update_intercept_for_msr(vmx);
2069+
pt_update_intercept_for_msr(vcpu);
20712070
break;
20722071
case MSR_IA32_RTIT_STATUS:
20732072
if (!pt_can_write_msr(vmx))
@@ -3584,9 +3583,11 @@ void free_vpid(int vpid)
35843583
spin_unlock(&vmx_vpid_lock);
35853584
}
35863585

3587-
static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
3586+
static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
35883587
u32 msr, int type)
35893588
{
3589+
struct vcpu_vmx *vmx = to_vmx(vcpu);
3590+
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
35903591
int f = sizeof(unsigned long);
35913592

35923593
if (!cpu_has_vmx_msr_bitmap())
@@ -3622,9 +3623,11 @@ static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bit
36223623
}
36233624
}
36243625

3625-
static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
3626+
static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
36263627
u32 msr, int type)
36273628
{
3629+
struct vcpu_vmx *vmx = to_vmx(vcpu);
3630+
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
36283631
int f = sizeof(unsigned long);
36293632

36303633
if (!cpu_has_vmx_msr_bitmap())
@@ -3660,13 +3663,13 @@ static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitm
36603663
}
36613664
}
36623665

3663-
static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
3664-
u32 msr, int type, bool value)
3666+
static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
3667+
u32 msr, int type, bool value)
36653668
{
36663669
if (value)
3667-
vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
3670+
vmx_enable_intercept_for_msr(vcpu, msr, type);
36683671
else
3669-
vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
3672+
vmx_disable_intercept_for_msr(vcpu, msr, type);
36703673
}
36713674

36723675
static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
@@ -3684,8 +3687,8 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
36843687
return mode;
36853688
}
36863689

3687-
static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
3688-
u8 mode)
3690+
static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu,
3691+
unsigned long *msr_bitmap, u8 mode)
36893692
{
36903693
int msr;
36913694

@@ -3700,11 +3703,11 @@ static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
37003703
* TPR reads and writes can be virtualized even if virtual interrupt
37013704
* delivery is not in use.
37023705
*/
3703-
vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
3706+
vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
37043707
if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
3705-
vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
3706-
vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
3707-
vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
3708+
vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
3709+
vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
3710+
vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
37083711
}
37093712
}
37103713
}
@@ -3720,30 +3723,24 @@ void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
37203723
return;
37213724

37223725
if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
3723-
vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
3726+
vmx_update_msr_bitmap_x2apic(vcpu, msr_bitmap, mode);
37243727

37253728
vmx->msr_bitmap_mode = mode;
37263729
}
37273730

3728-
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx)
3731+
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
37293732
{
3730-
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3733+
struct vcpu_vmx *vmx = to_vmx(vcpu);
37313734
bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
37323735
u32 i;
37333736

3734-
vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS,
3735-
MSR_TYPE_RW, flag);
3736-
vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE,
3737-
MSR_TYPE_RW, flag);
3738-
vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK,
3739-
MSR_TYPE_RW, flag);
3740-
vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH,
3741-
MSR_TYPE_RW, flag);
3737+
vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
3738+
vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
3739+
vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
3740+
vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
37423741
for (i = 0; i < vmx->pt_desc.addr_range; i++) {
3743-
vmx_set_intercept_for_msr(msr_bitmap,
3744-
MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
3745-
vmx_set_intercept_for_msr(msr_bitmap,
3746-
MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
3742+
vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
3743+
vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
37473744
}
37483745
}
37493746

@@ -6753,18 +6750,18 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
67536750
goto free_pml;
67546751

67556752
msr_bitmap = vmx->vmcs01.msr_bitmap;
6756-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
6757-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
6758-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
6759-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
6760-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
6761-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
6762-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
6753+
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
6754+
vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
6755+
vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
6756+
vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
6757+
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
6758+
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
6759+
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
67636760
if (kvm_cstate_in_guest(vcpu->kvm)) {
6764-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
6765-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
6766-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
6767-
vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
6761+
vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
6762+
vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
6763+
vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
6764+
vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
67686765
}
67696766
vmx->msr_bitmap_mode = 0;
67706767

arch/x86/kvm/vmx/vmx.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -330,7 +330,7 @@ bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
330330
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
331331
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
332332
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
333-
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
333+
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
334334
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
335335
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
336336
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

0 commit comments

Comments (0)