@@ -338,7 +338,7 @@ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
 module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
 
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
-static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
							  u32 msr, int type);
 
 void vmx_vmexit(void);
@@ -1980,7 +1980,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		 * in the merging. We update the vmcs01 here for L1 as well
		 * since it will end up touching the MSR anyway now.
		 */
-		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
+		vmx_disable_intercept_for_msr(vcpu,
					      MSR_IA32_SPEC_CTRL,
					      MSR_TYPE_RW);
		break;
@@ -2016,8 +2016,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		 * vmcs02.msr_bitmap here since it gets completely overwritten
		 * in the merging.
		 */
-		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
-					      MSR_TYPE_W);
+		vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W);
		break;
	case MSR_IA32_CR_PAT:
		if (!kvm_pat_valid(data))
@@ -2067,7 +2066,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
			return 1;
		vmcs_write64(GUEST_IA32_RTIT_CTL, data);
		vmx->pt_desc.guest.ctl = data;
-		pt_update_intercept_for_msr(vmx);
+		pt_update_intercept_for_msr(vcpu);
		break;
	case MSR_IA32_RTIT_STATUS:
		if (!pt_can_write_msr(vmx))
@@ -3584,9 +3583,11 @@ void free_vpid(int vpid)
	spin_unlock(&vmx_vpid_lock);
 }
 
-static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
							  u32 msr, int type)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
	int f = sizeof(unsigned long);
 
	if (!cpu_has_vmx_msr_bitmap())
@@ -3622,9 +3623,11 @@ static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bit
	}
 }
 
-static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
							 u32 msr, int type)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
	int f = sizeof(unsigned long);
 
	if (!cpu_has_vmx_msr_bitmap())
@@ -3660,13 +3663,13 @@ static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitm
	}
 }
 
-static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
-						      u32 msr, int type, bool value)
+static __always_inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
+						      u32 msr, int type, bool value)
 {
	if (value)
-		vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
+		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
-		vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
+		vmx_disable_intercept_for_msr(vcpu, msr, type);
 }
 
 static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
@@ -3684,8 +3687,8 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
	return mode;
 }
 
-static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
-					 u8 mode)
+static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu,
+					 unsigned long *msr_bitmap, u8 mode)
 {
	int msr;
 
@@ -3700,11 +3703,11 @@ static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
		 * TPR reads and writes can be virtualized even if virtual interrupt
		 * delivery is not in use.
		 */
-		vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
+		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
		if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
-			vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
-			vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
-			vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
+			vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
+			vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
+			vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
		}
	}
 }
@@ -3720,30 +3723,24 @@ void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
		return;
 
	if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
-		vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
+		vmx_update_msr_bitmap_x2apic(vcpu, msr_bitmap, mode);
 
	vmx->msr_bitmap_mode = mode;
 }
 
-void pt_update_intercept_for_msr(struct vcpu_vmx *vmx)
+void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
 {
-	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
	u32 i;
 
-	vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS,
-				  MSR_TYPE_RW, flag);
-	vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE,
-				  MSR_TYPE_RW, flag);
-	vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK,
-				  MSR_TYPE_RW, flag);
-	vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH,
-				  MSR_TYPE_RW, flag);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
+	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
	for (i = 0; i < vmx->pt_desc.addr_range; i++) {
-		vmx_set_intercept_for_msr(msr_bitmap,
-			MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
-		vmx_set_intercept_for_msr(msr_bitmap,
-			MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
	}
 }
 
@@ -6753,18 +6750,18 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
		goto free_pml;
 
	msr_bitmap = vmx->vmcs01.msr_bitmap;
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
-	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
+	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
+	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
+	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
	if (kvm_cstate_in_guest(vcpu->kvm)) {
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
-		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
+		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
+		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
+		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
+		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
	}
	vmx->msr_bitmap_mode = 0;
 
0 commit comments