@@ -84,51 +84,48 @@ static DEFINE_PER_CPU(u64, current_tsc_ratio);
 
 #define X2APIC_MSR(x)	(APIC_BASE_MSR + (x >> 4))
 
-static const struct svm_direct_access_msrs {
-	u32 index;		/* Index of the MSR */
-	bool always;		/* True if intercept is initially cleared */
-} direct_access_msrs[] = {
-	{ .index = MSR_STAR,			.always = true  },
-	{ .index = MSR_IA32_SYSENTER_CS,	.always = true  },
-	{ .index = MSR_IA32_SYSENTER_EIP,	.always = false },
-	{ .index = MSR_IA32_SYSENTER_ESP,	.always = false },
+static const u32 direct_access_msrs[] = {
+	MSR_STAR,
+	MSR_IA32_SYSENTER_CS,
+	MSR_IA32_SYSENTER_EIP,
+	MSR_IA32_SYSENTER_ESP,
 #ifdef CONFIG_X86_64
-	{ .index = MSR_GS_BASE,			.always = true  },
-	{ .index = MSR_FS_BASE,			.always = true  },
-	{ .index = MSR_KERNEL_GS_BASE,		.always = true  },
-	{ .index = MSR_LSTAR,			.always = true  },
-	{ .index = MSR_CSTAR,			.always = true  },
-	{ .index = MSR_SYSCALL_MASK,		.always = true  },
+	MSR_GS_BASE,
+	MSR_FS_BASE,
+	MSR_KERNEL_GS_BASE,
+	MSR_LSTAR,
+	MSR_CSTAR,
+	MSR_SYSCALL_MASK,
 #endif
-	{ .index = MSR_IA32_SPEC_CTRL,		.always = false },
-	{ .index = MSR_IA32_PRED_CMD,		.always = false },
-	{ .index = MSR_IA32_FLUSH_CMD,		.always = false },
-	{ .index = MSR_IA32_DEBUGCTLMSR,	.always = false },
-	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
-	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
-	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
-	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
-	{ .index = MSR_IA32_XSS,		.always = false },
-	{ .index = MSR_EFER,			.always = false },
-	{ .index = MSR_IA32_CR_PAT,		.always = false },
-	{ .index = MSR_AMD64_SEV_ES_GHCB,	.always = false },
-	{ .index = MSR_TSC_AUX,			.always = false },
-	{ .index = X2APIC_MSR(APIC_ID),		.always = false },
-	{ .index = X2APIC_MSR(APIC_LVR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_TASKPRI),	.always = false },
-	{ .index = X2APIC_MSR(APIC_ARBPRI),	.always = false },
-	{ .index = X2APIC_MSR(APIC_PROCPRI),	.always = false },
-	{ .index = X2APIC_MSR(APIC_EOI),	.always = false },
-	{ .index = X2APIC_MSR(APIC_RRR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_LDR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_DFR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_SPIV),	.always = false },
-	{ .index = X2APIC_MSR(APIC_ISR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_TMR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_IRR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_ESR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_ICR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_ICR2),	.always = false },
+	MSR_IA32_SPEC_CTRL,
+	MSR_IA32_PRED_CMD,
+	MSR_IA32_FLUSH_CMD,
+	MSR_IA32_DEBUGCTLMSR,
+	MSR_IA32_LASTBRANCHFROMIP,
+	MSR_IA32_LASTBRANCHTOIP,
+	MSR_IA32_LASTINTFROMIP,
+	MSR_IA32_LASTINTTOIP,
+	MSR_IA32_XSS,
+	MSR_EFER,
+	MSR_IA32_CR_PAT,
+	MSR_AMD64_SEV_ES_GHCB,
+	MSR_TSC_AUX,
+	X2APIC_MSR(APIC_ID),
+	X2APIC_MSR(APIC_LVR),
+	X2APIC_MSR(APIC_TASKPRI),
+	X2APIC_MSR(APIC_ARBPRI),
+	X2APIC_MSR(APIC_PROCPRI),
+	X2APIC_MSR(APIC_EOI),
+	X2APIC_MSR(APIC_RRR),
+	X2APIC_MSR(APIC_LDR),
+	X2APIC_MSR(APIC_DFR),
+	X2APIC_MSR(APIC_SPIV),
+	X2APIC_MSR(APIC_ISR),
+	X2APIC_MSR(APIC_TMR),
+	X2APIC_MSR(APIC_IRR),
+	X2APIC_MSR(APIC_ESR),
+	X2APIC_MSR(APIC_ICR),
+	X2APIC_MSR(APIC_ICR2),
 
 	/*
 	 * Note:
@@ -137,14 +134,14 @@ static const struct svm_direct_access_msrs {
 	 * the AVIC hardware would generate GP fault. Therefore, always
 	 * intercept the MSR 0x832, and do not setup direct_access_msr.
 	 */
-	{ .index = X2APIC_MSR(APIC_LVTTHMR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_LVTPC),	.always = false },
-	{ .index = X2APIC_MSR(APIC_LVT0),	.always = false },
-	{ .index = X2APIC_MSR(APIC_LVT1),	.always = false },
-	{ .index = X2APIC_MSR(APIC_LVTERR),	.always = false },
-	{ .index = X2APIC_MSR(APIC_TMICT),	.always = false },
-	{ .index = X2APIC_MSR(APIC_TMCCT),	.always = false },
-	{ .index = X2APIC_MSR(APIC_TDCR),	.always = false },
+	X2APIC_MSR(APIC_LVTTHMR),
+	X2APIC_MSR(APIC_LVTPC),
+	X2APIC_MSR(APIC_LVT0),
+	X2APIC_MSR(APIC_LVT1),
+	X2APIC_MSR(APIC_LVTERR),
+	X2APIC_MSR(APIC_TMICT),
+	X2APIC_MSR(APIC_TMCCT),
+	X2APIC_MSR(APIC_TDCR),
 };
 
 static_assert(ARRAY_SIZE(direct_access_msrs) ==
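
The X2APIC_MSR() macro in the hunk above maps an xAPIC register offset to its
x2APIC MSR number: in x2APIC mode each 16-byte xAPIC MMIO register slot
collapses to one MSR in the 0x800 range, hence the shift right by four. A
minimal standalone sketch with the constants inlined (APIC_BASE_MSR is 0x800
and APIC_EOI is register offset 0xB0, matching the kernel headers; the program
itself is an illustration, not kernel code):

	#include <stdio.h>

	#define APIC_BASE_MSR	0x800	/* first x2APIC MSR */
	#define APIC_EOI	0xB0	/* xAPIC EOI register offset */
	#define X2APIC_MSR(x)	(APIC_BASE_MSR + ((x) >> 4))

	int main(void)
	{
		/* 0x800 + (0xB0 >> 4) = 0x80b, the architectural x2APIC EOI MSR */
		printf("X2APIC_MSR(APIC_EOI) = 0x%x\n", X2APIC_MSR(APIC_EOI));
		return 0;
	}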
@@ -761,7 +758,7 @@ static int direct_access_msr_slot(u32 msr)
 	u32 i;
 
 	for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-		if (direct_access_msrs[i].index == msr)
+		if (direct_access_msrs[i] == msr)
 			return i;
 	}
 
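
With the wrapper struct gone, direct_access_msr_slot() is a linear scan over
plain u32 values. The slot index it returns doubles as the bit position used
by the shadow_msr_intercept bitmaps (see the svm_msr_filter_changed() hunk
below), so the order of direct_access_msrs[] must stay stable. A standalone
sketch of the lookup pattern, with a hypothetical -1 sentinel on a miss (the
kernel helper's miss handling is outside the diff context shown here):

	#include <stddef.h>
	#include <stdint.h>

	/* Sketch only: return the slot of msr in msrs[], or -1 if absent. */
	static int msr_slot(const uint32_t *msrs, size_t n, uint32_t msr)
	{
		size_t i;

		for (i = 0; i < n; i++) {
			if (msrs[i] == msr)
				return (int)i;
		}
		return -1;
	}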
@@ -925,14 +922,17 @@ u32 *svm_vcpu_alloc_msrpm(void)
 
 static void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu)
 {
-	int i;
+	svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
+	svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
 
-	for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-		if (!direct_access_msrs[i].always)
-			continue;
-		svm_disable_intercept_for_msr(vcpu, direct_access_msrs[i].index,
-					      MSR_TYPE_RW);
-	}
+#ifdef CONFIG_X86_64
+	svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
+	svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
+	svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+	svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
+	svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
+	svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
+#endif
 }
 
 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
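
Unrolling the loop makes svm_vcpu_init_msrpm() the one place that states which
MSRs are pass-through from vCPU creation, which is what allows the .always
flag, and with it the wrapper struct, to go away. Conceptually, disabling an
intercept clears the read and write permission bits that SVM's MSR permission
map dedicates to each MSR (two adjacent bits per MSR). A toy sketch of that
bit manipulation, assuming a flat byte array and a precomputed bit offset; the
real helper additionally translates MSR numbers into the MSRPM's disjoint
ranges and keeps KVM's shadow bitmaps in sync:

	#include <stdint.h>

	/* Toy model: bit "bit" intercepts reads, bit "bit + 1" intercepts writes. */
	static void msrpm_clear_intercept(uint8_t *msrpm, unsigned int bit)
	{
		msrpm[bit / 8] &= (uint8_t)~(1u << (bit % 8));		/* reads */
		msrpm[(bit + 1) / 8] &= (uint8_t)~(1u << ((bit + 1) % 8));	/* writes */
	}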
@@ -946,7 +946,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-		int index = direct_access_msrs[i].index;
+		int index = direct_access_msrs[i];
 
 		if ((index < APIC_BASE_MSR) ||
 		    (index > APIC_BASE_MSR + 0xff))
@@ -974,7 +974,7 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
 	 * back in sync after this.
 	 */
 	for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
-		u32 msr = direct_access_msrs[i].index;
+		u32 msr = direct_access_msrs[i];
 		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
 		u32 write = test_bit(i, svm->shadow_msr_intercept.write);
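
Note that svm_msr_filter_changed() indexes the shadow_msr_intercept read and
write bitmaps by the array slot i, while the MSR number now comes straight out
of direct_access_msrs[i]. Reordering the array would therefore silently change
which shadow bit belongs to which MSR, consistent with the conversion leaving
the entry order untouched.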
@@ -1014,7 +1014,7 @@ static __init int init_msrpm_offsets(void)
 	for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
 		u32 offset;
 
-		offset = svm_msrpm_offset(direct_access_msrs[i].index);
+		offset = svm_msrpm_offset(direct_access_msrs[i]);
 		if (WARN_ON(offset == MSR_INVALID))
 			return -EIO;
 