Skip to content

Commit cb53d07

Browse files
committed
KVM: SVM: Drop "always" flag from list of possible passthrough MSRs
Drop the "always" flag from the array of possible passthrough MSRs, and instead manually initialize the permissions for the handful of MSRs that KVM passes through by default. In addition to cutting down on boilerplate copy+paste code and eliminating a misleading flag (the MSRs aren't always passed through, e.g. thanks to MSR filters), this will allow for removing the direct_access_msrs array entirely. Link: https://lore.kernel.org/r/20250610225737.156318-17-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 3a0f09b commit cb53d07

File tree

1 file changed

+62
-62
lines changed

1 file changed

+62
-62
lines changed

arch/x86/kvm/svm/svm.c

Lines changed: 62 additions & 62 deletions
Original file line number | Diff line number | Diff line change
@@ -84,51 +84,48 @@ static DEFINE_PER_CPU(u64, current_tsc_ratio);
8484

8585
#define X2APIC_MSR(x) (APIC_BASE_MSR + (x >> 4))
8686

87-
static const struct svm_direct_access_msrs {
88-
u32 index; /* Index of the MSR */
89-
bool always; /* True if intercept is initially cleared */
90-
} direct_access_msrs[] = {
91-
{ .index = MSR_STAR, .always = true },
92-
{ .index = MSR_IA32_SYSENTER_CS, .always = true },
93-
{ .index = MSR_IA32_SYSENTER_EIP, .always = false },
94-
{ .index = MSR_IA32_SYSENTER_ESP, .always = false },
87+
static const u32 direct_access_msrs[] = {
88+
MSR_STAR,
89+
MSR_IA32_SYSENTER_CS,
90+
MSR_IA32_SYSENTER_EIP,
91+
MSR_IA32_SYSENTER_ESP,
9592
#ifdef CONFIG_X86_64
96-
{ .index = MSR_GS_BASE, .always = true },
97-
{ .index = MSR_FS_BASE, .always = true },
98-
{ .index = MSR_KERNEL_GS_BASE, .always = true },
99-
{ .index = MSR_LSTAR, .always = true },
100-
{ .index = MSR_CSTAR, .always = true },
101-
{ .index = MSR_SYSCALL_MASK, .always = true },
93+
MSR_GS_BASE,
94+
MSR_FS_BASE,
95+
MSR_KERNEL_GS_BASE,
96+
MSR_LSTAR,
97+
MSR_CSTAR,
98+
MSR_SYSCALL_MASK,
10299
#endif
103-
{ .index = MSR_IA32_SPEC_CTRL, .always = false },
104-
{ .index = MSR_IA32_PRED_CMD, .always = false },
105-
{ .index = MSR_IA32_FLUSH_CMD, .always = false },
106-
{ .index = MSR_IA32_DEBUGCTLMSR, .always = false },
107-
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
108-
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
109-
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
110-
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
111-
{ .index = MSR_IA32_XSS, .always = false },
112-
{ .index = MSR_EFER, .always = false },
113-
{ .index = MSR_IA32_CR_PAT, .always = false },
114-
{ .index = MSR_AMD64_SEV_ES_GHCB, .always = false },
115-
{ .index = MSR_TSC_AUX, .always = false },
116-
{ .index = X2APIC_MSR(APIC_ID), .always = false },
117-
{ .index = X2APIC_MSR(APIC_LVR), .always = false },
118-
{ .index = X2APIC_MSR(APIC_TASKPRI), .always = false },
119-
{ .index = X2APIC_MSR(APIC_ARBPRI), .always = false },
120-
{ .index = X2APIC_MSR(APIC_PROCPRI), .always = false },
121-
{ .index = X2APIC_MSR(APIC_EOI), .always = false },
122-
{ .index = X2APIC_MSR(APIC_RRR), .always = false },
123-
{ .index = X2APIC_MSR(APIC_LDR), .always = false },
124-
{ .index = X2APIC_MSR(APIC_DFR), .always = false },
125-
{ .index = X2APIC_MSR(APIC_SPIV), .always = false },
126-
{ .index = X2APIC_MSR(APIC_ISR), .always = false },
127-
{ .index = X2APIC_MSR(APIC_TMR), .always = false },
128-
{ .index = X2APIC_MSR(APIC_IRR), .always = false },
129-
{ .index = X2APIC_MSR(APIC_ESR), .always = false },
130-
{ .index = X2APIC_MSR(APIC_ICR), .always = false },
131-
{ .index = X2APIC_MSR(APIC_ICR2), .always = false },
100+
MSR_IA32_SPEC_CTRL,
101+
MSR_IA32_PRED_CMD,
102+
MSR_IA32_FLUSH_CMD,
103+
MSR_IA32_DEBUGCTLMSR,
104+
MSR_IA32_LASTBRANCHFROMIP,
105+
MSR_IA32_LASTBRANCHTOIP,
106+
MSR_IA32_LASTINTFROMIP,
107+
MSR_IA32_LASTINTTOIP,
108+
MSR_IA32_XSS,
109+
MSR_EFER,
110+
MSR_IA32_CR_PAT,
111+
MSR_AMD64_SEV_ES_GHCB,
112+
MSR_TSC_AUX,
113+
X2APIC_MSR(APIC_ID),
114+
X2APIC_MSR(APIC_LVR),
115+
X2APIC_MSR(APIC_TASKPRI),
116+
X2APIC_MSR(APIC_ARBPRI),
117+
X2APIC_MSR(APIC_PROCPRI),
118+
X2APIC_MSR(APIC_EOI),
119+
X2APIC_MSR(APIC_RRR),
120+
X2APIC_MSR(APIC_LDR),
121+
X2APIC_MSR(APIC_DFR),
122+
X2APIC_MSR(APIC_SPIV),
123+
X2APIC_MSR(APIC_ISR),
124+
X2APIC_MSR(APIC_TMR),
125+
X2APIC_MSR(APIC_IRR),
126+
X2APIC_MSR(APIC_ESR),
127+
X2APIC_MSR(APIC_ICR),
128+
X2APIC_MSR(APIC_ICR2),
132129

133130
/*
134131
* Note:
@@ -137,14 +134,14 @@ static const struct svm_direct_access_msrs {
137134
* the AVIC hardware would generate GP fault. Therefore, always
138135
* intercept the MSR 0x832, and do not setup direct_access_msr.
139136
*/
140-
{ .index = X2APIC_MSR(APIC_LVTTHMR), .always = false },
141-
{ .index = X2APIC_MSR(APIC_LVTPC), .always = false },
142-
{ .index = X2APIC_MSR(APIC_LVT0), .always = false },
143-
{ .index = X2APIC_MSR(APIC_LVT1), .always = false },
144-
{ .index = X2APIC_MSR(APIC_LVTERR), .always = false },
145-
{ .index = X2APIC_MSR(APIC_TMICT), .always = false },
146-
{ .index = X2APIC_MSR(APIC_TMCCT), .always = false },
147-
{ .index = X2APIC_MSR(APIC_TDCR), .always = false },
137+
X2APIC_MSR(APIC_LVTTHMR),
138+
X2APIC_MSR(APIC_LVTPC),
139+
X2APIC_MSR(APIC_LVT0),
140+
X2APIC_MSR(APIC_LVT1),
141+
X2APIC_MSR(APIC_LVTERR),
142+
X2APIC_MSR(APIC_TMICT),
143+
X2APIC_MSR(APIC_TMCCT),
144+
X2APIC_MSR(APIC_TDCR),
148145
};
149146

150147
static_assert(ARRAY_SIZE(direct_access_msrs) ==
@@ -761,7 +758,7 @@ static int direct_access_msr_slot(u32 msr)
761758
u32 i;
762759

763760
for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
764-
if (direct_access_msrs[i].index == msr)
761+
if (direct_access_msrs[i] == msr)
765762
return i;
766763
}
767764

@@ -925,14 +922,17 @@ u32 *svm_vcpu_alloc_msrpm(void)
925922

926923
static void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu)
927924
{
928-
int i;
925+
svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
926+
svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
929927

930-
for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
931-
if (!direct_access_msrs[i].always)
932-
continue;
933-
svm_disable_intercept_for_msr(vcpu, direct_access_msrs[i].index,
934-
MSR_TYPE_RW);
935-
}
928+
#ifdef CONFIG_X86_64
929+
svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
930+
svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
931+
svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
932+
svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
933+
svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
934+
svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
935+
#endif
936936
}
937937

938938
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
@@ -946,7 +946,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
946946
return;
947947

948948
for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
949-
int index = direct_access_msrs[i].index;
949+
int index = direct_access_msrs[i];
950950

951951
if ((index < APIC_BASE_MSR) ||
952952
(index > APIC_BASE_MSR + 0xff))
@@ -974,7 +974,7 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
974974
* back in sync after this.
975975
*/
976976
for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
977-
u32 msr = direct_access_msrs[i].index;
977+
u32 msr = direct_access_msrs[i];
978978
u32 read = test_bit(i, svm->shadow_msr_intercept.read);
979979
u32 write = test_bit(i, svm->shadow_msr_intercept.write);
980980

@@ -1014,7 +1014,7 @@ static __init int init_msrpm_offsets(void)
10141014
for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
10151015
u32 offset;
10161016

1017-
offset = svm_msrpm_offset(direct_access_msrs[i].index);
1017+
offset = svm_msrpm_offset(direct_access_msrs[i]);
10181018
if (WARN_ON(offset == MSR_INVALID))
10191019
return -EIO;
10201020

0 commit comments

Comments
 (0)