Skip to content

Commit 7bb7fce

Browse files
committed
KVM: x86/pmu: Prioritize VMX interception over #GP on RDPMC due to bad index
Apply the pre-intercepts RDPMC validity check only to AMD, and rename all relevant functions to make it as clear as possible that the check is not a standard PMC index check. On Intel, the basic rule is that only invalid opcodes and privilege/permission/mode checks have priority over VM-Exit, i.e. RDPMC with an invalid index should VM-Exit, not #GP. While the SDM doesn't explicitly call out RDPMC, it _does_ explicitly use RDMSR of a non-existent MSR as an example where VM-Exit has priority over #GP, and RDPMC is effectively just a variation of RDMSR. Manually testing on various Intel CPUs confirms this behavior, and the inverted priority was introduced for SVM compatibility, i.e. was not an intentional change for Intel PMUs. On AMD, *all* exceptions on RDPMC have priority over VM-Exit. Check for a NULL kvm_pmu_ops.check_rdpmc_early instead of using a RET0 static call so as to provide a convenient location to document the difference between Intel and AMD, and to again try to make it as obvious as possible that the early check is a one-off thing, not a generic "is this PMC valid?" helper. Fixes: 8061252 ("KVM: SVM: Add intercept checks for remaining twobyte instructions") Cc: Jim Mattson <jmattson@google.com> Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Link: https://lore.kernel.org/r/20240109230250.424295-8-seanjc@google.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent ecb4907 commit 7bb7fce

File tree

8 files changed

+27
-29
lines changed

8 files changed

+27
-29
lines changed

arch/x86/include/asm/kvm-x86-pmu-ops.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ BUILD_BUG_ON(1)
1515
KVM_X86_PMU_OP(pmc_idx_to_pmc)
1616
KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
1717
KVM_X86_PMU_OP(msr_idx_to_pmc)
18-
KVM_X86_PMU_OP(is_valid_rdpmc_ecx)
18+
KVM_X86_PMU_OP_OPTIONAL(check_rdpmc_early)
1919
KVM_X86_PMU_OP(is_valid_msr)
2020
KVM_X86_PMU_OP(get_msr)
2121
KVM_X86_PMU_OP(set_msr)

arch/x86/kvm/emulate.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3962,7 +3962,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
39623962
* protected mode.
39633963
*/
39643964
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3965-
ctxt->ops->check_pmc(ctxt, rcx))
3965+
ctxt->ops->check_rdpmc_early(ctxt, rcx))
39663966
return emulate_gp(ctxt, 0);
39673967

39683968
return X86EMUL_CONTINUE;

arch/x86/kvm/kvm_emulate.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -208,7 +208,7 @@ struct x86_emulate_ops {
208208
int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
209209
int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
210210
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
211-
int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
211+
int (*check_rdpmc_early)(struct x86_emulate_ctxt *ctxt, u32 pmc);
212212
int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
213213
void (*halt)(struct x86_emulate_ctxt *ctxt);
214214
void (*wbinvd)(struct x86_emulate_ctxt *ctxt);

arch/x86/kvm/pmu.c

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -524,10 +524,20 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
524524
kvm_pmu_cleanup(vcpu);
525525
}
526526

527-
/* check if idx is a valid index to access PMU */
528-
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
527+
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
529528
{
530-
return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
529+
/*
530+
* On Intel, VMX interception has priority over RDPMC exceptions that
531+
* aren't already handled by the emulator, i.e. there is no additional
532+
* check needed for Intel PMUs.
533+
*
534+
* On AMD, _all_ exceptions on RDPMC have priority over SVM intercepts,
535+
* i.e. an invalid PMC results in a #GP, not #VMEXIT.
536+
*/
537+
if (!kvm_pmu_ops.check_rdpmc_early)
538+
return 0;
539+
540+
return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
531541
}
532542

533543
bool is_vmware_backdoor_pmc(u32 pmc_idx)

arch/x86/kvm/pmu.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ struct kvm_pmu_ops {
2323
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
2424
unsigned int idx, u64 *mask);
2525
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
26-
bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
26+
int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
2727
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
2828
int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
2929
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
@@ -215,7 +215,7 @@ static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
215215
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
216216
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
217217
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
218-
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
218+
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
219219
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
220220
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
221221
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);

arch/x86/kvm/svm/pmu.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,11 +73,14 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
7373
return amd_pmc_idx_to_pmc(pmu, idx);
7474
}
7575

76-
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
76+
static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
7777
{
7878
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
7979

80-
return idx < pmu->nr_arch_gp_counters;
80+
if (idx >= pmu->nr_arch_gp_counters)
81+
return -EINVAL;
82+
83+
return 0;
8184
}
8285

8386
/* idx is the ECX register of RDPMC instruction */
@@ -229,7 +232,7 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
229232
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
230233
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
231234
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
232-
.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
235+
.check_rdpmc_early = amd_check_rdpmc_early,
233236
.is_valid_msr = amd_is_valid_msr,
234237
.get_msr = amd_pmu_get_msr,
235238
.set_msr = amd_pmu_set_msr,

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -55,17 +55,6 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
5555
}
5656
}
5757

58-
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
59-
{
60-
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
61-
bool fixed = idx & (1u << 30);
62-
63-
idx &= ~(3u << 30);
64-
65-
return fixed ? idx < pmu->nr_arch_fixed_counters
66-
: idx < pmu->nr_arch_gp_counters;
67-
}
68-
6958
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
7059
unsigned int idx, u64 *mask)
7160
{
@@ -718,7 +707,6 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
718707
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
719708
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
720709
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
721-
.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
722710
.is_valid_msr = intel_is_valid_msr,
723711
.get_msr = intel_pmu_get_msr,
724712
.set_msr = intel_pmu_set_msr,

arch/x86/kvm/x86.c

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8389,12 +8389,9 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
83898389
return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
83908390
}
83918391

8392-
static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
8393-
u32 pmc)
8392+
static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
83948393
{
8395-
if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc))
8396-
return 0;
8397-
return -EINVAL;
8394+
return kvm_pmu_check_rdpmc_early(emul_to_vcpu(ctxt), pmc);
83988395
}
83998396

84008397
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
@@ -8526,7 +8523,7 @@ static const struct x86_emulate_ops emulate_ops = {
85268523
.set_msr_with_filter = emulator_set_msr_with_filter,
85278524
.get_msr_with_filter = emulator_get_msr_with_filter,
85288525
.get_msr = emulator_get_msr,
8529-
.check_pmc = emulator_check_pmc,
8526+
.check_rdpmc_early = emulator_check_rdpmc_early,
85308527
.read_pmc = emulator_read_pmc,
85318528
.halt = emulator_halt,
85328529
.wbinvd = emulator_wbinvd,

0 commit comments

Comments
 (0)