KVM: VMX: Refactor intel_pmu_{g,s}et_msr() to align with other helpers
Invert the flows in intel_pmu_{g,s}et_msr()'s case statements so that
they follow the kernel's preferred style of:

        if (<not valid>)
                return <error>

        <commit change>
        return <success>

which is also the style used by every other {g,s}et_msr() helper (except
AMD's PMU variant, which doesn't use a switch statement).
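
To make the two shapes concrete, here is a minimal standalone sketch
(illustrative only, with made-up names; this code is not part of the
commit):

	struct widget {
		unsigned long ctrl;
		unsigned long reserved_mask;
	};

	/* Old shape: the success path nests inside the validity check. */
	static int set_ctrl_nested(struct widget *w, unsigned long data)
	{
		if (!(data & w->reserved_mask)) {
			w->ctrl = data;
			return 0;
		}
		return 1;
	}

	/* Preferred shape: reject invalid input up front, then commit. */
	static int set_ctrl_guarded(struct widget *w, unsigned long data)
	{
		if (data & w->reserved_mask)
			return 1;

		w->ctrl = data;
		return 0;
	}

Both behave identically; the guard-clause form keeps the success path
at the top indentation level and scales better as more checks are
added.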

Modify the "set" paths that have costly side effects, i.e. those that
reprogram counters, to skip only the side effects, i.e. to perform the
reserved bits checks even when the value is unchanged.  None of the
reserved bits checks is expensive, so there's no strong justification
for skipping them, and guarding only the side effect makes it slightly
more obvious what is being skipped and why.
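
A sketch of that structure, reusing struct widget from the sketch
above (again hypothetical; the stub reprogram_widget() stands in for a
costly reprogramming helper such as reprogram_counters(), and this
code is not part of the commit):

	static void reprogram_widget(struct widget *w)
	{
		(void)w;	/* stub for the costly reprogramming */
	}

	static int set_ctrl_checked(struct widget *w, unsigned long data)
	{
		/* Always validate, even if the value is unchanged. */
		if (data & w->reserved_mask)
			return 1;

		/* Skip only the expensive side effect. */
		if (w->ctrl != data) {
			w->ctrl = data;
			reprogram_widget(w);
		}
		return 0;
	}

The unchanged-value check guards only the reprogramming; an attempt to
set reserved bits fails regardless of whether the value changed.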

No functional change intended (assuming no reserved bit bugs).

Link: https://lkml.kernel.org/r/Y%2B6cfen%2FCpO3%2FdLO%40google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
sean-jc committed Apr 6, 2023
1 parent cdd2fbf commit 8bca8c5
Showing 1 changed file with 57 additions and 52 deletions.
arch/x86/kvm/vmx/pmu_intel.c
@@ -351,45 +351,47 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		msr_info->data = pmu->fixed_ctr_ctrl;
-		return 0;
+		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
 		msr_info->data = pmu->global_status;
-		return 0;
+		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		msr_info->data = pmu->global_ctrl;
-		return 0;
+		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		msr_info->data = 0;
-		return 0;
+		break;
 	case MSR_IA32_PEBS_ENABLE:
 		msr_info->data = pmu->pebs_enable;
-		return 0;
+		break;
 	case MSR_IA32_DS_AREA:
 		msr_info->data = pmu->ds_area;
-		return 0;
+		break;
 	case MSR_PEBS_DATA_CFG:
 		msr_info->data = pmu->pebs_data_cfg;
-		return 0;
+		break;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
 			u64 val = pmc_read_counter(pmc);
 			msr_info->data =
 				val & pmu->counter_bitmask[KVM_PMC_GP];
-			return 0;
+			break;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			u64 val = pmc_read_counter(pmc);
 			msr_info->data =
 				val & pmu->counter_bitmask[KVM_PMC_FIXED];
-			return 0;
+			break;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			msr_info->data = pmc->eventsel;
-			return 0;
-		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
-			return 0;
+			break;
+		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
+			break;
+		}
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -402,94 +404,97 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		if (pmu->fixed_ctr_ctrl == data)
-			return 0;
-		if (!(data & pmu->fixed_ctr_ctrl_mask)) {
+		if (data & pmu->fixed_ctr_ctrl_mask)
+			return 1;
+
+		if (pmu->fixed_ctr_ctrl != data)
 			reprogram_fixed_counters(pmu, data);
-			return 0;
-		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_STATUS:
-		if (msr_info->host_initiated) {
-			pmu->global_status = data;
-			return 0;
-		}
-		break; /* RO MSR */
+		if (!msr_info->host_initiated)
+			return 1; /* RO MSR */
+
+		pmu->global_status = data;
+		break;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
-		if (pmu->global_ctrl == data)
-			return 0;
-		if (kvm_valid_perf_global_ctrl(pmu, data)) {
+		if (!kvm_valid_perf_global_ctrl(pmu, data))
+			return 1;
+
+		if (pmu->global_ctrl != data) {
 			diff = pmu->global_ctrl ^ data;
 			pmu->global_ctrl = data;
 			reprogram_counters(pmu, diff);
-			return 0;
 		}
 		break;
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		if (!(data & pmu->global_ovf_ctrl_mask)) {
-			if (!msr_info->host_initiated)
-				pmu->global_status &= ~data;
-			return 0;
-		}
+		if (data & pmu->global_ovf_ctrl_mask)
+			return 1;
+
+		if (!msr_info->host_initiated)
+			pmu->global_status &= ~data;
 		break;
 	case MSR_IA32_PEBS_ENABLE:
-		if (pmu->pebs_enable == data)
-			return 0;
-		if (!(data & pmu->pebs_enable_mask)) {
+		if (data & pmu->pebs_enable_mask)
+			return 1;
+
+		if (pmu->pebs_enable != data) {
 			diff = pmu->pebs_enable ^ data;
 			pmu->pebs_enable = data;
 			reprogram_counters(pmu, diff);
-			return 0;
 		}
 		break;
 	case MSR_IA32_DS_AREA:
 		if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
 			return 1;
 		if (is_noncanonical_address(data, vcpu))
 			return 1;
+
 		pmu->ds_area = data;
-		return 0;
+		break;
 	case MSR_PEBS_DATA_CFG:
-		if (pmu->pebs_data_cfg == data)
-			return 0;
-		if (!(data & pmu->pebs_data_cfg_mask)) {
-			pmu->pebs_data_cfg = data;
-			return 0;
-		}
+		if (data & pmu->pebs_data_cfg_mask)
+			return 1;
+
+		pmu->pebs_data_cfg = data;
 		break;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
 			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
 			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
 				return 1;
+
 			if (!msr_info->host_initiated &&
 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
 				data = (s64)(s32)data;
 			pmc->counter += data - pmc_read_counter(pmc);
 			pmc_update_sample_period(pmc);
-			return 0;
+			break;
 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
 			pmc->counter += data - pmc_read_counter(pmc);
 			pmc_update_sample_period(pmc);
-			return 0;
+			break;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
-			if (data == pmc->eventsel)
-				return 0;
 			reserved_bits = pmu->reserved_bits;
 			if ((pmc->idx == 2) &&
 			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
 				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
-			if (!(data & reserved_bits)) {
+			if (data & reserved_bits)
+				return 1;
+
+			if (data != pmc->eventsel) {
 				pmc->eventsel = data;
 				kvm_pmu_request_counter_reprogam(pmc);
-				return 0;
 			}
-		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
-			return 0;
+			break;
+		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
+			break;
+		}
+		/* Not a known PMU MSR. */
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
