Skip to content

Commit 0e102ce

Browse files
Dapeng Mi authored and sean-jc committed
KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
Several '_mask' suffixed variables such as, global_ctrl_mask, are defined in kvm_pmu structure. However the _mask suffix is ambiguous and misleading since it's not a real mask with positive logic. On the contrary it represents the reserved bits of corresponding MSRs and these bits should not be accessed. Suggested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com> Link: https://lore.kernel.org/r/20240430005239.13527-2-dapeng1.mi@linux.intel.com Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 0c468a6 commit 0e102ce

File tree

5 files changed

+29
-29
lines changed

5 files changed

+29
-29
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -546,12 +546,12 @@ struct kvm_pmu {
546546
unsigned nr_arch_fixed_counters;
547547
unsigned available_event_types;
548548
u64 fixed_ctr_ctrl;
549-
u64 fixed_ctr_ctrl_mask;
549+
u64 fixed_ctr_ctrl_rsvd;
550550
u64 global_ctrl;
551551
u64 global_status;
552552
u64 counter_bitmask[2];
553-
u64 global_ctrl_mask;
554-
u64 global_status_mask;
553+
u64 global_ctrl_rsvd;
554+
u64 global_status_rsvd;
555555
u64 reserved_bits;
556556
u64 raw_event_mask;
557557
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
@@ -571,9 +571,9 @@ struct kvm_pmu {
571571

572572
u64 ds_area;
573573
u64 pebs_enable;
574-
u64 pebs_enable_mask;
574+
u64 pebs_enable_rsvd;
575575
u64 pebs_data_cfg;
576-
u64 pebs_data_cfg_mask;
576+
u64 pebs_data_cfg_rsvd;
577577

578578
/*
579579
* If a guest counter is cross-mapped to host counter with different

arch/x86/kvm/pmu.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -681,13 +681,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
681681
if (!msr_info->host_initiated)
682682
break;
683683

684-
if (data & pmu->global_status_mask)
684+
if (data & pmu->global_status_rsvd)
685685
return 1;
686686

687687
pmu->global_status = data;
688688
break;
689689
case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
690-
data &= ~pmu->global_ctrl_mask;
690+
data &= ~pmu->global_ctrl_rsvd;
691691
fallthrough;
692692
case MSR_CORE_PERF_GLOBAL_CTRL:
693693
if (!kvm_valid_perf_global_ctrl(pmu, data))
@@ -704,7 +704,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
704704
* GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
705705
* GLOBAL_STATUS, and so the set of reserved bits is the same.
706706
*/
707-
if (data & pmu->global_status_mask)
707+
if (data & pmu->global_status_rsvd)
708708
return 1;
709709
fallthrough;
710710
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
@@ -768,11 +768,11 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
768768
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
769769
pmu->reserved_bits = 0xffffffff00200000ull;
770770
pmu->raw_event_mask = X86_RAW_EVENT_MASK;
771-
pmu->global_ctrl_mask = ~0ull;
772-
pmu->global_status_mask = ~0ull;
773-
pmu->fixed_ctr_ctrl_mask = ~0ull;
774-
pmu->pebs_enable_mask = ~0ull;
775-
pmu->pebs_data_cfg_mask = ~0ull;
771+
pmu->global_ctrl_rsvd = ~0ull;
772+
pmu->global_status_rsvd = ~0ull;
773+
pmu->fixed_ctr_ctrl_rsvd = ~0ull;
774+
pmu->pebs_enable_rsvd = ~0ull;
775+
pmu->pebs_data_cfg_rsvd = ~0ull;
776776
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
777777

778778
if (!vcpu->kvm->arch.enable_pmu)

arch/x86/kvm/pmu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
129129
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
130130
u64 data)
131131
{
132-
return !(pmu->global_ctrl_mask & data);
132+
return !(pmu->global_ctrl_rsvd & data);
133133
}
134134

135135
/* returns general purpose PMC with the specified MSR. Note that it can be

arch/x86/kvm/svm/pmu.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -199,8 +199,8 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
199199
kvm_pmu_cap.num_counters_gp);
200200

201201
if (pmu->version > 1) {
202-
pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
203-
pmu->global_status_mask = pmu->global_ctrl_mask;
202+
pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
203+
pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
204204
}
205205

206206
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;

arch/x86/kvm/vmx/pmu_intel.c

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -348,14 +348,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
348348

349349
switch (msr) {
350350
case MSR_CORE_PERF_FIXED_CTR_CTRL:
351-
if (data & pmu->fixed_ctr_ctrl_mask)
351+
if (data & pmu->fixed_ctr_ctrl_rsvd)
352352
return 1;
353353

354354
if (pmu->fixed_ctr_ctrl != data)
355355
reprogram_fixed_counters(pmu, data);
356356
break;
357357
case MSR_IA32_PEBS_ENABLE:
358-
if (data & pmu->pebs_enable_mask)
358+
if (data & pmu->pebs_enable_rsvd)
359359
return 1;
360360

361361
if (pmu->pebs_enable != data) {
@@ -371,7 +371,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
371371
pmu->ds_area = data;
372372
break;
373373
case MSR_PEBS_DATA_CFG:
374-
if (data & pmu->pebs_data_cfg_mask)
374+
if (data & pmu->pebs_data_cfg_rsvd)
375375
return 1;
376376

377377
pmu->pebs_data_cfg = data;
@@ -456,7 +456,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
456456
union cpuid10_eax eax;
457457
union cpuid10_edx edx;
458458
u64 perf_capabilities;
459-
u64 counter_mask;
459+
u64 counter_rsvd;
460460
int i;
461461

462462
memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
@@ -502,21 +502,21 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
502502
}
503503

504504
for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
505-
pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
506-
counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
505+
pmu->fixed_ctr_ctrl_rsvd &= ~(0xbull << (i * 4));
506+
counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
507507
(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
508-
pmu->global_ctrl_mask = counter_mask;
508+
pmu->global_ctrl_rsvd = counter_rsvd;
509509

510510
/*
511511
* GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
512512
* share reserved bit definitions. The kernel just happens to use
513513
* OVF_CTRL for the names.
514514
*/
515-
pmu->global_status_mask = pmu->global_ctrl_mask
515+
pmu->global_status_rsvd = pmu->global_ctrl_rsvd
516516
& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
517517
MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
518518
if (vmx_pt_mode_is_host_guest())
519-
pmu->global_status_mask &=
519+
pmu->global_status_rsvd &=
520520
~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
521521

522522
entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
@@ -544,15 +544,15 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
544544

545545
if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
546546
if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
547-
pmu->pebs_enable_mask = counter_mask;
547+
pmu->pebs_enable_rsvd = counter_rsvd;
548548
pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
549549
for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
550-
pmu->fixed_ctr_ctrl_mask &=
550+
pmu->fixed_ctr_ctrl_rsvd &=
551551
~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
552552
}
553-
pmu->pebs_data_cfg_mask = ~0xff00000full;
553+
pmu->pebs_data_cfg_rsvd = ~0xff00000full;
554554
} else {
555-
pmu->pebs_enable_mask =
555+
pmu->pebs_enable_rsvd =
556556
~((1ull << pmu->nr_arch_gp_counters) - 1);
557557
}
558558
}

0 commit comments

Comments (0)