
Commit 004a0aa

KVM: x86/pmu: Snapshot and clear reprogramming bitmap before reprogramming
Refactor the handling of the reprogramming bitmap to snapshot and clear
to-be-processed bits before doing the reprogramming, and then explicitly
set bits for PMCs that need to be reprogrammed (again). This will allow
adding a macro to iterate over all valid PMCs without having to add
special handling for the reprogramming bit, which (a) can have bits set
for non-existent PMCs and (b) needs to clear such bits to avoid wasting
cycles in perpetuity.

Note, the existing behavior of clearing bits after reprogramming does NOT
have a race with kvm_vm_ioctl_set_pmu_event_filter(). Setting a new PMU
filter synchronizes SRCU _before_ setting the bitmap, i.e. guarantees that
the vCPU isn't in the middle of reprogramming with a stale filter prior to
setting the bitmap.

Link: https://lore.kernel.org/r/20231110022857.1273836-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent b31880c commit 004a0aa
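The commit's core pattern is snapshot-and-clear: atomically capture the set of bits to process, clear exactly those bits from the live bitmap so that bits set concurrently by other tasks are never lost, and re-set a bit only when its counter fails to reprogram. Below is a minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's bitmap_copy()/atomic64_andnot()/set_bit(); the names (pending, process_one, NR_SLOTS) are illustrative stand-ins, not KVM code.

/*
 * Minimal sketch of the snapshot-and-clear pattern; names are
 * illustrative stand-ins, not KVM code.  Build: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SLOTS 64

static _Atomic uint64_t pending;        /* bitmap of slots needing work */

/* Pretend slot 3 always fails, e.g. due to resource contention. */
static bool process_one(int slot)
{
        return slot != 3;
}

static void handle_pending(void)
{
        /* Snapshot the to-be-processed bits ... */
        uint64_t snap = atomic_load(&pending);

        /* ... and clear only those bits; concurrently set bits survive. */
        atomic_fetch_and(&pending, ~snap);

        for (int slot = 0; slot < NR_SLOTS; slot++) {
                if (!(snap & (1ull << slot)))
                        continue;

                /* On failure, re-set the bit to retry on a later pass. */
                if (!process_one(slot))
                        atomic_fetch_or(&pending, 1ull << slot);
        }
}

int main(void)
{
        atomic_fetch_or(&pending, (1ull << 1) | (1ull << 3));
        handle_pending();
        printf("still pending: %#llx\n",
               (unsigned long long)atomic_load(&pending));
        return 0;
}

Clearing only the snapshotted bits, rather than zeroing the whole word, is what makes asynchronous writers safe: a bit set between the snapshot and the clear survives untouched and is picked up on the next pass.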


2 files changed: +30 -23 lines changed


arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
@@ -536,6 +536,7 @@ struct kvm_pmc {
 #define KVM_PMC_MAX_FIXED 3
 #define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
 #define KVM_AMD_PMC_MAX_GENERIC 6
+
 struct kvm_pmu {
        u8 version;
        unsigned nr_arch_gp_counters;

arch/x86/kvm/pmu.c

Lines changed: 29 additions & 23 deletions
@@ -444,7 +444,7 @@ static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
               check_pmu_event_filter(pmc);
 }
 
-static void reprogram_counter(struct kvm_pmc *pmc)
+static int reprogram_counter(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u64 eventsel = pmc->eventsel;
@@ -455,7 +455,7 @@ static void reprogram_counter(struct kvm_pmc *pmc)
        emulate_overflow = pmc_pause_counter(pmc);
 
        if (!pmc_event_is_allowed(pmc))
-               goto reprogram_complete;
+               return 0;
 
        if (emulate_overflow)
                __kvm_perf_overflow(pmc, false);
@@ -476,43 +476,49 @@ static void reprogram_counter(struct kvm_pmc *pmc)
        }
 
        if (pmc->current_config == new_config && pmc_resume_counter(pmc))
-               goto reprogram_complete;
+               return 0;
 
        pmc_release_perf_event(pmc);
 
        pmc->current_config = new_config;
 
-       /*
-        * If reprogramming fails, e.g. due to contention, leave the counter's
-        * regprogram bit set, i.e. opportunistically try again on the next PMU
-        * refresh. Don't make a new request as doing so can stall the guest
-        * if reprogramming repeatedly fails.
-        */
-       if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
-                                 (eventsel & pmu->raw_event_mask),
-                                 !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
-                                 !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-                                 eventsel & ARCH_PERFMON_EVENTSEL_INT))
-               return;
-
-reprogram_complete:
-       clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
+       return pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
+                                    (eventsel & pmu->raw_event_mask),
+                                    !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+                                    !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+                                    eventsel & ARCH_PERFMON_EVENTSEL_INT);
 }
 
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
+       DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int bit;
 
-       for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
+       bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+
+       /*
+        * The reprogramming bitmap can be written asynchronously by something
+        * other than the task that holds vcpu->mutex, take care to clear only
+        * the bits that will actually processed.
+        */
+       BUILD_BUG_ON(sizeof(bitmap) != sizeof(atomic64_t));
+       atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi);
+
+       for_each_set_bit(bit, bitmap, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = kvm_pmc_idx_to_pmc(pmu, bit);
 
-               if (unlikely(!pmc)) {
-                       clear_bit(bit, pmu->reprogram_pmi);
+               if (unlikely(!pmc))
                        continue;
-               }
 
-               reprogram_counter(pmc);
+               /*
+                * If reprogramming fails, e.g. due to contention, re-set the
+                * regprogram bit set, i.e. opportunistically try again on the
+                * next PMU refresh. Don't make a new request as doing so can
+                * stall the guest if reprogramming repeatedly fails.
+                */
+               if (reprogram_counter(pmc))
+                       set_bit(pmc->idx, pmu->reprogram_pmi);
        }
 
        /*
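The BUILD_BUG_ON() plus (s64) cast in the new kvm_pmu_handle_event() works because the local bitmap is verified at compile time to be exactly one 64-bit word, and because pmu->__reprogram_pmi is an atomic view of the same storage as pmu->reprogram_pmi, as the field names suggest. A rough userspace analogue of that overlay follows; struct pmu_bits and its fields are hypothetical, not the real struct kvm_pmu.

/*
 * Rough userspace analogue of the bitmap/atomic64 overlay assumed above;
 * struct pmu_bits and its fields are hypothetical, not the real kvm_pmu.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pmu_bits {
        union {
                uint64_t reprogram_pmi;                 /* bitmap view */
                _Atomic int64_t __reprogram_pmi;        /* atomic view of the same word */
        };
};

/* Analogue of BUILD_BUG_ON(sizeof(bitmap) != sizeof(atomic64_t)). */
static_assert(sizeof(uint64_t) == sizeof(_Atomic int64_t),
              "both views must cover exactly one 64-bit word");

int main(void)
{
        struct pmu_bits pmu = { .reprogram_pmi = 0x0b };  /* bits 0, 1, 3 set */
        uint64_t snapshot = pmu.reprogram_pmi;            /* like bitmap_copy() */

        /* Clear, in the live word, exactly the bits that were snapshotted. */
        atomic_fetch_and(&pmu.__reprogram_pmi, ~(int64_t)snapshot);

        printf("live bitmap after andnot: %#llx\n",
               (unsigned long long)pmu.reprogram_pmi);
        return 0;
}

The compile-time size check is what makes the *(s64 *) cast of the local snapshot safe: if the bitmap ever grew beyond a single 64-bit word, the build would fail rather than silently clearing only part of it.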
