|
22 | 22 |
|
23 | 23 | #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
|
24 | 24 |
|
25 |
| -enum intel_pmu_architectural_events { |
26 |
| - /* |
27 |
| - * The order of the architectural events matters as support for each |
28 |
| - * event is enumerated via CPUID using the index of the event. |
29 |
| - */ |
30 |
| - INTEL_ARCH_CPU_CYCLES, |
31 |
| - INTEL_ARCH_INSTRUCTIONS_RETIRED, |
32 |
| - INTEL_ARCH_REFERENCE_CYCLES, |
33 |
| - INTEL_ARCH_LLC_REFERENCES, |
34 |
| - INTEL_ARCH_LLC_MISSES, |
35 |
| - INTEL_ARCH_BRANCHES_RETIRED, |
36 |
| - INTEL_ARCH_BRANCHES_MISPREDICTED, |
37 |
| - |
38 |
| - NR_REAL_INTEL_ARCH_EVENTS, |
39 |
| - |
40 |
| - /* |
41 |
| - * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a. |
42 |
| - * TSC reference cycles. The architectural reference cycles event may |
43 |
| - * or may not actually use the TSC as the reference, e.g. might use the |
44 |
| - * core crystal clock or the bus clock (yeah, "architectural"). |
45 |
| - */ |
46 |
| - PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS, |
47 |
| - NR_INTEL_ARCH_EVENTS, |
48 |
| -}; |
49 |
| - |
50 |
| -static struct { |
51 |
| - u8 eventsel; |
52 |
| - u8 unit_mask; |
53 |
| -} const intel_arch_events[] = { |
54 |
| - [INTEL_ARCH_CPU_CYCLES] = { 0x3c, 0x00 }, |
55 |
| - [INTEL_ARCH_INSTRUCTIONS_RETIRED] = { 0xc0, 0x00 }, |
56 |
| - [INTEL_ARCH_REFERENCE_CYCLES] = { 0x3c, 0x01 }, |
57 |
| - [INTEL_ARCH_LLC_REFERENCES] = { 0x2e, 0x4f }, |
58 |
| - [INTEL_ARCH_LLC_MISSES] = { 0x2e, 0x41 }, |
59 |
| - [INTEL_ARCH_BRANCHES_RETIRED] = { 0xc4, 0x00 }, |
60 |
| - [INTEL_ARCH_BRANCHES_MISPREDICTED] = { 0xc5, 0x00 }, |
61 |
| - [PSEUDO_ARCH_REFERENCE_CYCLES] = { 0x00, 0x03 }, |
62 |
| -}; |
63 |
| - |
64 |
| -/* mapping between fixed pmc index and intel_arch_events array */ |
65 |
| -static int fixed_pmc_events[] = { |
66 |
| - [0] = INTEL_ARCH_INSTRUCTIONS_RETIRED, |
67 |
| - [1] = INTEL_ARCH_CPU_CYCLES, |
68 |
| - [2] = PSEUDO_ARCH_REFERENCE_CYCLES, |
69 |
| -}; |
70 |
| - |
71 | 25 | static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
|
72 | 26 | {
|
73 | 27 | struct kvm_pmc *pmc;
|
@@ -440,19 +394,39 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
440 | 394 | return 0;
|
441 | 395 | }
|
442 | 396 |
|
/*
 * Map fixed counter events to architectural general purpose event encodings.
 * Perf doesn't provide APIs to allow KVM to directly program a fixed counter,
 * and so KVM instead programs the architectural event to effectively request
 * the fixed counter.  Perf isn't guaranteed to use a fixed counter and may
 * instead program the encoding into a general purpose counter, e.g. if a
 * different perf_event is already utilizing the requested counter, but the end
 * result is the same (ignoring the fact that using a general purpose counter
 * will likely exacerbate counter contention).
 *
 * Note, reference cycles is counted using a perf-defined "pseudo-encoding",
 * as there is no architectural general purpose encoding for reference cycles.
 */
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
	const struct {
		u8 eventsel;
		u8 unit_mask;
	} fixed_pmc_events[] = {
		[0] = { 0xc0, 0x00 }, /* Instructions Retired / PERF_COUNT_HW_INSTRUCTIONS. */
		[1] = { 0x3c, 0x00 }, /* CPU Cycles / PERF_COUNT_HW_CPU_CYCLES. */
		[2] = { 0x00, 0x03 }, /* Reference Cycles / PERF_COUNT_HW_REF_CPU_CYCLES. */
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		/* Clamp the index to guard against speculative OOB access. */
		int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
		struct kvm_pmc *pmc = &pmu->fixed_counters[index];

		/* eventsel format: unit mask in bits 15:8, event select in bits 7:0. */
		pmc->eventsel = (fixed_pmc_events[index].unit_mask << 8) |
				fixed_pmc_events[index].eventsel;
	}
}
|
458 | 432 |
|
|
0 commit comments