|
/*
 * Snapshot of the host PMU capabilities used by KVM's vPMU emulation.
 * __read_mostly: written rarely, read on hot paths.
 * NOTE(review): the initialization site is not visible in this chunk;
 * exported GPL so out-of-file (vendor) modules can read it.
 */
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);
|
31 | 31 |
|
32 |
/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
	/* Instruction-Accurate PDIR (PDIR++) */
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}	/* empty entry terminates the table for x86_match_cpu() */
};

/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}	/* empty entry terminates the table for x86_match_cpu() */
};
|
37 | 46 |
|
@@ -156,6 +165,28 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
|
156 | 165 | kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
|
157 | 166 | }
|
158 | 167 |
|
| 168 | +static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc) |
| 169 | +{ |
| 170 | + /* |
| 171 | + * For some model specific pebs counters with special capabilities |
| 172 | + * (PDIR, PDIR++, PDIST), KVM needs to raise the event precise |
| 173 | + * level to the maximum value (currently 3, backwards compatible) |
| 174 | + * so that the perf subsystem would assign specific hardware counter |
| 175 | + * with that capability for vPMC. |
| 176 | + */ |
| 177 | + if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) || |
| 178 | + (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu))) |
| 179 | + return 3; |
| 180 | + |
| 181 | + /* |
| 182 | + * The non-zero precision level of guest event makes the ordinary |
| 183 | + * guest event becomes a guest PEBS event and triggers the host |
| 184 | + * PEBS PMI handler to determine whether the PEBS overflow PMI |
| 185 | + * comes from the host counters or the guest. |
| 186 | + */ |
| 187 | + return 1; |
| 188 | +} |
| 189 | + |
159 | 190 | static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
|
160 | 191 | bool exclude_user, bool exclude_kernel,
|
161 | 192 | bool intr)
|
@@ -187,22 +218,12 @@ static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
|
187 | 218 | }
|
188 | 219 | if (pebs) {
|
189 | 220 | /*
|
190 |
| - * The non-zero precision level of guest event makes the ordinary |
191 |
| - * guest event becomes a guest PEBS event and triggers the host |
192 |
| - * PEBS PMI handler to determine whether the PEBS overflow PMI |
193 |
| - * comes from the host counters or the guest. |
194 |
| - * |
195 | 221 | * For most PEBS hardware events, the difference in the software
|
196 | 222 | * precision levels of guest and host PEBS events will not affect
|
197 | 223 | * the accuracy of the PEBS profiling result, because the "event IP"
|
198 | 224 | * in the PEBS record is calibrated on the guest side.
|
199 |
| - * |
200 |
| - * On Icelake everything is fine. Other hardware (GLC+, TNT+) that |
201 |
| - * could possibly care here is unsupported and needs changes. |
202 | 225 | */
|
203 |
| - attr.precise_ip = 1; |
204 |
| - if (x86_match_cpu(vmx_icl_pebs_cpu) && pmc->idx == 32) |
205 |
| - attr.precise_ip = 3; |
| 226 | + attr.precise_ip = pmc_get_pebs_precise_level(pmc); |
206 | 227 | }
|
207 | 228 |
|
208 | 229 | event = perf_event_create_kernel_counter(&attr, -1, current,
|
|
0 commit comments