
Commit 58ae30c

Kan Liang authored and Peter Zijlstra committed
perf/x86/intel: Add attr_update for Hybrid PMUs
The attribute_group for Hybrid PMUs should be different from the previous cpu PMU. For example, cpumask is required for a Hybrid PMU. The PMU type should be included in the event and format attribute.

Add hybrid_attr_update for the Hybrid PMU. Check the PMU type in the is_visible() functions. Only display the event or format for the matched Hybrid PMU.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-19-git-send-email-kan.liang@linux.intel.com
1 parent a9c81cc commit 58ae30c
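
For context, the cpus attribute added by this patch shows up as a plain sysfs file under each hybrid PMU's event_source device, and tools read it to decide which CPUs an event can be opened on. Below is a minimal user-space sketch of that usage, not part of the patch; the cpu_core/cpu_atom device names are an assumption taken from the wider hybrid series, not from this commit.

/* Illustrative user-space reader; not part of this patch.
 * Assumes the hybrid PMUs are registered as "cpu_core" and "cpu_atom"
 * (names from the wider series, not from this commit).
 */
#include <stdio.h>

static void print_pmu_cpus(const char *pmu_name)
{
        char path[128];
        char buf[256];
        FILE *f;

        /* The "cpus" file is created by DEVICE_ATTR(cpus, ...) in the diff below. */
        snprintf(path, sizeof(path),
                 "/sys/bus/event_source/devices/%s/cpus", pmu_name);

        f = fopen(path, "r");
        if (!f) {
                printf("%s: no cpus file\n", pmu_name);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("%s: %s", pmu_name, buf);    /* e.g. "0-15" */
        fclose(f);
}

int main(void)
{
        print_pmu_cpus("cpu_core");
        print_pmu_cpus("cpu_atom");
        return 0;
}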

File tree

1 file changed: +114 −6 lines


arch/x86/events/intel/core.c

Lines changed: 114 additions & 6 deletions
@@ -5118,6 +5118,106 @@ static const struct attribute_group *attr_update[] = {
 	NULL,
 };
 
+static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct x86_hybrid_pmu *pmu =
+		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+	struct perf_pmu_events_hybrid_attr *pmu_attr =
+		container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
+
+	return pmu->cpu_type & pmu_attr->pmu_type;
+}
+
+static umode_t hybrid_events_is_visible(struct kobject *kobj,
+					struct attribute *attr, int i)
+{
+	return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
+}
+
+static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
+{
+	int cpu = cpumask_first(&pmu->supported_cpus);
+
+	return (cpu >= nr_cpu_ids) ? -1 : cpu;
+}
+
+static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
+				     struct attribute *attr, int i)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct x86_hybrid_pmu *pmu =
+		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+	int cpu = hybrid_find_supported_cpu(pmu);
+
+	return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
+}
+
+static umode_t hybrid_format_is_visible(struct kobject *kobj,
+					struct attribute *attr, int i)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct x86_hybrid_pmu *pmu =
+		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+	struct perf_pmu_format_hybrid_attr *pmu_attr =
+		container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
+	int cpu = hybrid_find_supported_cpu(pmu);
+
+	return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
+}
+
+static struct attribute_group hybrid_group_events_td = {
+	.name		= "events",
+	.is_visible	= hybrid_events_is_visible,
+};
+
+static struct attribute_group hybrid_group_events_mem = {
+	.name		= "events",
+	.is_visible	= hybrid_events_is_visible,
+};
+
+static struct attribute_group hybrid_group_events_tsx = {
+	.name		= "events",
+	.is_visible	= hybrid_tsx_is_visible,
+};
+
+static struct attribute_group hybrid_group_format_extra = {
+	.name		= "format",
+	.is_visible	= hybrid_format_is_visible,
+};
+
+static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct x86_hybrid_pmu *pmu =
+		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+
+	return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
+}
+
+static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
+static struct attribute *intel_hybrid_cpus_attrs[] = {
+	&dev_attr_cpus.attr,
+	NULL,
+};
+
+static struct attribute_group hybrid_group_cpus = {
+	.attrs		= intel_hybrid_cpus_attrs,
+};
+
+static const struct attribute_group *hybrid_attr_update[] = {
+	&hybrid_group_events_td,
+	&hybrid_group_events_mem,
+	&hybrid_group_events_tsx,
+	&group_caps_gen,
+	&group_caps_lbr,
+	&hybrid_group_format_extra,
+	&group_default,
+	&hybrid_group_cpus,
+	NULL,
+};
+
 static struct attribute *empty_attrs;
 
 static void intel_pmu_check_num_counters(int *num_counters,
@@ -5861,14 +5961,22 @@ __init int intel_pmu_init(void)
 
 	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
 
+	if (!is_hybrid()) {
+		group_events_td.attrs  = td_attr;
+		group_events_mem.attrs = mem_attr;
+		group_events_tsx.attrs = tsx_attr;
+		group_format_extra.attrs = extra_attr;
+		group_format_extra_skl.attrs = extra_skl_attr;
 
-	group_events_td.attrs  = td_attr;
-	group_events_mem.attrs = mem_attr;
-	group_events_tsx.attrs = tsx_attr;
-	group_format_extra.attrs = extra_attr;
-	group_format_extra_skl.attrs = extra_skl_attr;
+		x86_pmu.attr_update = attr_update;
+	} else {
+		hybrid_group_events_td.attrs  = td_attr;
+		hybrid_group_events_mem.attrs = mem_attr;
+		hybrid_group_events_tsx.attrs = tsx_attr;
+		hybrid_group_format_extra.attrs = extra_attr;
 
-	x86_pmu.attr_update = attr_update;
+		x86_pmu.attr_update = hybrid_attr_update;
+	}
 
 	intel_pmu_check_num_counters(&x86_pmu.num_counters,
 				     &x86_pmu.num_counters_fixed,
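
To see what the is_visible() callbacks above accomplish, here is a stand-alone model of the filtering logic that compiles on its own; the struct layouts and the HYBRID_BIG/HYBRID_SMALL bit values are placeholders, not the kernel's definitions. An attribute whose pmu_type mask does not overlap a PMU's cpu_type gets mode 0 and is therefore hidden in that PMU's sysfs directory.

/* Stand-alone model of the visibility check; placeholder types and bits,
 * not the kernel's definitions.
 */
#include <stdio.h>

#define HYBRID_BIG   (1u << 0)   /* placeholder big-core type bit   */
#define HYBRID_SMALL (1u << 1)   /* placeholder small-core type bit */

struct model_pmu        { unsigned int cpu_type; };
struct model_event_attr { const char *name; unsigned int pmu_type; unsigned int mode; };

/* Mirrors is_attr_for_this_pmu(): show the attribute only if its
 * pmu_type mask overlaps the PMU's cpu_type; otherwise return 0 (hidden). */
static unsigned int model_is_visible(const struct model_pmu *pmu,
                                     const struct model_event_attr *attr)
{
        return (pmu->cpu_type & attr->pmu_type) ? attr->mode : 0;
}

int main(void)
{
        struct model_pmu core = { .cpu_type = HYBRID_BIG };
        struct model_pmu atom = { .cpu_type = HYBRID_SMALL };
        struct model_event_attr ev = {
                .name = "example-event", .pmu_type = HYBRID_BIG, .mode = 0444,
        };

        printf("core sees %s: mode %o\n", ev.name, model_is_visible(&core, &ev));
        printf("atom sees %s: mode %o\n", ev.name, model_is_visible(&atom, &ev));
        return 0;
}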
