Skip to content

Commit 027440b

Browse files
Like Xu authored and Peter Zijlstra committed
perf/x86/core: Refactor hw->idx checks and cleanup
For intel_pmu_en/disable_event(), reorder the branches checks for hw->idx and make them sorted by probability: gp,fixed,bts,others. Clean up the x86_assign_hw_event() by converting multiple if-else statements to a switch statement. To skip x86_perf_event_update() and x86_perf_event_set_period(), it's generic to replace "idx == INTEL_PMC_IDX_FIXED_BTS" check with '!hwc->event_base' because that should be 0 for all non-gp/fixed cases. Wrap related bit operations into intel_set/clear_masks() and make the main path more cleaner and readable. No functional changes. Signed-off-by: Like Xu <like.xu@linux.intel.com> Original-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20200613080958.132489-3-like.xu@linux.intel.com
1 parent 3cb9d54 commit 027440b

File tree

2 files changed

+62
-48
lines changed

2 files changed

+62
-48
lines changed

arch/x86/events/core.c

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -71,10 +71,9 @@ u64 x86_perf_event_update(struct perf_event *event)
7171
struct hw_perf_event *hwc = &event->hw;
7272
int shift = 64 - x86_pmu.cntval_bits;
7373
u64 prev_raw_count, new_raw_count;
74-
int idx = hwc->idx;
7574
u64 delta;
7675

77-
if (idx == INTEL_PMC_IDX_FIXED_BTS)
76+
if (unlikely(!hwc->event_base))
7877
return 0;
7978

8079
/*
@@ -1097,22 +1096,30 @@ static inline void x86_assign_hw_event(struct perf_event *event,
10971096
struct cpu_hw_events *cpuc, int i)
10981097
{
10991098
struct hw_perf_event *hwc = &event->hw;
1099+
int idx;
11001100

1101-
hwc->idx = cpuc->assign[i];
1101+
idx = hwc->idx = cpuc->assign[i];
11021102
hwc->last_cpu = smp_processor_id();
11031103
hwc->last_tag = ++cpuc->tags[i];
11041104

1105-
if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
1105+
switch (hwc->idx) {
1106+
case INTEL_PMC_IDX_FIXED_BTS:
11061107
hwc->config_base = 0;
11071108
hwc->event_base = 0;
1108-
} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
1109+
break;
1110+
1111+
case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
11091112
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1110-
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
1111-
hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
1112-
} else {
1113+
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
1114+
(idx - INTEL_PMC_IDX_FIXED);
1115+
hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) | 1<<30;
1116+
break;
1117+
1118+
default:
11131119
hwc->config_base = x86_pmu_config_addr(hwc->idx);
11141120
hwc->event_base = x86_pmu_event_addr(hwc->idx);
11151121
hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
1122+
break;
11161123
}
11171124
}
11181125

@@ -1233,7 +1240,7 @@ int x86_perf_event_set_period(struct perf_event *event)
12331240
s64 period = hwc->sample_period;
12341241
int ret = 0, idx = hwc->idx;
12351242

1236-
if (idx == INTEL_PMC_IDX_FIXED_BTS)
1243+
if (unlikely(!hwc->event_base))
12371244
return 0;
12381245

12391246
/*

arch/x86/events/intel/core.c

Lines changed: 46 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -2136,8 +2136,35 @@ static inline void intel_pmu_ack_status(u64 ack)
21362136
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
21372137
}
21382138

2139-
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2139+
static inline bool event_is_checkpointed(struct perf_event *event)
2140+
{
2141+
return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2142+
}
2143+
2144+
static inline void intel_set_masks(struct perf_event *event, int idx)
2145+
{
2146+
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2147+
2148+
if (event->attr.exclude_host)
2149+
__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2150+
if (event->attr.exclude_guest)
2151+
__set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2152+
if (event_is_checkpointed(event))
2153+
__set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2154+
}
2155+
2156+
static inline void intel_clear_masks(struct perf_event *event, int idx)
21402157
{
2158+
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2159+
2160+
__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2161+
__clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2162+
__clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2163+
}
2164+
2165+
static void intel_pmu_disable_fixed(struct perf_event *event)
2166+
{
2167+
struct hw_perf_event *hwc = &event->hw;
21412168
int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
21422169
u64 ctrl_val, mask;
21432170

@@ -2148,31 +2175,22 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
21482175
wrmsrl(hwc->config_base, ctrl_val);
21492176
}
21502177

2151-
static inline bool event_is_checkpointed(struct perf_event *event)
2152-
{
2153-
return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2154-
}
2155-
21562178
static void intel_pmu_disable_event(struct perf_event *event)
21572179
{
21582180
struct hw_perf_event *hwc = &event->hw;
2159-
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2181+
int idx = hwc->idx;
21602182

2161-
if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2183+
if (idx < INTEL_PMC_IDX_FIXED) {
2184+
intel_clear_masks(event, idx);
2185+
x86_pmu_disable_event(event);
2186+
} else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
2187+
intel_clear_masks(event, idx);
2188+
intel_pmu_disable_fixed(event);
2189+
} else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
21622190
intel_pmu_disable_bts();
21632191
intel_pmu_drain_bts_buffer();
2164-
return;
21652192
}
21662193

2167-
cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2168-
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2169-
cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2170-
2171-
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
2172-
intel_pmu_disable_fixed(hwc);
2173-
else
2174-
x86_pmu_disable_event(event);
2175-
21762194
/*
21772195
* Needs to be called after x86_pmu_disable_event,
21782196
* so we don't trigger the event without PEBS bit set.
@@ -2238,33 +2256,22 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
22382256
static void intel_pmu_enable_event(struct perf_event *event)
22392257
{
22402258
struct hw_perf_event *hwc = &event->hw;
2241-
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2242-
2243-
if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2244-
if (!__this_cpu_read(cpu_hw_events.enabled))
2245-
return;
2246-
2247-
intel_pmu_enable_bts(hwc->config);
2248-
return;
2249-
}
2250-
2251-
if (event->attr.exclude_host)
2252-
cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2253-
if (event->attr.exclude_guest)
2254-
cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2255-
2256-
if (unlikely(event_is_checkpointed(event)))
2257-
cpuc->intel_cp_status |= (1ull << hwc->idx);
2259+
int idx = hwc->idx;
22582260

22592261
if (unlikely(event->attr.precise_ip))
22602262
intel_pmu_pebs_enable(event);
22612263

2262-
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2264+
if (idx < INTEL_PMC_IDX_FIXED) {
2265+
intel_set_masks(event, idx);
2266+
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2267+
} else if (idx < INTEL_PMC_IDX_FIXED_BTS) {
2268+
intel_set_masks(event, idx);
22632269
intel_pmu_enable_fixed(event);
2264-
return;
2270+
} else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
2271+
if (!__this_cpu_read(cpu_hw_events.enabled))
2272+
return;
2273+
intel_pmu_enable_bts(hwc->config);
22652274
}
2266-
2267-
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
22682275
}
22692276

22702277
static void intel_pmu_add_event(struct perf_event *event)

0 commit comments

Comments (0)