Commit 7087bfb

Kan Liang authored and Peter Zijlstra committed
perf/x86/intel/ds: Clarify adaptive PEBS processing
Modify the pebs_basic and pebs_meminfo structs to make the bitfields
more explicit to ease readability of the code.

Co-developed-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241119135504.1463839-3-kan.liang@linux.intel.com
1 parent dce210a commit 7087bfb
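Background for the change below: the first quadword of an adaptive PEBS basic record packs the data-configuration bits (bits 0-31), the retire latency (bits 32-47) and the record size (bits 48-63). The old code read it as a single u64 named format_size and extracted the pieces with shifts and masks; this commit names the pieces as bitfields. The following is a minimal, hypothetical user-space sketch, not kernel code: the struct name pebs_basic_hdr, the sample raw value and main() are illustrative only, and it assumes the x86 little-endian, LSB-first bitfield allocation the kernel relies on.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative mirror of the new struct pebs_basic header quadword. */
struct pebs_basic_hdr {
        uint64_t format_group:32,       /* PEBS_DATACFG_* selection bits */
                 retire_latency:16,     /* cycles until retirement       */
                 format_size:16;        /* total record size in bytes    */
};

int main(void)
{
        /* Made-up raw value: size 0x30, retire latency 0x12, group 0x5. */
        uint64_t raw = (0x30ULL << 48) | (0x12ULL << 32) | 0x5ULL;

        /* Old style: one u64 decoded with open-coded shifts and masks. */
        uint64_t old_group  = raw & 0xffffffffULL;
        uint64_t old_retire = (raw >> 32) & 0xffff;  /* PEBS_RETIRE_LATENCY_OFFSET, PEBS_LATENCY_MASK */
        uint64_t old_size   = raw >> 48;

        /* New style: the bitfields name the same bit ranges. */
        struct pebs_basic_hdr hdr;
        memcpy(&hdr, &raw, sizeof(hdr));

        assert(hdr.format_group == old_group);
        assert(hdr.retire_latency == old_retire);
        assert(hdr.format_size == old_size);
        printf("group=%#x retire=%u size=%u\n",
               (unsigned)hdr.format_group,
               (unsigned)hdr.retire_latency,
               (unsigned)hdr.format_size);
        return 0;
}

With the new definitions, the ds.c changes below read basic->retire_latency and basic->format_size directly instead of open-coding the offsets.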

File tree

arch/x86/events/intel/ds.c
arch/x86/include/asm/perf_event.h

2 files changed, 34 insertions(+), 25 deletions(-)


arch/x86/events/intel/ds.c

Lines changed: 20 additions & 23 deletions
@@ -1915,8 +1915,6 @@ static void adaptive_pebs_save_regs(struct pt_regs *regs,
 }
 
 #define PEBS_LATENCY_MASK		0xffff
-#define PEBS_CACHE_LATENCY_OFFSET	32
-#define PEBS_RETIRE_LATENCY_OFFSET	32
 
 /*
  * With adaptive PEBS the layout depends on what fields are configured.
@@ -1930,8 +1928,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
         struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct pebs_basic *basic = __pebs;
         void *next_record = basic + 1;
-        u64 sample_type;
-        u64 format_size;
+        u64 sample_type, format_group;
         struct pebs_meminfo *meminfo = NULL;
         struct pebs_gprs *gprs = NULL;
         struct x86_perf_regs *perf_regs;
@@ -1943,7 +1940,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
         perf_regs->xmm_regs = NULL;
 
         sample_type = event->attr.sample_type;
-        format_size = basic->format_size;
+        format_group = basic->format_group;
         perf_sample_data_init(data, 0, event->hw.last_period);
         data->period = event->hw.last_period;
 
@@ -1964,7 +1961,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
 
         if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
                 if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY)
-                        data->weight.var3_w = format_size >> PEBS_RETIRE_LATENCY_OFFSET & PEBS_LATENCY_MASK;
+                        data->weight.var3_w = basic->retire_latency;
                 else
                         data->weight.var3_w = 0;
         }
@@ -1974,12 +1971,12 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
          * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
          * Save the pointer here but process later.
          */
-        if (format_size & PEBS_DATACFG_MEMINFO) {
+        if (format_group & PEBS_DATACFG_MEMINFO) {
                 meminfo = next_record;
                 next_record = meminfo + 1;
         }
 
-        if (format_size & PEBS_DATACFG_GP) {
+        if (format_group & PEBS_DATACFG_GP) {
                 gprs = next_record;
                 next_record = gprs + 1;
 
@@ -1992,27 +1989,27 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
                 adaptive_pebs_save_regs(regs, gprs);
         }
 
-        if (format_size & PEBS_DATACFG_MEMINFO) {
+        if (format_group & PEBS_DATACFG_MEMINFO) {
                 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
-                        u64 weight = meminfo->latency;
+                        u64 latency = x86_pmu.flags & PMU_FL_INSTR_LATENCY ?
+                                        meminfo->cache_latency : meminfo->mem_latency;
 
-                        if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) {
-                                data->weight.var2_w = weight & PEBS_LATENCY_MASK;
-                                weight >>= PEBS_CACHE_LATENCY_OFFSET;
-                        }
+                        if (x86_pmu.flags & PMU_FL_INSTR_LATENCY)
+                                data->weight.var2_w = meminfo->instr_latency;
 
                         /*
                          * Although meminfo::latency is defined as a u64,
                          * only the lower 32 bits include the valid data
                          * in practice on Ice Lake and earlier platforms.
                          */
                         if (sample_type & PERF_SAMPLE_WEIGHT) {
-                                data->weight.full = weight ?:
+                                data->weight.full = latency ?:
                                         intel_get_tsx_weight(meminfo->tsx_tuning);
                         } else {
-                                data->weight.var1_dw = (u32)(weight & PEBS_LATENCY_MASK) ?:
+                                data->weight.var1_dw = (u32)latency ?:
                                         intel_get_tsx_weight(meminfo->tsx_tuning);
                         }
+
                         data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
                 }
 
@@ -2033,16 +2030,16 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
                 }
         }
 
-        if (format_size & PEBS_DATACFG_XMMS) {
+        if (format_group & PEBS_DATACFG_XMMS) {
                 struct pebs_xmm *xmm = next_record;
 
                 next_record = xmm + 1;
                 perf_regs->xmm_regs = xmm->xmm;
         }
 
-        if (format_size & PEBS_DATACFG_LBRS) {
+        if (format_group & PEBS_DATACFG_LBRS) {
                 struct lbr_entry *lbr = next_record;
-                int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
+                int num_lbr = ((format_group >> PEBS_DATACFG_LBR_SHIFT)
                                         & 0xff) + 1;
                 next_record = next_record + num_lbr * sizeof(struct lbr_entry);
 
@@ -2052,11 +2049,11 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
                 }
         }
 
-        WARN_ONCE(next_record != __pebs + (format_size >> 48),
-                  "PEBS record size %llu, expected %llu, config %llx\n",
-                  format_size >> 48,
+        WARN_ONCE(next_record != __pebs + basic->format_size,
+                  "PEBS record size %u, expected %llu, config %llx\n",
+                  basic->format_size,
                   (u64)(next_record - __pebs),
-                  basic->format_size);
+                  format_group);
 }
 
 static inline void *

arch/x86/include/asm/perf_event.h

Lines changed: 14 additions & 2 deletions
@@ -422,7 +422,9 @@ static inline bool is_topdown_idx(int idx)
  */
 
 struct pebs_basic {
-        u64 format_size;
+        u64 format_group:32,
+            retire_latency:16,
+            format_size:16;
         u64 ip;
         u64 applicable_counters;
         u64 tsc;
@@ -431,7 +433,17 @@ struct pebs_basic {
 struct pebs_meminfo {
         u64 address;
         u64 aux;
-        u64 latency;
+        union {
+                /* pre Alder Lake */
+                u64 mem_latency;
+                /* Alder Lake and later */
+                struct {
+                        u64 instr_latency:16;
+                        u64 pad2:16;
+                        u64 cache_latency:16;
+                        u64 pad3:16;
+                };
+        };
         u64 tsx_tuning;
 };
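A quick note on the meminfo union above: on pre-Alder-Lake parts the whole quadword is a single memory latency, while on Alder Lake and later the same quadword carries a 16-bit instruction latency in bits 0-15 and a 16-bit cache latency in bits 32-47, which is why the ds.c hunk selects meminfo->cache_latency or meminfo->mem_latency based on PMU_FL_INSTR_LATENCY. The following minimal user-space sketch of the same union uses made-up values and assumes the x86 LSB-first bitfield layout; it is illustrative only, not kernel code.

#include <assert.h>
#include <stdint.h>

/* Illustrative mirror of the union added to struct pebs_meminfo. */
union pebs_latency {
        /* pre Alder Lake: one latency value */
        uint64_t mem_latency;
        /* Alder Lake and later: split instruction/cache latencies */
        struct {
                uint64_t instr_latency:16;
                uint64_t pad2:16;
                uint64_t cache_latency:16;
                uint64_t pad3:16;
        };
};

int main(void)
{
        union pebs_latency lat;

        /* Made-up raw value: cache latency 0x40 in bits 32-47, instruction latency 0x7 in bits 0-15. */
        lat.mem_latency = (0x40ULL << 32) | 0x7ULL;

        assert(lat.instr_latency == 0x7);   /* old: latency & PEBS_LATENCY_MASK                       */
        assert(lat.cache_latency == 0x40);  /* old: (latency >> PEBS_CACHE_LATENCY_OFFSET) & 0xffff   */
        return 0;
}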