From 4c704363910aa00df3243bcb1658edf3fb9919c9 Mon Sep 17 00:00:00 2001 From: Manisha Chinthapally Date: Tue, 30 Aug 2022 17:55:21 -0700 Subject: [PATCH] Merge SEP5.36 into Linux tree Signed-off-by: Manisha Chinthapally --- drivers/platform/x86/sepdk/inc/apic.h | 118 + drivers/platform/x86/sepdk/inc/asm_helper.h | 185 + drivers/platform/x86/sepdk/inc/control.h | 543 ++ drivers/platform/x86/sepdk/inc/core2.h | 53 + drivers/platform/x86/sepdk/inc/cpumon.h | 61 + .../platform/x86/sepdk/inc/ecb_iterators.h | 614 ++ drivers/platform/x86/sepdk/inc/eventmux.h | 50 + drivers/platform/x86/sepdk/inc/gmch.h | 39 + .../platform/x86/sepdk/inc/haswellunc_sa.h | 65 + drivers/platform/x86/sepdk/inc/ipt.h | 83 + drivers/platform/x86/sepdk/inc/linuxos.h | 95 + drivers/platform/x86/sepdk/inc/lwpmudrv.h | 348 + drivers/platform/x86/sepdk/inc/master.h | 55 + drivers/platform/x86/sepdk/inc/msrdefs.h | 133 + drivers/platform/x86/sepdk/inc/output.h | 150 + drivers/platform/x86/sepdk/inc/pci.h | 322 + drivers/platform/x86/sepdk/inc/pebs.h | 525 ++ drivers/platform/x86/sepdk/inc/perfver4.h | 59 + drivers/platform/x86/sepdk/inc/pmi.h | 67 + drivers/platform/x86/sepdk/inc/pmu_info.h | 223 + .../platform/x86/sepdk/inc/pmu_info_mmio.h | 397 + drivers/platform/x86/sepdk/inc/pmu_info_msr.h | 479 + drivers/platform/x86/sepdk/inc/pmu_info_pci.h | 295 + .../platform/x86/sepdk/inc/pmu_info_struct.h | 115 + drivers/platform/x86/sepdk/inc/pmu_list.h | 154 + .../platform/x86/sepdk/inc/sepdrv_p_state.h | 42 + drivers/platform/x86/sepdk/inc/silvermont.h | 49 + drivers/platform/x86/sepdk/inc/sys_info.h | 77 + drivers/platform/x86/sepdk/inc/unc_common.h | 221 + drivers/platform/x86/sepdk/inc/unc_gt.h | 87 + drivers/platform/x86/sepdk/inc/utility.h | 648 ++ .../x86/sepdk/inc/valleyview_sochap.h | 68 + .../x86/sepdk/include/error_reporting_utils.h | 331 + .../x86/sepdk/include/lwpmudrv_defines.h | 607 ++ .../platform/x86/sepdk/include/lwpmudrv_ecb.h | 1317 +++ .../platform/x86/sepdk/include/lwpmudrv_gfx.h | 35 + .../x86/sepdk/include/lwpmudrv_ioctl.h | 297 + .../x86/sepdk/include/lwpmudrv_struct.h | 2768 ++++++ .../x86/sepdk/include/lwpmudrv_types.h | 168 + .../x86/sepdk/include/lwpmudrv_version.h | 136 + .../platform/x86/sepdk/include/pax_shared.h | 175 + .../platform/x86/sepdk/include/rise_errors.h | 350 + drivers/platform/x86/sepdk/pax/Makefile | 36 + drivers/platform/x86/sepdk/pax/pax.c | 968 ++ drivers/platform/x86/sepdk/pax/pax.h | 41 + drivers/platform/x86/sepdk/sep/Makefile | 112 + drivers/platform/x86/sepdk/sep/apic.c | 212 + drivers/platform/x86/sepdk/sep/control.c | 885 ++ drivers/platform/x86/sepdk/sep/core2.c | 1481 +++ drivers/platform/x86/sepdk/sep/cpumon.c | 354 + drivers/platform/x86/sepdk/sep/eventmux.c | 486 + drivers/platform/x86/sepdk/sep/ipt.c | 650 ++ drivers/platform/x86/sepdk/sep/linuxos.c | 1977 ++++ drivers/platform/x86/sepdk/sep/lwpmudrv.c | 8053 +++++++++++++++++ drivers/platform/x86/sepdk/sep/output.c | 1602 ++++ drivers/platform/x86/sepdk/sep/pci.c | 1027 +++ drivers/platform/x86/sepdk/sep/pebs.c | 2023 +++++ drivers/platform/x86/sepdk/sep/pebs_linref.c | 1797 ++++ drivers/platform/x86/sepdk/sep/perfver4.c | 1434 +++ drivers/platform/x86/sepdk/sep/pmi.c | 440 + drivers/platform/x86/sepdk/sep/pmu_list.c | 841 ++ .../platform/x86/sepdk/sep/sepdrv_p_state.c | 97 + drivers/platform/x86/sepdk/sep/silvermont.c | 1164 +++ drivers/platform/x86/sepdk/sep/sys32.S | 197 + drivers/platform/x86/sepdk/sep/sys64.S | 147 + drivers/platform/x86/sepdk/sep/sys_info.c | 1373 +++ drivers/platform/x86/sepdk/sep/unc_common.c | 
998 ++ drivers/platform/x86/sepdk/sep/unc_gt.c | 538 ++ drivers/platform/x86/sepdk/sep/unc_mmio.c | 2807 ++++++ drivers/platform/x86/sepdk/sep/unc_msr.c | 427 + drivers/platform/x86/sepdk/sep/unc_pci.c | 545 ++ drivers/platform/x86/sepdk/sep/unc_pmt.c | 481 + drivers/platform/x86/sepdk/sep/unc_power.c | 713 ++ drivers/platform/x86/sepdk/sep/unc_sa.c | 188 + drivers/platform/x86/sepdk/sep/utility.c | 1280 +++ .../x86/sepdk/sep/valleyview_sochap.c | 319 + drivers/platform/x86/socperf/Makefile | 70 + drivers/platform/x86/socperf/control.c | 762 ++ drivers/platform/x86/socperf/haswellunc_sa.c | 419 + drivers/platform/x86/socperf/inc/control.h | 473 + .../platform/x86/socperf/inc/ecb_iterators.h | 136 + .../platform/x86/socperf/inc/haswellunc_sa.h | 85 + drivers/platform/x86/socperf/inc/npk_uncore.h | 82 + drivers/platform/x86/socperf/inc/pci.h | 110 + drivers/platform/x86/socperf/inc/pmu_info.h | 93 + .../platform/x86/socperf/inc/pmu_info_mmio.h | 1118 +++ .../x86/socperf/inc/pmu_info_struct.h | 139 + drivers/platform/x86/socperf/inc/pmu_list.h | 116 + drivers/platform/x86/socperf/inc/soc_uncore.h | 91 + drivers/platform/x86/socperf/inc/socperfdrv.h | 203 + drivers/platform/x86/socperf/inc/utility.h | 73 + .../socperf/include/error_reporting_utils.h | 331 + .../x86/socperf/include/lwpmudrv_defines.h | 607 ++ .../x86/socperf/include/lwpmudrv_ecb.h | 1317 +++ .../x86/socperf/include/lwpmudrv_ioctl.h | 312 + .../x86/socperf/include/lwpmudrv_struct.h | 2768 ++++++ .../x86/socperf/include/lwpmudrv_types.h | 168 + .../x86/socperf/include/lwpmudrv_version.h | 148 + .../x86/socperf/include/rise_errors.h | 350 + drivers/platform/x86/socperf/npk_uncore.c | 519 ++ drivers/platform/x86/socperf/pci.c | 197 + drivers/platform/x86/socperf/pmu_list.c | 414 + drivers/platform/x86/socperf/soc_uncore.c | 948 ++ drivers/platform/x86/socperf/socperfdrv.c | 1826 ++++ drivers/platform/x86/socperf/utility.c | 185 + 105 files changed, 62357 insertions(+) create mode 100644 drivers/platform/x86/sepdk/inc/apic.h create mode 100644 drivers/platform/x86/sepdk/inc/asm_helper.h create mode 100644 drivers/platform/x86/sepdk/inc/control.h create mode 100644 drivers/platform/x86/sepdk/inc/core2.h create mode 100644 drivers/platform/x86/sepdk/inc/cpumon.h create mode 100644 drivers/platform/x86/sepdk/inc/ecb_iterators.h create mode 100644 drivers/platform/x86/sepdk/inc/eventmux.h create mode 100644 drivers/platform/x86/sepdk/inc/gmch.h create mode 100644 drivers/platform/x86/sepdk/inc/haswellunc_sa.h create mode 100644 drivers/platform/x86/sepdk/inc/ipt.h create mode 100644 drivers/platform/x86/sepdk/inc/linuxos.h create mode 100644 drivers/platform/x86/sepdk/inc/lwpmudrv.h create mode 100644 drivers/platform/x86/sepdk/inc/master.h create mode 100644 drivers/platform/x86/sepdk/inc/msrdefs.h create mode 100644 drivers/platform/x86/sepdk/inc/output.h create mode 100644 drivers/platform/x86/sepdk/inc/pci.h create mode 100644 drivers/platform/x86/sepdk/inc/pebs.h create mode 100644 drivers/platform/x86/sepdk/inc/perfver4.h create mode 100644 drivers/platform/x86/sepdk/inc/pmi.h create mode 100644 drivers/platform/x86/sepdk/inc/pmu_info.h create mode 100644 drivers/platform/x86/sepdk/inc/pmu_info_mmio.h create mode 100644 drivers/platform/x86/sepdk/inc/pmu_info_msr.h create mode 100644 drivers/platform/x86/sepdk/inc/pmu_info_pci.h create mode 100644 drivers/platform/x86/sepdk/inc/pmu_info_struct.h create mode 100644 drivers/platform/x86/sepdk/inc/pmu_list.h create mode 100644 drivers/platform/x86/sepdk/inc/sepdrv_p_state.h create 
mode 100644 drivers/platform/x86/sepdk/inc/silvermont.h create mode 100644 drivers/platform/x86/sepdk/inc/sys_info.h create mode 100644 drivers/platform/x86/sepdk/inc/unc_common.h create mode 100644 drivers/platform/x86/sepdk/inc/unc_gt.h create mode 100644 drivers/platform/x86/sepdk/inc/utility.h create mode 100644 drivers/platform/x86/sepdk/inc/valleyview_sochap.h create mode 100644 drivers/platform/x86/sepdk/include/error_reporting_utils.h create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_defines.h create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_struct.h create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_types.h create mode 100644 drivers/platform/x86/sepdk/include/lwpmudrv_version.h create mode 100644 drivers/platform/x86/sepdk/include/pax_shared.h create mode 100644 drivers/platform/x86/sepdk/include/rise_errors.h create mode 100644 drivers/platform/x86/sepdk/pax/Makefile create mode 100644 drivers/platform/x86/sepdk/pax/pax.c create mode 100644 drivers/platform/x86/sepdk/pax/pax.h create mode 100644 drivers/platform/x86/sepdk/sep/Makefile create mode 100644 drivers/platform/x86/sepdk/sep/apic.c create mode 100644 drivers/platform/x86/sepdk/sep/control.c create mode 100644 drivers/platform/x86/sepdk/sep/core2.c create mode 100644 drivers/platform/x86/sepdk/sep/cpumon.c create mode 100644 drivers/platform/x86/sepdk/sep/eventmux.c create mode 100644 drivers/platform/x86/sepdk/sep/ipt.c create mode 100644 drivers/platform/x86/sepdk/sep/linuxos.c create mode 100644 drivers/platform/x86/sepdk/sep/lwpmudrv.c create mode 100644 drivers/platform/x86/sepdk/sep/output.c create mode 100644 drivers/platform/x86/sepdk/sep/pci.c create mode 100644 drivers/platform/x86/sepdk/sep/pebs.c create mode 100644 drivers/platform/x86/sepdk/sep/pebs_linref.c create mode 100644 drivers/platform/x86/sepdk/sep/perfver4.c create mode 100644 drivers/platform/x86/sepdk/sep/pmi.c create mode 100644 drivers/platform/x86/sepdk/sep/pmu_list.c create mode 100644 drivers/platform/x86/sepdk/sep/sepdrv_p_state.c create mode 100644 drivers/platform/x86/sepdk/sep/silvermont.c create mode 100644 drivers/platform/x86/sepdk/sep/sys32.S create mode 100644 drivers/platform/x86/sepdk/sep/sys64.S create mode 100644 drivers/platform/x86/sepdk/sep/sys_info.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_common.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_gt.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_mmio.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_msr.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_pci.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_pmt.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_power.c create mode 100644 drivers/platform/x86/sepdk/sep/unc_sa.c create mode 100644 drivers/platform/x86/sepdk/sep/utility.c create mode 100644 drivers/platform/x86/sepdk/sep/valleyview_sochap.c create mode 100644 drivers/platform/x86/socperf/Makefile create mode 100644 drivers/platform/x86/socperf/control.c create mode 100644 drivers/platform/x86/socperf/haswellunc_sa.c create mode 100644 drivers/platform/x86/socperf/inc/control.h create mode 100644 drivers/platform/x86/socperf/inc/ecb_iterators.h create mode 100644 drivers/platform/x86/socperf/inc/haswellunc_sa.h create mode 100644 
drivers/platform/x86/socperf/inc/npk_uncore.h create mode 100644 drivers/platform/x86/socperf/inc/pci.h create mode 100644 drivers/platform/x86/socperf/inc/pmu_info.h create mode 100644 drivers/platform/x86/socperf/inc/pmu_info_mmio.h create mode 100644 drivers/platform/x86/socperf/inc/pmu_info_struct.h create mode 100644 drivers/platform/x86/socperf/inc/pmu_list.h create mode 100644 drivers/platform/x86/socperf/inc/soc_uncore.h create mode 100644 drivers/platform/x86/socperf/inc/socperfdrv.h create mode 100644 drivers/platform/x86/socperf/inc/utility.h create mode 100644 drivers/platform/x86/socperf/include/error_reporting_utils.h create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_defines.h create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_ecb.h create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_struct.h create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_types.h create mode 100644 drivers/platform/x86/socperf/include/lwpmudrv_version.h create mode 100644 drivers/platform/x86/socperf/include/rise_errors.h create mode 100644 drivers/platform/x86/socperf/npk_uncore.c create mode 100644 drivers/platform/x86/socperf/pci.c create mode 100644 drivers/platform/x86/socperf/pmu_list.c create mode 100644 drivers/platform/x86/socperf/soc_uncore.c create mode 100644 drivers/platform/x86/socperf/socperfdrv.c create mode 100644 drivers/platform/x86/socperf/utility.c diff --git a/drivers/platform/x86/sepdk/inc/apic.h b/drivers/platform/x86/sepdk/inc/apic.h new file mode 100644 index 00000000000000..9cd9c785cbc9eb --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/apic.h @@ -0,0 +1,118 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _APIC_H_ +#define _APIC_H_ + +#include + +typedef U64 *PHYSICAL_ADDRESS; +/** +// Data Types and Macros +*/ +/* +// APIC registers and constants +*/ +// APIC base MSR +#define DRV_APIC_BASE_MSR 0x001b + +// APIC registers +#define DRV_APIC_LCL_ID 0x0020 +#define DRV_APIC_LCL_TSKPRI 0x0080 +#define DRV_APIC_LCL_PPR 0x00a0 +#define DRV_APIC_LCL_EOI 0x00b0 +#define DRV_APIC_LCL_LDEST 0x00d0 +#define DRV_APIC_LCL_DSTFMT 0x00e0 +#define DRV_APIC_LCL_SVR 0x00f0 +#define DRV_APIC_LCL_ICR 0x0300 +#define DRV_APIC_LVT_TIMER 0x0320 +#define DRV_APIC_LVT_PMI 0x0340 +#define DRV_APIC_LVT_LINT0 0x0350 +#define DRV_APIC_LVT_LINT1 0x0360 +#define DRV_APIC_LVT_ERROR 0x0370 + +#define DRV_APIC_LCL_ID_MSR 0x802 +#define DRV_APIC_LCL_TSKPRI_MSR 0x808 +#define DRV_APIC_LCL_PPR_MSR 0x80a +#define DRV_APIC_LCL_EOI_MSR 0x80b +#define DRV_APIC_LCL_LDEST_MSR 0x80d +#define DRV_APIC_LCL_DSTFMT_MSR 0x80e +#define DRV_APIC_LCL_SVR_MSR 0x80f +#define DRV_APIC_LCL_ICR_MSR 0x830 +#define DRV_APIC_LVT_TIMER_MSR 0x832 +#define DRV_APIC_LVT_PMI_MSR 0x834 +#define DRV_APIC_LVT_LINT0_MSR 0x835 +#define DRV_APIC_LVT_LINT1_MSR 0x836 +#define DRV_APIC_LVT_ERROR_MSR 0x837 + +// masks for LVT +#define DRV_LVT_MASK 0x10000 +#define DRV_LVT_EDGE 0x00000 +#define DRV_LVT_LEVEL 0x08000 +#define DRV_LVT_EXTINT 0x00700 +#define DRV_LVT_NMI 0x00400 + +// task priorities +#define DRV_APIC_TSKPRI_LO 0x0000 +#define DRV_APIC_TSKPRI_HI 0x00f0 + +#define DRV_X2APIC_ENABLED 0xc00LL + +//// Interrupt vector for PMU overflow event +// +// Choose the highest unused IDT vector possible so that our +// callback routine runs at the highest priority allowed; +// must avoid using pre-defined vectors in: +// include/asm/irq.h +// include/asm/hw_irq.h +// include/asm/irq_vectors.h +// +// FIRST_DEVICE_VECTOR should be valid for kernels 2.6.33 and earlier +#define CPU_PERF_VECTOR DRV_LVT_NMI +// Has the APIC been enabled? +#define DRV_APIC_BASE_GLOBAL_ENABLED(a) ((a) & (1 << 11)) +#define DRV_APIC_VIRTUAL_WIRE_ENABLED(a) ((a) & 0x100) + +/** +// Function Declarations +*/ + +/* +// APIC control functions +*/ +extern VOID APIC_Enable_Pmi(VOID); +extern VOID APIC_Init(PVOID param); +extern VOID APIC_Install_Interrupt_Handler(PVOID param); +extern VOID APIC_Restore_LVTPC(PVOID param); + +#endif + 
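+/*
+ * Illustrative sketch (not part of the SEP sources): one way the LVT
+ * offsets and masks above are consumed when routing PMU overflow
+ * interrupts as NMIs in xAPIC mode; "apic_base" is a hypothetical
+ * ioremap'ed pointer to the local APIC page:
+ *
+ *     volatile U32 *lvt_pmi = (volatile U32 *)(apic_base + DRV_APIC_LVT_PMI);
+ *     *lvt_pmi = DRV_LVT_NMI;   // delivery mode NMI, DRV_LVT_MASK bit clear
+ *
+ * In x2APIC mode (DRV_X2APIC_ENABLED set in the APIC base MSR), the same
+ * field would instead be written through DRV_APIC_LVT_PMI_MSR.
+ */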
This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _ASM_HELPER_H_ +#define _ASM_HELPER_H_ + +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) + +#include +#include + +#else + +#ifdef CONFIG_AS_CFI + +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_RESTORE .cfi_restore + +#else + +.macro cfi_ignore a=0, b=0, c=0, d=0 +.endm + +#define CFI_STARTPROC cfi_ignore +#define CFI_ENDPROC cfi_ignore +#define CFI_ADJUST_CFA_OFFSET cfi_ignore +#define CFI_REL_OFFSET cfi_ignore +#define CFI_RESTORE cfi_ignore +#endif + +#ifdef CONFIG_X86_64 + .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 + .if \r11 + movq %r11, 6*8+\offset(%rsp) + CFI_REL_OFFSET r11, \offset + .endif + .if \r8910 + movq %r10, 7*8+\offset(%rsp) + CFI_REL_OFFSET r10, \offset + movq %r9, 8*8+\offset(%rsp) + CFI_REL_OFFSET r9, \offset + movq %r8, 9*8+\offset(%rsp) + CFI_REL_OFFSET r8, \offset + .endif + .if \rax + movq %rax, 10*8+\offset(%rsp) + CFI_REL_OFFSET rax, \offset + .endif + .if \rcx + movq %rcx, 11*8+\offset(%rsp) + CFI_REL_OFFSET rcx, \offset + .endif + movq %rdx, 12*8+\offset(%rsp) + CFI_REL_OFFSET rdx, \offset + movq %rsi, 13*8+\offset(%rsp) + CFI_REL_OFFSET rsi, \offset + movq %rdi, 14*8+\offset(%rsp) + CFI_REL_OFFSET rdi, \offset + .endm + .macro SAVE_C_REGS offset=0 + SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 + .endm + .macro SAVE_EXTRA_REGS offset=0 + movq %r15, 0*8+\offset(%rsp) + CFI_REL_OFFSET r15, \offset + movq %r14, 1*8+\offset(%rsp) + CFI_REL_OFFSET r14, \offset + movq %r13, 2*8+\offset(%rsp) + CFI_REL_OFFSET r13, \offset + movq %r12, 3*8+\offset(%rsp) + CFI_REL_OFFSET r12, \offset + movq %rbp, 4*8+\offset(%rsp) + CFI_REL_OFFSET rbp, \offset + movq %rbx, 5*8+\offset(%rsp) + CFI_REL_OFFSET rbx, \offset + .endm + + .macro RESTORE_EXTRA_REGS offset=0 + movq 0*8+\offset(%rsp), %r15 + CFI_RESTORE r15 + movq 1*8+\offset(%rsp), %r14 + CFI_RESTORE r14 + movq 2*8+\offset(%rsp), %r13 + CFI_RESTORE r13 + movq 3*8+\offset(%rsp), %r12 + CFI_RESTORE r12 + movq 4*8+\offset(%rsp), %rbp + CFI_RESTORE rbp + movq 5*8+\offset(%rsp), %rbx + CFI_RESTORE rbx + .endm + .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 + .if \rstor_r11 + movq 6*8(%rsp), %r11 + CFI_RESTORE r11 + .endif + .if \rstor_r8910 + movq 7*8(%rsp), %r10 + CFI_RESTORE r10 + movq 8*8(%rsp), %r9 + CFI_RESTORE r9 + movq 9*8(%rsp), %r8 + CFI_RESTORE r8 + .endif + .if \rstor_rax + movq 10*8(%rsp), %rax + CFI_RESTORE rax + .endif + .if \rstor_rcx + movq 11*8(%rsp), %rcx + CFI_RESTORE rcx + .endif + .if \rstor_rdx + movq 12*8(%rsp), %rdx + CFI_RESTORE rdx + .endif + movq 13*8(%rsp), %rsi + CFI_RESTORE rsi + movq 14*8(%rsp), %rdi + CFI_RESTORE rdi + .endm + .macro RESTORE_C_REGS + RESTORE_C_REGS_HELPER 1,1,1,1,1 + .endm + + .macro ALLOC_PT_GPREGS_ON_STACK addskip=0 + subq $15*8+\addskip, %rsp + CFI_ADJUST_CFA_OFFSET 15*8+\addskip + .endm + + .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 + addq $15*8+\addskip, %rsp + CFI_ADJUST_CFA_OFFSET -(15*8+\addskip) + .endm + + .macro SAVE_ALL + ALLOC_PT_GPREGS_ON_STACK + SAVE_C_REGS + SAVE_EXTRA_REGS + .endm + + .macro RESTORE_ALL + RESTORE_EXTRA_REGS + RESTORE_C_REGS + REMOVE_PT_GPREGS_FROM_STACK + .endm +#endif //CONFIG_X86_64 +#endif + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/control.h 
diff --git a/drivers/platform/x86/sepdk/inc/control.h b/drivers/platform/x86/sepdk/inc/control.h new file mode 100644 index 00000000000000..8f1a4243515f35 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/control.h @@ -0,0 +1,543 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _CONTROL_H_ +#define _CONTROL_H_ + +#include +#include +#if defined(DRV_IA32) +#include +#endif +#include +#if defined(DRV_IA32) +#include +#endif +#include +#include + +#include "lwpmudrv_defines.h" +#include "lwpmudrv.h" +#include "lwpmudrv_types.h" +#include "ipt.h" + +// Large memory allocation will be used if the requested size (in bytes) is +// above this threshold. +#define MAX_KMALLOC_SIZE ((1 << 17) - 1) +#define SEP_DRV_MEMSET memset +#define SEP_DRV_MEMCPY(dst, src, n) memcpy((dst), (src), (n)) + +// Check whether the Linux driver should use unlocked ioctls (not protected by the BKL). +// Kernel 5.9 removed the HAVE_UNLOCKED_IOCTL and HAVE_COMPAT_IOCTL definitions; +// the version check is lowered to 5.0.0 because SLES15 SP3 backported that removal. +#if defined(HAVE_UNLOCKED_IOCTL) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define DRV_USE_UNLOCKED_IOCTL +#endif +#if defined(DRV_USE_UNLOCKED_IOCTL) +#define IOCTL_OP .unlocked_ioctl +#define IOCTL_OP_TYPE long +#define IOCTL_USE_INODE +#else +#define IOCTL_OP .ioctl +#define IOCTL_OP_TYPE S32 +#define IOCTL_USE_INODE struct inode *inode, +#endif + +// Information about the state of the driver +typedef struct GLOBAL_STATE_NODE_S GLOBAL_STATE_NODE; +typedef GLOBAL_STATE_NODE *GLOBAL_STATE; +struct GLOBAL_STATE_NODE_S { + volatile S32 cpu_count; + volatile S32 dpc_count; + + S32 num_cpus; // Number of CPUs in the system + // Number of active CPUs - some cores can be deactivated by the user/admin + S32 active_cpus; + S32 num_em_groups; + S32 num_descriptors; + volatile S32 current_phase; + U32 num_modules; + + U32 symbol_lookup_available : 1; + U32 reserved1 : 31; + U32 reserved2; +}; + +// Access Macros +#define GLOBAL_STATE_num_cpus(x) ((x).num_cpus) +#define GLOBAL_STATE_active_cpus(x) ((x).active_cpus) +#define GLOBAL_STATE_cpu_count(x) ((x).cpu_count) +#define GLOBAL_STATE_dpc_count(x) ((x).dpc_count) +#define GLOBAL_STATE_num_em_groups(x) ((x).num_em_groups) +#define GLOBAL_STATE_num_descriptors(x) ((x).num_descriptors) +#define GLOBAL_STATE_current_phase(x) ((x).current_phase) +#define GLOBAL_STATE_sampler_id(x) ((x).sampler_id) +#define GLOBAL_STATE_num_modules(x) ((x).num_modules) +#define GLOBAL_STATE_symbol_lookup_available(x) ((x).symbol_lookup_available) + 
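+/*
+ * Illustrative sketch (not part of the SEP sources): how the IOCTL_OP*
+ * macros above are meant to be consumed, so a single source line works
+ * with both the legacy .ioctl and the newer .unlocked_ioctl entry:
+ *
+ *     static IOCTL_OP_TYPE lwpmu_Example_Ioctl(IOCTL_USE_INODE
+ *                                              struct file  *filp,
+ *                                              unsigned int  cmd,
+ *                                              unsigned long arg);
+ *
+ *     static struct file_operations example_fops = {  // hypothetical table
+ *         .owner   = THIS_MODULE,
+ *         IOCTL_OP = lwpmu_Example_Ioctl,
+ *     };
+ */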
+/* + * + * + * CPU State data structure and access macros + * + */ +typedef struct CPU_STATE_NODE_S CPU_STATE_NODE; +typedef CPU_STATE_NODE *CPU_STATE; +struct CPU_STATE_NODE_S { + S32 apic_id; // Processor ID on the system bus + PVOID apic_linear_addr; // linear address of local apic + PVOID apic_physical_addr; // physical address of local apic + + PVOID idt_base; // local IDT base address + atomic_t in_interrupt; + +#if defined(DRV_IA32) + U64 saved_ih; // saved perfvector to restore +#endif +#if defined(DRV_EM64T) + PVOID saved_ih; // saved perfvector to restore +#endif + + U64 last_mperf; // previous value of MPERF, needed for calculating delta MPERF + U64 last_aperf; // previous value of APERF, needed for calculating delta APERF + // are the previous values valid? (e.g., the first measurement does not have + // a previous value for calculating the deltas) + DRV_BOOL last_p_state_valid; + DRV_BOOL p_state_counting; // Flag to mark PMI interrupt from fixed event + + // holds the data that is saved/restored during event multiplexing + S64 *em_tables; + U32 em_table_offset; + + struct timer_list *em_timer; + + U32 current_group; + S32 trigger_count; + S32 trigger_event_num; + + DISPATCH dispatch; + PVOID lbr_area; + PVOID old_dts_buffer; + PVOID dts_buffer; + U32 dts_buffer_size; + U32 dts_buffer_offset; + U32 initial_mask; + U32 accept_interrupt; + + // holds PMU state (e.g., MSRs) that will be saved before and restored after collection + U64 *pmu_state; + S32 socket_master; + S32 core_master; + S32 thr_master; + U64 num_samples; + U64 reset_mask; + U64 group_swap; + U64 last_visa_count[16]; + U16 cpu_module_num; + U16 cpu_module_master; + S32 system_master; + DRV_BOOL offlined; + U32 nmi_handled; + + struct tasklet_struct nmi_tasklet; + + U32 em_timer_delay; + U32 core_type; + U32 last_thread_id; + IPT_NODE ipt_node; + U64 prev_rdt_pqr_assoc; + U32 ipt_collected_count; // number of IPT flush attempts + U32 ipt_dropped_count; // number of IPT packet drop cases + U64 ipt_dropped_packets; // dropped IPT packet size in bytes + U32 num_ebs_sampled; // number of attempted EBS sample generations + U32 num_dropped_ebs_samples; // number of dropped EBS samples + U64 ipt_data_offset; // the offset for flushed IPT data +}; + 
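+/*
+ * Illustrative sketch (not part of the SEP sources): intended use of the
+ * last_mperf/last_aperf/last_p_state_valid fields when deriving the
+ * effective frequency ratio for a sample; unsigned U64 subtraction keeps
+ * the deltas correct across a single counter wrap:
+ *
+ *     U64 mperf, aperf;   // current MSR_IA32_MPERF / MSR_IA32_APERF reads
+ *     if (CPU_STATE_last_p_state_valid(pcpu)) {
+ *         U64 delta_mperf = mperf - CPU_STATE_last_mperf(pcpu);
+ *         U64 delta_aperf = aperf - CPU_STATE_last_aperf(pcpu);
+ *         // delta_aperf / delta_mperf scales the base frequency
+ *     }
+ *     CPU_STATE_last_mperf(pcpu) = mperf;
+ *     CPU_STATE_last_aperf(pcpu) = aperf;
+ *     CPU_STATE_last_p_state_valid(pcpu) = TRUE;
+ *
+ * (The CPU_STATE_* accessors are defined below.)
+ */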
+#define CPU_STATE_apic_id(cpu) ((cpu)->apic_id) +#define CPU_STATE_apic_linear_addr(cpu) ((cpu)->apic_linear_addr) +#define CPU_STATE_apic_physical_addr(cpu) ((cpu)->apic_physical_addr) +#define CPU_STATE_idt_base(cpu) ((cpu)->idt_base) +#define CPU_STATE_in_interrupt(cpu) ((cpu)->in_interrupt) +#define CPU_STATE_saved_ih(cpu) ((cpu)->saved_ih) +#define CPU_STATE_saved_ih_hi(cpu) ((cpu)->saved_ih_hi) +#define CPU_STATE_dpc(cpu) ((cpu)->dpc) +#define CPU_STATE_em_tables(cpu) ((cpu)->em_tables) +#define CPU_STATE_em_table_offset(cpu) ((cpu)->em_table_offset) +#define CPU_STATE_pmu_state(cpu) ((cpu)->pmu_state) +#define CPU_STATE_em_dpc(cpu) ((cpu)->em_dpc) +#define CPU_STATE_em_timer(cpu) ((cpu)->em_timer) +#define CPU_STATE_current_group(cpu) ((cpu)->current_group) +#define CPU_STATE_trigger_count(cpu) ((cpu)->trigger_count) +#define CPU_STATE_trigger_event_num(cpu) ((cpu)->trigger_event_num) +#define CPU_STATE_dispatch(cpu) ((cpu)->dispatch) +#define CPU_STATE_lbr(cpu) ((cpu)->lbr) +#define CPU_STATE_old_dts_buffer(cpu) ((cpu)->old_dts_buffer) +#define CPU_STATE_dts_buffer(cpu) ((cpu)->dts_buffer) +#define CPU_STATE_dts_buffer_size(cpu) ((cpu)->dts_buffer_size) +#define CPU_STATE_dts_buffer_offset(cpu) ((cpu)->dts_buffer_offset) +#define CPU_STATE_initial_mask(cpu) ((cpu)->initial_mask) +#define CPU_STATE_accept_interrupt(cpu) ((cpu)->accept_interrupt) +#define CPU_STATE_msr_value(cpu) ((cpu)->msr_value) +#define CPU_STATE_msr_addr(cpu) ((cpu)->msr_addr) +#define CPU_STATE_socket_master(cpu) ((cpu)->socket_master) +#define CPU_STATE_core_master(cpu) ((cpu)->core_master) +#define CPU_STATE_thr_master(cpu) ((cpu)->thr_master) +#define CPU_STATE_num_samples(cpu) ((cpu)->num_samples) +#define CPU_STATE_reset_mask(cpu) ((cpu)->reset_mask) +#define CPU_STATE_group_swap(cpu) ((cpu)->group_swap) +#define CPU_STATE_last_mperf(cpu) ((cpu)->last_mperf) +#define CPU_STATE_last_aperf(cpu) ((cpu)->last_aperf) +#define CPU_STATE_last_p_state_valid(cpu) ((cpu)->last_p_state_valid) +#define CPU_STATE_cpu_module_num(cpu) ((cpu)->cpu_module_num) +#define CPU_STATE_cpu_module_master(cpu) ((cpu)->cpu_module_master) +#define CPU_STATE_p_state_counting(cpu) ((cpu)->p_state_counting) +#define CPU_STATE_system_master(cpu) ((cpu)->system_master) +#define CPU_STATE_offlined(cpu) ((cpu)->offlined) +#define CPU_STATE_nmi_handled(cpu) ((cpu)->nmi_handled) +#define CPU_STATE_nmi_tasklet(cpu) ((cpu)->nmi_tasklet) +#define CPU_STATE_em_timer_delay(cpu) ((cpu)->em_timer_delay) +#define CPU_STATE_core_type(cpu) ((cpu)->core_type) +#define CPU_STATE_last_thread_id(cpu) ((cpu)->last_thread_id) +#define CPU_STATE_ipt_node(cpu) ((cpu)->ipt_node) +#define CPU_STATE_prev_rdt_pqr_assoc(cpu) ((cpu)->prev_rdt_pqr_assoc) +#define CPU_STATE_ipt_collected_count(cpu) ((cpu)->ipt_collected_count) +#define CPU_STATE_ipt_dropped_count(cpu) ((cpu)->ipt_dropped_count) +#define CPU_STATE_ipt_dropped_packets(cpu) ((cpu)->ipt_dropped_packets) +#define CPU_STATE_num_ebs_sampled(cpu) ((cpu)->num_ebs_sampled) +#define CPU_STATE_num_dropped_ebs_samples(cpu) ((cpu)->num_dropped_ebs_samples) +#define CPU_STATE_ipt_data_offset(cpu) ((cpu)->ipt_data_offset) + +/* + * For storing data for --read/--write-msr command line options + */ +typedef struct MSR_DATA_NODE_S MSR_DATA_NODE; +typedef MSR_DATA_NODE *MSR_DATA; +struct MSR_DATA_NODE_S { + U64 value; // Used by EMON and by the read/write-msr options + U32 addr; + S32 status; +}; + +#define MSR_DATA_value(md) ((md)->value) +#define MSR_DATA_addr(md) ((md)->addr) +#define MSR_DATA_status(md) ((md)->status) + +/* + * Memory Allocation tracker + * + * Currently used to track large memory allocations + */ + +typedef struct MEM_EL_NODE_S MEM_EL_NODE; +typedef MEM_EL_NODE *MEM_EL; +struct MEM_EL_NODE_S { + PVOID address; // pointer to piece of memory we're tracking + S32 size; // size (bytes) of the piece of memory + U32 is_addr_vmalloc; // flag to check if the memory is allocated using vmalloc +}; + +// accessors for MEM_EL defined in terms of MEM_TRACKER below + +#define MEM_EL_MAX_ARRAY_SIZE 32 // minimum is 1, nominal is 64 + +typedef struct MEM_TRACKER_NODE_S MEM_TRACKER_NODE; +typedef MEM_TRACKER_NODE *MEM_TRACKER; +struct MEM_TRACKER_NODE_S { + // MAX number of elements in the array (default: MEM_EL_MAX_ARRAY_SIZE) + U16 max_size; + // number of elements available in this array + U16 elements; + // flag to check whether the node struct is allocated using vmalloc + U16 node_vmalloc; + // flag to check whether the list of mem el is allocated using vmalloc + U16 array_vmalloc; + MEM_EL mem; // array of large memory items we're tracking + MEM_TRACKER prev, next; // enables bi-directional scanning of linked list +}; +#define MEM_TRACKER_max_size(mt) ((mt)->max_size) 
+#define MEM_TRACKER_node_vmalloc(mt) ((mt)->node_vmalloc) +#define MEM_TRACKER_array_vmalloc(mt) ((mt)->array_vmalloc) +#define MEM_TRACKER_elements(mt) ((mt)->elements) +#define MEM_TRACKER_mem(mt) ((mt)->mem) +#define MEM_TRACKER_prev(mt) ((mt)->prev) +#define MEM_TRACKER_next(mt) ((mt)->next) +#define MEM_TRACKER_mem_address(mt, i) ((MEM_TRACKER_mem(mt)[(i)].address)) +#define MEM_TRACKER_mem_size(mt, i) ((MEM_TRACKER_mem(mt)[(i)].size)) +#define MEM_TRACKER_mem_vmalloc(mt, i) \ + ((MEM_TRACKER_mem(mt)[(i)].is_addr_vmalloc)) + +/**************************************************************************** + ** Global State variables exported + ***************************************************************************/ +extern CPU_STATE pcb; +extern U64 *cpu_tsc; +extern GLOBAL_STATE_NODE driver_state; +extern MSR_DATA msr_data; +extern U32 *core_to_package_map; +extern U32 *core_to_module_map; +extern U32 *core_to_dev_map; +extern U32 *core_to_phys_core_map; +extern U32 *core_to_thread_map; +extern U32 *cores_per_module; +extern U32 *threads_per_core; +extern U32 max_threads_per_core; +extern U32 max_cores_per_module; +extern U32 module_enum_supported; +extern U32 num_packages; +extern U32 num_pci_domains; + +/**************************************************************************** + ** Handy shortcuts + ***************************************************************************/ + +/* + * CONTROL_THIS_CPU() + * Parameters + * None + * Returns + * CPU number of the processor being executed on + * + */ +#define CONTROL_THIS_CPU() smp_processor_id() + +/* + * CONTROL_THIS_RAW_CPU() + * Parameters + * None + * Returns + * CPU number of the processor being executed on + * + */ +#define CONTROL_THIS_RAW_CPU() (raw_smp_processor_id()) + +#define CONTROL_Allocate_Memory_Aligned(a, b) CONTROL_Allocate_Memory(a) +#define CONTROL_Free_Memory_Aligned(a, b) CONTROL_Free_Memory(a) + +/**************************************************************************** + ** Interface definitions + ***************************************************************************/ + +/* + * Execution Control Functions + */ + +extern VOID +CONTROL_Invoke_Cpu(S32 cpuid, VOID (*func)(PVOID), PVOID ctx); + +/* + * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns none + * + * @brief Service routine that handles all flavors of invoke-on-all-CPUs calls + * + * Special Notes: + * Invoke the function provided in parallel in either a blocking/non-blocking mode. + * The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. Use the aliases + * CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(), CONTROL_Invoke_Parallel_XS(). + * + */ +extern VOID +CONTROL_Invoke_Parallel_Service(VOID (*func)(PVOID), + PVOID ctx, + S32 blocking, + S32 exclude); + 
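+/*
+ * Illustrative usage sketch (not part of the SEP sources), written
+ * against the CONTROL_Invoke_Parallel() alias defined below:
+ *
+ *     static VOID example_Read_Counter(PVOID param)  // hypothetical callback
+ *     {
+ *         U64 *results = (U64 *)param;
+ *         results[CONTROL_THIS_CPU()] = 0;  // e.g., sample a counter here
+ *     }
+ *
+ *     // run the callback on every CPU and wait for completion
+ *     CONTROL_Invoke_Parallel(example_Read_Counter, (PVOID)results);
+ */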
+/* + * @fn VOID CONTROL_Invoke_Parallel(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. Wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, including the CPU that the control is + * being invoked on + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), TRUE, FALSE) + +/* + * @fn VOID CONTROL_Invoke_Parallel_NB(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. Do NOT wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, including the CPU that the control is + * being invoked on + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel_NB(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), FALSE, FALSE) + +/* + * @fn VOID CONTROL_Invoke_Parallel_XS(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. Wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, excluding the CPU that the control is + * being invoked on + * Macro built on the service routine + * + */ +#define CONTROL_Invoke_Parallel_XS(a, b) \ + CONTROL_Invoke_Parallel_Service((a), (b), TRUE, TRUE) + +/* + * @fn VOID CONTROL_Memory_Tracker_Init(void) + * + * @param None + * + * @returns None + * + * @brief Initializes Memory Tracker + * + * Special Notes: + * This should only be called when + * the driver is being loaded. + */ +extern VOID CONTROL_Memory_Tracker_Init(VOID); + +/* + * @fn VOID CONTROL_Memory_Tracker_Free(void) + * + * @param None + * + * @returns None + * + * @brief Frees memory used by Memory Tracker + * + * Special Notes: + * This should only be called when the + * driver is being unloaded. + */ +extern VOID CONTROL_Memory_Tracker_Free(VOID); + +/* + * @fn VOID CONTROL_Memory_Tracker_Compaction(void) + * + * @param None + * + * @returns None + * + * @brief Compacts the memory allocator if holes are detected + * + * Special Notes: + * At end of collection (or at other safe sync point), + * reclaim/compact space used by mem tracker + */ +extern VOID CONTROL_Memory_Tracker_Compaction(VOID); + +/* + * @fn PVOID CONTROL_Allocate_Memory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_KERNEL pool. + * + * Use this if memory is to be allocated within a context where + * the allocator can block the allocation (e.g., by putting + * the caller to sleep) while it tries to free up memory to + * satisfy the request. Otherwise, if the allocation must + * occur atomically (e.g., caller cannot sleep), then use + * CONTROL_Allocate_KMemory instead. + */ +extern PVOID CONTROL_Allocate_Memory(size_t size); + 
+/* + * @fn PVOID CONTROL_Allocate_KMemory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_ATOMIC pool. + * + * Use this if memory is to be allocated within a context where + * the allocator cannot block the allocation (e.g., by putting + * the caller to sleep) as it tries to free up memory to + * satisfy the request. Examples include interrupt handlers, + * process context code holding locks, etc. + */ +extern PVOID CONTROL_Allocate_KMemory(size_t size); + +/* + * @fn PVOID CONTROL_Free_Memory(location) + * + * @param IN location - pointer to the memory block to free + * + * @returns NULL, so the caller can reset its pointer + * + * @brief Frees the memory block + * + * Special Notes: + * Does not try to free memory if fed with a NULL pointer + * Expected usage: + * ptr = CONTROL_Free_Memory(ptr); + */ +extern PVOID CONTROL_Free_Memory(PVOID location); + 
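+/*
+ * Illustrative pairing sketch (not part of the SEP sources):
+ *
+ *     buf = CONTROL_Allocate_Memory(size);      // process context; may sleep
+ *     nmi_buf = CONTROL_Allocate_KMemory(size); // atomic/interrupt context
+ *     ...
+ *     buf = CONTROL_Free_Memory(buf);           // returns NULL; NULL-safe
+ *     nmi_buf = CONTROL_Free_Memory(nmi_buf);
+ */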
+#endif + diff --git a/drivers/platform/x86/sepdk/inc/core2.h b/drivers/platform/x86/sepdk/inc/core2.h new file mode 100644 index 00000000000000..4de987c4076016 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/core2.h @@ -0,0 +1,53 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _CORE2_H_ +#define _CORE2_H_ + +#include "msrdefs.h" + +extern DISPATCH_NODE corei7_dispatch; +extern DISPATCH_NODE corei7_dispatch_htoff_mode; + +#define CORE2UNC_BLBYPASS_BITMASK 0x00000001 +#define CORE2UNC_DISABLE_BL_BYPASS_MSR 0x39C + +#if defined(DRV_IA32) +#define CORE2_LBR_DATA_BITS 32 +#else +#define CORE2_LBR_DATA_BITS 48 +#endif + +#define CORE2_LBR_BITMASK ((1ULL << CORE2_LBR_DATA_BITS) - 1) + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/cpumon.h b/drivers/platform/x86/sepdk/inc/cpumon.h new file mode 100644 index 00000000000000..d829f60b0396fe --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/cpumon.h @@ -0,0 +1,61 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _CPUMON_H_ +#define _CPUMON_H_ + +#include +#include "lwpmudrv_defines.h" + +/* + * Defines + */ + +/** + * Function Declarations + */ + +/* + * CPUMON control functions + */ + +extern VOID CPUMON_Install_Cpuhooks(VOID); +extern VOID CPUMON_Remove_Cpuhooks(VOID); +#if defined(DRV_CPU_HOTPLUG) +extern DRV_BOOL CPUMON_is_Online_Allowed(VOID); +extern DRV_BOOL CPUMON_is_Offline_Allowed(VOID); +extern VOID CPUMON_Online_Cpu(PVOID parm); +extern VOID CPUMON_Offline_Cpu(PVOID parm); +#endif + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/ecb_iterators.h b/drivers/platform/x86/sepdk/inc/ecb_iterators.h new file mode 100644 index 00000000000000..9dba272df1a4af --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/ecb_iterators.h @@ -0,0 +1,614 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _ECB_ITERATORS_H_ +#define _ECB_ITERATORS_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +// +// Loop macros to walk through the event control block +// Use for access only in the kernel mode +// To Do - Control access from kernel mode by a macro +// +#define FOR_EACH_CCCR_REG(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_cccr_start((pecb)); \ + (idx) < \ + ECB_cccr_start((pecb)) + ECB_cccr_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_CCCR_REG \ + } \ + } \ + } + +#define FOR_EACH_CCCR_GP_REG(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_cccr_start((pecb)); \ + (idx) < \ + ECB_cccr_start((pecb)) + ECB_cccr_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_is_gp_reg_get((pecb), \ + (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_CCCR_GP_REG \ + } \ + } \ + } + +#define FOR_EACH_ESCR_REG(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_escr_start((pecb)); \ + (idx) < \ + ECB_escr_start((pecb)) + ECB_escr_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_ESCR_REG \ + } \ + } \ + } + +#define FOR_EACH_DATA_REG(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start((pecb)); \ + (idx) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_REG \ + } \ + } \ + } + +#define FOR_EACH_DATA_REG_UNC(pecb, device_idx, idx) \ + { \ + U32 (idx); \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start((pecb)); \ + (idx) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_REG_UNC \ + } \ + } \ + } + +#define FOR_EACH_DATA_REG_UNC_VER2(pecb, i, idx) \ + { \ + U32 (idx); \ + if ((pecb)) { \ + for ((idx) = ECB_data_start((pecb)); \ + (idx) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_REG_UNC_VER2 \ 
+ } \ + } \ + } + +#define FOR_EACH_DATA_GP_REG(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start((pecb)); \ + (idx) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_is_gp_reg_get((pecb), \ + (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_GP_REG \ + } \ + } \ + } + +#define FOR_EACH_DATA_GENERIC_REG(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_data_start((pecb)); \ + (idx) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_is_generic_reg_get( \ + (pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_DATA_GENERIC_REG \ + } \ + } \ + } + +#define FOR_EACH_REG_ENTRY(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = 0; (idx) < ECB_num_entries((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_ENTRY \ + } \ + } \ + } + +#define FOR_EACH_REG_ENTRY_UNC(pecb, device_idx, idx) \ + { \ + U32 (idx); \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((idx) = 0; (idx) < ECB_num_entries((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_ENTRY_UNC \ + } \ + } \ + } + +#define FOR_EACH_PCI_DATA_REG(pecb, i, device_idx, offset_delta) \ + { \ + U32 (i) = 0; \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = ECB_data_start((pecb)); \ + (i) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } \ + (offset_delta) = \ + ECB_entries_reg_offset((pecb), (i)) - \ + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( \ + &ECB_pcidev_entry_node(pecb)); + +#define END_FOR_EACH_PCI_DATA_REG \ + } \ + } \ + } + +#define FOR_EACH_PCI_DATA_REG_VER2(pecb, i, device_idx, offset_delta) \ + { \ + U32 (i) = 0; \ + if ((pecb)) { \ + for ((i) = ECB_data_start((pecb)); \ + (i) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } \ + (offset_delta) = \ + ECB_entries_reg_offset((pecb), (i)) - \ + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( \ + &ECB_pcidev_entry_node(pecb)); + +#define END_FOR_EACH_PCI_DATA_REG_VER2 \ + } \ + } 
\ + } + +#define FOR_EACH_PCI_DATA_REG_RAW(pecb, i, device_idx) \ + { \ + U32 (i) = 0; \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = ECB_data_start((pecb)); \ + (i) < \ + ECB_data_start((pecb)) + ECB_data_pop((pecb)); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_DATA_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_PCI_CCCR_REG_RAW(pecb, i, device_idx) \ + { \ + U32 (i) = 0; \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = ECB_cccr_start((pecb)); \ + (i) < \ + ECB_cccr_start((pecb)) + ECB_cccr_pop((pecb)); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_CCCR_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_PCI_REG_RAW(pecb, i, device_idx) \ + { \ + U32 (i) = 0; \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = 0; (i) < ECB_num_entries((pecb)); (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_PCI_REG_RAW_GROUP(pecb, i, device_idx, cur_grp) \ + { \ + U32 (i) = 0; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = 0; (i) < ECB_num_entries((pecb)); (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_REG_RAW_GROUP \ + } \ + } \ + } + +#define CHECK_SAVE_RESTORE_EVENT_INDEX(prev_ei, cur_ei, evt_index) \ + { \ + if (prev_ei == -1) { \ + prev_ei = cur_ei; \ + } \ + if (prev_ei < cur_ei) { \ + prev_ei = cur_ei; \ + evt_index++; \ + } else { \ + evt_index = 0; \ + prev_ei = cur_ei; \ + } \ + } + +#define FOR_EACH_REG_ENTRY_UNC_WRITE_MSR(pecb, device_idx, idx) \ + { \ + U32 (idx); \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((idx) = 0; (idx) < ECB_num_entries((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_ENTRY_UNC \ + } \ + } \ + } + +#define FOR_EACH_REG_UNC_OPERATION(pecb, device_idx, idx, operation) \ + { \ + U32 (idx); \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((idx) = ECB_operations_register_start( \ + (pecb), (operation)); \ + (idx) < ECB_operations_register_start( \ + (pecb), (operation)) + \ + ECB_operations_register_len( \ + (pecb), (operation)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + 
continue; \ + } + +#define END_FOR_EACH_REG_UNC_OPERATION \ + } \ + } \ + } + +#define FOR_EACH_SCHEDULED_REG_UNC_OPERATION(pecb, device_idx, idx, operation) \ + { \ + U32 (idx); \ + U32 (cpu) = CONTROL_THIS_CPU(); \ + U32 (pkg) = core_to_package_map[cpu]; \ + U32 (cur_grp) = \ + LWPMU_DEVICE_cur_group(&devices[(device_idx)])[(pkg)]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[(device_idx)])[(cur_grp)]; \ + if ((pecb)) { \ + for ((idx) = ECB_operations_register_start( \ + (pecb), (operation)); \ + (idx) < ECB_operations_register_start( \ + (pecb), (operation)) + \ + ECB_operations_register_len( \ + (pecb), (operation)); \ + (idx)++) { \ + if (ECB_entries_scheduled((pecb), (idx)) != \ + TRUE) { \ + continue; \ + } + +#define END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION \ + } \ + } \ + } + +#define FOR_EACH_NONEVENT_REG(pecb, idx) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 (dev_idx) = core_to_dev_map[this_cpu__]; \ + U32 (cur_grp) = CPU_STATE_current_group(pcpu__); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_metric_start((pecb)); \ + (idx) < ECB_metric_start((pecb)) + \ + ECB_metric_pop((pecb)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_NONEVENT_REG \ + } \ + } \ + } + +#define FOR_EACH_REG_CORE_OPERATION(pecb, idx, operation) \ + { \ + U32 (idx); \ + U32 this_cpu__ = CONTROL_THIS_CPU(); \ + CPU_STATE pcpu__ = &pcb[this_cpu__]; \ + U32 cur_grp = CPU_STATE_current_group(pcpu__); \ + U32 dev_idx = core_to_dev_map[this_cpu__]; \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[cur_grp]; \ + if ((pecb)) { \ + for ((idx) = ECB_operations_register_start( \ + (pecb), (operation)); \ + (idx) < ECB_operations_register_start( \ + (pecb), (operation)) + \ + ECB_operations_register_len( \ + (pecb), (operation)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_CORE_OPERATION \ + } \ + } \ + } + +#define FOR_EACH_REG_CORE_OPERATION_IN_FIRST_NONNULL_GRP(pecb, idx, operation) \ + { \ + U32 (idx), j; \ + U32 this_cpu = CONTROL_THIS_CPU(); \ + ECB (pecb) = NULL; \ + U32 dev_idx = core_to_dev_map[this_cpu]; \ + for (j = 0; \ + (S32)j < LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); \ + j++) { \ + (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[j]; \ + if (!(pecb)) { \ + continue; \ + } else { \ + break; \ + } \ + } \ + if ((pecb)) { \ + for ((idx) = ECB_operations_register_start( \ + (pecb), (operation)); \ + (idx) < ECB_operations_register_start( \ + (pecb), (operation)) + \ + ECB_operations_register_len( \ + (pecb), (operation)); \ + (idx)++) { \ + if (ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_CORE_OPERATION_IN_FIRST_NONNULL_GRP \ + } \ + } \ + } + +#define FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS(pecb, idx, operation) \ + { \ + U32 (idx), j; \ + U32 this_cpu = CONTROL_THIS_CPU(); \ + ECB (pecb) = NULL; \ + U32 dev_idx = core_to_dev_map[this_cpu]; \ + for (j = 0; \ + (S32)j < LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); \ + j++) { \ + (pecb) = LWPMU_DEVICE_PMU_register_data( \ + &devices[dev_idx])[j]; \ + if ((pecb)) { \ + for ((idx) = ECB_operations_register_start( \ + (pecb), (operation)); \ + (idx) < \ + ECB_operations_register_start( \ + (pecb), (operation)) + \ + ECB_operations_register_len( \ + (pecb), (operation)); \ + (idx)++) 
{ \ + if (ECB_entries_reg_id((pecb), \ + (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS \ + } \ + } \ + } \ + } + +#define ECB_SECTION_REG_INDEX(pecb, idx, operation) \ + (ECB_operations_register_start((pecb), (operation)) + (idx)) + +#if defined(__cplusplus) +} +#endif + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/eventmux.h b/drivers/platform/x86/sepdk/inc/eventmux.h new file mode 100644 index 00000000000000..aa8aa2e4dc4e6b --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/eventmux.h @@ -0,0 +1,50 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +/* + * cvs_id[] = "$Id$" + */ + +#ifndef _EVENTMUX_H_ +#define _EVENTMUX_H_ + +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_types.h" + +extern VOID EVENTMUX_Start(VOID); + +extern VOID EVENTMUX_Initialize(VOID); + +extern VOID EVENTMUX_Destroy(VOID); + +#endif /* _EVENTMUX_H_ */ + diff --git a/drivers/platform/x86/sepdk/inc/gmch.h b/drivers/platform/x86/sepdk/inc/gmch.h new file mode 100644 index 00000000000000..4d20bc98096aa4 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/gmch.h @@ -0,0 +1,39 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _GMCH_H_ +#define _GMCH_H_ + +extern CS_DISPATCH_NODE gmch_dispatch; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/haswellunc_sa.h b/drivers/platform/x86/sepdk/inc/haswellunc_sa.h new file mode 100644 index 00000000000000..c6ceb0a6c36d5f --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/haswellunc_sa.h @@ -0,0 +1,65 @@ +/**** + Copyright (C) 2011 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _HSWUNC_SA_H_INC_ +#define _HSWUNC_SA_H_INC_ + +/* + * Local to this architecture: Haswell uncore SA unit + * + */ +#define HSWUNC_SA_DESKTOP_DID 0x000C04 +#define HSWUNC_SA_NEXT_ADDR_OFFSET 4 +#define HSWUNC_SA_BAR_ADDR_SHIFT 32 +#define HSWUNC_SA_BAR_ADDR_MASK 0x0007FFFFFF000LL +#define HSWUNC_SA_MAX_PCI_DEVICES 16 +#define HSWUNC_SA_MAX_COUNT 0x00000000FFFFFFFFLL +#define HSWUNC_SA_MAX_COUNTERS 8 + +#define HSWUNC_SA_MCHBAR_MMIO_PAGE_SIZE 8 * 4096 +#define HSWUNC_SA_PCIEXBAR_MMIO_PAGE_SIZE 57 * 4096 +#define HSWUNC_SA_OTHER_BAR_MMIO_PAGE_SIZE 4096 +#define HSWUNC_SA_GDXCBAR_OFFSET_LO 0x5420 +#define HSWUNC_SA_GDXCBAR_OFFSET_HI 0x5424 +#define HSWUNC_SA_GDXCBAR_MASK 0x7FFFFFF000LL +#define HSWUNC_SA_CHAP_SAMPLE_DATA 0x00020000 +#define HSWUNC_SA_CHAP_STOP 0x00040000 +#define HSWUNC_SA_CHAP_CTRL_REG_OFFSET 0x0 + +#define HSWUNC_SA_PAGE_MASK 0xfffffffffffff000 +#define HSWUNC_SA_PAGE_OFFSET_MASK 0xfff +#define HSWUNC_SA_PAGE_SIZE 0x1000 + +extern DISPATCH_NODE hswunc_sa_dispatch; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/ipt.h b/drivers/platform/x86/sepdk/inc/ipt.h new file mode 100644 index 00000000000000..3de97fee4ef0c2 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/ipt.h @@ -0,0 +1,83 @@ +/**** + Copyright (C) 2012 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. 
Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _IPT_H_ +#define _IPT_H_ + +#define IPT_BUF_NUM_DEFAULT 16 +#define IPT_BUF_NUM_SMALL 8 +#define IPT_BUF_NUM_LARGE 31 +#define IPT_BUF_NUM_MAX IPT_BUF_NUM_LARGE +#define IPT_BUF_SIZE 0x1000 // 4096 = 4K +#define IPT_BUF_SIZE_MASK 0xFFFF // up to 16 * 4096 + +typedef struct IPT_NODE_S IPT_NODE; + +struct IPT_NODE_S { + PVOID topa_virt_address; + U64 topa_phys_address; + PVOID outbuf_virt_address; + U64 outbuf_phys_address[IPT_BUF_NUM_MAX]; +}; + +#define IPT_NODE_topa_virt_address(x) ((x).topa_virt_address) +#define IPT_NODE_topa_phys_address(x) ((x).topa_phys_address) +#define IPT_NODE_outbuf_virt_address(x) ((x).outbuf_virt_address) +#define IPT_NODE_outbuf_phys_address(x) ((x).outbuf_phys_address) + +extern OS_STATUS IPT_Initialize(DRV_CONFIG cfg); + +extern VOID IPT_Destroy(DRV_CONFIG cfg); + +extern VOID IPT_Start(PVOID param); + +extern U64 IPT_TOPA_Interrupted(S32 this_cpu, U64 overflow_status); + +extern VOID IPT_TOPA_Flush(PVOID param); + +/* + * Dispatch table for virtualized functions. + * Used to enable common functionality for different + * processor microarchitectures + */ +typedef struct IPT_DISPATCH_NODE_S IPT_DISPATCH_NODE; +typedef IPT_DISPATCH_NODE *IPT_DISPATCH; +struct IPT_DISPATCH_NODE_S { + VOID (*init)(PVOID); + VOID (*fini)(PVOID); + VOID (*enable)(S32); + VOID (*disable)(S32); + U64 (*flush)(S32); +}; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/linuxos.h b/drivers/platform/x86/sepdk/inc/linuxos.h new file mode 100644 index 00000000000000..c557dc17b4d5ab --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/linuxos.h @@ -0,0 +1,95 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _LINUXOS_H_ +#define _LINUXOS_H_ + +// defines for options parameter of samp_load_image_notify_routine() +#define LOPTS_1ST_MODREC 0x1 +#define LOPTS_GLOBAL_MODULE 0x2 +#define LOPTS_EXE 0x4 + +#define FOR_EACH_TASK for_each_process +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 00) +#define DRV_F_DENTRY f_path.dentry +#else +#define DRV_F_DENTRY f_dentry +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) +#define D_PATH(vm_file, name, maxlen) \ + d_path((vm_file)->f_dentry, (vm_file)->f_vfsmnt, (name), (maxlen)) +#else +#define D_PATH(vm_file, name, maxlen) \ + d_path(&((vm_file)->f_path), (name), (maxlen)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) +#define DRV_VM_MOD_EXECUTABLE(vma) (vma->vm_flags & VM_EXECUTABLE) +#else +#define DRV_VM_MOD_EXECUTABLE(vma) (linuxos_Equal_VM_Exe_File(vma)) +#define DRV_MM_EXE_FILE_PRESENT +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#define DRV_ALLOW_VDSO +#endif + +#if defined(DRV_IA32) +#define FIND_VMA(mm, data) find_vma((mm), (U32)(data)); +#endif +#if defined(DRV_EM64T) +#define FIND_VMA(mm, data) find_vma((mm), (U64)(data)); +#endif + +#define VIRT_TO_PHYS_ADDR(va) ((U64)virt_to_phys(va)) + +extern VOID LINUXOS_Install_Hooks(VOID); + +extern VOID LINUXOS_Uninstall_Hooks(VOID); + +extern OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end); + +extern DRV_BOOL LINUXOS_Check_KVM_Guest_Process(VOID); + +extern int LINUXOS_Install_GuestVM_Transition_Callback(VOID); + +extern int LINUXOS_Uninstall_GuestVM_Transition_Callback(VOID); + +#if defined(DRV_CPU_HOTPLUG) +extern VOID LINUXOS_Register_Hotplug(VOID); + +extern VOID LINUXOS_Unregister_Hotplug(VOID); + +#endif +#endif + diff --git a/drivers/platform/x86/sepdk/inc/lwpmudrv.h b/drivers/platform/x86/sepdk/inc/lwpmudrv.h new file mode 100644 index 00000000000000..059617cfaf9a1e --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/lwpmudrv.h @@ -0,0 +1,348 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/
+
+
+
+
+
+
+
+
+#ifndef _LWPMUDRV_H_
+#define _LWPMUDRV_H_
+
+#include 
+#include 
+#include 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+#include <asm/uaccess.h>
+#else
+#include <linux/uaccess.h>
+#endif
+#include 
+
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_types.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_version.h"
+#include "lwpmudrv_struct.h"
+#include "pebs.h"
+
+#if defined(X86_FEATURE_KAISER) || defined(CONFIG_KAISER) || \
+	defined(KAISER_HEADER_PRESENT)
+#define DRV_USE_KAISER
+#elif defined(X86_FEATURE_PTI)
+#define DRV_USE_PTI
+#endif
+
+/*
+ * Print macros for driver messages
+ */
+
+#if defined(MYDEBUG)
+#define SEP_PRINT_DEBUG(fmt, args...)                                      \
+	{                                                                  \
+		printk(KERN_INFO SEP_MSG_PREFIX " [DEBUG] " fmt, ##args);  \
+	}
+#else
+#define SEP_PRINT_DEBUG(fmt, args...)                                      \
+	{                                                                  \
+		;                                                          \
+	}
+#endif
+
+#define SEP_PRINT(fmt, args...)                                            \
+	{                                                                  \
+		printk(KERN_INFO SEP_MSG_PREFIX " " fmt, ##args);          \
+	}
+
+#define SEP_PRINT_WARNING(fmt, args...)                                    \
+	{                                                                  \
+		printk(KERN_ALERT SEP_MSG_PREFIX " [Warning] " fmt, ##args); \
+	}
+
+#define SEP_PRINT_ERROR(fmt, args...)                                      \
+	{                                                                  \
+		printk(KERN_CRIT SEP_MSG_PREFIX " [ERROR] " fmt, ##args);  \
+	}
+
+// Macro to return the thread group id
+#define GET_CURRENT_TGID() (current->tgid)
+
+#define OVERFLOW_ARGS U64 *, U64 *
+
+typedef struct DRV_EVENT_MASK_NODE_S DRV_EVENT_MASK_NODE;
+typedef DRV_EVENT_MASK_NODE *DRV_EVENT_MASK;
+
+struct DRV_EVENT_MASK_NODE_S {
+	U16 event_idx; // 0 <= index < MAX_EVENTS
+	U16 desc_id;
+	union {
+		U32 bitFields1;
+		struct {
+			U32 precise : 1;
+			U32 lbr_capture : 1;
+			// Indicates which events need to have additional
+			// registers read because they are DEAR events.
+			U32 dear_capture : 1;
+			// Indicates which events need to have additional
+			// registers read because they are IEAR events.
+			U32 iear_capture : 1;
+			// Indicates which events need to have additional
+			// registers read because they are BTB events.
+			U32 btb_capture : 1;
+			// Indicates which events need to have additional
+			// registers read because they are IPEAR events.
+			U32 ipear_capture : 1;
+			U32 uncore_capture : 1;
+			// Indicates whether the event is related to a branch
+			// operation or not
+			U32 branch : 1;
+			// Indicates whether the event is related to
+			// perf_metrics or not
+			U32 perf_metrics_capture : 1;
+			// Indicates if the interrupted event is the trigger
+			// event; flag used in ebc mode and uncore interrupt mode
+			U32 trigger : 1;
+			// Indicates to collect the data only if a context
+			// switch occurs.
+			U32 collect_on_ctx_sw : 1;
+			U32 reserved : 21;
+		} s1;
+	} u1;
+};
+
+#define DRV_EVENT_MASK_event_idx(d) ((d)->event_idx)
+#define DRV_EVENT_MASK_desc_id(d) ((d)->desc_id)
+#define DRV_EVENT_MASK_bitFields1(d) ((d)->u1.bitFields1)
+#define DRV_EVENT_MASK_precise(d) ((d)->u1.s1.precise)
+#define DRV_EVENT_MASK_lbr_capture(d) ((d)->u1.s1.lbr_capture)
+#define DRV_EVENT_MASK_dear_capture(d) ((d)->u1.s1.dear_capture)
+#define DRV_EVENT_MASK_iear_capture(d) ((d)->u1.s1.iear_capture)
+#define DRV_EVENT_MASK_btb_capture(d) ((d)->u1.s1.btb_capture)
+#define DRV_EVENT_MASK_ipear_capture(d) ((d)->u1.s1.ipear_capture)
+#define DRV_EVENT_MASK_uncore_capture(d) ((d)->u1.s1.uncore_capture)
+#define DRV_EVENT_MASK_branch(d) ((d)->u1.s1.branch)
+#define DRV_EVENT_MASK_perf_metrics_capture(d) ((d)->u1.s1.perf_metrics_capture)
+#define DRV_EVENT_MASK_trigger(d) ((d)->u1.s1.trigger)
+#define DRV_EVENT_MASK_collect_on_ctx_sw(d) ((d)->u1.s1.collect_on_ctx_sw)
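+
+/*
+ * Usage sketch for the accessor macros above (illustrative only, not a
+ * driver call path): mark an overflowed event as precise, then branch
+ * on that flag without touching the bitfield layout directly.
+ *
+ *   DRV_EVENT_MASK_NODE node;
+ *   DRV_EVENT_MASK em = &node;
+ *
+ *   DRV_EVENT_MASK_event_idx(em) = 3;  // index of the overflowed event
+ *   DRV_EVENT_MASK_bitFields1(em) = 0; // clear all capture flags
+ *   DRV_EVENT_MASK_precise(em) = 1;    // event uses PEBS
+ *   if (DRV_EVENT_MASK_precise(em)) {
+ *           // take the PEBS buffer path for this event
+ *   }
+ */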
+/* This defines the maximum number of overflow events per interrupt.
+ * In order to reduce the memory footprint, the value should be at least
+ * the number of fixed and general-purpose PMU counters.
+ * Sandy Bridge with HT off has 11 counters (3 fixed and 8 general-purpose).
+ */
+#define MAX_OVERFLOW_EVENTS 16
+
+typedef struct DRV_MASKS_NODE_S DRV_MASKS_NODE;
+typedef DRV_MASKS_NODE *DRV_MASKS;
+
+/*
+ * @struct DRV_MASKS_NODE_S
+ * @brief
+ * The structure is used to store overflow events when handling a PMU
+ * interrupt. This approach should be more efficient than checking all
+ * event masks when many events are monitored but only a few of them
+ * overflow per interrupt.
+ */
+struct DRV_MASKS_NODE_S {
+	DRV_EVENT_MASK_NODE eventmasks[MAX_OVERFLOW_EVENTS];
+	U8 masks_num; // 0 <= masks_num <= MAX_OVERFLOW_EVENTS
+};
+
+#define DRV_MASKS_masks_num(d) ((d)->masks_num)
+#define DRV_MASKS_eventmasks(d) ((d)->eventmasks)
+
+/*
+ * Dispatch table for virtualized functions.
+ * Used to enable common functionality for different
+ * processor microarchitectures
+ */
+typedef struct DISPATCH_NODE_S DISPATCH_NODE;
+typedef DISPATCH_NODE *DISPATCH;
+
+struct DISPATCH_NODE_S {
+	VOID (*init)(PVOID);
+	VOID (*fini)(PVOID);
+	VOID (*write)(PVOID);
+	VOID (*freeze)(PVOID);
+	VOID (*restart)(PVOID);
+	VOID (*read_data)(PVOID, U32);
+	VOID (*check_overflow)(DRV_MASKS);
+	VOID (*swap_group)(DRV_BOOL);
+	U64 (*read_lbrs)(PVOID);
+	VOID (*cleanup)(PVOID);
+	VOID (*hw_errata)(VOID);
+	VOID (*read_power)(PVOID);
+	U64 (*check_overflow_errata)(ECB, U32, U64);
+	VOID (*read_counts)(PVOID, U32);
+	U64 (*check_overflow_gp_errata)(ECB, U64 *);
+	VOID (*read_ro)(PVOID, U32, U32);
+	VOID (*platform_info)(PVOID);
+	// Counter reads triggered/initiated by User mode timer
+	VOID (*trigger_read)(PVOID, U32, U32);
+	VOID (*scan_for_uncore)(PVOID);
+	VOID (*read_metrics)(PVOID);
+};
+
+extern VOID **PMU_register_data;
+extern VOID **desc_data;
+extern U64 *prev_counter_data;
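+
+/*
+ * Usage sketch (illustrative): how a PMI handler might collect and walk
+ * overflowed events through a dispatch table; 'dispatch' here stands in
+ * for a hypothetical DISPATCH pointer belonging to the current device.
+ *
+ *   DRV_MASKS_NODE masks;
+ *   U32 i;
+ *
+ *   DRV_MASKS_masks_num(&masks) = 0;
+ *   dispatch->check_overflow(&masks);
+ *   for (i = 0; i < DRV_MASKS_masks_num(&masks); i++) {
+ *           DRV_EVENT_MASK em = &DRV_MASKS_eventmasks(&masks)[i];
+ *           // handle event DRV_EVENT_MASK_event_idx(em)
+ *   }
+ */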
+/*!
+ * @struct LWPMU_DEVICE_NODE_S
+ * @brief  Struct to hold fields per device
+ *           PMU_register_data - MSR info
+ *           dispatch          - dispatch table
+ *           em_groups_count   - # groups
+ *           pcfg              - config struct
+ */
+typedef struct LWPMU_DEVICE_NODE_S LWPMU_DEVICE_NODE;
+typedef LWPMU_DEVICE_NODE *LWPMU_DEVICE;
+
+struct LWPMU_DEVICE_NODE_S {
+	VOID **PMU_register_data;
+	DISPATCH dispatch;
+	S32 em_groups_count;
+	VOID *pcfg;
+	U64 **unc_prev_value;
+	U64 ***unc_acc_value;
+	U64 counter_mask;
+	U64 num_events;
+	U32 num_units;
+	VOID *ec;
+	S32 *cur_group;
+	S32 pci_dev_node_index;
+	U32 device_type;
+	LBR lbr;
+	PEBS_INFO_NODE pebs_info_node;
+};
+
+#define LWPMU_DEVICE_PMU_register_data(dev) ((dev)->PMU_register_data)
+#define LWPMU_DEVICE_dispatch(dev) ((dev)->dispatch)
+#define LWPMU_DEVICE_em_groups_count(dev) ((dev)->em_groups_count)
+#define LWPMU_DEVICE_pcfg(dev) ((dev)->pcfg)
+#define LWPMU_DEVICE_prev_value(dev) ((dev)->unc_prev_value)
+#define LWPMU_DEVICE_acc_value(dev) ((dev)->unc_acc_value)
+#define LWPMU_DEVICE_counter_mask(dev) ((dev)->counter_mask)
+#define LWPMU_DEVICE_num_events(dev) ((dev)->num_events)
+#define LWPMU_DEVICE_num_units(dev) ((dev)->num_units)
+#define LWPMU_DEVICE_ec(dev) ((dev)->ec)
+#define LWPMU_DEVICE_cur_group(dev) ((dev)->cur_group)
+#define LWPMU_DEVICE_pci_dev_node_index(dev) ((dev)->pci_dev_node_index)
+#define LWPMU_DEVICE_device_type(dev) ((dev)->device_type)
+#define LWPMU_DEVICE_lbr(dev) ((dev)->lbr)
+#define LWPMU_DEVICE_pebs_dispatch(dev) ((dev)->pebs_info_node.pebs_dispatch)
+#define LWPMU_DEVICE_pebs_record_size(dev) \
+	((dev)->pebs_info_node.pebs_record_size)
+#define LWPMU_DEVICE_apebs_basic_offset(dev) \
+	((dev)->pebs_info_node.apebs_basic_offset)
+#define LWPMU_DEVICE_apebs_mem_offset(dev) \
+	((dev)->pebs_info_node.apebs_mem_offset)
+#define LWPMU_DEVICE_apebs_gpr_offset(dev) \
+	((dev)->pebs_info_node.apebs_gpr_offset)
+#define LWPMU_DEVICE_apebs_xmm_offset(dev) \
+	((dev)->pebs_info_node.apebs_xmm_offset)
+#define LWPMU_DEVICE_apebs_lbr_offset(dev) \
+	((dev)->pebs_info_node.apebs_lbr_offset)
+
+extern U32 num_devices;
+extern U32 cur_devices;
+extern LWPMU_DEVICE devices;
+extern U64 *pmu_state;
+extern U32 num_core_devs;
+
+typedef struct UNC_EM_DESC_NODE_S UNC_EM_DESC_NODE;
+typedef UNC_EM_DESC_NODE *UNC_EM_DESC;
+
+struct UNC_EM_DESC_NODE_S {
+	struct timer_list *read_timer;
+	U32 em_factor;
+};
+
+#define UNC_EM_read_timer(desc) ((desc)->read_timer)
+#define UNC_EM_em_factor(desc) ((desc)->em_factor)
+
+typedef struct EMON_DESC_NODE_S EMON_DESC_NODE;
+typedef EMON_DESC_NODE *EMON_DESC;
+
+struct EMON_DESC_NODE_S {
+	struct timer_list *read_timer;
+	U32 call_count;
+	U32 buf_index;
+};
+
+#define EMON_read_timer(desc) ((desc)->read_timer)
+#define EMON_call_count(desc) ((desc)->call_count)
+#define EMON_buf_index(desc) ((desc)->buf_index)
+
+typedef struct DRV_PMT_TELEM_DEV_NODE_S DRV_PMT_TELEM_DEV_NODE;
+typedef DRV_PMT_TELEM_DEV_NODE *DRV_PMT_TELEM_DEV;
+
+struct DRV_PMT_TELEM_DEV_NODE_S {
+	struct telem_endpoint *telem_ep;
+	U32 vendor_id;
+	U32 device_id;
+	U32 domain;
+	U32 bus;
+	U32 device;
+	U32 function;
+	U64 guid;
+	U32 dev_idx;
+	U32 found;
+};
+
+#define DRV_PMT_TELEM_DEV_telem_endpoint(pmt) ((pmt)->telem_ep)
+#define DRV_PMT_TELEM_DEV_device_id(pmt) ((pmt)->device_id)
+#define DRV_PMT_TELEM_DEV_vendor_id(pmt) ((pmt)->vendor_id)
+#define DRV_PMT_TELEM_DEV_domain(pmt) ((pmt)->domain)
+#define DRV_PMT_TELEM_DEV_bus(pmt) ((pmt)->bus)
+#define DRV_PMT_TELEM_DEV_device(pmt) ((pmt)->device)
+#define 
DRV_PMT_TELEM_DEV_function(pmt) ((pmt)->function) +#define DRV_PMT_TELEM_DEV_guid(pmt) ((pmt)->guid) +#define DRV_PMT_TELEM_DEV_dev_idx(pmt) ((pmt)->dev_idx) +#define DRV_PMT_TELEM_DEV_found(pmt) ((pmt)->found) + +// Handy macro +#define TSC_SKEW(this_cpu) (cpu_tsc[this_cpu] - cpu_tsc[0]) + +/* + * The IDT / GDT descriptor for use in identifying code segments + */ +#if defined(DRV_EM64T) +#pragma pack(push, 1) +typedef struct _idtgdtDesc { + U16 idtgdt_limit; + PVOID idtgdt_base; +} IDTGDT_DESC; +#pragma pack(pop) + +extern IDTGDT_DESC gdt_desc; +#endif + +extern DRV_BOOL NMI_mode; +extern DRV_BOOL KVM_guest_mode; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/master.h b/drivers/platform/x86/sepdk/inc/master.h new file mode 100644 index 00000000000000..3c6c58352c558c --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/master.h @@ -0,0 +1,55 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _MASTER_H_INC_ +#define _MASTER_H_INC_ + +#include +#include +#include + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "lwpmudrv.h" +#include "msrdefs.h" +#include "control.h" +#include "utility.h" +#include "output.h" +#include "ecb_iterators.h" +#include "linuxos.h" +#include "ipt.h" + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/msrdefs.h b/drivers/platform/x86/sepdk/inc/msrdefs.h new file mode 100644 index 00000000000000..34c61db6c05327 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/msrdefs.h @@ -0,0 +1,133 @@ +/**** + Copyright (C) 2011 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. 
Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _MSRDEFS_H_ +#define _MSRDEFS_H_ + +/* + * Arch Perf monitoring version 3 + */ +#define IA32_PMC0 0x0C1 +#define IA32_PMC1 0x0C2 +#define IA32_PMC2 0x0C3 +#define IA32_PMC3 0x0C4 +#define IA32_PMC4 0x0C5 +#define IA32_PMC5 0x0C6 +#define IA32_PMC6 0x0C7 +#define IA32_PMC7 0x0C8 +#define IA32_FULL_PMC0 0x4C1 +#define IA32_FULL_PMC1 0x4C2 +#define IA32_PERFEVTSEL0 0x186 +#define IA32_PERFEVTSEL1 0x187 +#define IA32_PERFEVTSEL2 0x188 +#define IA32_PERFEVTSEL3 0x189 +#define IA32_FIXED_CTR0 0x309 +#define IA32_FIXED_CTR1 0x30A +#define IA32_FIXED_CTR2 0x30B +#define IA32_FIXED_CTR3 0x30C +#define IA32_PERF_CAPABILITIES 0x345 +#define IA32_FIXED_CTRL 0x38D +#define IA32_PERF_GLOBAL_STATUS 0x38E +#define IA32_PERF_GLOBAL_CTRL 0x38F +#define IA32_PERF_GLOBAL_OVF_CTRL 0x390 +#define IA32_PEBS_ENABLE 0x3F1 +#define IA32_MISC_ENABLE 0x1A0 +#define IA32_DS_AREA 0x600 +#define IA32_DEBUG_CTRL 0x1D9 +#undef IA32_LBR_FILTER_SELECT +#define IA32_LBR_FILTER_SELECT 0x1c8 +#define IA32_PEBS_FRONTEND 0x3F7 +#define IA32_PERF_METRICS 0x329 +#define IA32_LBR_CTRL 0x14CE + +#define COMPOUND_CTR_CTL 0x306 +#define COMPOUND_PERF_CTR 0x307 +#define COMPOUND_CTR_OVF_BIT 0x800 +#define COMPOUND_CTR_OVF_SHIFT 12 + +#define FIXED_CORE_CYCLE_GLOBAL_CTRL_MASK 0x200000000 +#define FIXED_CORE_CYCLE_FIXED_CTRL_MASK 0xF0 + +/* + * Intel Processor Trace (IPT) + */ +#define IA32_RTIT_CTL 0x570 +#define IA32_RTIT_STATUS 0x571 +#define IA32_RTIT_CR3_MATCH 0x572 +#define IA32_RTIT_OUTPUT_BASE 0x560 +#define IA32_RTIT_OUTPUT_MASK_PTRS 0x561 +#define IA32_RTIT_ADDR0_A 0x580 +#define IA32_RTIT_ADDR0_B 0x581 +#define IA32_RTIT_ADDR1_A 0x582 +#define IA32_RTIT_ADDR1_B 0x583 +#define IA32_RTIT_ADDR2_A 0x584 +#define IA32_RTIT_ADDR2_B 0x585 +#define IA32_RTIT_ADDR3_A 0x586 +#define IA32_RTIT_ADDR3_B 0x587 +#define IA32_VMX_MISC 0x485 + +#define IA32_RTIT_CTL_TRACEEN 0x1 +#define IA32_RTIT_CTL_CYCEN 0x2 +#define IA32_RTIT_CTL_OS 0x4 +#define IA32_RTIT_CTL_USER 0x8 +#define IA32_RTIT_CTL_PWREVTEN 0x10 +#define IA32_RTIT_CTL_FUPONPTW 0x20 +#define IA32_RTIT_CTL_FABRICEN 0x40 +#define IA32_RTIT_CTL_CR3FILTER 0x80 +#define IA32_RTIT_CTL_TOPA 0x100 +#define IA32_RTIT_CTL_MTCEN 0x200 +#define IA32_RTIT_CTL_TSCEN 0x400 +#define IA32_RTIT_CTL_DISRETC 0x800 +#define IA32_RTIT_CTL_PTWEN 0x1000 +#define IA32_RTIT_CTL_BRANCHEN 0x2000 +#define IA32_VMX_MISC_VMXONPT 0x4000 + +#define RTIT_OUTPUT_MASK_PTRS_TABLEOFFSET 0xFFFFFF80ULL + +#define PERF_GLOBAL_STATUS_TOPA_PMI_MASK 0x80000000000000ULL + +// REG INDEX inside GLOBAL CTRL SECTION +enum { + GLOBAL_CTRL_REG_INDEX = 0, + GLOBAL_OVF_CTRL_REG_INDEX, + PEBS_ENABLE_REG_INDEX, + DEBUG_CTRL_REG_INDEX, + FIXED_CTRL_REG_INDEX, +}; + +// REG INDEX inside GLOBAL STATUS SECTION +enum { + GLOBAL_STATUS_REG_INDEX = 0, +}; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/output.h b/drivers/platform/x86/sepdk/inc/output.h new file mode 100644 index 00000000000000..4ffff5ae547333 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/output.h @@ -0,0 +1,150 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. 
+
+  This file is part of SEP Development Kit.
+
+  SEP Development Kit is free software; you can redistribute it
+  and/or modify it under the terms of the GNU General Public License
+  version 2 as published by the Free Software Foundation.
+
+  SEP Development Kit is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  As a special exception, you may use this file as part of a free software
+  library without restriction. Specifically, if other files instantiate
+  templates or use macros or inline functions from this file, or you compile
+  this file and link it with other files to produce an executable, this
+  file does not by itself cause the resulting executable to be covered by
+  the GNU General Public License. This exception does not however
+  invalidate any other reasons why the executable file might be covered by
+  the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+#ifndef _OUTPUT_H_
+#define _OUTPUT_H_
+
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+/*
+ * Initial allocation:
+ *   size of each buffer = 512KB (1 << 19)
+ *   number of buffers   = 2
+ * The size of a buffer cannot exceed 1 << 22, i.e. 4MB.
+ */
+#define OUTPUT_SMALL_BUFFER (1 << 15)
+#define OUTPUT_LARGE_BUFFER (1 << 19)
+#define OUTPUT_CP_BUFFER (1 << 22)
+#define OUTPUT_EMON_BUFFER (1 << 25)
+#define OUTPUT_MEMORY_THRESHOLD 0x8000000
+
+extern U32 output_buffer_size;
+extern U32 saved_buffer_size;
+#define OUTPUT_BUFFER_SIZE output_buffer_size
+#define OUTPUT_NUM_BUFFERS 2
+#if defined(DRV_ANDROID)
+#define MODULE_BUFF_SIZE 1
+#else
+#define MODULE_BUFF_SIZE 2
+#endif
+#if defined(DRV_ANDROID)
+#define IPT_TRACE_BUFF_SIZE 1
+#define IPT_INFO_BUFF_SIZE 1
+#else
+#define IPT_TRACE_BUFF_SIZE 2
+#define IPT_INFO_BUFF_SIZE 2
+#endif
+
+/*
+ * Data type declarations and accessor macros
+ */
+typedef struct {
+	spinlock_t buffer_lock;
+	U32 remaining_buffer_size;
+	U32 current_buffer;
+	U32 total_buffer_size;
+	U32 next_buffer[OUTPUT_NUM_BUFFERS];
+	U32 buffer_full[OUTPUT_NUM_BUFFERS];
+	U8 *buffer[OUTPUT_NUM_BUFFERS];
+	U32 signal_full;
+	DRV_BOOL tasklet_queued;
+} OUTPUT_NODE, *OUTPUT;
+
+#define OUTPUT_buffer_lock(x) ((x)->buffer_lock)
+#define OUTPUT_remaining_buffer_size(x) ((x)->remaining_buffer_size)
+#define OUTPUT_total_buffer_size(x) ((x)->total_buffer_size)
+#define OUTPUT_buffer(x, y) ((x)->buffer[(y)])
+#define OUTPUT_buffer_full(x, y) ((x)->buffer_full[(y)])
+#define OUTPUT_current_buffer(x) ((x)->current_buffer)
+#define OUTPUT_signal_full(x) ((x)->signal_full)
+#define OUTPUT_tasklet_queued(x) ((x)->tasklet_queued)
+
+/*
+ * An array of control buffers, one per cpu
+ */
+typedef struct {
+	wait_queue_head_t queue;
+	OUTPUT_NODE outbuf;
+	U32 sample_count;
+} BUFFER_DESC_NODE, *BUFFER_DESC;
+
+#define BUFFER_DESC_queue(a) ((a)->queue)
+#define BUFFER_DESC_outbuf(a) ((a)->outbuf)
+#define BUFFER_DESC_sample_count(a) ((a)->sample_count)
+
+extern BUFFER_DESC cpu_buf; // actually an array of BUFFER_DESC_NODE
+extern BUFFER_DESC unc_buf;
+extern BUFFER_DESC module_buf;
+extern BUFFER_DESC cpu_sideband_buf;
+extern BUFFER_DESC emon_buf;
+extern BUFFER_DESC ipt_trace_buf;
+extern BUFFER_DESC ipt_info_buf;
+
+/*
+ * Interface Functions
+ */
+extern int OUTPUT_Module_Fill(PVOID data, U16 size, U8 in_notification);
+extern OS_STATUS OUTPUT_Initialize(void);
+extern OS_STATUS OUTPUT_Initialize_UNC(void);
+extern OS_STATUS OUTPUT_Initialize_EMON(void);
+extern void OUTPUT_Cleanup(VOID);
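+
+/*
+ * Usage sketch (illustrative): a sample producer reserves space in the
+ * current CPU's output buffer and fills it in place; 'this_cpu' and
+ * 'sample_size' are hypothetical locals, and OUTPUT_Reserve_Buffer_Space()
+ * is declared below.
+ *
+ *   void *space = OUTPUT_Reserve_Buffer_Space(&cpu_buf[this_cpu],
+ *                                             sample_size, FALSE, 0);
+ *   if (space) {
+ *           // write the sample record into 'space'
+ *   }
+ */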
+extern int OUTPUT_Destroy(VOID);
+extern int OUTPUT_Flush(VOID);
+extern int OUTPUT_Flush_EMON(VOID);
+extern ssize_t
+OUTPUT_Module_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos);
+extern ssize_t
+OUTPUT_IPT_Trace_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos);
+extern ssize_t
+OUTPUT_IPT_Info_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos);
+extern ssize_t
+OUTPUT_Sample_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos);
+extern ssize_t
+OUTPUT_UncSample_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos);
+extern ssize_t
+OUTPUT_SidebandInfo_Read(struct file *filp,
+			 char *buf,
+			 size_t count,
+			 loff_t *f_pos);
+extern ssize_t
+OUTPUT_Emon_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos);
+extern void *
+OUTPUT_Reserve_Buffer_Space(BUFFER_DESC bd,
+			    U32 size,
+			    DRV_BOOL defer,
+			    U8 in_notification);
+extern int OUTPUT_IPT_Fill(char *data, U32 size);
+extern int OUTPUT_IPT_Info_Fill(char *data, U32 size);
+
+#endif
+
diff --git a/drivers/platform/x86/sepdk/inc/pci.h b/drivers/platform/x86/sepdk/inc/pci.h
new file mode 100644
index 00000000000000..1e15cdab8ce28e
--- /dev/null
+++ b/drivers/platform/x86/sepdk/inc/pci.h
@@ -0,0 +1,322 @@
+/****
+  Copyright (C) 2012 Intel Corporation. All Rights Reserved.
+
+  This file is part of SEP Development Kit.
+
+  SEP Development Kit is free software; you can redistribute it
+  and/or modify it under the terms of the GNU General Public License
+  version 2 as published by the Free Software Foundation.
+
+  SEP Development Kit is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  As a special exception, you may use this file as part of a free software
+  library without restriction. Specifically, if other files instantiate
+  templates or use macros or inline functions from this file, or you compile
+  this file and link it with other files to produce an executable, this
+  file does not by itself cause the resulting executable to be covered by
+  the GNU General Public License. This exception does not however
+  invalidate any other reasons why the executable file might be covered by
+  the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#if defined(DRV_PMT_ENABLE)
+#include <linux/compiler.h> /* Definition of __weak */
+#include <linux/kref.h> /* struct kref */
+#include <linux/notifier.h> /* struct notifier_block */
+#include <linux/pci.h> /* struct pci_dev */
+#include <linux/ioport.h> /* struct resource */
+#endif
+
+#include "lwpmudrv_defines.h"
+
+/*
+ * PCI Config Address macros
+ */
+#define PCI_ENABLE 0x80000000
+
+#define PCI_ADDR_IO 0xCF8
+#define PCI_DATA_IO 0xCFC
+
+#define BIT0 0x1
+#define BIT1 0x2
+
+/*
+ * Macro for forming a PCI configuration address
+ */
+#define FORM_PCI_ADDR(bus, dev, fun, off)                               \
+	(((PCI_ENABLE)) | ((bus & 0xFF) << 16) | ((dev & 0x1F) << 11) | \
+	 ((fun & 0x07) << 8) | ((off & 0xFF) << 0))
+
+#define VENDOR_ID_MASK 0x0000FFFF
+#define DEVICE_ID_MASK 0xFFFF0000
+#define DEVICE_ID_BITSHIFT 16
+#define LOWER_4_BYTES_MASK 0x00000000FFFFFFFF
+#define MAX_BUSNO 256
+#define NEXT_ADDR_OFFSET 4
+#define NEXT_ADDR_SHIFT 32
+#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086
+#define MAX_PCI_DEVS 32
+
+#define CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id, device_id)   \
+	{                                                                   \
+		vendor_id = value & VENDOR_ID_MASK;                         \
+		device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \
+		                                                            \
+		if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) {              \
+			continue;                                           \
+		}                                                           \
+	}
+
+#define CHECK_IF_GENUINE_INTEL_DEVICE(value, vendor_id, device_id, valid)   \
+	{                                                                   \
+		vendor_id = value & VENDOR_ID_MASK;                         \
+		device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \
+		                                                            \
+		valid = 1;                                                  \
+		if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) {              \
+			valid = 0;                                          \
+		}                                                           \
+	}
+
+#define PCI_FIND_DVSEC_CAPABILITY_BAR(config, bar_list) \
+	PCI_Find_DVSEC_Capability_BAR(config, bar_list)
+
+typedef struct SEP_MMIO_NODE_S SEP_MMIO_NODE;
+
+struct SEP_MMIO_NODE_S {
+	U64 physical_address;
+	U64 virtual_address;
+	U64 map_token;
+	U32 size;
+};
+
+#define SEP_MMIO_NODE_physical_address(x) ((x)->physical_address)
+#define SEP_MMIO_NODE_virtual_address(x) ((x)->virtual_address)
+#define SEP_MMIO_NODE_map_token(x) ((x)->map_token)
+#define SEP_MMIO_NODE_size(x) ((x)->size)
+
+#if defined(DRV_PMT_ENABLE)
+/*
+ * Struct definitions taken from the PMT driver.
+ */
+
+struct telem_header {
+	u8 access_type;
+	u8 telem_type;
+	u16 size;
+	u32 guid;
+	u32 base_offset;
+};
+
+struct telem_endpoint {
+	struct pci_dev *parent;
+	struct telem_header header;
+	void __iomem *base;
+	struct resource res;
+	bool present;
+	struct kref kref;
+};
+
+struct telem_endpoint_info {
+	struct pci_dev *pdev;
+	struct telem_header header;
+};
+
+#define PMT_TELEM_NOTIFY_ADD 0
+#define PMT_TELEM_NOTIFY_REMOVE 1
+
+/*
+ * Weak linkage of functions from the PMT driver
+ */
+
+/**
+ * pmt_telem_get_next_endpoint() - Get next device id for a telemetry endpoint
+ * @start: starting devid to look from
+ *
+ * This function can be used in a while loop predicate to retrieve the devid
+ * of all available telemetry endpoints. pmt_telem_get_endpoint_info() and
+ * pmt_telem_register_endpoint() can be used inside the loop to examine
+ * endpoint info and register to receive a pointer to the endpoint. The
+ * pointer is then usable in the telemetry read calls to access the
+ * telemetry data.
+ *
+ * Return:
+ * * devid - devid of the next present endpoint from start
+ * * 0 - when no more endpoints are present after start
+ */
+extern int __weak
+pmt_telem_get_next_endpoint(int start);
+
+/**
+ * pmt_telem_register_endpoint() - Register a telemetry endpoint
+ * @devid: device id/handle of the telemetry endpoint
+ *
+ * Increments the kref usage counter for the endpoint.
+ *
+ * Return:
+ * * endpoint - On success returns pointer to the telemetry endpoint
+ * * -ENXIO - telemetry endpoint not found
+ */
+extern struct telem_endpoint *__weak
+pmt_telem_register_endpoint(int devid);
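+
+/*
+ * Usage sketch (illustrative): enumerate PMT endpoints and register each
+ * one. Assumes the PMT driver is loaded so these weak symbols resolve,
+ * and that registration failures come back as ERR_PTR values (-ENXIO).
+ *
+ *   struct telem_endpoint *ep;
+ *   int devid = 0;
+ *
+ *   while ((devid = pmt_telem_get_next_endpoint(devid)) != 0) {
+ *           ep = pmt_telem_register_endpoint(devid);
+ *           if (IS_ERR(ep))
+ *                   continue;
+ *           // ... pmt_telem_read(ep, offset, &data, count) ...
+ *           pmt_telem_unregister_endpoint(ep);
+ *   }
+ */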
+/**
+ * pmt_telem_unregister_endpoint() - Unregister a telemetry endpoint
+ * @ep: Telemetry endpoint to unregister
+ *
+ * Decrements the kref usage counter for the endpoint.
+ */
+extern void __weak
+pmt_telem_unregister_endpoint(struct telem_endpoint *ep);
+
+/**
+ * pmt_telem_get_endpoint_info() - Get info for an endpoint from its devid
+ * @devid: device id/handle of the telemetry endpoint
+ * @info: Endpoint info structure to be populated
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENXIO - telemetry endpoint not found for the devid
+ * * -EINVAL - @info is NULL
+ */
+extern int __weak
+pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info);
+
+/**
+ * pmt_telem_read() - Read qwords from telemetry sram
+ * @ep: Telemetry endpoint to be read
+ * @offset: Register offset in bytes
+ * @data: Allocated qword buffer
+ * @count: Number of qwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - The device is not present.
+ * * -EINVAL - The offset is out of bounds
+ * * -EPIPE - The device was removed during the read. Data was written
+ * but should be considered invalid.
+ */
+extern int __weak
+pmt_telem_read(struct telem_endpoint *ep, u32 offset, u64 *data, u32 count);
+
+/**
+ * pmt_telem_register_notifier() - Receive notification of endpoint events
+ * @nb: Notifier block
+ *
+ * Events:
+ * PMT_TELEM_NOTIFY_ADD - An endpoint has been added. Notifier data
+ * is the devid
+ * PMT_TELEM_NOTIFY_REMOVE - An endpoint has been removed.
Notifier data + * is the devid + */ +extern int __weak +pmt_telem_register_notifier(struct notifier_block *nb); + +/** + * pmt_telem_unregister_notifier() - Unregister notification of endpoint events + * @nb: Notifier block + * + */ +extern int __weak +pmt_telem_unregister_notifier(struct notifier_block *nb); + +#endif + +extern OS_STATUS +PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, U32 map_size); + +extern void PCI_Unmap_Memory(SEP_MMIO_NODE *node); + +extern int PCI_Read_From_Memory_Address(U32 addr, U32 *val); + +extern int PCI_Write_To_Memory_Address(U32 addr, U32 val); + +extern int PCI_Read_From_Memory_Address_U64(U64 addr, U64* val); + +extern int PCI_Write_To_Memory_Address_U64(U64 addr, U64 val); + +/*** UNIVERSAL PCI ACCESSORS ***/ + +extern VOID PCI_Initialize(VOID); + +extern VOID PCI_Cleanup(VOID); + +extern U32 +PCI_Read_U32(U32 domain, U32 bus, U32 device, U32 function, U32 offset); + +extern U32 +PCI_Read_U32_Valid(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U32 invalid_value); + +extern U64 +PCI_Read_U64(U32 domain, U32 bus, U32 device, U32 function, U32 offset); + +extern U64 +PCI_Read_U64_Valid(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U64 invalid_value); + +extern U32 +PCI_Write_U32(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U32 value); + +extern U32 +PCI_Write_U64(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U64 value); + +extern VOID +PCI_Find_DVSEC_Capability_BAR(UNCORE_DISCOVERY_DVSEC_CONFIG config, + DRV_PCI_DEVICE_ENTRY bar_list); + +/*** UNIVERSAL MMIO ACCESSORS ***/ + +extern U32 PCI_MMIO_Read_U32(U64 virtual_address_base, U32 offset); + +extern U64 PCI_MMIO_Read_U64(U64 virtual_address_base, U32 offset); + +extern void +PCI_MMIO_Write_U32(U64 virtual_address_base, U32 offset, U32 value); + +extern void +PCI_MMIO_Write_U64(U64 virtual_address_base, U32 offset, U64 value); + +#if defined(DRV_PMT_ENABLE) +extern U32 PCI_PMT_Read_U32(struct telem_endpoint *telem_ep, U32 offset); + +extern U64 PCI_PMT_Read_U64(struct telem_endpoint *telem_ep, U32 offset); +#endif + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pebs.h b/drivers/platform/x86/sepdk/inc/pebs.h new file mode 100644 index 00000000000000..71d712cdff95c7 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pebs.h @@ -0,0 +1,525 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _PEBS_H_ +#define _PEBS_H_ + +typedef struct PEBS_REC_NODE_S PEBS_REC_NODE; + +struct PEBS_REC_NODE_S { + U64 r_flags; // Offset 0x00 + U64 linear_ip; // Offset 0x08 + U64 rax; // Offset 0x10 + U64 rbx; // Offset 0x18 + U64 rcx; // Offset 0x20 + U64 rdx; // Offset 0x28 + U64 rsi; // Offset 0x30 + U64 rdi; // Offset 0x38 + U64 rbp; // Offset 0x40 + U64 rsp; // Offset 0x48 + U64 r8; // Offset 0x50 + U64 r9; // Offset 0x58 + U64 r10; // Offset 0x60 + U64 r11; // Offset 0x68 + U64 r12; // Offset 0x70 + U64 r13; // Offset 0x78 + U64 r14; // Offset 0x80 + U64 r15; // Offset 0x88 +}; + +typedef struct PEBS_REC_EXT_NODE_S PEBS_REC_EXT_NODE; +typedef PEBS_REC_EXT_NODE *PEBS_REC_EXT; +struct PEBS_REC_EXT_NODE_S { + PEBS_REC_NODE pebs_basic; // Offset 0x00 to 0x88 + U64 glob_perf_overflow; // Offset 0x90 + U64 data_linear_address; // Offset 0x98 + U64 data_source; // Offset 0xA0 + U64 latency; // Offset 0xA8 +}; + +#define PEBS_REC_EXT_r_flags(x) ((x)->pebs_basic.r_flags) +#define PEBS_REC_EXT_linear_ip(x) ((x)->pebs_basic.linear_ip) +#define PEBS_REC_EXT_rax(x) ((x)->pebs_basic.rax) +#define PEBS_REC_EXT_rbx(x) ((x)->pebs_basic.rbx) +#define PEBS_REC_EXT_rcx(x) ((x)->pebs_basic.rcx) +#define PEBS_REC_EXT_rdx(x) ((x)->pebs_basic.rdx) +#define PEBS_REC_EXT_rsi(x) ((x)->pebs_basic.rsi) +#define PEBS_REC_EXT_rdi(x) ((x)->pebs_basic.rdi) +#define PEBS_REC_EXT_rbp(x) ((x)->pebs_basic.rbp) +#define PEBS_REC_EXT_rsp(x) ((x)->pebs_basic.rsp) +#define PEBS_REC_EXT_r8(x) ((x)->pebs_basic.r8) +#define PEBS_REC_EXT_r9(x) ((x)->pebs_basic.r9) +#define PEBS_REC_EXT_r10(x) ((x)->pebs_basic.r10) +#define PEBS_REC_EXT_r11(x) ((x)->pebs_basic.r11) +#define PEBS_REC_EXT_r12(x) ((x)->pebs_basic.r12) +#define PEBS_REC_EXT_r13(x) ((x)->pebs_basic.r13) +#define PEBS_REC_EXT_r14(x) ((x)->pebs_basic.r14) +#define PEBS_REC_EXT_r15(x) ((x)->pebs_basic.r15) +#define PEBS_REC_EXT_glob_perf_overflow(x) ((x)->glob_perf_overflow) +#define PEBS_REC_EXT_data_linear_address(x) ((x)->data_linear_address) +#define PEBS_REC_EXT_data_source(x) ((x)->data_source) +#define PEBS_REC_EXT_latency(x) ((x)->latency) + +typedef struct PEBS_REC_EXT1_NODE_S PEBS_REC_EXT1_NODE; +typedef PEBS_REC_EXT1_NODE *PEBS_REC_EXT1; +struct PEBS_REC_EXT1_NODE_S { + PEBS_REC_EXT_NODE pebs_ext; + U64 eventing_ip; //Offset 0xB0 + U64 hle_info; //Offset 0xB8 +}; + +#define PEBS_REC_EXT1_r_flags(x) ((x)->pebs_ext.pebs_basic.r_flags) +#define PEBS_REC_EXT1_linear_ip(x) ((x)->pebs_ext.pebs_basic.linear_ip) +#define PEBS_REC_EXT1_rax(x) ((x)->pebs_ext.pebs_basic.rax) +#define PEBS_REC_EXT1_rbx(x) ((x)->pebs_ext.pebs_basic.rbx) +#define PEBS_REC_EXT1_rcx(x) ((x)->pebs_ext.pebs_basic.rcx) +#define PEBS_REC_EXT1_rdx(x) ((x)->pebs_ext.pebs_basic.rdx) +#define PEBS_REC_EXT1_rsi(x) ((x)->pebs_ext.pebs_basic.rsi) +#define PEBS_REC_EXT1_rdi(x) ((x)->pebs_ext.pebs_basic.rdi) +#define PEBS_REC_EXT1_rbp(x) ((x)->pebs_ext.pebs_basic.rbp) +#define PEBS_REC_EXT1_rsp(x) ((x)->pebs_ext.pebs_basic.rsp) +#define PEBS_REC_EXT1_r8(x) ((x)->pebs_ext.pebs_basic.r8) +#define PEBS_REC_EXT1_r9(x) ((x)->pebs_ext.pebs_basic.r9) +#define PEBS_REC_EXT1_r10(x) ((x)->pebs_ext.pebs_basic.r10) +#define PEBS_REC_EXT1_r11(x) ((x)->pebs_ext.pebs_basic.r11) +#define PEBS_REC_EXT1_r12(x) ((x)->pebs_ext.pebs_basic.r12) +#define PEBS_REC_EXT1_r13(x) ((x)->pebs_ext.pebs_basic.r13) +#define PEBS_REC_EXT1_r14(x) ((x)->pebs_ext.pebs_basic.r14) +#define PEBS_REC_EXT1_r15(x) ((x)->pebs_ext.pebs_basic.r15) +#define PEBS_REC_EXT1_glob_perf_overflow(x) 
((x)->pebs_ext.glob_perf_overflow)
+#define PEBS_REC_EXT1_data_linear_address(x) ((x)->pebs_ext.data_linear_address)
+#define PEBS_REC_EXT1_data_source(x) ((x)->pebs_ext.data_source)
+#define PEBS_REC_EXT1_latency(x) ((x)->pebs_ext.latency)
+#define PEBS_REC_EXT1_eventing_ip(x) ((x)->eventing_ip)
+#define PEBS_REC_EXT1_hle_info(x) ((x)->hle_info)
+
+typedef struct PEBS_REC_EXT2_NODE_S PEBS_REC_EXT2_NODE;
+typedef PEBS_REC_EXT2_NODE *PEBS_REC_EXT2;
+struct PEBS_REC_EXT2_NODE_S {
+	PEBS_REC_EXT1_NODE pebs_ext1;
+	U64 tsc; //Offset 0xC0
+};
+
+// Note: pebs_ext1 is an embedded struct, not a pointer, so these
+// accessors reach into it with '.' rather than '->'.
+#define PEBS_REC_EXT2_r_flags(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r_flags)
+#define PEBS_REC_EXT2_linear_ip(x) \
+	((x)->pebs_ext1.pebs_ext.pebs_basic.linear_ip)
+#define PEBS_REC_EXT2_rax(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rax)
+#define PEBS_REC_EXT2_rbx(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rbx)
+#define PEBS_REC_EXT2_rcx(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rcx)
+#define PEBS_REC_EXT2_rdx(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rdx)
+#define PEBS_REC_EXT2_rsi(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rsi)
+#define PEBS_REC_EXT2_rdi(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rdi)
+#define PEBS_REC_EXT2_rbp(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rbp)
+#define PEBS_REC_EXT2_rsp(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.rsp)
+#define PEBS_REC_EXT2_r8(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r8)
+#define PEBS_REC_EXT2_r9(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r9)
+#define PEBS_REC_EXT2_r10(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r10)
+#define PEBS_REC_EXT2_r11(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r11)
+#define PEBS_REC_EXT2_r12(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r12)
+#define PEBS_REC_EXT2_r13(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r13)
+#define PEBS_REC_EXT2_r14(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r14)
+#define PEBS_REC_EXT2_r15(x) ((x)->pebs_ext1.pebs_ext.pebs_basic.r15)
+#define PEBS_REC_EXT2_glob_perf_overflow(x) \
+	((x)->pebs_ext1.pebs_ext.glob_perf_overflow)
+#define PEBS_REC_EXT2_data_linear_address(x) \
+	((x)->pebs_ext1.pebs_ext.data_linear_address)
+#define PEBS_REC_EXT2_data_source(x) ((x)->pebs_ext1.pebs_ext.data_source)
+#define PEBS_REC_EXT2_latency(x) ((x)->pebs_ext1.pebs_ext.latency)
+#define PEBS_REC_EXT2_eventing_ip(x) ((x)->pebs_ext1.eventing_ip)
+#define PEBS_REC_EXT2_hle_info(x) ((x)->pebs_ext1.hle_info)
+#define PEBS_REC_EXT2_tsc(x) ((x)->tsc)
+
+typedef struct APEBS_CONFIG_NODE_S APEBS_CONFIG_NODE;
+typedef APEBS_CONFIG_NODE *APEBS_CONFIG;
+
+struct APEBS_CONFIG_NODE_S {
+	U8 apebs_enabled;
+	U8 collect_mem;
+	U8 collect_gpr;
+	U8 collect_xmm;
+	U8 collect_lbrs;
+	U8 precise_ip_lbrs;
+	U8 num_lbr_entries;
+	U16 basic_offset;
+	U16 mem_offset;
+	U16 gpr_offset;
+	U16 xmm_offset;
+	U16 lbr_offset;
+};
+
+#define APEBS_CONFIG_apebs_enabled(x) ((x)->apebs_enabled)
+#define APEBS_CONFIG_collect_mem(x) ((x)->collect_mem)
+#define APEBS_CONFIG_collect_gpr(x) ((x)->collect_gpr)
+#define APEBS_CONFIG_collect_xmm(x) ((x)->collect_xmm)
+#define APEBS_CONFIG_collect_lbrs(x) ((x)->collect_lbrs)
+#define APEBS_CONFIG_precise_ip_lbrs(x) ((x)->precise_ip_lbrs)
+#define APEBS_CONFIG_num_lbr_entries(x) ((x)->num_lbr_entries)
+#define APEBS_CONFIG_basic_offset(x) ((x)->basic_offset)
+#define APEBS_CONFIG_mem_offset(x) ((x)->mem_offset)
+#define APEBS_CONFIG_gpr_offset(x) ((x)->gpr_offset)
+#define APEBS_CONFIG_xmm_offset(x) ((x)->xmm_offset)
+#define APEBS_CONFIG_lbr_offset(x) ((x)->lbr_offset)
+
+typedef struct ADAPTIVE_PEBS_BASIC_INFO_NODE_S ADAPTIVE_PEBS_BASIC_INFO_NODE;
+typedef ADAPTIVE_PEBS_BASIC_INFO_NODE 
*ADAPTIVE_PEBS_BASIC_INFO; + +struct ADAPTIVE_PEBS_BASIC_INFO_NODE_S { + // Offset 0x0, [47:0] - record format, [63:48] - record size + U64 record_info; + U64 eventing_ip; // Offset 0x8 + U64 applicable_counters; // Offset 0x10 + U64 tsc; // Offset 0x18 +}; + +#define ADAPTIVE_PEBS_BASIC_INFO_record_info(x) ((x)->record_info) +#define ADAPTIVE_PEBS_BASIC_INFO_eventing_ip(x) ((x)->eventing_ip) +#define ADAPTIVE_PEBS_BASIC_INFO_tsc(x) ((x)->tsc) +#define ADAPTIVE_PEBS_BASIC_INFO_applicable_counters(x) \ + ((x)->applicable_counters) + +typedef struct ADAPTIVE_PEBS_MEM_INFO_NODE_S ADAPTIVE_PEBS_MEM_INFO_NODE; +typedef ADAPTIVE_PEBS_MEM_INFO_NODE *ADAPTIVE_PEBS_MEM_INFO; + +struct ADAPTIVE_PEBS_MEM_INFO_NODE_S { + U64 data_linear_address; // Offset 0x20 + U64 data_source; // Offset 0x28 + U64 latency; // Offset 0x30 + U64 hle_info; // Offset 0x38 +}; + +#define ADAPTIVE_PEBS_MEM_INFO_data_linear_address(x) ((x)->data_linear_address) +#define ADAPTIVE_PEBS_MEM_INFO_data_source(x) ((x)->data_source) +#define ADAPTIVE_PEBS_MEM_INFO_latency(x) ((x)->latency) +#define ADAPTIVE_PEBS_MEM_INFO_hle_info(x) ((x)->hle_info) + +typedef struct ADAPTIVE_PEBS_GPR_INFO_NODE_S ADAPTIVE_PEBS_GPR_INFO_NODE; +typedef ADAPTIVE_PEBS_GPR_INFO_NODE *ADAPTIVE_PEBS_GPR_INFO; + +struct ADAPTIVE_PEBS_GPR_INFO_NODE_S { + U64 rflags; // Offset 0x40 + U64 rip; // Offset 0x48 + U64 rax; // Offset 0x50 + U64 rcx; // Offset 0x58 + U64 rdx; // Offset 0x60 + U64 rbx; // Offset 0x68 + U64 rsp; // Offset 0x70 + U64 rbp; // Offset 0x78 + U64 rsi; // Offset 0x80 + U64 rdi; // Offset 0x88 + U64 r8; // Offset 0x90 + U64 r9; // Offset 0x98 + U64 r10; // Offset 0xA0 + U64 r11; // Offset 0xA8 + U64 r12; // Offset 0xB0 + U64 r13; // Offset 0xB8 + U64 r14; // Offset 0xC0 + U64 r15; // Offset 0xC8 +}; + +#define ADAPTIVE_PEBS_GPR_INFO_rflags(x) ((x)->rflags) +#define ADAPTIVE_PEBS_GPR_INFO_rip(x) ((x)->rip) +#define ADAPTIVE_PEBS_GPR_INFO_rax(x) ((x)->rax) +#define ADAPTIVE_PEBS_GPR_INFO_rcx(x) ((x)->rcx) +#define ADAPTIVE_PEBS_GPR_INFO_rdx(x) ((x)->rdx) +#define ADAPTIVE_PEBS_GPR_INFO_rbx(x) ((x)->rbx) +#define ADAPTIVE_PEBS_GPR_INFO_rsp(x) ((x)->rsp) +#define ADAPTIVE_PEBS_GPR_INFO_rbp(x) ((x)->rbp) +#define ADAPTIVE_PEBS_GPR_INFO_rsi(x) ((x)->rsi) +#define ADAPTIVE_PEBS_GPR_INFO_rdi(x) ((x)->rdi) +#define ADAPTIVE_PEBS_GPR_INFO_r8(x) ((x)->r8) +#define ADAPTIVE_PEBS_GPR_INFO_r9(x) ((x)->r9) +#define ADAPTIVE_PEBS_GPR_INFO_r10(x) ((x)->r10) +#define ADAPTIVE_PEBS_GPR_INFO_r11(x) ((x)->r11) +#define ADAPTIVE_PEBS_GPR_INFO_r12(x) ((x)->r12) +#define ADAPTIVE_PEBS_GPR_INFO_r13(x) ((x)->r13) +#define ADAPTIVE_PEBS_GPR_INFO_r14(x) ((x)->r14) +#define ADAPTIVE_PEBS_GPR_INFO_r15(x) ((x)->r15) + +typedef struct ADAPTIVE_PEBS_XMM_INFO_NODE_S ADAPTIVE_PEBS_XMM_INFO_NODE; +typedef ADAPTIVE_PEBS_XMM_INFO_NODE *ADAPTIVE_PEBS_XMM_INFO; + +struct ADAPTIVE_PEBS_XMM_INFO_NODE_S { + U64 xmm0_l; // Offset 0xD0 + U64 xmm0_h; // Offset 0xD8 + U64 xmm1_l; // Offset 0xE0 + U64 xmm1_h; // Offset 0xE8 + U64 xmm2_l; // Offset 0xF0 + U64 xmm2_h; // Offset 0xF8 + U64 xmm3_l; // Offset 0x100 + U64 xmm3_h; // Offset 0x108 + U64 xmm4_l; // Offset 0x110 + U64 xmm4_h; // Offset 0x118 + U64 xmm5_l; // Offset 0x120 + U64 xmm5_h; // Offset 0x128 + U64 xmm6_l; // Offset 0x130 + U64 xmm6_h; // Offset 0x138 + U64 xmm7_l; // Offset 0x140 + U64 xmm7_h; // Offset 0x148 + U64 xmm8_l; // Offset 0x150 + U64 xmm8_h; // Offset 0x158 + U64 xmm9_l; // Offset 0x160 + U64 xmm9_h; // Offset 0x168 + U64 xmm10_l; // Offset 0x170 + U64 xmm10_h; // Offset 0x178 + U64 xmm11_l; // Offset 0x180 + 
U64 xmm11_h; // Offset 0x188 + U64 xmm12_l; // Offset 0x190 + U64 xmm12_h; // Offset 0x198 + U64 xmm13_l; // Offset 0x1A0 + U64 xmm13_h; // Offset 0x1A8 + U64 xmm14_l; // Offset 0x1B0 + U64 xmm14_h; // Offset 0x1B8 + U64 xmm15_l; // Offset 0x1C0 + U64 xmm15_h; // Offset 0x1C8 +}; + +#define ADAPTIVE_PEBS_XMM_INFO_xmm0_l(x) ((x)->xmm0_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm0_h(x) ((x)->xmm0_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm1_l(x) ((x)->xmm1_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm1_h(x) ((x)->xmm1_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm2_l(x) ((x)->xmm2_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm2_h(x) ((x)->xmm2_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm3_l(x) ((x)->xmm3_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm3_h(x) ((x)->xmm3_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm4_l(x) ((x)->xmm4_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm4_h(x) ((x)->xmm4_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm5_l(x) ((x)->xmm5_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm5_h(x) ((x)->xmm5_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm6_l(x) ((x)->xmm6_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm6_h(x) ((x)->xmm6_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm7_l(x) ((x)->xmm7_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm7_h(x) ((x)->xmm7_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm8_l(x) ((x)->xmm8_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm8_h(x) ((x)->xmm8_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm9_l(x) ((x)->xmm9_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm9_h(x) ((x)->xmm9_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm10_l(x) ((x)->xmm10_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm10_h(x) ((x)->xmm10_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm11_l(x) ((x)->xmm11_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm11_h(x) ((x)->xmm11_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm12_l(x) ((x)->xmm12_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm12_h(x) ((x)->xmm12_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm13_l(x) ((x)->xmm13_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm13_h(x) ((x)->xmm13_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm14_l(x) ((x)->xmm14_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm14_h(x) ((x)->xmm14_h) +#define ADAPTIVE_PEBS_XMM_INFO_xmm15_l(x) ((x)->xmm15_l) +#define ADAPTIVE_PEBS_XMM_INFO_xmm15_h(x) ((x)->xmm15_h) + +typedef struct ADAPTIVE_PEBS_LBR_INFO_NODE_S ADAPTIVE_PEBS_LBR_INFO_NODE; +typedef ADAPTIVE_PEBS_LBR_INFO_NODE *ADAPTIVE_PEBS_LBR_INFO; + +struct ADAPTIVE_PEBS_LBR_INFO_NODE_S { + U64 lbr_from; // Offset 0x1D0 + U64 lbr_to; // Offset 0x1D8 + U64 lbr_info; // Offset 0x1E0 +}; + +#define ADAPTIVE_PEBS_LBR_INFO_lbr_from(x) ((x)->lbr_from) +#define ADAPTIVE_PEBS_LBR_INFO_lbr_to(x) ((x)->lbr_to) +#define ADAPTIVE_PEBS_LBR_INFO_lbr_info(x) ((x)->lbr_info) + +typedef struct LATENCY_INFO_NODE_S LATENCY_INFO_NODE; +typedef LATENCY_INFO_NODE *LATENCY_INFO; + +struct LATENCY_INFO_NODE_S { + U64 linear_address; + U64 data_source; + U64 latency; + U64 stack_pointer; + U64 phys_addr; +}; + +#define LATENCY_INFO_linear_address(x) ((x)->linear_address) +#define LATENCY_INFO_data_source(x) ((x)->data_source) +#define LATENCY_INFO_latency(x) ((x)->latency) +#define LATENCY_INFO_stack_pointer(x) ((x)->stack_pointer) +#define LATENCY_INFO_phys_addr(x) ((x)->phys_addr) + +typedef struct DTS_BUFFER_EXT_NODE_S DTS_BUFFER_EXT_NODE; +typedef DTS_BUFFER_EXT_NODE *DTS_BUFFER_EXT; +struct DTS_BUFFER_EXT_NODE_S { + U64 base; // Offset 0x00 + U64 index; // Offset 0x08 + U64 max; // Offset 0x10 + U64 threshold; // Offset 0x18 + U64 pebs_base; // Offset 0x20 + U64 pebs_index; // Offset 0x28 + U64 pebs_max; // Offset 0x30 + U64 pebs_threshold; // Offset 0x38 + U64 counter_reset0; // Offset 0x40 + U64 counter_reset1; // Offset 0x48 + U64 counter_reset2; // 
Offset 0x50 + U64 counter_reset3; // Offset 0x58 +}; + +#define DTS_BUFFER_EXT_base(x) ((x)->base) +#define DTS_BUFFER_EXT_index(x) ((x)->index) +#define DTS_BUFFER_EXT_max(x) ((x)->max) +#define DTS_BUFFER_EXT_threshold(x) ((x)->threshold) +#define DTS_BUFFER_EXT_pebs_base(x) ((x)->pebs_base) +#define DTS_BUFFER_EXT_pebs_index(x) ((x)->pebs_index) +#define DTS_BUFFER_EXT_pebs_max(x) ((x)->pebs_max) +#define DTS_BUFFER_EXT_pebs_threshold(x) ((x)->pebs_threshold) +#define DTS_BUFFER_EXT_counter_reset0(x) ((x)->counter_reset0) +#define DTS_BUFFER_EXT_counter_reset1(x) ((x)->counter_reset1) +#define DTS_BUFFER_EXT_counter_reset2(x) ((x)->counter_reset2) +#define DTS_BUFFER_EXT_counter_reset3(x) ((x)->counter_reset3) + +typedef struct DTS_BUFFER_EXT1_NODE_S DTS_BUFFER_EXT1_NODE; +typedef DTS_BUFFER_EXT1_NODE *DTS_BUFFER_EXT1; +struct DTS_BUFFER_EXT1_NODE_S { + DTS_BUFFER_EXT_NODE dts_buffer; + U64 counter_reset4; // Offset 0x60 + U64 counter_reset5; // Offset 0x68 + U64 counter_reset6; // Offset 0x70 + U64 counter_reset7; // Offset 0x78 + U64 fixed_counter_reset0; // Offset 0x80 + U64 fixed_counter_reset1; // Offset 0x88 + U64 fixed_counter_reset2; // Offset 0x90 + U64 fixed_counter_reset3; // Offset 0x98 +}; + +#define DTS_BUFFER_EXT1_base(x) ((x)->dts_buffer.base) +#define DTS_BUFFER_EXT1_index(x) ((x)->dts_buffer.index) +#define DTS_BUFFER_EXT1_max(x) ((x)->dts_buffer.max) +#define DTS_BUFFER_EXT1_threshold(x) ((x)->dts_buffer.threshold) +#define DTS_BUFFER_EXT1_pebs_base(x) ((x)->dts_buffer.pebs_base) +#define DTS_BUFFER_EXT1_pebs_index(x) ((x)->dts_buffer.pebs_index) +#define DTS_BUFFER_EXT1_pebs_max(x) ((x)->dts_buffer.pebs_max) +#define DTS_BUFFER_EXT1_pebs_threshold(x) ((x)->dts_buffer.pebs_threshold) +#define DTS_BUFFER_EXT1_counter_reset0(x) ((x)->dts_buffer.counter_reset0) +#define DTS_BUFFER_EXT1_counter_reset1(x) ((x)->dts_buffer.counter_reset1) +#define DTS_BUFFER_EXT1_counter_reset2(x) ((x)->dts_buffer.counter_reset2) +#define DTS_BUFFER_EXT1_counter_reset3(x) ((x)->dts_buffer.counter_reset3) +#define DTS_BUFFER_EXT1_counter_reset4(x) ((x)->counter_reset4) +#define DTS_BUFFER_EXT1_counter_reset5(x) ((x)->counter_reset5) +#define DTS_BUFFER_EXT1_counter_reset6(x) ((x)->counter_reset6) +#define DTS_BUFFER_EXT1_counter_reset7(x) ((x)->counter_reset7) +#define DTS_BUFFER_EXT1_fixed_counter_reset0(x) ((x)->fixed_counter_reset0) +#define DTS_BUFFER_EXT1_fixed_counter_reset1(x) ((x)->fixed_counter_reset1) +#define DTS_BUFFER_EXT1_fixed_counter_reset2(x) ((x)->fixed_counter_reset2) +#define DTS_BUFFER_EXT1_fixed_counter_reset3(x) ((x)->fixed_counter_reset3) + + +#define EXT1_FIXED_OFFSET 8 +#define EXT2_FIXED_OFFSET 32 + +#define NUM_EXT_COUNTER_RESET 4 +#define NUM_EXT1_COUNTER_RESET 4 +#define NUM_EXT1_FIXED_COUNTER_RESET 4 +#define NUM_EXT2_COUNTER_RESET 28 // 32 total, minus the 4 counter_reset fields reused from DTS_BUFFER_EXT +#define NUM_EXT2_FIXED_COUNTER_RESET 16 + +typedef struct DTS_BUFFER_EXT2_NODE_S DTS_BUFFER_EXT2_NODE; +typedef DTS_BUFFER_EXT2_NODE *DTS_BUFFER_EXT2; +struct DTS_BUFFER_EXT2_NODE_S { + DTS_BUFFER_EXT_NODE dts_buffer; + U64 counter_reset[NUM_EXT2_COUNTER_RESET]; + U64 fixed_counter_reset[NUM_EXT2_FIXED_COUNTER_RESET]; +}; + +#define DTS_BUFFER_EXT2_base(x) ((x)->dts_buffer.base) +#define DTS_BUFFER_EXT2_index(x) ((x)->dts_buffer.index) +#define DTS_BUFFER_EXT2_max(x) ((x)->dts_buffer.max) +#define DTS_BUFFER_EXT2_threshold(x) ((x)->dts_buffer.threshold) +#define DTS_BUFFER_EXT2_pebs_base(x) ((x)->dts_buffer.pebs_base) +#define DTS_BUFFER_EXT2_pebs_index(x) ((x)->dts_buffer.pebs_index) +#define 
DTS_BUFFER_EXT2_pebs_max(x) ((x)->dts_buffer.pebs_max) +#define DTS_BUFFER_EXT2_pebs_threshold(x) ((x)->dts_buffer.pebs_threshold) +#define DTS_BUFFER_EXT2_counter_reset0(x) ((x)->dts_buffer.counter_reset0) +#define DTS_BUFFER_EXT2_counter_reset1(x) ((x)->dts_buffer.counter_reset1) +#define DTS_BUFFER_EXT2_counter_reset2(x) ((x)->dts_buffer.counter_reset2) +#define DTS_BUFFER_EXT2_counter_reset3(x) ((x)->dts_buffer.counter_reset3) +#define DTS_BUFFER_EXT2_counter_reset(x) ((x)->counter_reset) +#define DTS_BUFFER_EXT2_fixed_counter_reset(x) ((x)->fixed_counter_reset) + + +extern OS_STATUS PEBS_Initialize(U32 dev_idx); + +extern OS_STATUS PEBS_Allocate(VOID); + +extern VOID PEBS_Destroy(VOID); + +extern VOID PEBS_Flush_Buffer(VOID *); + +extern VOID PEBS_Reset_Counter(S32 this_cpu, U32 index, U32 op_type); + +extern VOID PEBS_Reset_Index(S32 this_cpu); + +extern VOID PEBS_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index); + +extern VOID PEBS_Modify_TSC(void *sample, U32 rec_index); + +extern U32 PEBS_Get_Num_Records_Filled(VOID); + +extern U64 PEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index); + +extern U64 APEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index); + +extern U64 PEBS_Overflowed(S32 this_cpu, U64 overflow_status, U32 rec_index); + +/* + * Dispatch table for virtualized functions. + * Used to enable common functionality for different + * processor microarchitectures + */ +typedef struct PEBS_DISPATCH_NODE_S PEBS_DISPATCH_NODE; +typedef PEBS_DISPATCH_NODE *PEBS_DISPATCH; +struct PEBS_DISPATCH_NODE_S { + VOID (*initialize_threshold)(DTS_BUFFER_EXT); + U64 (*overflow)(S32, U64, U32); + VOID (*modify_ip)(void *, DRV_BOOL, U32); + VOID (*modify_tsc)(void *, U32); + U32 (*get_num_records_filled)(VOID); +}; + +typedef struct PEBS_INFO_NODE_S PEBS_INFO_NODE; +typedef PEBS_INFO_NODE *PEBS_INFO; +struct PEBS_INFO_NODE_S { + PEBS_DISPATCH pebs_dispatch; + U32 pebs_record_size; + U16 apebs_basic_offset; + U16 apebs_mem_offset; + U16 apebs_gpr_offset; + U16 apebs_xmm_offset; + U16 apebs_lbr_offset; +}; + +#define APEBS_RECORD_SIZE_MASK 0xFFFF000000000000ULL //[63:48] +#define APEBS_RECORD_FORMAT_MASK 0xFFFFFFFFFFFF //[47:0] +#define APEBS_MEM_RECORD_FORMAT_MASK 0x1ULL +#define APEBS_GPR_RECORD_FORMAT_MASK 0x2ULL +#define APEBS_XMM_RECORD_FORMAT_MASK 0x4ULL +#define APEBS_LBR_RECORD_FORMAT_MASK 0x8ULL +#endif + diff --git a/drivers/platform/x86/sepdk/inc/perfver4.h b/drivers/platform/x86/sepdk/inc/perfver4.h new file mode 100644 index 00000000000000..f6377fcbca41e6 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/perfver4.h @@ -0,0 +1,59 @@ +/**** + Copyright (C) 2013 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. 
Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _PERFVER4_H_ +#define _PERFVER4_H_ + +#include "msrdefs.h" + +extern DISPATCH_NODE perfver4_dispatch; +extern DISPATCH_NODE perfver4_dispatch_htoff_mode; +extern DISPATCH_NODE perfver4_dispatch_nonht_mode; + +#define PERFVER4_UNC_BLBYPASS_BITMASK 0x00000001 +#define PERFVER4_UNC_DISABLE_BL_BYPASS_MSR 0x39C + +#if defined(DRV_IA32) +#define PERFVER4_LBR_DATA_BITS 32 +#else +#define PERFVER4_LBR_DATA_BITS 57 +#endif + +#define PERFVER4_LBR_BITMASK ((1ULL << PERFVER4_LBR_DATA_BITS) - 1) + +#define PERFVER4_FROZEN_BIT_MASK 0xc00000000000000ULL +#define PERFVER4_OVERFLOW_BIT_MASK_HT_ON 0x600000070000000FULL +#define PERFVER4_OVERFLOW_BIT_MASK_HT_OFF 0x60000007000000FFULL +#define PERFVER4_OVERFLOW_BIT_MASK_NON_HT 0x6000000F000000FFULL + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pmi.h b/drivers/platform/x86/sepdk/inc/pmi.h new file mode 100644 index 00000000000000..09aea9469a31d0 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmi.h @@ -0,0 +1,67 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
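+****/ + +/* + * The x86 pt_regs field names changed in kernel 2.6.25 (xcs/eip/eflags on + * IA32, and rip/eflags on EM64T, became cs/ip/flags), so the REGS_* wrappers + * below select the right member name at compile time. Illustrative use on a + * DRV_EM64T build (sketch only, not driver code): + * + * U64 ip = (U64)REGS_rip(regs); // interrupted instruction pointer + * U64 flags = (U64)REGS_eflags(regs); // saved flags register + */ + +/****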
+****/ + + + + + + + + + +#ifndef _PMI_H_ +#define _PMI_H_ + +#include "lwpmudrv_defines.h" +#include <linux/version.h> +#include <linux/ptrace.h> + +#if defined(DRV_IA32) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) +#define REGS_xcs(regs) (regs->xcs) +#define REGS_eip(regs) (regs->eip) +#define REGS_eflags(regs) (regs->eflags) +#else +#define REGS_xcs(regs) (regs->cs) +#define REGS_eip(regs) (regs->ip) +#define REGS_eflags(regs) (regs->flags) +#endif +#endif + +#if defined(DRV_EM64T) +#define REGS_cs(regs) (regs->cs) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) +#define REGS_rip(regs) (regs->rip) +#define REGS_eflags(regs) (regs->eflags) +#else +#define REGS_rip(regs) (regs->ip) +#define REGS_eflags(regs) (regs->flags) +#endif +#endif + +asmlinkage VOID PMI_Interrupt_Handler(struct pt_regs *regs); + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pmu_info.h b/drivers/platform/x86/sepdk/inc/pmu_info.h new file mode 100644 index 00000000000000..62f48aa1a9be4b --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmu_info.h @@ -0,0 +1,223 @@ +/**** + Copyright (C) 2019 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
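+****/ + +/* + * The tables below map a CPU signature (family, model, and the inclusive + * stepping_from..stepping_to range) to the MSR, PCI, and MMIO register lists + * that may be accessed on that part; an all-zero row ends each table. A + * minimal lookup sketch (illustrative only; "cpu" is a hypothetical struct + * holding the detected family/model/stepping): + * + * const PMU_INFO_NODE *match = NULL; + * U32 i; + * for (i = 0; pmu_info_list[i].family != 0; i++) { + * if (pmu_info_list[i].family == cpu.family && + * pmu_info_list[i].model == cpu.model && + * cpu.stepping >= pmu_info_list[i].stepping_from && + * cpu.stepping <= pmu_info_list[i].stepping_to) { + * match = &pmu_info_list[i]; + * break; + * } + * } + */ + +/****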
+****/ + + + + + + + + + +#ifndef _PMU_INFO_H_INC_ +#define _PMU_INFO_H_INC_ + +U32 drv_type = DRV_TYPE_PUBLIC; +S8 *drv_type_str = "PUBLIC"; + +#define MAX_PERFMON_VERSION 5 + +static const PMU_INFO_NODE architectural_pmu_info_list[] = { + // No Perfmon version + {0x6, 0x0, 0x0, 0xF, NULL, NULL, NULL}, + + // Perfmon version 1 + { 0x6, 0x0, 0x0, 0xF, NULL, NULL, NULL}, + + // Perfmon version 2 + { 0x6, 0x0, 0x0, 0xF, NULL, NULL, NULL}, + + // Perfmon version 3 + { 0x6, 0x0, 0x0, 0xF, NULL, NULL, NULL}, + + // Perfmon version 4 + { 0x6, 0x0, 0x0, 0xF, perfmon_v4_msr_list, NULL, NULL}, + + // Perfmon version 5 + { 0x6, 0x0, 0x0, 0xF, perfmon_v5_msr_list, NULL, NULL}, + + // Last + { 0x0, 0x0, 0x0, 0x0, NULL, NULL, NULL} +}; + +static const PMU_INFO_NODE pmu_info_list[] = { + // CTI number = 108 + {0x6, 0x4F, 0x0, 0xF, bdx_msr_list, bdx_pci_list, skx_mmio_list }, + + // CTI number = 103 + { 0x6, 0x5E, 0x0, 0xF, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 107 + { 0x6, 0x4E, 0x0, 0xF, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 117 + { 0x6, 0x9E, 0x0, 0x9, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 116 + { 0x6, 0x8E, 0x0, 0xB, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 111 + { 0x6, 0x55, 0x0, 0x4, skx_msr_list, skx_pci_list, skx_mmio_list }, + + // CTI number = 122 + { 0x6, 0x9E, 0xA, 0xF, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 127 + { 0x6, 0x55, 0x5, 0x9, clx_msr_list, skx_pci_list, server_common_mmio_list}, + + // CTI number = 128 + { 0x6, 0x86, 0x0, 0xF, snr_msr_list, snr_pci_list, snr_mmio_info_list }, + + // CTI number = 123 + { 0x6, 0x7E, 0x0, 0xF, icl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 118 + { 0x6, 0x7A, 0x0, 0xF, gml_msr_list, NULL, NULL }, + + // CTI number = 113 + { 0x6, 0x6E, 0x0, 0xF, slm_msr_list, NULL, NULL }, + + // CTI number = 112 + { 0x6, 0x5F, 0x0, 0xF, dnv_msr_list, NULL, bxt_dvt_mmio_info_list }, + + // CTI number = 106 + { 0x6, 0x5C, 0x0, 0xF, bxt_msr_list, NULL, bxt_dvt_mmio_info_list }, + + // CTI number = 104 + { 0x6, 0x56, 0x0, 0xF, bdx_msr_list, bdw_de_pci_list, client_common_mmio_list}, + + // CTI number = 105 + { 0x6, 0x47, 0x0, 0xF, hsw_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 102 + { 0x6, 0x57, 0x0, 0xF, knl_msr_list, knl_pci_list, server_common_mmio_list}, + + // CTI number = 101 + { 0x6, 0x5A, 0x0, 0xF, and_msr_list, NULL, NULL }, + + // CTI number = 100 + { 0x6, 0x4C, 0x0, 0xF, slm_msr_list, NULL, NULL }, + + // CTI number = 98 + { 0x6, 0x4A, 0x0, 0xF, slm_msr_list, NULL, NULL }, + + // CTI number = 99 + { 0x6, 0x3F, 0x0, 0xF, hsx_msr_list, hsx_pci_list, server_common_mmio_list}, + + // CTI number = 97 + { 0x6, 0x3D, 0x0, 0xF, hsw_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 96 + { 0x6, 0x4D, 0x0, 0xF, avt_msr_list, NULL, NULL }, + + // CTI number = 93 + { 0x6, 0x46, 0x0, 0xF, hsw_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 94 + { 0x6, 0x45, 0x0, 0xF, hsw_ult_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 90 + { 0x6, 0x37, 0x0, 0xF, slm_msr_list, NULL, NULL }, + + // CTI number = 88 + { 0x6, 0x3C, 0x0, 0xF, hsw_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 133 + { 0x6, 0xA6, 0x0, 0xF, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 150 + { 0x6, 0x8E, 0xC, 0xF, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 129 + { 0x6, 0x8C, 0x0, 0xF, tgl_msr_list, NULL, tgl_mmio_info_list }, + + // CTI number = 
142 + { 0x6, 0x8D, 0x0, 0xF, tgl_msr_list, NULL, tgl_h_mmio_info_list }, + + // CTI number = 134 + { 0x6, 0x96, 0x0, 0xF, ehl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 135 + { 0x6, 0x9C, 0x0, 0xF, ehl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 140 + { 0x6, 0xA7, 0x0, 0xF, icl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 141 + { 0x6, 0xA8, 0x0, 0xF, icl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 125 + { 0x6, 0x6A, 0x0, 0xF, icx_r_msr_list, icx_r_pci_list, icx_mmio_info_list }, + + // CTI number = 139 + { 0x6, 0x58, 0x0, 0xF, skx_msr_list, skx_pci_list, NULL }, + + // CTI number = 137 + { 0x6, 0x8A, 0x0, 0xF, lkf_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 132 + { 0x6, 0x55, 0xA, 0xF, clx_msr_list, cpx_pci_list, server_common_mmio_list}, + + // CTI number = 136 + { 0x6, 0xA5, 0x0, 0xF, skl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 144 + { 0x6, 0x6C, 0x0, 0xF, icx_r_msr_list, icx_r_pci_list, icx_mmio_info_list }, + + // CTI number = 120 + { 0x6, 0x85, 0x0, 0xF, knl_msr_list, knl_pci_list, server_common_mmio_list}, + + // CTI number = 119 + { 0x6, 0x66, 0x0, 0xF, plat1_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 138 + { 0x6, 0x75, 0x6, 0xF, slm_msr_list, NULL, NULL }, + + // CTI number = 130 + { 0x6, 0x9D, 0x0, 0xF, icl_msr_list, NULL, client_common_mmio_list}, + + // CTI number = 131 + { 0x6, 0x8F, 0x0, 0xF, plat4_msr_list, plat4_pci_list, plat4_mmio_info_list }, + + // CTI number = 145 + { 0x6, 0x97, 0x0, 0xF, plat5_msr_list, NULL, plat5_mmio_info_list }, + + // CTI number = 148 + { 0x6, 0x9A, 0x0, 0xF, plat5_msr_list, NULL, plat5_mmio_info_list }, + + // CTI number = 151 + { 0x6, 0xB7, 0x0, 0xF, plat5_msr_list, NULL, plat5_mmio_info_list }, + + // CTI number = 152 + { 0x6, 0xBA, 0x0, 0xF, plat5_msr_list, NULL, plat5_mmio_info_list }, + + // CTI number = 153 + { 0x6, 0xBF, 0x0, 0xF, plat5_msr_list, NULL, plat5_mmio_info_list }, + + // Last + { 0x0, 0x0, 0x0, 0x0, NULL, NULL, NULL } +}; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pmu_info_mmio.h b/drivers/platform/x86/sepdk/inc/pmu_info_mmio.h new file mode 100644 index 00000000000000..ac24a3b922b3eb --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmu_info_mmio.h @@ -0,0 +1,397 @@ +/**** + Copyright (C) 2019 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
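+****/ + +/* + * Each mmio_*_offset array below is a zero-terminated list of register + * offsets for one uncore unit type, and each PMU_MMIO_UNIT_INFO_NODE pairs + * such a list with the PCI config register(s) that hold the unit's MMIO + * base address (one BAR for MMIO_SINGLE_BAR_TYPE, primary plus secondary + * for MMIO_DUAL_BAR_TYPE). The decode presumably follows the shift/mask + * fields, along the lines of (assumption, not taken from this driver): + * + * base = (bar_value & bar.mask) << bar.shift; + * + * with bar_value read from config space at bar.u.s.{bus,dev,func,offset}. + */ + +/****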
+****/ + + + + + + + + + +#ifndef _PMU_INFO_MMIO_H_INC_ +#define _PMU_INFO_MMIO_H_INC_ + +static U32 mmio_client_pmu1_common_offset[] = { + 0x5040, 0x5044, 0x5048, 0x5050, 0x5054, 0x5058, 0x0 +}; + +static U32 mmio_pmu2_offset[] = { + 0x250, 0x254, 0x258, 0x25c, 0x260, 0x264, 0x268, 0x0 +}; + +static U32 mmio_pmu3_common_offset[] = { + 0x3008, 0x3010, 0x3018, 0x3020, 0x3028, 0x1008, 0x1010, 0x1018, 0x2008, 0x0 +}; + +static U32 mmio_pmu4_offset[] = { + 0x91B8, 0x91B9, 0x91BA, 0x91BB, 0x91bc, 0x91c0, 0x91c4, 0x0 +}; + +static U32 mmio_pmu5_common_offset[] = { + 0x500, 0x508, 0x400, 0x408, 0x418, 0x420, 0x428, 0x430, 0x438, 0x440, 0x448, + 0x450, 0x458, 0x460, 0x468, 0x470, 0x478, 0x480, 0x488, 0x490, 0x498, 0x4a0, + 0x0 +}; + +static U32 mmio_bxt_dvt_pmu1_offset[] = { 0x6868, 0x686C, 0x0 }; + +static U32 mmio_pmu1_offset[] = { + 0x22800, 0x26800, 0x2A800, 0x22840, 0x22844, 0x22848, 0x2284c, 0x22850, + 0x26840, 0x26844, 0x26848, 0x2684c, 0x26850, 0x2A840, 0x2A844, 0x2A848, + 0x2A84c, 0x2A850, 0x2285c, 0x2685c, 0x2A85c, 0x22808, 0x22810, 0x22818, + 0x22820, 0x22828, 0x26808, 0x26810, 0x26818, 0x26820, 0x26828, 0x2A808, + 0x2A810, 0x2A818, 0x2A820, 0x2A828, 0x22854, 0x26854, 0x2A854, 0x22838, + 0x26838, 0x2A838, 0x2290, 0x2298, 0x22A0, 0x22A8, 0x22B0, 0x4000, + 0x8, 0x40, 0x4008, 0x8000, 0x8008, 0x4040, 0x4044, 0x8040, + 0x8044, 0x54, 0x4054, 0x8054, 0x4038, 0x38, 0x8038, 0xF8, + 0x44, 0x48, 0x10, 0x4010, 0x8010, 0x4C, 0x4048, 0x8048, + 0x404C, 0x804C, 0x18, 0x20, 0x4018, 0x4020, 0x8018, 0x8020, + 0x0 +}; + +static U32 mmio_pmu6_offset[] = { + 0x4000, 0x9000, 0xD000, 0x12000, 0x16000, 0x1B000, 0x1F000, 0x24000, + 0x28000, 0x2D000, 0x31000, 0x36000, 0x3A000, 0x3F000, 0x43000, 0x48000, + 0x4C000, 0x51000, 0x55000, 0x5A000, 0x5E000, 0x63000, 0x67000, 0x6C000, + 0x70000, 0x75000, 0x79000, 0x7E000, 0x82000, 0x87000, 0x8B000, 0x40, + 0x4040, 0x9040, 0xD040, 0x12040, 0x16040, 0x1B040, 0x1F040, 0x24040, + 0x28040, 0x2D040, 0x31040, 0x36040, 0x3A040, 0x3F040, 0x43040, 0x48040, + 0x4C040, 0x51040, 0x55040, 0x5A040, 0x5E040, 0x63040, 0x67040, 0x6C040, + 0x70040, 0x75040, 0x79040, 0x7E040, 0x82040, 0x87040, 0x8B040, 0x44, + 0x4044, 0x9044, 0xD044, 0x12044, 0x16044, 0x1B044, 0x1F044, 0x24044, + 0x28044, 0x2D044, 0x31044, 0x36044, 0x3A044, 0x3F044, 0x43044, 0x48044, + 0x4C044, 0x51044, 0x55044, 0x5A044, 0x5E044, 0x63044, 0x67044, 0x6C044, + 0x70044, 0x75044, 0x79044, 0x7E044, 0x82044, 0x87044, 0x8B044, 0x48, + 0x4048, 0x9048, 0xD048, 0x12048, 0x16048, 0x1B048, 0x1F048, 0x24048, + 0x28048, 0x2D048, 0x31048, 0x36048, 0x3A048, 0x3F048, 0x43048, 0x48048, + 0x4C048, 0x51048, 0x55048, 0x5A048, 0x5E048, 0x63048, 0x67048, 0x6C048, + 0x70048, 0x75048, 0x79048, 0x7E048, 0x82048, 0x87048, 0x8B048, 0x4C, + 0x404C, 0x904C, 0xD04C, 0x1204C, 0x1604C, 0x1B04C, 0x1F04C, 0x2404C, + 0x2804C, 0x2D04C, 0x3104C, 0x3604C, 0x3A04C, 0x3F04C, 0x4304C, 0x4804C, + 0x4C04C, 0x5104C, 0x5504C, 0x5A04C, 0x5E04C, 0x6304C, 0x6704C, 0x6C04C, + 0x7004C, 0x7504C, 0x7904C, 0x7E04C, 0x8204C, 0x8704C, 0x8B04C, 0x8, + 0x4008, 0x9008, 0xD008, 0x12008, 0x16008, 0x1B008, 0x1F008, 0x24008, + 0x28008, 0x2D008, 0x31008, 0x36008, 0x3A008, 0x3F008, 0x43008, 0x48008, + 0x4C008, 0x51008, 0x55008, 0x5A008, 0x5E008, 0x63008, 0x67008, 0x6C008, + 0x70008, 0x75008, 0x79008, 0x7E008, 0x82008, 0x87008, 0x8B008, 0x10, + 0x4010, 0x9010, 0xD010, 0x12010, 0x16010, 0x1B010, 0x1F010, 0x24010, + 0x28010, 0x2D010, 0x31010, 0x36010, 0x3A010, 0x3F010, 0x43010, 0x48010, + 0x4C010, 0x51010, 0x55010, 0x5A010, 0x5E010, 0x63010, 0x67010, 0x6C010, + 0x70010, 0x75010, 0x79010, 
0x7E010, 0x82010, 0x87010, 0x8B010, 0x18, + 0x4018, 0x9018, 0xD018, 0x12018, 0x16018, 0x1B018, 0x1F018, 0x24018, + 0x28018, 0x2D018, 0x31018, 0x36018, 0x3A018, 0x3F018, 0x43018, 0x48018, + 0x4C018, 0x51018, 0x55018, 0x5A018, 0x5E018, 0x63018, 0x67018, 0x6C018, + 0x70018, 0x75018, 0x79018, 0x7E018, 0x82018, 0x87018, 0x8B018, 0x20, + 0x4020, 0x9020, 0xD020, 0x12020, 0x16020, 0x1B020, 0x1F020, 0x24020, + 0x28020, 0x2D020, 0x31020, 0x36020, 0x3A020, 0x3F020, 0x43020, 0x48020, + 0x4C020, 0x51020, 0x55020, 0x5A020, 0x5E020, 0x63020, 0x67020, 0x6C020, + 0x70020, 0x75020, 0x79020, 0x7E020, 0x82020, 0x87020, 0x8B020, 0x0 +}; + +static U32 mmio_tgl_pmu1_offset[] = { + 0x5040, 0x5058, 0x50A0, 0x50A8, 0x50B0, 0x40D8, 0x40E0, + 0x40E8, 0x40F0, 0x44D8, 0x44E0, 0x44E8, 0x44F0, 0x4300, + 0x4308, 0x4310, 0x4318, 0x4700, 0x4708, 0x4710, 0x4718, + 0x15040, 0x15058, 0x150A0, 0x150A8, 0x150B0, 0x140D8, 0x140E0, + 0x140E8, 0x140F0, 0x144D8, 0x144E0, 0x144E8, 0x144F0, 0x14300, + 0x14308, 0x14310, 0x14700, 0x14708, 0x14710, 0x14718, 0x0 +}; + +static U32 mmio_tgl_h_pmu1_offset[] = { + 0xD840, 0xD858, 0xD8A0, 0xD8A8, 0xD8B0, 0xE0D8, 0xE0E0, 0xE0E8, + 0xE0F0, 0xE8D8, 0xE8E0, 0xE8E8, 0xE8F0, 0xE500, 0xE508, 0xE510, + 0xE518, 0xED00, 0xED08, 0xED10, 0xED18, 0x1D840, 0x1D858, 0x1D8A0, + 0x1D8A8, 0x1D8B0, 0x1E0D8, 0x1E0E0, 0x1E0E8, 0x1E0F0, 0x1E8D8, 0x1E8E0, + 0x1E8E8, 0x1E8F0, 0x1E500, 0x1E508, 0x1E510, 0x1E518, 0x1ED00, 0x1ED08, + 0x1ED10, 0x1ED18, 0x0 +}; + +static U32 mmio_plat5_pmu1_offset[] = { + 0x1C0, 0x1C4, 0x101C0, 0x101C4, 0x1D0, 0x1D4, 0x1D8, 0x1DC, + 0x1E0, 0x101D0, 0x101D4, 0x101D8, 0x101DC, 0x101E0, 0xD9E8, 0xD9F0, + 0xD9F8, 0xDA00, 0xDA08, 0x1D9E8, 0x1D9F0, 0x1D9F8, 0x1DA00, 0x1DA08, + 0xD840, 0xD858, 0xD8A0, 0x1D840, 0x1D858, 0x1D8A0, 0 +}; + +static U32 mmio_pmu7_offset[] = { + 0x80, 0xC0, 0x140, 0x180, 0x1C0, 0x200, 0x240, 0x280, 0x2C0, + 0x300, 0x340, 0x380, 0x3C0, 0x1000, 0x1080, 0x10C0, 0x1140, 0x1180, + 0x11C0, 0x1200, 0x1240, 0x1280, 0x12C0, 0x1300, 0x1340, 0x1380, 0x13C0, + 0x2000, 0x2080, 0x20C0, 0x2140, 0x2180, 0x21C0, 0x2200, 0x2240, 0x2280, + 0x22C0, 0x2300, 0x2340, 0x2380, 0x23C0, 0x3000, 0x3080, 0x30C0, 0x3140, + 0x3180, 0x31C0, 0x3200, 0x3240, 0x3280, 0x32C0, 0x3300, 0x3340, 0x3380, + 0x33C0, 0x4000, 0x4080, 0x40C0, 0x4140, 0x4180, 0x41C0, 0x4200, 0x4240, + 0x4280, 0x42C0, 0x4300, 0x4340, 0x4380, 0x43C0, 0x5000, 0x5080, 0x50C0, + 0x5140, 0x5180, 0x51C0, 0x5200, 0x5240, 0x5280, 0x52C0, 0x5300, 0x5340, + 0x5380, 0x53C0, 0x6000, 0x6080, 0x60C0, 0x6140, 0x6180, 0x61C0, 0x6200, + 0x6240, 0x6280, 0x62C0, 0x6300, 0x6340, 0x6380, 0x63C0, 0x7000, 0x7080, + 0x70C0, 0x7140, 0x7180, 0x71C0, 0x7200, 0x7240, 0x7280, 0x72C0, 0x7300, + 0x7340, 0x7380, 0x73C0, 0x0 +}; + +static U32 mmio_pmu8_offset[] = { + 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0, 0x200, 0x240, 0x280, + 0x1000, 0x1080, 0x10C0, 0x1100, 0x1140, 0x1180, 0x11C0, 0x1200, 0x1240, + 0x1280, 0x2000, 0x2080, 0x20C0, 0x2100, 0x2140, 0x2180, 0x21C0, 0x2200, + 0x2240, 0x2280, 0x3000, 0x3080, 0x30C0, 0x3100, 0x3140, 0x3180, 0x31C0, + 0x3200, 0x3240, 0x3280, 0x4000, 0x4080, 0x40C0, 0x4100, 0x4140, 0x4180, + 0x41C0, 0x4200, 0x4240, 0x4280, 0x5000, 0x5080, 0x50C0, 0x5100, 0x5140, + 0x5180, 0x51C0, 0x5200, 0x5240, 0x5280, 0x6000, 0x6080, 0x60C0, 0x6100, + 0x6140, 0x6180, 0x61C0, 0x6200, 0x6240, 0x6280, 0x7000, 0x7080, 0x70C0, + 0x7100, 0x7140, 0x7180, 0x71C0, 0x7200, 0x7240, 0x7280, 0x0 +}; + +static PMU_MMIO_UNIT_INFO_NODE client_common_mmio_list[] = { + { + { { { 0, 0, 0, 0x48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFF8000ULL }, + { { { 0, 0, 0, 0 } }, 
0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_client_pmu1_common_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFFF000ULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu2_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu4_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE skx_mmio_list[] = { + { + { { { 0, 0, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x3FFFFFFF8000ULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu3_common_offset + }, + { + { { { 0, 0, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x3ffffffff0000ULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu5_common_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE server_common_mmio_list[] = { + { + { { { 0, 0, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x3ffffffff0000ULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu5_common_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE bxt_dvt_mmio_info_list[] = { + { + { { { 0, 0, 0, 0x48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFF8000LL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_dvt_pmu1_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFFF000 }, + { { { 0, 0, 0, 0x0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu2_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + { { { 0, 0, 0, 0x0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu4_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE icx_mmio_info_list[] = { + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xD8 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xDC } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xE0 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xE4 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE snr_mmio_info_list[] = { + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xD8 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xDC } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 0, 0x0 } }, 0, MMIO_DUAL_BAR_TYPE, 0, 0x0 }, + { { { 0, 0, 0, 0x0 } }, 0, MMIO_DUAL_BAR_TYPE, 0, 0x0 }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE tgl_mmio_info_list[] = { + { + { { { 0, 0, 0, 0x48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFF8000LL }, + { 
{ { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_tgl_pmu1_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFFF000 }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu2_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu4_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE tgl_h_mmio_info_list[] = { + { + { { { 0, 0, 0, 0x48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFF8000LL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_tgl_h_pmu1_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFFF000 }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu2_offset + }, + { + { { { 0, 2, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu4_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE plat4_mmio_info_list[] = { + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xD4 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu6_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xD8 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xDC } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xE0 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 0, 1, 0xD0 } }, 23, MMIO_DUAL_BAR_TYPE, 0, 0x1FFFFFFFULL }, + { { { 0, 0, 1, 0xE4 } }, 12, MMIO_DUAL_BAR_TYPE, 0, 0x7FFULL }, + mmio_pmu1_offset + }, + { + { { { 0, 29, 1, 0x298 } }, 26, MMIO_SINGLE_BAR_TYPE, 0, 0x3FFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_pmu7_offset + }, + { + { { { 0, 29, 1, 0x298 } }, 26, MMIO_DUAL_BAR_TYPE, 0, 0x3FFFFFFULL }, + { { { 0, 29, 1, 0x600 } }, 0, MMIO_DUAL_BAR_TYPE, 0, 0x0 }, + mmio_pmu8_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +static PMU_MMIO_UNIT_INFO_NODE plat5_mmio_info_list[] = { + { + { { { 0, 0, 0, 0x48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0007FFFFF8000LL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_plat5_pmu1_offset + }, + { + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL + } +}; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pmu_info_msr.h b/drivers/platform/x86/sepdk/inc/pmu_info_msr.h new file mode 100644 index 00000000000000..8f62e40659ee9c --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmu_info_msr.h @@ -0,0 +1,479 @@ +/**** + Copyright (C) 2019 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _PMU_INFO_MSR_H_INC_ +#define _PMU_INFO_MSR_H_INC_ + +static PMU_MSR_INFO_NODE perfmon_v4_architectural[] = { + { 0x1D9 }, { 0x3F1 }, { 0x345, 0, 0, 1 }, { 0x38D, 0, 0, 1 }, + { 0x38E }, { 0x38F }, { 0x390 }, { 0x600 }, + { 0x1A0 }, { 0x186, 0, 0x7 }, { 0x8b }, { 0x392 }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE perfmon_v5_architectural[] = { + { 0x1D9 }, { 0x3F1 }, { 0x345, 0, 0, 1 }, { 0x38D, 0, 0, 1 }, + { 0x38E }, { 0x38F }, { 0x390 }, { 0x600 }, + { 0x1A0 }, { 0x186, 0, 0x7 }, { 0x8b }, { 0x392 }, + { 0x3F2 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_1[] = { + { 0x1D9 }, { 0x3F1 }, { 0x345, 0, 0, 1 }, { 0x38D, 0, 0, 1 }, + { 0x38E }, { 0x38F }, { 0x390 }, { 0x1C8 }, + { 0x3F7 }, { 0x600 }, { 0x1A0 }, { 0x186, 0, 0x3 }, + { 0x1C9 }, { 0xCE }, { 0x1A4 }, { 0x1A6 }, + { 0x1A7 }, { 0x34 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_2[] = { + { 0xC5, 0, 0, 1 }, { 0xC6, 0, 0, 1 }, { 0xC7, 0, 0, 1 }, + { 0xC8, 0, 0, 1 }, { 0x3F6 }, { 0x18A, 0, 0x3, 1 }, + { 0x1AD }, { 0x1AE }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_3[] = { + { 0x680, 0, 0xF }, { 0x6C0, 0, 0xF }, { 0x1B }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_4[] = { + { 0x3F2 }, { 0x30C }, { 0x329, 0, 0, 1 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_5[] = { + { 0x40, 0, 0x7 }, { 0x60, 0, 0x7 }, { 0xCD }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_6[] = { { 0xDC0, 0, 0x1F }, { 0x0 } }; + +static PMU_MSR_INFO_NODE core_7[] = { + { 0x690, 0, 0xF }, { 0x6D0, 0, 0xF }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_8[] = { { 0x1AF }, { 0x0 } }; + +static PMU_MSR_INFO_NODE core_9[] = { + { 0x1500, 0, 0x1F }, { 0x1600, 0, 0x1F }, { 0x1200, 0, 0x1F }, + { 0x14CE }, { 0x14CF }, { 0x1DD }, + { 0x1DE }, { 0x1E0 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_10[] = { + { 0xC5 }, { 0xC6 }, { 0x18A }, { 0x18B }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE core_11[] = { { 0x392 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE uncore_2[] = { { 0x700 }, { 0x702 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE uncore_3[] = { { 0x1F80 }, { 0x1F82 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE uncore_4[] = { { 0xE01, 0, 0x1 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE uncore_5[] = { { 0x391, 0, 0x1 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE uncore_6[] = { { 0x2FF0 }, { 0x2FFE }, { 0x0 } }; + +static PMU_MSR_INFO_NODE uncore_7[] = { { 0x2FF0 }, { 0x2FF2 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu1_1[] = { { 0x396 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu1_2[] = { + { 0xE00, 0, 0x6 }, { 0xE08, 0, 0x4 }, { 0xE10, 0, 0xB }, + { 0xE20, 0, 0xB }, { 0xE30, 0, 0xB }, { 0xE40, 0, 0xB }, + { 0xE50, 0, 0xB }, { 0xE60, 0, 0xB }, { 0xE70, 0, 0xB }, + { 0xE80, 0, 0xB }, { 0xE90, 0, 0xB }, { 0xEA0, 0, 0xB }, + { 0xEB0, 0, 0xB }, { 0xEC0, 0, 0xB }, { 0xED0, 0, 0xB }, + { 0xEE0, 0, 0xB }, { 0xEF0, 0, 0xB }, { 0xF00, 0, 0xB }, + { 0xF10, 0, 0xB }, { 0xF20, 0, 0xB }, { 0xF30, 0, 0xB }, + { 0xF40, 0, 0xB }, { 0xF50, 0, 0xB }, { 0xF60, 0, 0xB }, + { 
0xF70, 0, 0xB }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu1_6[] = { + { 0x700, 0, 0x4 }, { 0x708, 0, 0x4 }, { 0x710, 0, 0x4 }, + { 0x718, 0, 0x4 }, { 0x720, 0, 0x4 }, { 0x728, 0, 0x4 }, + { 0x730, 0, 0x4 }, { 0x738, 0, 0x4 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu1_7[] = { + { 0x700, 0, 0x69 }, { 0xF70, 0, 0x29 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu1_8[] = { { 0x700, 0, 0x49 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu1_9[] = { { 0x2000, 0, 0x7C }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu2_1[] = { { 0xE00, 0, 0x1BB }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu2_2[] = { { 0xE00, 0, 0x1C8 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu2_3[] = { + { 0xE00, 0, 0x1E7 }, { 0xB60, 0, 0x6D }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu2_4[] = { { 0x1C00, 0, 0x5B }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu2_5[] = { { 0x2000, 0, 0x7FF }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu3_1[] = { { 0x2800, 0, 0x7BF }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu4_1[] = { + { 0xA40, 0, 0x5 }, { 0xA47, 0, 0x4 }, { 0xB00, 0, 0xF }, + { 0xA60, 0, 0x5 }, { 0xA67, 0, 0x4 }, { 0xB10, 0, 0xF }, + { 0xA80, 0, 0x5 }, { 0xA87, 0, 0x4 }, { 0xB20, 0, 0xF }, + { 0xAA0, 0, 0x5 }, { 0xAA7, 0, 0x4 }, { 0xB30, 0, 0xF }, + { 0xAC0, 0, 0x5 }, { 0xAC7, 0, 0x4 }, { 0xB40, 0, 0xF }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu4_2[] = { + { 0xA50, 0, 0x5 }, { 0xA57, 0, 0x4 }, { 0xAA0, 0, 0xF }, + { 0xA70, 0, 0x5 }, { 0xA77, 0, 0x4 }, { 0xAB0, 0, 0xF }, + { 0xA90, 0, 0x5 }, { 0xA97, 0, 0x4 }, { 0xAC0, 0, 0xF }, + { 0xAE0, 0, 0x5 }, { 0xAE7, 0, 0x4 }, { 0xB30, 0, 0xF }, + { 0xB00, 0, 0x5 }, { 0xB07, 0, 0x4 }, { 0xB40, 0, 0xF }, + { 0xB20, 0, 0x5 }, { 0xB27, 0, 0x4 }, { 0xB50, 0, 0xF }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu4_3[] = { + { 0x1E00, 0, 0x4 }, { 0x1E07, 0, 0x4 }, { 0x1F00, 0, 0xF }, + { 0x1E10, 0, 0x4 }, { 0x1E17, 0, 0x4 }, { 0x1F10, 0, 0xF }, + { 0x1E20, 0, 0x4 }, { 0x1E27, 0, 0x4 }, { 0x1F20, 0, 0xF }, + { 0x1E30, 0, 0x4 }, { 0x1E37, 0, 0x4 }, { 0x1F30, 0, 0xF }, + { 0x1E40, 0, 0x4 }, { 0x1E47, 0, 0x4 }, { 0x1F40, 0, 0xF }, + { 0x1EAC }, { 0x1EBC }, { 0x1ECC }, + { 0x1EDC }, { 0x1EDC }, { 0x1EEC }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu4_4[] = { + { 0x3000, 0, 0x1FF }, { 0x3800, 0, 0x1FF }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu5_msr_reg_list1[] = { { 0x300, 0, 0 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu5_msr_reg_list2[] = { + { 0xA09, 0, 0 }, { 0xA0A, 0, 0 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu6_1[] = { + { 0xA58 }, { 0xA78 }, { 0xA98 }, { 0xAB8 }, { 0xAD8 }, { 0xA5B }, + { 0xA5C }, { 0xA7B }, { 0xA7C }, { 0xA9B }, { 0xA9C }, { 0xABB }, + { 0xABC }, { 0xADB }, { 0xADC }, { 0xA5F }, { 0xA7F }, { 0xA9F }, + { 0xABF }, { 0xADF }, { 0xA59 }, { 0xA5A }, { 0xA79 }, { 0xA7A }, + { 0xA99 }, { 0xA9A }, { 0xAB9 }, { 0xABA }, { 0xAD9 }, { 0xADA }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu6_2[] = { + { 0xA4A }, { 0xA6A }, { 0xA8A }, { 0xADA }, { 0xAFA }, { 0xB1A }, + { 0xA4F }, { 0xA6F }, { 0xA8F }, { 0xADF }, { 0xAFF }, { 0xB1F }, + { 0xA4D }, { 0xA6D }, { 0xA8D }, { 0xADD }, { 0xAFD }, { 0xB1D }, + { 0xA4E }, { 0xA6E }, { 0xA8E }, { 0xADE }, { 0xAFE }, { 0xB1E }, + { 0xA4B }, { 0xA6B }, { 0xA8B }, { 0xADB }, { 0xAFB }, { 0xB1B }, + { 0xA4C }, { 0xA6C }, { 0xA8C }, { 0xADC }, { 0xAFC }, { 0xB1C }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu6_3[] = { + { 0x1EA0 }, { 0x1EB0 }, { 0x1EC0 }, { 0x1ED0 }, { 0x1EE0 }, { 0x1EA8 }, + { 0x1EA9 }, { 0x1EB8 }, { 0x1EB9 }, { 0x1EC8 }, { 0x1EC9 }, { 0x1ED8 }, + { 0x1ED9 }, { 0x1EE8 }, { 0x1EE9 }, { 0x1EA7 }, { 0x1EB7 }, { 0x1EC7 }, + { 
0x1ED7 }, { 0x1EE7 }, { 0x1EA1 }, { 0x1EA2 }, { 0x1EB1 }, { 0x1EB2 }, + { 0x1EC1 }, { 0x1EC2 }, { 0x1ED1 }, { 0x1ED2 }, { 0x1EE1 }, { 0x1EE2 }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu6_4[] = { { 0x3400, 0, 0x1FF }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu7_1[] = { + { 0x394 }, { 0x395 }, { 0x396 }, { 0x3B0, 0, 0x3 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu7_2[] = { { 0x3B8, 0, 0x3 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu7_3[] = { + { 0x2FD0, 0, 0x6 }, { 0x2FD8, 0, 0x6 }, { 0x2FDE }, { 0x2FDF }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu8_1[] = { + { 0x715 }, { 0x717, 0, 4 }, { 0x711, 0, 4 }, { 0x710 }, { 0x100 }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu8_2[] = { { 0x3FD }, { 0x3FC }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu8_3[] = { { 0x3F9 }, { 0x3F8 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu8_5[] = { + { 0x1EF5 }, { 0x1EF8, 0, 4 }, { 0x1EF1, 0, 4 }, { 0x1EF0 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu8_6[] = { { 0x2FC0, 0, 0xF }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu9_1[] = { + { 0x198 }, { 0x19C }, { 0x1A2 }, { 0x1B0 }, { 0x3F8 }, { 0x3F9 }, + { 0x3FA }, { 0x3FC }, { 0x3FD }, { 0x60D }, { 0x611 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_2[] = { { 0x639 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu9_3[] = { + { 0x3FE }, { 0x606 }, { 0x614 }, { 0x619 }, { 0x621 }, { 0x61D }, + { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_4[] = { + { 0x199 }, { 0x771 }, { 0x774 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_5[] = { { 0x660, 0, 0x5 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu9_6[] = { + { 0x611 }, { 0x606 }, { 0x639 }, { 0x641 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_7[] = { + { 0x60D }, { 0x3F8 }, { 0x3F9 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_8[] = { + { 0x3FA }, { 0x3FC }, { 0x3FD }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_9[] = { + { 0x1A2 }, { 0x198 }, { 0x19C }, { 0x611 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_11[] = { + { 0x19C }, { 0x1B1 }, { 0x619 }, { 0x3FE }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_12[] = { + { 0x19C }, { 0x1B1 }, { 0x630 }, { 0x631 }, { 0x632 }, { 0x619 }, + { 0x3FE }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_13[] = { + { 0x630 }, { 0x631 }, { 0x632 }, { 0x619 }, { 0x3FE }, { 0x653 }, + { 0x61D }, { 0x655 }, { 0x656 }, { 0x19C }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_14[] = { { 0x660 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu9_15[] = { + { 0x660 }, { 0x1B0 }, { 0x639 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_16[] = { { 0x661, 0, 0x4 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu9_18[] = { + { 0x606 }, { 0x1B0 }, { 0x614 }, { 0x619 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu9_19[] = { { 0x17D2 }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu9_20[] = { + { 0x64F }, { 0x1B1 }, { 0x613 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu10_1[] = { + { 0xA40, 0, 0x9 }, { 0xA60, 0, 0x9 }, { 0xA80, 0, 0x9 }, + { 0xAD0, 0, 0x9 }, { 0xAF0, 0, 0x9 }, { 0xB10, 0, 0x9 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu10_2[] = { + { 0x1E50, 0, 0x4 }, { 0x1E60, 0, 0x4 }, { 0x1E70, 0, 0x4 }, + { 0x1E80, 0, 0x4 }, { 0x1E90, 0, 0x4 }, { 0x1E57, 0, 0x4 }, + { 0x1E67, 0, 0x4 }, { 0x1E77, 0, 0x4 }, { 0x1E87, 0, 0x4 }, + { 0x1E97, 0, 0x4 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu10_3[] = { { 0x3200, 0, 0x1FF }, { 0x0 } }; + +static PMU_MSR_INFO_NODE pmu11_1[] = { + { 0x720 }, { 0x72A }, { 0x734 }, + { 0x73E }, { 0x721, 0, 0x3 }, { 0x72B, 0, 0x3 }, + { 0x735, 0, 0x3 }, { 0x73F, 0, 0x3 }, { 0x726, 0, 0x3 }, + { 0x730, 0, 0x3 }, { 0x73A, 0, 0x3 }, { 0x744, 0, 0x3 }, + { 0x0 } +}; + +static 
PMU_MSR_INFO_NODE pmu12_1[] = { + { 0xC14 }, { 0x620 }, { 0x703, 0, 0x7 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu12_2[] = { + { 0xC14 }, { 0x620 }, { 0x1F91, 0, 0x3 }, { 0x1F98, 0, 0x1 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu12_4[] = { + { 0x620 }, { 0x2FD0 }, { 0x2FDE, 0, 0x2 }, { 0x2FD2, 0, 2 }, + { 0x2FD8, 0, 2 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE pmu13[] = { + { 0xC8D }, { 0xC8E }, { 0xC8F }, { 0xD10 }, { 0xD11 }, { 0xD12 }, + { 0xD13 }, { 0xC90 }, { 0xC91 }, { 0xC92 }, { 0xC93 }, { 0x0 } +}; + +static PMU_MSR_INFO_NODE *perfmon_v4_msr_list[] = { + perfmon_v4_architectural, 0 +}; + +static PMU_MSR_INFO_NODE *perfmon_v5_msr_list[] = { + perfmon_v5_architectural, 0 +}; + +static PMU_MSR_INFO_NODE *bdx_msr_list[] = { + core_1, core_2, core_3, core_8, pmu1_1, pmu1_2, pmu1_8, pmu8_1, + pmu8_2, pmu9_1, pmu9_2, pmu9_3, pmu9_4, pmu11_1, pmu12_1, uncore_2, + pmu13, 0 +}; + +static PMU_MSR_INFO_NODE *skx_msr_list[] = { + core_1, core_2, core_3, core_6, core_7, core_11, + pmu2_1, pmu4_1, pmu6_1, pmu8_1, pmu8_2, pmu9_1, + pmu9_2, pmu9_3, pmu12_1, uncore_2, pmu5_msr_reg_list1, 0 +}; + +static PMU_MSR_INFO_NODE *skl_msr_list[] = { + core_1, core_2, core_3, core_6, core_7, core_11, pmu1_1, pmu1_7, + pmu7_1, pmu9_6, pmu9_7, pmu9_8, pmu9_13, uncore_4, 0 +}; + +static PMU_MSR_INFO_NODE *clx_msr_list[] = { + core_1, core_2, core_3, core_6, core_7, core_11, + pmu2_1, pmu4_1, pmu6_1, pmu8_1, pmu8_2, pmu9_1, + pmu9_2, pmu9_3, pmu9_11, pmu12_1, uncore_2, pmu13, + pmu5_msr_reg_list1, 0 +}; + +static PMU_MSR_INFO_NODE *plat1_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_11, + pmu1_1, pmu1_6, pmu9_14, pmu7_1, pmu7_2, uncore_4, 0 +}; + +static PMU_MSR_INFO_NODE *icx_r_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_11, + pmu2_3, pmu4_2, pmu6_2, pmu8_1, pmu8_3, pmu10_1, pmu12_1, + uncore_2, uncore_5, pmu9_1, pmu9_2, pmu9_3, pmu13, 0 +}; + +static PMU_MSR_INFO_NODE *hsw_msr_list[] = { + core_1, core_2, core_3, core_8, pmu1_1, pmu1_8, pmu7_1, pmu9_6, + pmu9_7, pmu9_8, pmu9_11, uncore_5, 0 +}; + +static PMU_MSR_INFO_NODE *hsw_ult_msr_list[] = { + core_1, core_2, core_3, core_8, pmu1_1, pmu1_8, pmu7_1, pmu9_6, + pmu9_7, pmu9_8, pmu9_12, uncore_5, 0 +}; + +static PMU_MSR_INFO_NODE *slm_msr_list[] = { + core_1, core_5, pmu9_8, pmu9_9, pmu9_15, 0 +}; + +static PMU_MSR_INFO_NODE *avt_msr_list[] = { + core_1, core_5, pmu9_1, pmu9_2, pmu9_5, 0 +}; + +static PMU_MSR_INFO_NODE *and_msr_list[] = { + core_1, core_5, pmu9_7, pmu9_8, pmu9_9, pmu9_15, pmu9_16, 0 +}; + +static PMU_MSR_INFO_NODE *bxt_msr_list[] = { + core_1, core_3, core_6, core_7, core_11, pmu9_8, pmu9_9, pmu9_15, + pmu13, 0 }; + +static PMU_MSR_INFO_NODE *dnv_msr_list[] = { + core_1, core_3, core_6, core_11, pmu9_1, pmu9_2, pmu9_5, 0 +}; + +static PMU_MSR_INFO_NODE *gml_msr_list[] = { + core_1, core_3, core_6, core_11, pmu9_2, pmu9_9, 0 +}; + +static PMU_MSR_INFO_NODE *hsx_msr_list[] = { + core_1, core_2, core_3, core_8, pmu1_1, pmu1_2, pmu1_8, pmu8_1, + pmu8_2, pmu9_1, pmu9_2, pmu9_3, pmu11_1, pmu12_1, uncore_2, 0 +}; + +static PMU_MSR_INFO_NODE *icl_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_11, pmu1_1, + pmu1_6, pmu7_1, uncore_4, uncore_5, pmu9_6, pmu9_7, pmu9_8, pmu9_13, + 0 +}; + +static PMU_MSR_INFO_NODE *snr_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_11, pmu2_4, + pmu4_3, pmu6_3, pmu8_2, pmu8_5, pmu10_2, pmu12_2, uncore_3, pmu9_1, + pmu9_2, pmu9_3, 0 +}; + +static PMU_MSR_INFO_NODE *knl_msr_list[] = { + core_1, core_2, core_3, pmu2_2, 
pmu9_2, pmu9_7, pmu9_8, pmu9_9, + pmu9_18, pmu12_1, uncore_2, 0 +}; + +static PMU_MSR_INFO_NODE *ehl_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_11, pmu1_1, + pmu1_6, pmu7_1, uncore_4, pmu9_6, pmu9_7, pmu9_8, pmu9_13, 0 +}; + +static PMU_MSR_INFO_NODE *tgl_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_11, pmu1_1, + pmu1_6, pmu7_1, pmu7_2, uncore_5, uncore_4, pmu9_6, pmu9_7, pmu9_8, + pmu9_13, 0 +}; + +static PMU_MSR_INFO_NODE *lkf_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_11, pmu1_1, + pmu1_6, pmu7_1, uncore_4, 0 +}; + +static PMU_MSR_INFO_NODE *plat4_msr_list[] = { + core_1, core_2, core_4, core_9, core_11, pmu2_5, pmu3_1, + pmu4_4, pmu6_4, pmu8_6, pmu10_3, pmu12_4, uncore_5, uncore_6, + pmu9_1, pmu9_2, pmu9_3, pmu9_20, pmu13, pmu5_msr_reg_list2, pmu9_19, 0 +}; + +static PMU_MSR_INFO_NODE *plat5_msr_list[] = { + core_1, core_2, core_3, core_4, core_6, core_7, core_9, core_10, + core_11, pmu1_1, pmu1_9, pmu7_3, uncore_7, pmu9_6, pmu9_7, pmu9_8, + pmu9_13, pmu9_19, 0 +}; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pmu_info_pci.h b/drivers/platform/x86/sepdk/inc/pmu_info_pci.h new file mode 100644 index 00000000000000..578060caaf3bda --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmu_info_pci.h @@ -0,0 +1,295 @@ +/**** + Copyright (C) 2019 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
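+****/ + +/* + * Each PMU_PCI_UNIT_INFO_NODE below names a device/function pair and a + * zero-terminated list of config-space register offsets that may be touched + * on it; an entry with a NULL reg_offset_list ends each table. A sketch of + * a membership check (illustrative only; the driver's real lookup goes + * through the search tree built in sep/pmu_list.c): + * + * static DRV_BOOL pci_reg_allowed(PMU_PCI_UNIT_INFO_NODE *list, + * U32 dev, U32 func, U16 offset) + * { + * U32 i, j; + * for (i = 0; list[i].reg_offset_list; i++) { + * if (list[i].dev != dev || list[i].func != func) + * continue; + * for (j = 0; list[i].reg_offset_list[j]; j++) { + * if (list[i].reg_offset_list[j] == offset) + * return TRUE; + * } + * } + * return FALSE; + * } + */ + +/****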
+****/ + + + + + + + + + +#ifndef _PMU_INFO_PCI_H_INC_ +#define _PMU_INFO_PCI_H_INC_ + +static U16 common_reg_list[] = { + 0xa0, 0xa8, 0xb0, 0xb8, 0xd8, 0xdc, 0xe0, 0xe4, 0xf4, 0xf8, 0x0 +}; + +static U16 knl_pmu1_reg_list[] = { + 0x400, 0x408, 0x410, 0x418, 0x420, 0x424, 0x428, 0x42c, 0x430, 0x434, + 0x44c, 0x454, 0x0 +}; + +static U16 common_pmu2_reg_list[] = { + 0xa0, 0xa8, 0xb0, 0xb8, 0xd0, 0xd8, 0xdc, 0xe0, 0xe4, 0xf0, 0xf4, 0xf8, + 0x0 +}; + +static U16 knl_pmu2_reg_list[] = { + 0xb00, 0xb08, 0xb10, 0xb18, 0xb20, 0xb24, 0xb28, 0xb2c, 0xb30, 0xb34, + 0xb3c, 0xb44, 0x0 +}; + +static U16 pmu2_dimm_reg_list[] = { 0x80, 0x84, 0x88, 0x0 }; + +static U16 pmu2_dimm_reg_list1[] = { 0xD0, 0xD8, 0xDC, 0xE0, 0xE4, 0x0 }; + +static U16 pmu12_dimm_reg_list[] = { 0xD4, 0x0 }; + +static U16 pmu3_reg_list1[] = { 0x3f4, 0x0 }; + +static U16 pmu3_reg_list2[] = { 0x150, 0x154, 0x0 }; + +static U16 pmu3_reg_list3[] = { 0x190, 0x194, 0x198, 0x19C, 0x1A0, 0x0 }; + +static U16 knl_pmu4_reg_list[] = { + 0xa00, 0xa08, 0xa10, 0xa18, 0xa20, 0xa24, 0xa28, 0xa2c, 0xa30, 0xa34, + 0xa3c, 0xa44, 0x0 +}; + +static U16 pmu5_rx_tx_reg_list[] = { + 0x200, 0x204, 0x210, 0x214, 0x228, 0x22c, 0x238, 0x23c, 0x0 }; + +static U16 pmu5_lp_reg_list[] = { 0x4c, 0x50, 0x54, 0x58, 0xC0, 0x0 }; + +static U16 skx_pmu7_reg_list[] = { + 0x318, 0x320, 0x328, 0x330, 0x350, 0x358, 0x360, 0x368, 0x37c, 0x378, 0x0 +}; + +static U16 icx_r_pmu7_reg_list[] = { + 0x320, 0x328, 0x330, 0x338, 0x350, 0x358, 0x360, 0x368, 0x37c, 0x318, 0x0 +}; + +static U16 plat4_pmu7_reg_list[] = { + 0x320, 0x328, 0x330, 0x338, 0x340, 0x350, 0x358, 0x360, 0x368, 0x370, + 0x378, 0x37c, 0x380, 0x384, 0x388, 0x38c, 0x318, 0x0 +}; + +static U16 pmu7_rx_tx_reg_list[] = { + 0x200, 0x204, 0x210, 0x214, 0x23C, 0x240, 0x244, 0x248, 0x24c, 0x250, + 0x254, 0x258, 0x25c, 0x260, 0x264, 0x268, 0x26C, 0x270, 0x0 +}; + +static U16 pmu7_lp_reg_list[] = { 0x94, 0x120, 0x0 }; + +static U16 knl_pmu9_reg_list[] = { 0xa0, 0xa8, 0xd8, 0xdc, 0xf0, 0xf4, 0x0 }; + +static U16 common_pmu6_reg_list[] = { + 0xa0, 0xa8, 0xb0, 0xd8, 0xdc, 0xe0, 0xf4, 0xf8, 0x0 +}; + +static U16 icx_pmu8_reg_list[] = { + 0xa0, 0xa8, 0xb0, 0xb8, 0xc0, 0xd8, 0xdc, 0xe0, 0xe4, 0xf8, 0x0 +}; + +static U16 plat4_pmu8_reg_list[] = { + 0xa0, 0xa8, 0xb0, 0xb8, 0xc0, 0xd8, 0xe0, 0xe8, 0xf0, 0xf8, 0x0 +}; + +static U16 skx_pmu10_reg_list[] = { + 0x200, 0x208, 0x210, 0x218, 0x228, 0x230, 0x238, 0x240, 0x258, 0x0 +}; + +static U16 icx_r_pmu10_reg_list[] = { + 0x438, 0x440, 0x448, 0x450, 0x458, 0x460, 0x468, 0x470, 0x478, 0x480, + 0x488, 0x0 +}; + +static U16 pmu11_reg_list[] = { 0x2A0, 0x2A4, 0x2A8, 0x2AC, 0x0 }; + +static PMU_PCI_UNIT_INFO_NODE hsx_pci_list[] = { + { 18, 1, common_reg_list }, { 18, 5, common_reg_list }, + { 20, 0, common_pmu2_reg_list }, { 20, 1, common_pmu2_reg_list }, + { 21, 0, common_pmu2_reg_list }, { 21, 1, common_pmu2_reg_list }, + { 23, 0, common_pmu2_reg_list }, { 23, 1, common_pmu2_reg_list }, + { 24, 0, common_pmu2_reg_list }, { 24, 1, common_pmu2_reg_list }, + { 19, 2, pmu2_dimm_reg_list }, { 19, 3, pmu2_dimm_reg_list }, + { 19, 4, pmu2_dimm_reg_list }, { 19, 5, pmu2_dimm_reg_list }, + { 22, 2, pmu2_dimm_reg_list }, { 22, 3, pmu2_dimm_reg_list }, + { 22, 4, pmu2_dimm_reg_list }, { 22, 5, pmu2_dimm_reg_list }, + { 8, 2, common_reg_list }, { 9, 2, common_reg_list }, + { 10, 2, common_reg_list }, { 8, 6, pmu5_rx_tx_reg_list }, + { 9, 6, pmu5_rx_tx_reg_list }, { 10, 6, pmu5_rx_tx_reg_list }, + { 8, 0, pmu5_lp_reg_list }, { 9, 0, pmu5_lp_reg_list }, + { 10, 0, pmu5_lp_reg_list }, { 5, 6, 
common_reg_list }, + { 16, 1, common_reg_list }, { 11, 1, common_pmu6_reg_list }, + { 11, 2, common_pmu6_reg_list }, { 11, 5, common_pmu6_reg_list }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE bdw_de_pci_list[] = { + { 18, 1, common_reg_list }, { 18, 0, common_reg_list }, + { 20, 0, common_pmu2_reg_list }, { 20, 1, common_pmu2_reg_list }, + { 21, 0, common_pmu2_reg_list }, { 21, 1, common_pmu2_reg_list }, + { 23, 0, common_pmu2_reg_list }, { 23, 1, common_pmu2_reg_list }, + { 24, 0, common_pmu2_reg_list }, { 24, 1, common_pmu2_reg_list }, + { 19, 2, pmu2_dimm_reg_list }, { 19, 3, pmu2_dimm_reg_list }, + { 19, 4, pmu2_dimm_reg_list }, { 19, 5, pmu2_dimm_reg_list }, + { 5, 6, common_reg_list }, { 16, 0, common_reg_list }, + { 16, 1, common_reg_list }, { 11, 1, common_reg_list }, + { 11, 2, common_reg_list }, { 11, 5, common_reg_list }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE bdx_pci_list[] = { + { 18, 1, common_reg_list }, { 18, 5, common_reg_list }, + { 20, 0, common_pmu2_reg_list }, { 20, 1, common_pmu2_reg_list }, + { 21, 0, common_pmu2_reg_list }, { 21, 1, common_pmu2_reg_list }, + { 23, 0, common_pmu2_reg_list }, { 23, 1, common_pmu2_reg_list }, + { 24, 0, common_pmu2_reg_list }, { 24, 1, common_pmu2_reg_list }, + { 19, 2, pmu2_dimm_reg_list }, { 19, 3, pmu2_dimm_reg_list }, + { 19, 4, pmu2_dimm_reg_list }, { 19, 5, pmu2_dimm_reg_list }, + { 22, 2, pmu2_dimm_reg_list }, { 22, 3, pmu2_dimm_reg_list }, + { 22, 4, pmu2_dimm_reg_list }, { 22, 5, pmu2_dimm_reg_list }, + { 8, 2, common_reg_list }, { 9, 2, common_reg_list }, + { 10, 2, common_reg_list }, { 8, 6, pmu5_rx_tx_reg_list }, + { 9, 6, pmu5_rx_tx_reg_list }, { 10, 6, pmu5_rx_tx_reg_list }, + { 8, 0, pmu5_lp_reg_list }, { 9, 0, pmu5_lp_reg_list }, + { 10, 0, pmu5_lp_reg_list }, { 5, 6, common_reg_list }, + { 16, 0, common_reg_list }, { 16, 1, common_reg_list }, + { 11, 1, common_reg_list }, { 11, 2, common_reg_list }, + { 11, 5, common_reg_list }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE knl_pci_list[] = { + { 8, 2, knl_pmu2_reg_list }, { 8, 3, knl_pmu2_reg_list }, + { 8, 4, knl_pmu2_reg_list }, { 9, 2, knl_pmu2_reg_list }, + { 9, 3, knl_pmu2_reg_list }, { 9, 4, knl_pmu2_reg_list }, + { 10, 0, knl_pmu1_reg_list }, { 11, 0, knl_pmu1_reg_list }, + { 24, 2, knl_pmu4_reg_list }, { 25, 2, knl_pmu4_reg_list }, + { 26, 2, knl_pmu4_reg_list }, { 27, 2, knl_pmu4_reg_list }, + { 28, 2, knl_pmu4_reg_list }, { 29, 2, knl_pmu4_reg_list }, + { 30, 2, knl_pmu4_reg_list }, { 31, 2, knl_pmu4_reg_list }, + { 15, 0, knl_pmu1_reg_list }, { 16, 0, knl_pmu1_reg_list }, + { 17, 0, knl_pmu1_reg_list }, { 18, 0, knl_pmu1_reg_list }, + { 19, 0, knl_pmu1_reg_list }, { 20, 0, knl_pmu1_reg_list }, + { 21, 0, knl_pmu1_reg_list }, { 22, 0, knl_pmu1_reg_list }, + { 5, 6, knl_pmu9_reg_list }, { 12, 1, common_reg_list }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE skx_pci_list[] = { + { 10, 2, common_pmu2_reg_list }, { 10, 6, common_pmu2_reg_list }, + { 11, 2, common_pmu2_reg_list }, { 12, 2, common_pmu2_reg_list }, + { 12, 6, common_pmu2_reg_list }, { 13, 2, common_pmu2_reg_list }, + { 10, 0, pmu2_dimm_reg_list }, { 10, 4, pmu2_dimm_reg_list }, + { 11, 0, pmu2_dimm_reg_list }, { 12, 0, pmu2_dimm_reg_list }, + { 12, 4, pmu2_dimm_reg_list }, { 13, 0, pmu2_dimm_reg_list }, + { 14, 0, skx_pmu7_reg_list }, { 15, 0, skx_pmu7_reg_list }, + { 16, 0, skx_pmu7_reg_list }, { 14, 0, pmu7_rx_tx_reg_list }, + { 15, 0, pmu7_rx_tx_reg_list }, { 16, 0, pmu7_rx_tx_reg_list }, + { 14, 0, pmu7_lp_reg_list }, 
{ 15, 0, pmu7_lp_reg_list }, + { 16, 0, pmu7_lp_reg_list }, { 21, 1, common_reg_list }, + { 22, 1, common_reg_list }, { 23, 1, common_reg_list }, + { 22, 5, common_reg_list }, { 18, 1, common_pmu6_reg_list }, + { 18, 2, common_pmu6_reg_list }, { 18, 5, common_pmu6_reg_list }, + { 8, 0, skx_pmu10_reg_list }, { 9, 0, skx_pmu10_reg_list }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE snr_pci_list[] = { + { 0, 1, pmu2_dimm_reg_list1 }, { 12, 0, icx_r_pmu10_reg_list }, + { 13, 0, icx_r_pmu10_reg_list }, { 14, 0, icx_r_pmu10_reg_list }, + { 15, 0, icx_r_pmu10_reg_list }, { 0, 0, pmu3_reg_list1 }, + { 0, 1, pmu3_reg_list2 }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE icx_r_pci_list[] = { + { 0, 1, pmu2_dimm_reg_list1 }, { 2, 1, icx_r_pmu7_reg_list }, + { 3, 1, icx_r_pmu7_reg_list }, { 4, 1, icx_r_pmu7_reg_list }, + { 2, 3, pmu7_rx_tx_reg_list }, { 3, 7, pmu7_rx_tx_reg_list }, + { 4, 3, pmu7_rx_tx_reg_list }, { 2, 1, pmu7_lp_reg_list }, + { 3, 1, pmu7_lp_reg_list }, { 4, 1, pmu7_lp_reg_list }, + { 5, 1, icx_pmu8_reg_list }, { 6, 1, icx_pmu8_reg_list }, + { 7, 1, icx_pmu8_reg_list }, { 12, 0, icx_r_pmu10_reg_list }, + { 13, 0, icx_r_pmu10_reg_list }, { 14, 0, icx_r_pmu10_reg_list }, + { 15, 0, icx_r_pmu10_reg_list }, { 0, 0, pmu3_reg_list1 }, + { 0, 1, pmu3_reg_list2 }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE cpx_pci_list[] = { + { 10, 2, common_pmu2_reg_list }, { 10, 6, common_pmu2_reg_list }, + { 11, 2, common_pmu2_reg_list }, { 12, 2, common_pmu2_reg_list }, + { 12, 6, common_pmu2_reg_list }, { 13, 2, common_pmu2_reg_list }, + { 10, 0, pmu2_dimm_reg_list }, { 10, 4, pmu2_dimm_reg_list }, + { 11, 0, pmu2_dimm_reg_list }, { 12, 0, pmu2_dimm_reg_list }, + { 12, 4, pmu2_dimm_reg_list }, { 13, 0, pmu2_dimm_reg_list }, + { 14, 0, skx_pmu7_reg_list }, { 15, 0, skx_pmu7_reg_list }, + { 16, 0, skx_pmu7_reg_list }, { 14, 4, skx_pmu7_reg_list }, + { 15, 4, skx_pmu7_reg_list }, { 16, 4, skx_pmu7_reg_list }, + { 14, 0, pmu7_rx_tx_reg_list }, { 14, 4, pmu7_rx_tx_reg_list }, + { 15, 0, pmu7_rx_tx_reg_list }, { 15, 4, pmu7_rx_tx_reg_list }, + { 16, 0, pmu7_rx_tx_reg_list }, { 16, 4, pmu7_rx_tx_reg_list }, + { 14, 0, pmu7_lp_reg_list }, { 14, 4, pmu7_lp_reg_list }, + { 15, 0, pmu7_lp_reg_list }, { 15, 4, pmu7_lp_reg_list }, + { 16, 0, pmu7_lp_reg_list }, { 16, 4, pmu7_lp_reg_list }, + { 21, 1, common_reg_list }, { 22, 1, common_reg_list }, + { 23, 1, common_reg_list }, { 22, 5, common_reg_list }, + { 18, 1, common_pmu6_reg_list }, { 18, 2, common_pmu6_reg_list }, + { 19, 1, common_pmu6_reg_list }, { 19, 2, common_pmu6_reg_list }, + { 20, 1, common_pmu6_reg_list }, { 20, 2, common_pmu6_reg_list }, + { 8, 0, skx_pmu10_reg_list }, { 9, 0, skx_pmu10_reg_list }, + // End of list + { 0 } +}; + +static PMU_PCI_UNIT_INFO_NODE plat4_pci_list[] = { + { 0, 1, pmu2_dimm_reg_list1 }, { 0, 1, pmu12_dimm_reg_list }, + { 1, 1, plat4_pmu7_reg_list }, { 2, 1, plat4_pmu7_reg_list }, + { 3, 1, plat4_pmu7_reg_list }, { 4, 1, plat4_pmu7_reg_list }, + { 1, 1, pmu7_rx_tx_reg_list }, { 2, 1, pmu7_rx_tx_reg_list }, + { 3, 1, pmu7_rx_tx_reg_list }, { 4, 1, pmu7_rx_tx_reg_list }, + { 1, 1, pmu7_lp_reg_list }, { 2, 1, pmu7_lp_reg_list }, + { 3, 1, pmu7_lp_reg_list }, { 4, 1, pmu7_lp_reg_list }, + { 5, 1, plat4_pmu8_reg_list }, { 6, 1, plat4_pmu8_reg_list }, + { 7, 1, plat4_pmu8_reg_list }, { 8, 1, plat4_pmu8_reg_list }, + { 12, 0, icx_r_pmu10_reg_list }, { 13, 0, icx_r_pmu10_reg_list }, + { 14, 0, icx_r_pmu10_reg_list }, { 15, 0, icx_r_pmu10_reg_list }, + { 12, 1, 
icx_r_pmu10_reg_list }, { 12, 2, icx_r_pmu10_reg_list }, + { 12, 3, icx_r_pmu10_reg_list }, { 12, 4, icx_r_pmu10_reg_list }, + { 13, 1, icx_r_pmu10_reg_list }, { 13, 2, icx_r_pmu10_reg_list }, + { 13, 3, icx_r_pmu10_reg_list }, { 13, 4, icx_r_pmu10_reg_list }, + { 14, 1, icx_r_pmu10_reg_list }, { 14, 2, icx_r_pmu10_reg_list }, + { 14, 3, icx_r_pmu10_reg_list }, { 14, 4, icx_r_pmu10_reg_list }, + { 15, 1, icx_r_pmu10_reg_list }, { 15, 2, icx_r_pmu10_reg_list }, + { 15, 3, icx_r_pmu10_reg_list }, { 15, 4, icx_r_pmu10_reg_list }, + { 29, 1, pmu11_reg_list }, { 0, 0, pmu3_reg_list1 }, + { 3, 0, pmu3_reg_list3 }, + // End of list + { 0 } +}; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pmu_info_struct.h b/drivers/platform/x86/sepdk/inc/pmu_info_struct.h new file mode 100644 index 00000000000000..149034dcb86076 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmu_info_struct.h @@ -0,0 +1,115 @@ +/**** + Copyright (C) 2019 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _PMU_INFO_STRUCT_H_INC_ +#define _PMU_INFO_STRUCT_H_INC_ + +// Data Structures for storing entire PMU list +typedef struct PMU_MSR_INFO_NODE_S PMU_MSR_INFO_NODE; +struct PMU_MSR_INFO_NODE_S { + U64 msr_id; + U64 mask; + U16 range; + U16 dynamic; // to be updated +}; + +typedef struct PMU_PCI_INFO_NODE_S PMU_PCI_INFO_NODE; +struct PMU_PCI_INFO_NODE_S { + union { + struct { + U64 bus : 8; + U64 dev : 5; + U64 func : 3; + U64 offset : 16; + U64 rsvd : 32; + } s; + U64 reg; + } u; +}; + +typedef struct PMU_PCI_UNIT_INFO_NODE_S PMU_PCI_UNIT_INFO_NODE; +struct PMU_PCI_UNIT_INFO_NODE_S { + U32 dev; + U32 func; + U16 *reg_offset_list; +}; + +typedef struct PMU_MMIO_BAR_INFO_NODE_S PMU_MMIO_BAR_INFO_NODE; +struct PMU_MMIO_BAR_INFO_NODE_S { + union { + struct { + U32 bus : 8; + U32 dev : 5; + U32 func : 3; + U32 offset : 16; + } s; + U32 reg; + } u; + U8 shift; + U8 bar_prog_type; + U16 reserved; + U64 mask; +}; + +enum { MMIO_SINGLE_BAR_TYPE = 1, MMIO_DUAL_BAR_TYPE, MMIO_DIRECT_BAR_TYPE }; + +typedef struct PMU_MMIO_UNIT_INFO_NODE_S PMU_MMIO_UNIT_INFO_NODE; +struct PMU_MMIO_UNIT_INFO_NODE_S { + PMU_MMIO_BAR_INFO_NODE primary; + PMU_MMIO_BAR_INFO_NODE secondary; + U32 *reg_offset_list; +}; + +typedef struct PMU_INFO_NODE_S PMU_INFO_NODE; +struct PMU_INFO_NODE_S { + U16 family; + U16 model; + U16 stepping_from; + U16 stepping_to; + PMU_MSR_INFO_NODE **msr_info_list; + PMU_PCI_UNIT_INFO_NODE *pci_info_list; + PMU_MMIO_UNIT_INFO_NODE *mmio_info_list; +}; + +// Data Structure for search operation +typedef struct PMU_SEARCH_NODE_S PMU_SEARCH_NODE; +struct PMU_SEARCH_NODE_S { + U64 key; // common for MSR/PCI/MMIO + void *addr; // copy address of static node + PMU_SEARCH_NODE *left; + PMU_SEARCH_NODE *right; + U16 height; // For balancing the search tree + U16 range; // For MSR +}; + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/pmu_list.h b/drivers/platform/x86/sepdk/inc/pmu_list.h new file mode 100644 index 00000000000000..ed476e466f3d44 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/pmu_list.h @@ -0,0 +1,154 @@ +/**** + Copyright (C) 2019 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _PMU_LIST_H_INC_ +#define _PMU_LIST_H_INC_ + +extern U32 drv_type; + +/************************************************************/ +/* + * PMU list API for checking valid PMU list + * + ************************************************************/ + +/*! 
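+ * A minimal usage sketch for this API (a hypothetical call sequence, not
+ * code taken from this driver; 0x38F is only a sample MSR address and
+ * error handling is elided):
+ *
+ *     S32 idx = -1;
+ *     PMU_LIST_Initialize(&idx);        // locate the list for this CPU
+ *     PMU_LIST_Build_MSR_List();        // build the MSR search tree
+ *     if (PMU_LIST_Check_MSR(0x38F)) {
+ *             // the MSR is on the allowlist; programming it is allowed
+ *     }
+ *     PMU_LIST_Clean_Up();              // free all the search trees
+ *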
+ * @fn DRV_BOOL PMU_LIST_Check_MSR (U32)
+ *
+ * @brief Search the MSR address in the list
+ *
+ * @param msr_id - MSR address to be checked
+ *
+ * @return TRUE if the MSR address is found in the list,
+ *         FALSE otherwise
+ */
+extern DRV_BOOL PMU_LIST_Check_MSR(U32 msr_id);
+
+/*!
+ * @fn DRV_BOOL PMU_LIST_Check_PCI (U8, U8, U8, U32)
+ *
+ * @brief Search the PCI programming info in the list
+ *
+ * @param bus_id    - PCI bus id
+ *        device_id - PCI device id
+ *        func_id   - PCI function id
+ *        offset    - PCI offset
+ *
+ * @return TRUE if the PCI information is found in the list,
+ *         FALSE otherwise
+ */
+extern DRV_BOOL
+PMU_LIST_Check_PCI(U8 bus_id, U8 device_id, U8 func_id, U32 offset);
+
+/*!
+ * @fn DRV_BOOL PMU_LIST_Check_MMIO (PMU_MMIO_BAR_INFO_NODE,
+ *                                   PMU_MMIO_BAR_INFO_NODE,
+ *                                   U32)
+ *
+ * @brief Search the MMIO programming info in the list
+ *
+ * @param primary   - primary MMIO BAR programming info
+ *        secondary - secondary MMIO BAR programming info
+ *        offset    - MMIO offset
+ *
+ * @return TRUE if the MMIO information is found in the list,
+ *         FALSE otherwise
+ */
+extern DRV_BOOL
+PMU_LIST_Check_MMIO(PMU_MMIO_BAR_INFO_NODE primary,
+                    PMU_MMIO_BAR_INFO_NODE secondary,
+                    U32 offset);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Initialize (S32 *)
+ * @brief Detect the CPU id and locate the applicable PMU list
+ *
+ * @param idx - index into the allowlist; set to -1 if not found
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Initialize(S32 *idx);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Build_MSR_List (void)
+ * @brief Build the MSR search tree
+ *
+ * @param None
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Build_MSR_List(void);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Build_PCI_List (void)
+ * @brief Build the PCI search tree
+ *
+ * @param None
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Build_PCI_List(void);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Build_MMIO_List (void)
+ * @brief Build the MMIO search tree
+ *
+ * @param None
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Build_MMIO_List(void);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Add_To_MMIO_List (U64, void*)
+ *
+ * @brief Adds an entry to the MMIO list
+ *
+ * @param U64 key    - key for the node in the MMIO BST
+ *        void *addr - address to add into the node for additional checks
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Add_To_MMIO_List(U64 key, void *addr);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Clean_Up (void)
+ * @brief Clean up all the search trees
+ *
+ * @param None
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Clean_Up(void);
+
+#endif
+
diff --git a/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h b/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h
new file mode 100644
index 00000000000000..42a877bd427dfd
--- /dev/null
+++ b/drivers/platform/x86/sepdk/inc/sepdrv_p_state.h
@@ -0,0 +1,42 @@
+/****
+ Copyright (C) 2013 Intel Corporation. All Rights Reserved.
+
+ This file is part of SEP Development Kit.
+
+ SEP Development Kit is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation.
+
+ SEP Development Kit is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ As a special exception, you may use this file as part of a free software
+ library without restriction.
Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _SEPDRV_P_STATE_H_ +#define _SEPDRV_P_STATE_H_ + +#define DRV_APERF_MSR 0xE8 +#define DRV_MPERF_MSR 0xE7 + +extern OS_STATUS SEPDRV_P_STATE_Read(S8 *buffer, CPU_STATE pcpu); + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/silvermont.h b/drivers/platform/x86/sepdk/inc/silvermont.h new file mode 100644 index 00000000000000..f90b85777fcf86 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/silvermont.h @@ -0,0 +1,49 @@ +/**** + Copyright (C) 2011 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#ifndef _SILVERMONT_H_ +#define _SILVERMONT_H_ + +#include "msrdefs.h" +extern DISPATCH_NODE silvermont_dispatch; +extern DISPATCH_NODE knights_dispatch; + +#if defined(DRV_IA32) +#define SILVERMONT_LBR_DATA_BITS 32 +#else +#define SILVERMONT_LBR_DATA_BITS 48 +#endif + +#define SILVERMONT_LBR_BITMASK ((1ULL << SILVERMONT_LBR_DATA_BITS) - 1) + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/sys_info.h b/drivers/platform/x86/sepdk/inc/sys_info.h new file mode 100644 index 00000000000000..a3d47ae86cfb76 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/sys_info.h @@ -0,0 +1,77 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. 
Specifically, if other files instantiate
+ templates or use macros or inline functions from this file, or you compile
+ this file and link it with other files to produce an executable, this
+ file does not by itself cause the resulting executable to be covered by
+ the GNU General Public License. This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#ifndef _SYS_INFO_H_
+#define _SYS_INFO_H_
+
+#include "lwpmudrv_defines.h"
+
+#define KNIGHTS_FAMILY 0x06
+#define KNL_MODEL 0x57
+#define KNM_MODEL 0x85
+
+#define is_Knights_family(family, model) \
+        ((family == KNIGHTS_FAMILY) && \
+         ((model == KNL_MODEL) || (model == KNM_MODEL)))
+
+typedef struct __generic_ioctl {
+        U32 size;
+        S32 ret;
+        U64 rsv[3];
+} GENERIC_IOCTL;
+
+#define GENERIC_IOCTL_size(gio) ((gio)->size)
+#define GENERIC_IOCTL_ret(gio) ((gio)->ret)
+
+//
+// This one is unusual in that it's really a variable
+// size. The system_info field is just an easy way
+// to access the base information, but the actual size
+// when used tends to be much larger than what is
+// shown here.
+//
+typedef struct __system_info {
+        GENERIC_IOCTL gen;
+        VTSA_SYS_INFO sys_info;
+} IOCTL_SYS_INFO;
+
+#define IOCTL_SYS_INFO_gen(isi) ((isi)->gen)
+#define IOCTL_SYS_INFO_sys_info(isi) ((isi)->sys_info)
+
+extern U32 SYS_INFO_Build(void);
+extern void
+SYS_INFO_Transfer(PVOID buf_usr_to_drv, unsigned long len_usr_to_drv);
+extern void SYS_INFO_Destroy(void);
+extern void SYS_INFO_Build_Cpu(PVOID param);
+
+#endif
+
diff --git a/drivers/platform/x86/sepdk/inc/unc_common.h b/drivers/platform/x86/sepdk/inc/unc_common.h
new file mode 100644
index 00000000000000..e9c5ca684abb61
--- /dev/null
+++ b/drivers/platform/x86/sepdk/inc/unc_common.h
@@ -0,0 +1,221 @@
+/****
+ Copyright (C) 2012 Intel Corporation. All Rights Reserved.
+
+ This file is part of SEP Development Kit.
+
+ SEP Development Kit is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation.
+
+ SEP Development Kit is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ As a special exception, you may use this file as part of a free software
+ library without restriction. Specifically, if other files instantiate
+ templates or use macros or inline functions from this file, or you compile
+ this file and link it with other files to produce an executable, this
+ file does not by itself cause the resulting executable to be covered by
+ the GNU General Public License. This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/ + + + + + + + + + +#ifndef _UNC_COMMON_H_INC_ +#define _UNC_COMMON_H_INC_ + +#include "pci.h" + +#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086 +#define VENDOR_ID_MASK 0x0000FFFF +#define DEVICE_ID_MASK 0xFFFF0000 +#define DEVICE_ID_BITSHIFT 16 +#define EXT_CAPID_MASK VENDOR_ID_MASK +#define EXT_CAPID_LIST_NEXT_ADDR_MASK 0xFFF00000 +#define EXT_CAPID_LIST_NEXT_ADDR_BITSHIFT 20 +#define EXT_CAPID_START_OFFSET 0x100 +#define EXT_CAPID_END_OFFSET 0xFFF + +#define UNCORE_SOCKETID_UBOX_LNID_OFFSET 0x40 +#define UNCORE_SOCKETID_UBOX_GID_OFFSET 0x54 + +#define INVALID_BUS_NUMBER -1 +#define PCI_INVALID_VALUE 0xFFFFFFFF + +typedef struct DEVICE_CALLBACK_NODE_S DEVICE_CALLBACK_NODE; +typedef DEVICE_CALLBACK_NODE *DEVICE_CALLBACK; + +struct DEVICE_CALLBACK_NODE_S { + DRV_BOOL (*is_Valid_Device)(U32); + DRV_BOOL (*is_Valid_For_Write)(U32, U32); + DRV_BOOL (*is_Unit_Ctl)(U32); + DRV_BOOL (*is_PMON_Ctl)(U32); +}; + +#define MAX_PCIDEV_UNITS 32 +#define GET_MAX_PCIDEV_ENTRIES(num_pkg) \ + ((num_pkg > MAX_PCIDEV_UNITS) ? num_pkg : MAX_PCIDEV_UNITS) + +typedef struct UNC_PCIDEV_NODE_S UNC_PCIDEV_NODE; + +struct UNC_PCIDEV_NODE_S { + U32 num_entries; + U32 max_entries; + S32 *busno_list; // array for pcibus mapping + S32 *domainno_list; // array for pcibus mapping + SEP_MMIO_NODE *mmio_map; // virtual memory mapping entries + U32 num_mmio_main_bar_per_entry; + U32 num_mmio_secondary_bar_per_entry; +}; + +#define UNC_PCIDEV_max_entries(x) ((x)->max_entries) +#define UNC_PCIDEV_num_entries(x) ((x)->num_entries) +#define UNC_PCIDEV_busno_list(x) ((x)->busno_list) +#define UNC_PCIDEV_busno_entry(x, entry) ((x)->busno_list[entry]) +#define UNC_PCIDEV_domainno_list(x) ((x)->domainno_list) +#define UNC_PCIDEV_domainno_entry(x, entry) ((x)->domainno_list[entry]) +#define UNC_PCIDEV_mmio_map(x) ((x)->mmio_map) +#define UNC_PCIDEV_mmio_map_entry(x, entry) ((x)->mmio_map[entry]) +#define UNC_PCIDEV_num_mmio_main_bar_per_entry(x) \ + ((x)->num_mmio_main_bar_per_entry) +#define UNC_PCIDEV_num_mmio_secondary_bar_per_entry(x) \ + ((x)->num_mmio_secondary_bar_per_entry) +#define UNC_PCIDEV_virtual_addr_entry(x, entry) \ + (SEP_MMIO_NODE_virtual_address(&UNC_PCIDEV_mmio_map_entry(x, entry))) + +#define UNC_PCIDEV_is_busno_valid(x, entry) \ + (((x)->busno_list) && ((x)->num_entries > (entry)) && \ + ((x)->busno_list[(entry)] != INVALID_BUS_NUMBER)) +#define UNC_PCIDEV_is_vaddr_valid(x, entry) \ + (((x)->mmio_map) && \ + ((x)->num_entries * (x)->num_mmio_secondary_bar_per_entry > \ + (entry)) && \ + ((x)->mmio_map[(entry)].virtual_address)) + +extern UNC_PCIDEV_NODE unc_pcidev_map[]; + +#define GET_BUS_MAP(dev_node, entry) \ + (UNC_PCIDEV_busno_entry((&(unc_pcidev_map[dev_node])), entry)) +#define GET_DOMAIN_MAP(dev_node, entry) \ + (UNC_PCIDEV_domainno_entry((&(unc_pcidev_map[dev_node])), entry)) +#define GET_NUM_MAP_ENTRIES(dev_node) \ + (UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))) +#define GET_NUM_MMIO_SECONDARY_BAR(dev_node) \ + (UNC_PCIDEV_num_mmio_secondary_bar_per_entry( \ + &(unc_pcidev_map[dev_node]))) +#define IS_MMIO_MAP_VALID(dev_node, entry) \ + (UNC_PCIDEV_is_vaddr_valid((&(unc_pcidev_map[dev_node])), entry)) +#define IS_BUS_MAP_VALID(dev_node, entry) \ + (UNC_PCIDEV_is_busno_valid((&(unc_pcidev_map[dev_node])), entry)) +#define virtual_address_table(dev_node, entry) \ + (UNC_PCIDEV_virtual_addr_entry(&(unc_pcidev_map[dev_node]), entry)) + +extern OS_STATUS +UNC_COMMON_Do_Bus_to_Socket_Map(U32 uncore_did, + U32 dev_node, + U32 bus_no, + U32 device_no, + U32 function_no); + +extern VOID 
UNC_COMMON_Dummy_Func(PVOID param); + +extern VOID UNC_COMMON_Read_Counts(PVOID param, U32 id); + +/************************************************************/ +/* + * UNC common PCI based API + * + ************************************************************/ + +extern VOID +UNC_COMMON_PCI_Write_PMU(PVOID param, + U32 ubox_did, + U32 control_msr, + U32 ctl_val, + U32 pci_dev_index, + DEVICE_CALLBACK callback); + +extern VOID +UNC_COMMON_PCI_Enable_PMU(PVOID param, + U32 control_msr, + U32 enable_val, + U32 disable_val, + DEVICE_CALLBACK callback); + +extern VOID +UNC_COMMON_PCI_Disable_PMU(PVOID param, + U32 control_msr, + U32 enable_val, + U32 disable_val, + DEVICE_CALLBACK callback); + +extern OS_STATUS +UNC_COMMON_Add_Bus_Map(U32 uncore_did, U32 dev_node, U32 bus_no, U32 domain_no); + +extern OS_STATUS UNC_COMMON_Init(void); + +extern VOID UNC_COMMON_Clean_Up(void); + +extern VOID UNC_COMMON_PCI_Trigger_Read(U32 id); + +extern VOID UNC_COMMON_PCI_Read_PMU_Data(PVOID param); + +extern VOID +UNC_COMMON_PCI_Scan_For_Uncore(PVOID param, + U32 dev_info_node, + DEVICE_CALLBACK callback); + +extern U32 UNC_COMMON_Scan_PMT_Device(void); + +extern VOID UNC_COMMON_Get_Platform_Topology(U32 dev_info_node); + +/************************************************************/ +/* + * UNC common MSR based API + * + ************************************************************/ + +extern VOID +UNC_COMMON_MSR_Write_PMU(PVOID param, + U32 control_msr, + U64 control_val, + U64 reset_val, + DEVICE_CALLBACK callback); + +extern VOID +UNC_COMMON_MSR_Enable_PMU(PVOID param, + U32 control_msr, + U64 control_val, + U64 unit_ctl_val, + U64 pmon_ctl_val, + DEVICE_CALLBACK callback); + +extern VOID +UNC_COMMON_MSR_Disable_PMU(PVOID param, + U32 control_msr, + U64 unit_ctl_val, + U64 pmon_ctl_val, + DEVICE_CALLBACK callback); + +extern VOID UNC_COMMON_MSR_Trigger_Read(U32 id); + +extern VOID UNC_COMMON_MSR_Read_PMU_Data(PVOID param); + +extern VOID UNC_COMMON_Read_Uncore_Discovery_Tables(void); + +extern VOID UNC_COMMON_MSR_Clean_Up(PVOID param); + +extern VOID +UNC_COMMON_PCI_Scan_For_CapId(UNCORE_DISCOVERY_DVSEC_CONFIG config, + DRV_PCI_DEVICE_ENTRY bar_list); +#endif + diff --git a/drivers/platform/x86/sepdk/inc/unc_gt.h b/drivers/platform/x86/sepdk/inc/unc_gt.h new file mode 100644 index 00000000000000..4fd05a2eb2f829 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/unc_gt.h @@ -0,0 +1,87 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _UNC_GT_H_INC_ +#define _UNC_GT_H_INC_ + +/* + * Local to this architecture: SNB uncore GT unit + * + */ +#define GT_MMIO_SIZE 0x200000 +#define NEXT_ADDR_OFFSET 4 +#define UNC_GT_BAR_MASK 0x7FFF000000 +#define PERF_GLOBAL_CTRL 0x391 +#define GT_CLEAR_COUNTERS 0xFFFF0000 + +#define GT_DID_1 0x102 +#define INTEL_VENDOR_ID 0x8086 +#define DRV_GET_PCI_VENDOR_ID(value) (value & 0x0000FFFF) +#define DRV_GET_PCI_DEVICE_ID(value) ((value & 0xFFFF0000) >> 16) +#define DRV_IS_INTEL_VENDOR_ID(value) (value == INTEL_VENDOR_ID) +#define DRV_IS_GT_DEVICE_ID(value) (value == GT_DID_1) + +//clock gating disable values +#define UNC_GT_GCPUNIT_REG1 0x9400 +#define UNC_GT_GCPUNIT_REG2 0x9404 +#define UNC_GT_GCPUNIT_REG3 0x9408 +#define UNC_GT_GCPUNIT_REG4 0x940c +#define UNC_GT_GCPUNIT_REG1_VALUE 0xffffffff +#define UNC_GT_GCPUNIT_REG2_VALUE 0xffffffff +#define UNC_GT_GCPUNIT_REG3_VALUE 0xffe3ffff +#define UNC_GT_GCPUNIT_REG4_VALUE 0x00000003 +//RC6 disable +#define UNC_GT_RC6_REG1 0xa090 +#define UNC_GT_RC6_REG2 0xa094 +#define UNC_GT_RC6_REG1_OR_VALUE 0x80000000 +#define UNC_GT_RC6_REG2_VALUE 0x00000000 +extern DISPATCH_NODE unc_gt_dispatch; + +typedef struct GT_CTR_NODE_S GT_CTR_NODE; +typedef GT_CTR_NODE *GT_CTR; +struct GT_CTR_NODE_S { + union { + struct { + U32 low : 32; + U32 high : 12; + } bits; + U64 value; + } u; +}; + +#define GT_CTR_NODE_value(x) (x.u.value) +#define GT_CTR_NODE_low(x) (x.u.bits.low) +#define GT_CTR_NODE_high(x) (x.u.bits.high) +#define GT_CTR_NODE_value_reset(x) (x.u.value = 0) + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/utility.h b/drivers/platform/x86/sepdk/inc/utility.h new file mode 100644 index 00000000000000..f3e03318485685 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/utility.h @@ -0,0 +1,648 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _UTILITY_H_ +#define _UTILITY_H_ + +/** +// Data Types and Macros +*/ +#pragma pack(push, 1) + +#pragma pack(pop) + +/* + * These routines have macros defined in asm/system.h + */ +#define SYS_Local_Irq_Enable() local_irq_enable() +#define SYS_Local_Irq_Disable() local_irq_disable() +#define SYS_Local_Irq_Save(flags) local_irq_save(flags) +#define SYS_Local_Irq_Restore(flags) local_irq_restore(flags) + +#include + +extern U64 SYS_Read_MSR_With_Status(U32 msr, S32 *status); +#define SYS_Read_MSR(msr) SYS_Read_MSR_With_Status((msr), (NULL)) + +extern void SYS_Write_MSR_With_Status(U32 msr, U64 val, S32 *status); +#define SYS_Write_MSR(msr, val) SYS_Write_MSR_With_Status((msr), (val), (NULL)) + +#if defined(DRV_USE_RDPMC) +extern U64 SYS_Read_PMC_opt(U32 ctr_addr, U32 is_fixed_ctr); +#endif + +#if defined(DRV_USE_RDPMC) +#define SYS_Read_PMC(ctr_addr, is_fixed_ctr) \ + SYS_Read_PMC_opt((ctr_addr), (is_fixed_ctr)) +#else +#define SYS_Read_PMC(ctr_addr, is_fixed_ctr) SYS_Read_MSR((ctr_addr)) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \ + defined(CONFIG_UIDGID_STRICT_TYPE_CHECKS)) +#define DRV_GET_UID(p) (p->cred->uid.val) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#define DRV_GET_UID(p) (p->cred->uid) +#else +#define DRV_GET_UID(p) (p->uid) +#endif + +extern void SYS_Perfvec_Handler(void); + +extern void * SYS_get_stack_ptr0(void); +extern void * SYS_get_stack_ptr3(void); +extern void * SYS_get_user_fp(void); +extern short SYS_Get_cs(void); + +#if defined(DRV_IA32) +extern void * SYS_Get_GDT_Base_HWR(void); /// GDT base from hardware GDTR +extern U64 SYS_Get_TSC(void); + +#define SYS_Get_GDT_Base SYS_Get_GDT_Base_HWR +#endif + +#if defined(DRV_EM64T) +extern unsigned short SYS_Get_Code_Selector0(void); +extern void SYS_Get_GDT_Base(void **); +#endif + +extern void SYS_IO_Delay(void); +#define SYS_Inb(port) inb(port) +#define SYS_Outb(byte, port) outb(byte, port) + +/* typedef int OSSTATUS; */ + +/* + * Lock implementations + */ +#define SYS_Locked_Inc(var) atomic_inc((var)) +#define SYS_Locked_Dec(var) atomic_dec((var)) + +extern void UTILITY_Read_TSC(U64 *pTsc); + +extern void UTILITY_down_read_mm(struct mm_struct *mm); + +extern void UTILITY_up_read_mm(struct mm_struct *mm); + +extern void +UTILITY_Read_Cpuid(U64 cpuid_function, + U64 *rax_value, + U64 *rbx_value, + U64 *rcx_value, + U64 *rdx_value); + +extern DISPATCH UTILITY_Configure_CPU(U32); + +#if defined(DRV_IA32) +asmlinkage void SYS_Get_CSD(U32, U32 *, U32 *); +#endif + +#define RDPMC_COUNTER_TYPE_BIT_SHIFT 30 +#if defined(DRV_USE_RDPMC) +#define LFENCE_SERIALIZE() __asm__("lfence") +#else +#define LFENCE_SERIALIZE() {} +#endif + +/* + * @fn extern unsigned long UTILITY_Find_Symbol (const char* name) + * + * @brief Finds the address of the specified kernel symbol. + * + * @param const char* name - name of the symbol to look for + * + * @return Symbol address (0 if could not find) + * + * Special Notes: + * This wrapper is needed due to kallsyms_lookup_name not being exported + * in kernel version 2.6.32.*. + * Careful! This code is *NOT* multithread-safe or reentrant! Should only + * be called from 1 context at a time! 
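+ *
+ * Example (hypothetical usage; the symbol name is just a placeholder):
+ *
+ *     unsigned long addr = UTILITY_Find_Symbol("module_mutex");
+ *     if (addr == 0) {
+ *             // symbol not found; fall back or fail gracefully
+ *     }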
+ */
+extern unsigned long UTILITY_Find_Symbol(char const *name);
+
+/************************************************************************/
+/*********************** DRIVER LOG DECLARATIONS ************************/
+/************************************************************************/
+
+#define DRV_LOG_COMPILER_MEM_BARRIER() asm volatile("" : : : "memory")
+
+#define DRV_LOG_DEFAULT_LOAD_VERBOSITY \
+        (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_INIT_VERBOSITY \
+        (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_DETECTION_VERBOSITY \
+        (DRV_LOG_DEFAULT_INIT_VERBOSITY)
+#define DRV_LOG_DEFAULT_ERROR_VERBOSITY \
+        (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY \
+        (DRV_LOG_DEFAULT_INIT_VERBOSITY)
+#define DRV_LOG_DEFAULT_MARK_VERBOSITY \
+        (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_DEBUG_VERBOSITY \
+        (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_PRINTK | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_FLOW_VERBOSITY \
+        (LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_ALLOC_VERBOSITY (LOG_VERBOSITY_NONE)
+#define DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY \
+        (LOG_CHANNEL_MEMLOG | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_TRACE_VERBOSITY (LOG_VERBOSITY_NONE)
+#define DRV_LOG_DEFAULT_REGISTER_VERBOSITY (LOG_VERBOSITY_NONE)
+#define DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY \
+        (LOG_CHANNEL_MEMLOG | LOG_CONTEXT_ALL)
+#define DRV_LOG_DEFAULT_WARNING_VERBOSITY \
+        (LOG_CHANNEL_MOSTWHERE | LOG_CONTEXT_ALL)
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void UTILITY_Log (U8 category, U8 in_notification, U8 secondary,
+ *                              const char* function_name, U32 func_name_len,
+ *                              U32 line_number, const char* format_string, ...)
+ *
+ * @brief Checks whether and where the message should be logged, and logs it as appropriate.
+ *
+ * @param U8 category        - message category
+ *        U8 in_notification - whether or not we are in a notification/OS callback context
+ *                             (this information cannot be reliably obtained without passing
+ *                             it through the stack)
+ *        U8 secondary       - secondary information field for the message
+ *        const char* function_name - name of the calling function
+ *        U32 func_name_len  - length of the name of the calling function (more efficient
+ *                             to pass it as parameter than finding it back at runtime)
+ *        U32 line_number    - line number of the call site
+ *        const char* format_string - classical format string for printf-like functions
+ *        ...                - elements to print
+ *
+ * @return none
+ *
+ * Special Notes:
+ *     The message is only emitted on the channels enabled in its
+ *     category's verbosity mask; the in_notification flag must be
+ *     passed explicitly because notification context cannot be
+ *     reliably detected at runtime.
+ */
+extern VOID
+UTILITY_Log(U8 category,
+            U8 in_notification,
+            U8 secondary,
+            char const *function_name,
+            U32 func_name_len,
+            U32 line_number,
+            char const *format_string,
+            ...);
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern DRV_STATUS UTILITY_Driver_Log_Init (void)
+ *
+ * @brief Allocates and initializes the driver log buffer.
+ *
+ * @param none
+ *
+ * @return OS_SUCCESS on success, OS_NO_MEM on error.
+ *
+ * Special Notes:
+ *     Should be (successfully) run before any non-LOAD log calls.
+ *     Allocates memory without going through CONTROL_Allocate (to avoid
+ *     complicating the instrumentation of CONTROL_* functions): calling
+ *     UTILITY_Driver_Log_Free is necessary to free the log structure.
+ *     Falls back to vmalloc when contiguous physical memory cannot be
+ *     allocated. This does not impact runtime behavior, but may impact
+ *     the ease of retrieving the log from a core dump if the system
+ *     crashes.
+ */
+extern DRV_STATUS UTILITY_Driver_Log_Init(void);
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void UTILITY_Driver_Log_Free (void)
+ *
+ * @brief Frees the driver log buffer.
+ *
+ * @param none
+ *
+ * @return none
+ *
+ * Special Notes:
+ *     Should be done before unloading the driver.
+ *     See UTILITY_Driver_Log_Init for details.
+ */
+extern void UTILITY_Driver_Log_Free(VOID);
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void UTILITY_Driver_Set_Active_Ioctl (U32 ioctl)
+ *
+ * @brief Sets the 'active_ioctl' global to the specified value.
+ *
+ * @param U32 ioctl - ioctl/drvop code to use
+ *
+ * @return none
+ *
+ * Special Notes:
+ *     Used to keep track of the IOCTL operation currently being processed.
+ *     This information is saved in the log buffer (globally), as well as
+ *     in every log entry.
+ *     NB: only IOCTLs for which grabbing the ioctl mutex is necessary
+ *     should be kept track of this way.
+ */
+extern void UTILITY_Driver_Set_Active_Ioctl(U32);
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern const char** UTILITY_Log_Category_Strings (void)
+ *
+ * @brief Accessor function for the log category string array
+ *
+ * @param none
+ *
+ * @return the array of log category name strings
+ *
+ * Special Notes:
+ *     Only needed for cosmetic purposes when adjusting category verbosities.
+ */
+extern char const ** UTILITY_Log_Category_Strings(void);
+
+extern DRV_LOG_BUFFER driver_log_buffer;
+extern volatile U8 active_ioctl;
+
+#define DRV_LOG() driver_log_buffer
+#define DRV_LOG_VERBOSITY(category) \
+        ((DRV_LOG_BUFFER_verbosities(DRV_LOG()))[category])
+#define SEP_IN_NOTIFICATION 1
+
+#define SEP_DRV_RAW_LOG(category, in_notification, second, message, ...) \
+        UTILITY_Log(category, in_notification, second, __func__, \
+                    sizeof(__func__), __LINE__, message, ##__VA_ARGS__)
+#define SEP_DRV_ULK_LOG(category, in_notification, second, message, ...)
\ + UTILITY_Log(category, in_notification, second, __func__, \ + sizeof(__func__), __LINE__, message, ##__VA_ARGS__) + +#define SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS() \ + { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()), 1); \ + __sync_fetch_and_add(&DRV_LOG_BUFFER_nb_interrupts(DRV_LOG()), \ + 1); \ + } + +#define SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS() \ + __sync_fetch_and_add(&DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG()), \ + -1) + +#define SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS() \ + { \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()), \ + 1); \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_notifications(DRV_LOG()), 1); \ + } + +#define SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS() \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG()), -1) + +#define SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS() \ + __sync_fetch_and_add( \ + &DRV_LOG_BUFFER_nb_driver_state_transitions(DRV_LOG()), 1) + +#define SEP_DRV_LOG_DISAMBIGUATE() \ + __sync_fetch_and_add(&DRV_LOG_BUFFER_disambiguator(DRV_LOG()), 1) + +/************************************************************************/ +/************************** CATEGORY LOG APIs ***************************/ +/************************************************************************/ + +// ERROR, WARNING and LOAD are always compiled in... +#define SEP_DRV_LOG_ERROR(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_ERROR, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_WARNING(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_WARNING, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_ERROR(in_notif, message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_ERROR, in_notif, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_WARNING(in_notif, message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_WARNING, in_notif, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_LOAD(message, ...) \ + { \ + if (DRV_LOG()) { \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_LOAD, 0, \ + DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__); \ + } else if (DRV_LOG_DEFAULT_LOAD_VERBOSITY & \ + LOG_CHANNEL_PRINTK) { \ + printk(KERN_ERR SEP_MSG_PREFIX " " message "\n", \ + ##__VA_ARGS__); \ + } \ + } + +#if defined(DRV_MINIMAL_LOGGING) // MINIMAL LOGGING MODE +#define SEP_DRV_LOG_INIT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_INIT_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_INIT_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DETECTION(message, ...) \ + { \ + } +#define SEP_DRV_LOG_MARK(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DEBUG(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DEBUG_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_DEBUG_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_FLOW_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_FLOW_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_ALLOC(message, ...) \ + { \ + } +#define SEP_DRV_LOG_ALLOC_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_ALLOC_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_INTERRUPT_IN(message, ...) \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS() +#define SEP_DRV_LOG_INTERRUPT_OUT(message, ...) \ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS() +#define SEP_DRV_LOG_NOTIFICATION_IN(message, ...) \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS() +#define SEP_DRV_LOG_NOTIFICATION_OUT(message, ...) 
\ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS() +#define SEP_DRV_LOG_STATE_TRANSITION(former_state, new_state, message, ...) \ + { \ + (void)former_state; \ + SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS(); \ + DRV_LOG_BUFFER_driver_state(DRV_LOG()) = new_state; \ + } +#else // REGULAR LOGGING MODE (PART 1 / 2) +#define SEP_DRV_LOG_INIT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_INIT_IN(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_INIT_OUT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INIT, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DETECTION(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DETECTION, 0, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_MARK(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_MARK, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DEBUG(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DEBUG_IN(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_DEBUG_OUT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_DEBUG, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_FLOW_IN(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_FLOW, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_FLOW_OUT(message, ...) \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_FLOW, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_ALLOC(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_ALLOC_IN(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_ALLOC_OUT(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_ALLOC, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_INTERRUPT_IN(message, ...) \ + { \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_INTERRUPTS(); \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INTERRUPT, 0, \ + DRV_LOG_FLOW_IN, message, ##__VA_ARGS__); \ + } +#define SEP_DRV_LOG_INTERRUPT_OUT(message, ...) \ + { \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_INTERRUPT, 0, \ + DRV_LOG_FLOW_OUT, message, ##__VA_ARGS__); \ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_INTERRUPTS(); \ + } +#define SEP_DRV_LOG_NOTIFICATION_IN(message, ...) \ + { \ + SEP_DRV_LOG_INCREMENT_NB_ACTIVE_NOTIFICATIONS(); \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_NOTIFICATION, 1, \ + DRV_LOG_FLOW_IN, message, ##__VA_ARGS__); \ + } +#define SEP_DRV_LOG_NOTIFICATION_OUT(message, ...) \ + { \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_NOTIFICATION, 1, \ + DRV_LOG_FLOW_OUT, message, ##__VA_ARGS__); \ + SEP_DRV_LOG_DECREMENT_NB_ACTIVE_NOTIFICATIONS(); \ + } +#define SEP_DRV_LOG_STATE_TRANSITION(former_state, new_state, message, ...) \ + { \ + SEP_DRV_LOG_INCREMENT_NB_STATE_TRANSITIONS(); \ + DRV_LOG_BUFFER_driver_state(DRV_LOG()) = new_state; \ + SEP_DRV_RAW_LOG(DRV_LOG_CATEGORY_STATE_CHANGE, 0, \ + ((U8)former_state << 4) | \ + ((U8)new_state & 0xF), \ + message, ##__VA_ARGS__); \ + } +#endif + +#if defined(DRV_MAXIMAL_LOGGING) // MAXIMAL LOGGING MODE +#define SEP_DRV_LOG_TRACE(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_NOTHING, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_TRACE_IN(message, ...) 
\ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_FLOW_IN, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_TRACE_OUT(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_FLOW_OUT, message, \ + ##__VA_ARGS__) +#define SEP_DRV_LOG_REGISTER_IN(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_REGISTER, 0, DRV_LOG_FLOW_IN, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_REGISTER_OUT(message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_REGISTER, 0, DRV_LOG_FLOW_OUT, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_TRACE(in_notif, message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_NOTHING, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notif, message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_FLOW_IN, \ + message, ##__VA_ARGS__) +#define SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notif, message, ...) \ + SEP_DRV_ULK_LOG(DRV_LOG_CATEGORY_TRACE, in_notif, DRV_LOG_FLOW_OUT, \ + message, ##__VA_ARGS__) +#else // REGULAR LOGGING MODE (PART 2 / 2) +#define SEP_DRV_LOG_TRACE(message, ...) \ + { \ + } +#define SEP_DRV_LOG_TRACE_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_TRACE_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_REGISTER_IN(message, ...) \ + { \ + } +#define SEP_DRV_LOG_REGISTER_OUT(message, ...) \ + { \ + } +#define SEP_DRV_LOG_NOTIFICATION_TRACE(in_notif, message, ...) \ + { \ + } +#define SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notif, message, ...) \ + { \ + } +#define SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notif, message, ...) \ + { \ + } +#endif + +/************************************************************************/ +/************************* FACILITATOR MACROS ***************************/ +/************************************************************************/ + +#define SEP_DRV_LOG_ERROR_INIT_OUT(message, ...) \ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_INIT_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_ERROR_FLOW_OUT(message, ...) \ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_FLOW_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_ERROR_TRACE_OUT(message, ...) \ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_ERROR_ALLOC_OUT(message, ...) \ + { \ + SEP_DRV_LOG_ERROR(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_ALLOC_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_WARNING_FLOW_OUT(message, ...) \ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_FLOW_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_WARNING_TRACE_OUT(message, ...) \ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_WARNING_ALLOC_OUT(message, ...) \ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_ALLOC_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_INIT_TRACE_OUT(message, ...) \ + { \ + SEP_DRV_LOG_INIT(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_TRACE_OUT(message, ##__VA_ARGS__); \ + } + +#define SEP_DRV_LOG_WARNING_NOTIFICATION_OUT(message, ...) 
\ + { \ + SEP_DRV_LOG_WARNING(message, ##__VA_ARGS__); \ + SEP_DRV_LOG_NOTIFICATION_OUT(message, ##__VA_ARGS__); \ + } + +/************************************************************************/ +/************************* DRIVER STATE MACROS **************************/ +/************************************************************************/ +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern U32 UTILITY_Change_Driver_State (U32 allowed_prior_states, + * U32 state, const char* func, U32 line_number) + * + * @brief Updates the driver state (if the transition is legal). + * + * @param U32 allowed_prior_states - the bitmask representing the states + * from which the transition is allowed to occur + * U32 state - the destination state + * const char* func - the callsite's function's name + * U32 line_number - the callsite's line number + * + * @return 1 in case of success, 0 otherwise + * + * Special Notes: + * + */ +extern U32 +UTILITY_Change_Driver_State(U32 allowed_prior_states, + U32 state, + char const *func, + U32 line_number); + +#define GET_DRIVER_STATE() GLOBAL_STATE_current_phase(driver_state) +#define CHANGE_DRIVER_STATE(allowed_prior_states, state) \ + UTILITY_Change_Driver_State(allowed_prior_states, state, __func__, \ + __LINE__) +#define DRIVER_STATE_IN(state, states) \ + (!!(MATCHING_STATE_BIT(state) & (states))) + +#endif + diff --git a/drivers/platform/x86/sepdk/inc/valleyview_sochap.h b/drivers/platform/x86/sepdk/inc/valleyview_sochap.h new file mode 100644 index 00000000000000..305f1560be8d53 --- /dev/null +++ b/drivers/platform/x86/sepdk/inc/valleyview_sochap.h @@ -0,0 +1,68 @@ +/**** + Copyright (C) 2012 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#ifndef _VALLEYVIEW_SOCHAP_H_INC_ +#define _VALLEYVIEW_SOCHAP_H_INC_ + +/* + * Local to this architecture: Valleyview uncore SA unit + * + */ +#define VLV_VISA_DESKTOP_DID 0x000C04 +#define VLV_VISA_NEXT_ADDR_OFFSET 4 +#define VLV_VISA_BAR_ADDR_SHIFT 32 +#define VLV_VISA_BAR_ADDR_MASK 0x000FFFC00000LL +#define VLV_VISA_MAX_PCI_DEVICES 16 +#define VLV_VISA_MCR_REG_OFFSET 0xD0 +#define VLV_VISA_MDR_REG_OFFSET 0xD4 +#define VLV_VISA_MCRX_REG_OFFSET 0xD8 +#define VLV_VISA_BYTE_ENABLES 0xF +#define VLV_VISA_OP_CODE_SHIFT 24 +#define VLV_VISA_PORT_ID_SHIFT 16 +#define VLV_VISA_OFFSET_HI_MASK 0xFF +#define VLV_VISA_OFFSET_LO_MASK 0xFF +#define VLV_CHAP_SIDEBAND_PORT_ID 23 +#define VLV_CHAP_SIDEBAND_WRITE_OP_CODE 1 +#define VLV_CHAP_SIDEBAND_READ_OP_CODE 0 +#define VLV_CHAP_MAX_COUNTERS 8 +#define VLV_CHAP_MAX_COUNT 0x00000000FFFFFFFFLL + +#define VLV_VISA_OTHER_BAR_MMIO_PAGE_SIZE 4096 +#define VLV_VISA_CHAP_SAMPLE_DATA 0x00020000 +#define VLV_VISA_CHAP_STOP 0x00040000 +#define VLV_VISA_CHAP_START 0x00110000 +#define VLV_VISA_CHAP_CTRL_REG_OFFSET 0x0 + +extern DISPATCH_NODE valleyview_visa_dispatch; + +#endif + diff --git a/drivers/platform/x86/sepdk/include/error_reporting_utils.h b/drivers/platform/x86/sepdk/include/error_reporting_utils.h new file mode 100644 index 00000000000000..20aacbd7acac9e --- /dev/null +++ b/drivers/platform/x86/sepdk/include/error_reporting_utils.h @@ -0,0 +1,331 @@ +/* + * Copyright (C) 2002 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. 
+*/ + +#ifndef __ERROR_REPORTING_UTILS_H__ +#define __ERROR_REPORTING_UTILS_H__ + +#define DRV_ASSERT_N_RET_VAL(ret_val) \ + { \ + DRV_ASSERT((ret_val) == VT_SUCCESS); \ + DRV_CHECK_N_RETURN_N_FAIL(ret_val); \ + } + +#define DRV_ASSERT_N_CONTINUE(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + } + +#ifndef DRV_CHECK_N_RETURN_N_FAIL +#define DRV_CHECK_N_RETURN_N_FAIL(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return ret_val; \ + } +#endif + +#define DRV_CHECK_N_RETURN_N_NULL(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return NULL; \ + } + +#ifndef DRV_CHECK_N_RETURN_NO_RETVAL +#define DRV_CHECK_N_RETURN_NO_RETVAL(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return; \ + } +#endif +#define DRV_CHECK_N_RETURN_N_USERDEFINED(ret_val, user_defined_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return user_defined_val; \ + } + +#define DRV_CHECK_N_CONTINUE(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + continue; \ + } + +#define DRV_CHECK_N_SET_RET_N_GOTO_LABEL(ret_val, ret_var, ret_status, \ + goto_label) \ + if ((ret_val) != VT_SUCCESS) { \ + ret_var = ret_status; \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + goto goto_label; \ + } + +#ifndef DRV_CHECK_PTR_N_RET_VAL +#define DRV_CHECK_PTR_N_RET_VAL(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return VT_SAM_ERROR; \ + } +#endif + +#define DRV_CHECK_PTR_N_RET_GIVEN_VAL(ptr, ret_val) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return ret_val; \ + } + +#define DRV_CHECK_PTR_N_RET_NULL(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return NULL; \ + } + +#define DRV_CHECK_PTR_N_RET_FALSE(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return FALSE; \ + } + +#define DRV_CHECK_PTR_N_RET(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return; \ + } + +#define DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + } + +#define DRV_CHECK_PTR_N_CLEANUP(ptr, gotolabel, ret_val) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + ret_val = VT_SAM_ERROR; \ + goto gotolabel; \ + } + +#define DRV_CHECK_PTR_ON_NULL_CLEANUP_N_RETURN(ptr, gotolabel) \ + if ((ptr) == NULL) { \ + DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr); \ + goto gotolabel; \ + } + +#define DRV_CHECK_PTR_N_RET_ASSIGNED_VAL(ptr, return_val) \ + if (!(ptr)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return return_val; \ + } + +#define DRV_CHECK_PTR_N_SET_RET_N_GOTO_CLEANUP(status, ret_var, ret_status, \ + goto_label) \ + if (status == NULL) { \ + ret_var = ret_status; \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \ + __FUNCTION__, __LINE__, status)); \ + goto goto_label; \ + } + +#define DRV_CHECK_N_LOG_NO_RETURN(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + } + +#define 
DRV_CHECK_N_RET_NEG_ONE(ret_val) \ + if ((ret_val) == -1) { \ + LOG_ERR0(VTSA_T("Operation failed with error code = -1")); \ + return VT_SAM_ERROR; \ + } + +#define DRV_CHECK_N_RET_NEG_ONE_N_CLEANUP(ret_val, gotolabel) \ + if ((ret_val) == -1) { \ + LOG_ERR0(VTSA_T("Operation failed with error code = -1")); \ + goto gotolabel; \ + } + +#define DRV_REQUIRES_TRUE_COND_RET_N_FAIL(cond) \ + if (!(cond)) { \ + LOG_ERR0(VTSA_T("Condition check failed")); \ + return VT_SAM_ERROR; \ + } + +#define DRV_REQUIRES_TRUE_COND_RET_N_GOTO_CLEANUP(cond, gotolabel) \ + if (!(cond)) { \ + LOG_ERR0(VTSA_T("Condition check failed")); \ + goto gotolabel; \ + } + +#define DRV_REQUIRES_TRUE_COND_RET_ASSIGNED_VAL(cond, ret_val) \ + if (!(cond)) { \ + LOG_ERR0(VTSA_T("Condition check failed")); \ + return ret_val; \ + } + +#define DRV_CHECK_N_ERR_LOG_ERR_STRNG_N_RET(rise_err) \ + if (rise_err != VT_SUCCESS) { \ + PVOID rise_ptr = NULL; \ + const VTSA_CHAR *error_str = NULL; \ + RISE_open(&rise_ptr); \ + RISE_translate_err_code(rise_ptr, rise_err, &error_str); \ + LogItW(LOG_LEVEL_ERROR | LOG_AREA_GENERAL, \ + L"Operation failed with error [ %d ] = %s\n", rise_err, \ + error_str); \ + RISE_close(rise_ptr); \ + return rise_err; \ + } + +#define DRV_CHECK_ON_FAIL_CLEANUP_N_RETURN(ret_val, gotolabel) \ + if ((ret_val) != VT_SUCCESS) { \ + DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ + goto gotolabel; \ + } + +#define DRV_CHECK_RET_N_SET_RET_N_GOTO_CLEANUP(status, ret_var, ret_status, \ + goto_label) \ + if (status != VT_SUCCESS) { \ + ret_var = ret_status; \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \ + __FUNCTION__, __LINE__, status)); \ + goto goto_label; \ + } + +#define DRV_CHECK_N_CLEANUP_N_RETURN_RET_NEG_ONE(ret_val, gotolabel) \ + if ((ret_val) == -1) { \ + DRV_CHECK_N_LOG_NO_RETURN(ret_val); \ + goto gotolabel; \ + } + +#define DRV_CHECK_IF_NEG_RET_N_RETURN_GIVEN_VAL(inp_val, ret_val) \ + if ((inp_val) < 0) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "Operation failed with error code %d\n", ret_val)); \ + return ret_val; \ + } + +#define DRV_CHECK_IF_NEG_RET_N_RETURN_NO_VAL(inp_val) \ + if ((inp_val) < 0) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "Operation failed with error code %d\n", ret_val)); \ + return; \ + } + +#define DRV_CHECK_IF_NEG_RET_N_GOTO_CLEANUP(status, goto_label) \ + if (status < 0) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \ + __FUNCTION__, __LINE__, status)); \ + goto goto_label; \ + } + +#define DRV_CHECK_IF_NEG_RET_N_SET_RET_N_GOTO_CLEANUP(status, ret_var, \ + ret_status, goto_label) \ + if (status < 0) { \ + ret_var = ret_status; \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \ + __FUNCTION__, __LINE__, status)); \ + goto goto_label; \ + } + +#define FREE_N_SET_NULL(ptr) \ + if (ptr != NULL) { \ + free(ptr); \ + ptr = NULL; \ + } + +#define DELETE_N_SET_NULL(ptr) \ + delete ptr; \ + ptr = NULL; + +/* + * Memory management error handling macros + */ +// Check for NULL ptr and return VT_NO_MEMORY +#define SEP_CHECK_ALLOCATION_N_RET_VAL(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return VT_NO_MEMORY; \ + } + +// Check for NULL ptr and exit with -1 status +#define SEP_CHECK_ALLOCATION_N_EXIT_WITH_FAILURE(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + exit(-1); \ + } + +// Check for NULL ptr and return void +#define 
SEP_CHECK_ALLOCATION_N_RET_NOVAL(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return; \ + } + +// Check for NULL ptr and return False +#define SEP_CHECK_ALLOCATION_N_RET_BOOL(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return FALSE; \ + } + +// Check for NULL ptr and return NULL +#define SEP_CHECK_ALLOCATION_N_RET_NULL(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return NULL; \ + } + +// Check for NULL ptr and goto provided label +#define SEP_CHECK_ALLOCATION_N_GOTO_CLEANUP(loc, goto_label) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + goto goto_label; \ + } + +// Check for NULL ptr and continue the loop +#define SEP_CHECK_ALLOCATION_N_CONTINUE(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + continue; \ + } + +// Check for NULL ptr, set return var with provided status and goto provided label +#define SEP_CHECK_ALLOCATION_SET_RETURN_N_GOTO_CLEANUP(loc, ret_var, \ + ret_status, goto_label) \ + if (!(loc)) { \ + ret_var = ret_status; \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + goto goto_label; \ + } + +#define SEP_CHECK_ALLOCATION_N_RET_ASSIGNED_VAL DRV_CHECK_PTR_N_RET_ASSIGNED_VAL + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h new file mode 100644 index 00000000000000..620b88159fbc5a --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_defines.h @@ -0,0 +1,607 @@ +/* + * Copyright (C) 2007 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _LWPMUDRV_DEFINES_H_ +#define _LWPMUDRV_DEFINES_H_ + +#if defined(__cplusplus) +extern "C" { +#endif +// +// Start off with none of the OS'es are defined +// +#undef DRV_OS_WINDOWS +#undef DRV_OS_LINUX +#undef DRV_OS_SOLARIS +#undef DRV_OS_MAC +#undef DRV_OS_ANDROID +#undef DRV_OS_UNIX + +// +// Make sure none of the architectures is defined here +// +#undef DRV_IA32 +#undef DRV_EM64T + +// +// Make sure one (and only one) of the OS'es gets defined here +// +// Unfortunately entirex defines _WIN32 so we need to check for linux +// first. The definition of these flags is one and only one +// _OS_xxx is allowed to be defined. 
+
+//
+// Make sure one (and only one) architecture is defined here
+// as well as one (and only one) pointer size
+//
+#if defined(_M_IX86) || defined(__i386__)
+#define DRV_IA32
+#elif defined(_M_AMD64) || defined(__x86_64__)
+#define DRV_EM64T
+#else
+#error "Unknown architecture for compilation"
+#endif
+
+//
+// Add a well-defined definition of compiling for release (free) vs.
+// debug (checked). Once again, don't assume these are the only two values;
+// always have an else clause in case we want to expand this.
+//
+#if defined(DRV_OS_UNIX)
+#define WINAPI
+#endif
+
+/*
+ * Add OS neutral defines for file processing. This is needed in both
+ * the user code and the kernel code for cleanliness
+ */
+#undef DRV_FILE_DESC
+#undef DRV_INVALID_FILE_DESC_VALUE
+#define DRV_ASSERT assert
+
+#if defined(DRV_OS_WINDOWS)
+
+#define DRV_FILE_DESC HANDLE
+#define DRV_INVALID_FILE_DESC_VALUE INVALID_HANDLE_VALUE
+
+#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \
+	defined(DRV_OS_ANDROID)
+
+#define DRV_IOCTL_FILE_DESC SIOP
+#define DRV_FILE_DESC SIOP
+#define DRV_INVALID_FILE_DESC_VALUE -1
+
+#elif defined(DRV_OS_FREEBSD)
+
+#define DRV_IOCTL_FILE_DESC S64
+#define DRV_FILE_DESC S64
+#define DRV_INVALID_FILE_DESC_VALUE -1
+
+#elif defined(DRV_OS_MAC)
+#if defined __LP64__
+#define DRV_IOCTL_FILE_DESC S64
+#define DRV_FILE_DESC S64
+#define DRV_INVALID_FILE_DESC_VALUE (S64)(-1)
+#else
+#define DRV_IOCTL_FILE_DESC S32
+#define DRV_FILE_DESC S32
+#define DRV_INVALID_FILE_DESC_VALUE (S32)(-1)
+#endif
+
+#else
+
+#error "Compiling for an unknown OS"
+
+#endif
+
+#define OUT
+#define IN
+#define INOUT
+
+//
+// VERIFY_SIZEOF lets you insert a compile-time check that the size of a data
+// type (e.g. a struct) is what you think it should be. Usually it is
+// important to know what the actual size of your struct is, and to make sure
+// it is the same across all platforms. So this will prevent the code from
+// compiling if something happens that you didn't expect, whether it's because
+// you counted wrong, or more often because the compiler inserted padding that
+// you don't want.
+//
+// NOTE: 'type' and 'size' must both be identifier safe, e.g. matching the
+// regular expression /^[0-9a-zA-Z_]+$/.
+//
+// Example:
+//     typedef struct { void *ptr; int data; } mytype;
+//     VERIFY_SIZEOF(mytype, 8);
+//         ^-- this is correct on 32-bit platforms, but fails
+//             on 64-bit platforms, indicating a possible
+//             portability issue.
+//
+#define VERIFY_SIZEOF(type, size) \
+	enum { sizeof_##type##_eq_##size = 1 / (int)(sizeof(type) == size) }
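+
+// The macro works because division by zero in a constant expression is a
+// compile-time error: sizeof(type) == size evaluates to 0 on a mismatch,
+// turning the enum initializer into 1/0. A hypothetical struct (not from
+// this driver) that trips the check once the compiler pads it to 16 bytes:
+//
+//   typedef struct { U32 a; U64 b; } padded_t;
+//   VERIFY_SIZEOF(padded_t, 12);   // fails: sizeof(padded_t) is 16
+//   VERIFY_SIZEOF(padded_t, 16);   // compiles cleanly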
+
+#if defined(DRV_OS_WINDOWS)
+#define DRV_DLLIMPORT __declspec(dllimport)
+#define DRV_DLLEXPORT __declspec(dllexport)
+#endif
+#if defined(DRV_OS_UNIX)
+#define DRV_DLLIMPORT
+#define DRV_DLLEXPORT
+#endif
+
+#define DRV_STRING_FORMAT_WIDTH2(str) #str
+#define DRV_STRING_FORMAT_WIDTH(str) DRV_STRING_FORMAT_WIDTH2(str)
+#if defined(DRV_OS_WINDOWS)
+#define FSI64RAW "I64"
+#define DRV_PATH_SEPARATOR "\\"
+#define L_DRV_PATH_SEPARATOR L"\\"
+#endif
+
+#if defined(DRV_OS_UNIX)
+#define FSI64RAW "ll"
+#define DRV_PATH_SEPARATOR "/"
+#define L_DRV_PATH_SEPARATOR L"/"
+#endif
+
+#define FSS64 "%" FSI64RAW "d"
+#define FSU64 "%" FSI64RAW "u"
+#define FSX64 "%" FSI64RAW "x"
+
+#if defined(DRV_OS_WINDOWS)
+#define DRV_RTLD_NOW 0
+#endif
+#if defined(DRV_OS_UNIX)
+#if defined(DRV_OS_FREEBSD)
+#define DRV_RTLD_NOW 0
+#else
+#define DRV_RTLD_NOW RTLD_NOW
+#endif
+#endif
+
+#define DRV_STRLEN (U32) strlen
+#define DRV_WCSLEN (U32) wcslen
+#define DRV_STRCSPN strcspn
+#define DRV_STRCHR strchr
+#define DRV_STRRCHR strrchr
+#define DRV_WCSRCHR wcsrchr
+#if !defined(STATIC_PROG_API)
+#define DRV_STRCPY strcpy_safe
+#define DRV_STRNCPY strncpy_safe
+#define DRV_STRCAT strcat_safe
+#define DRV_STRNCAT strncat_safe
+#define DRV_WCSCPY wcscpy_safe
+#define DRV_WCSNCPY wcsncpy_safe
+#define DRV_WCSCAT wcscat_safe
+#define DRV_WCSNCAT wcsncat_safe
+#define DRV_SPRINTF sprintf_safe
+#define DRV_SNPRINTF snprintf_safe
+#define DRV_SNWPRINTF snwprintf_safe
+#define DRV_MEMCPY memcpy_safe
+#define DRV_FOPEN(fp, name, mode) fopen_safe(&(fp), (name), (mode))
+#if defined(DRV_OS_WINDOWS)
+#define DRV_WFOPEN(fp, name, mode) wfopen_safe(&(fp), (name), (mode))
+#endif
+#endif
+#if defined(DRV_OS_WINDOWS)
+#define DRV_STCHARLEN DRV_WCSLEN
+#else
+#define DRV_STCHARLEN DRV_STRLEN
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+// To minimize dependencies on other files in sampling_utils,
+// the macro definitions below are confined to this
+// file alone for the static PROG_API case
+#if defined(STATIC_PROG_API)
+#define DRV_STRCPY(dst, dst_size, src) \
+	(strcpy_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCPY(dst, dst_size, src, n) \
+	(strncpy_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRCAT(dst, dst_size, src) \
+	(strcat_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCAT(dst, dst_size, src, n) \
+	(strncat_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCPY(dst, dst_size, src) \
+	(wcscpy_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCPY(dst, dst_size, src, n) \
+	(wcsncpy_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCAT(dst, dst_size, src) \
+	(wcscat_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCAT(dst, dst_size, src, n) \
+	(wcsncat_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_SPRINTF(dst, dst_size, args...) \
+	(sprintf_s((dst), dst_size, ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_MEMCPY(dst, dst_size, src, n) \
+	(memcpy_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_SNPRINTF(buf, buf_size, length, args...) \
+	(_snprintf_s(buf, buf_size, length, ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_SNWPRINTF(buf, buf_size, length, args...) \
+	(_snwprintf_s(buf, buf_size, length, ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_FOPEN(fp, name, mode) fopen_s(&(fp), (name), (mode))
+#define DRV_WFOPEN(fp, name, mode) wfopen_s(&(fp), (name), (mode))
+#endif
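+
+// Illustrative only: the buffer and literal below are hypothetical. These
+// STATIC_PROG_API wrappers evaluate to VT_SUCCESS or VT_SAM_ERROR, so a
+// caller can use one status check on every platform. Assigning the result
+// to a variable first also sidesteps precedence surprises, since the
+// expansions above are not wrapped in outer parentheses:
+//
+//   char dest[64];
+//   int status = DRV_STRCPY(dest, sizeof(dest), "event_name");
+//   if (status != VT_SUCCESS) {
+//       return VT_SAM_ERROR;
+//   }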
+#define DRV_STRICMP _stricmp
+#define DRV_STRNCMP strncmp
+#define DRV_STRNICMP _strnicmp
+#define DRV_STRDUP _strdup
+#define DRV_WCSDUP _wcsdup
+#define DRV_STRCMP strcmp
+#define DRV_WCSCMP wcscmp
+#define DRV_VSNPRINTF(buf, buf_size, length, format, args) \
+	_vsnprintf_s((buf), (buf_size), (_TRUNCATE), (format), (args))
+#define DRV_VSNWPRINTF(buf, buf_size, length, format, args) \
+	_vsnwprintf_s((buf), (buf_size), (_TRUNCATE), (format), (args))
+#define DRV_SSCANF sscanf_s
+#define DRV_WMEMCPY wmemcpy_s
+#define DRV_STRTOK strtok_s
+#define DRV_STRTOUL strtoul
+#define DRV_STRTOL strtol
+#define DRV_STRTOULL _strtoui64
+#define DRV_STRTOQ _strtoui64
+#define DRV_FCLOSE(fp) \
+	if ((fp) != NULL) { \
+		fclose((fp)); \
+	}
+#define DRV_WCSTOK wcstok_s
+#define DRV_WCSSTR wcsstr
+#define DRV_STRERROR strerror_s
+#define DRV_VSPRINTF vsprintf_s
+#define DRV_VSWPRINTF vswprintf_s
+#define DRV_GETENV_S getenv_s
+#define DRV_WGETENV_S wgetenv_s
+#define DRV_PUTENV(name) _putenv(name)
+#define DRV_USTRCMP(X, Y) DRV_WCSCMP(X, Y)
+#define DRV_USTRDUP(X) DRV_WCSDUP(X)
+#define DRV_ACCESS(X) _access_s(X, 4)
+#define DRV_STRSTR strstr
+
+#define DRV_STCHAR_COPY DRV_WCSNCPY
+
+#define DRV_GETENV(buf, buf_size, name) _dupenv_s(&(buf), &(buf_size), (name))
+#define DRV_WGETENV(buf, buf_size, name) _wdupenv_s(&(buf), &(buf_size), (name))
+#define DRV_STRTOK_R(tok, delim, context) strtok_s((tok), (delim), (context))
+#define DRV_SCLOSE(fp) _close(fp)
+#define DRV_WRITE(fp, buf, buf_size) _write(fp, buf, buf_size)
+#define DRV_SOPEN_S(fp, name, oflag, shflag, pmode) \
+	_sopen_s((fp), (name), (oflag), (shflag), (pmode))
+#endif
+
+#if defined(DRV_OS_UNIX)
+/*
+   Note: Many of the following macros have a "size" as the second argument. Generally
+   speaking, this is for compatibility with the _s versions available on Windows.
+   On Linux/Solaris/Mac, it is ignored. On Windows, it is the size of the destination
+   buffer and is used with the memory checking features available in the C runtime in
+   debug mode. Do not confuse it with the number of bytes to be copied, or such.
+
+   On Windows, this size should correspond to the number of allocated characters
+   (char or wchar_t) pointed to by the first argument. See MSDN for more details.
+*/
+// To minimize dependencies on other files in sampling_utils,
+// the macro definitions below are confined to this
+// file alone for the static PROG_API case
+#if defined(STATIC_PROG_API)
+#define DRV_STRCPY(dst, dst_size, src) \
+	(strcpy((dst), (src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCPY(dst, dst_size, src, n) \
+	(strncpy((dst), (src), (n)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRCAT(dst, dst_size, src) \
+	(strcat((dst), (src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCAT(dst, dst_size, src, n) \
+	(strncat((dst), (src), (n)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCPY(dst, dst_size, src) \
+	(wcscpy((dst), (const wchar_t *)(src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCPY(dst, dst_size, src, count) \
+	(wcsncpy((dst), (const wchar_t *)(src), (count)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCAT(dst, dst_size, src) \
+	(wcscat((dst), (const wchar_t *)(src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCAT(dst, dst_size, src, n) \
+	(wcsncat((dst), (const wchar_t *)(src), (n)) != NULL) ?
VT_SUCCESS : VT_SAM_ERROR +#define DRV_SPRINTF(dst, dst_size, args...) \ + (sprintf((dst), ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_SNPRINTF(buf, buf_size, length, args...) \ + (snprintf((buf), (length), ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_SNWPRINTF(buf, buf_size, length, args...) \ + (snwprintf((buf), (length), ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_MEMCPY(dst, dst_size, src, n) \ + (memcpy((dst), (src), (n)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_FOPEN(fp, name, mode) ((fp) = fopen((name), (mode))) +#endif + +#define DRV_STRICMP strcasecmp +#define DRV_STRDUP strdup +#define DRV_STRNDUP strndup +#define DRV_STRCMP strcmp +#define DRV_STRNCMP strncmp +#define DRV_STRSTR strstr +#define DRV_VSNPRINTF(buf, buf_size, length, args...) \ + vsnprintf((buf), (length), ##args) +#define DRV_SSCANF sscanf +#define DRV_STRTOK(tok, delim, context) strtok((tok), (delim)) +#define DRV_STRTOUL strtoul +#define DRV_STRTOULL strtoull +#define DRV_STRTOL strtol +#define DRV_FCLOSE(fp) \ + if ((fp) != NULL) { \ + fclose((fp)); \ + } +#define DRV_WCSTOK(tok, delim, context) \ + wcstok((tok), (const wchar_t *)(delim), (context)) +#define DRV_STRERROR strerror +#define DRV_VSPRINTF(dst, dst_size, length, args...) \ + vsprintf((dst), (length), ##args) +#define DRV_VSWPRINTF(dst, dst_size, length, args...) \ + vswprintf((dst), (length), ##args) +#define DRV_GETENV_S(dst, dst_size) getenv(dst) +#define DRV_WGETENV_S(dst, dst_size) wgetenv(dst) +#define DRV_PUTENV(name) putenv(name) +#define DRV_GETENV(buf, buf_size, name) ((buf) = getenv((name))) +#define DRV_USTRCMP(X, Y) DRV_STRCMP(X, Y) +#define DRV_USTRDUP(X) DRV_STRDUP(X) +#define DRV_ACCESS(X) access(X, X_OK) +#define DRV_STRTOK_R(tok, delim, context) strtok_r((tok), (delim), (context)) + +#define DRV_STCHAR_COPY DRV_STRNCPY +#endif + +#if defined(DRV_OS_WINDOWS) +#define DRV_STRTOK_R(tok, delim, context) strtok_s((tok), (delim), (context)) +#else +#define DRV_STRTOK_R(tok, delim, context) strtok_r((tok), (delim), (context)) +#endif + +#if defined(DRV_OS_OPENWRT) || defined(DRV_OS_ANDROID) +#define DRV_STRTOQ strtol +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) +#define DRV_STRTOQ strtoq +#endif + +#if defined(DRV_OS_SOLARIS) +#define DRV_STRTOQ strtoll +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) +#define DRV_WCSDUP wcsdup +#endif + +#if defined(DRV_OS_SOLARIS) +#define DRV_WCSDUP solaris_wcsdup +#endif + +#if defined(DRV_OS_ANDROID) +#define DRV_WCSDUP android_wcsdup +#endif + +/* + * Windows uses wchar_t and linux uses char for strings. + * Need an extra level of abstraction to standardize it. + */ +#if defined(DRV_OS_WINDOWS) +#define DRV_STDUP DRV_WCSDUP +#define DRV_FORMAT_STRING(x) L##x +#define DRV_PRINT_STRING(stream, format, ...) \ + fwprintf((stream), (format), __VA_ARGS__) +#define DRV_STRING_FORMAT_SPECIFIER "%ls" +#else +#define DRV_STDUP DRV_STRDUP +#define DRV_FORMAT_STRING(x) x +#define DRV_PRINT_STRING(stream, format, ...) 
\
+	fprintf((stream), (format), __VA_ARGS__)
+#define DRV_STRING_FORMAT_SPECIFIER "%s"
+#endif
+
+/*
+ * OS return types
+ */
+#if defined(DRV_OS_UNIX)
+#define OS_STATUS int
+#define OS_SUCCESS 0
+#if defined(BUILD_DRV_ESX)
+#define OS_ILLEGAL_IOCTL -1
+#define OS_NO_MEM -2
+#define OS_FAULT -3
+#define OS_INVALID -4
+#define OS_NO_SYSCALL -5
+#define OS_RESTART_SYSCALL -6
+#define OS_IN_PROGRESS -7
+#else
+#define OS_ILLEGAL_IOCTL -ENOTTY
+#define OS_NO_MEM -ENOMEM
+#define OS_FAULT -EFAULT
+#define OS_INVALID -EINVAL
+#define OS_NO_SYSCALL -ENOSYS
+#define OS_RESTART_SYSCALL -ERESTARTSYS
+#define OS_IN_PROGRESS -EALREADY
+#endif
+#endif
+#if defined(DRV_OS_WINDOWS)
+#define OS_STATUS NTSTATUS
+#define OS_SUCCESS STATUS_SUCCESS
+#define OS_ILLEGAL_IOCTL STATUS_UNSUCCESSFUL
+#define OS_NO_MEM STATUS_UNSUCCESSFUL
+#define OS_FAULT STATUS_UNSUCCESSFUL
+#define OS_INVALID STATUS_UNSUCCESSFUL
+#define OS_NO_SYSCALL STATUS_UNSUCCESSFUL
+#define OS_RESTART_SYSCALL STATUS_UNSUCCESSFUL
+#define OS_IN_PROGRESS STATUS_UNSUCCESSFUL
+#endif
+
+/****************************************************************************
+ ** Driver State definitions
+ ***************************************************************************/
+#define DRV_STATE_UNINITIALIZED 0
+#define DRV_STATE_RESERVED 1
+#define DRV_STATE_IDLE 2
+#define DRV_STATE_PAUSED 3
+#define DRV_STATE_STOPPED 4
+#define DRV_STATE_RUNNING 5
+#define DRV_STATE_PAUSING 6
+#define DRV_STATE_PREPARE_STOP 7
+#define DRV_STATE_TERMINATING 8
+
+#define MATCHING_STATE_BIT(state) ((U32)1 << (state))
+#define STATE_BIT_UNINITIALIZED MATCHING_STATE_BIT(DRV_STATE_UNINITIALIZED)
+#define STATE_BIT_RESERVED MATCHING_STATE_BIT(DRV_STATE_RESERVED)
+#define STATE_BIT_IDLE MATCHING_STATE_BIT(DRV_STATE_IDLE)
+#define STATE_BIT_PAUSED MATCHING_STATE_BIT(DRV_STATE_PAUSED)
+#define STATE_BIT_STOPPED MATCHING_STATE_BIT(DRV_STATE_STOPPED)
+#define STATE_BIT_RUNNING MATCHING_STATE_BIT(DRV_STATE_RUNNING)
+#define STATE_BIT_PAUSING MATCHING_STATE_BIT(DRV_STATE_PAUSING)
+#define STATE_BIT_PREPARE_STOP MATCHING_STATE_BIT(DRV_STATE_PREPARE_STOP)
+#define STATE_BIT_TERMINATING MATCHING_STATE_BIT(DRV_STATE_TERMINATING)
+#define STATE_BIT_ANY ((U32)-1)
+
+#define IS_COLLECTING_STATE(state) \
+	(!!(MATCHING_STATE_BIT(state) & \
+	    (STATE_BIT_RUNNING | STATE_BIT_PAUSING | STATE_BIT_PAUSED)))
+
+/*
+ * Stop codes
+ */
+#define DRV_STOP_BASE 0
+#define DRV_STOP_NORMAL 1
+#define DRV_STOP_ASYNC 2
+#define DRV_STOP_CANCEL 3
+
+#define MAX_EVENTS 256 // Limiting maximum multiplexing events to 256.
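+
+// The state/bit-mask split lets one mask test replace several equality
+// checks. A hypothetical helper (illustrative, not part of this patch):
+//
+//   static int can_accept_samples(U32 state)
+//   {
+//       // nonzero while RUNNING, PAUSING or PAUSED; zero otherwise
+//       return IS_COLLECTING_STATE(state);
+//   }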
+#if defined(DRV_OS_UNIX)
+#define UNREFERENCED_PARAMETER(p) ((p) = (p))
+#endif
+
+/*
+ * Global marker names
+ */
+#define START_MARKER_NAME "SEP_START_MARKER"
+#define PAUSE_MARKER_NAME "SEP_PAUSE_MARKER"
+#define RESUME_MARKER_NAME "SEP_RESUME_MARKER"
+
+#define DRV_SOC_STRING_LEN (100 + MAX_MARKER_LENGTH)
+
+/*
+ * Temp path
+ */
+#define SEP_TMPDIR "SEP_TMP_DIR"
+#if defined(DRV_OS_WINDOWS)
+#define OS_TMPDIR "TEMP"
+#define GET_DEFAULT_TMPDIR(dir, size) \
+	{ \
+		GetTempPath((U32)size, dir); \
+	}
+#else
+#define OS_TMPDIR "TMPDIR"
+/*
+ * Unix has a default tmp dir
+ */
+#if defined(DRV_OS_ANDROID)
+#define TEMP_PATH "/data"
+#else
+#define TEMP_PATH "/tmp"
+#endif
+#define GET_DEFAULT_TMPDIR(dir, size) \
+	{ \
+		DRV_STRCPY((STCHAR *)dir, (U32)size, (STCHAR *)TEMP_PATH); \
+	}
+#endif
+
+#define OS_ID_UNKNOWN -1
+#define OS_ID_NATIVE 0
+#define OS_ID_VMM 0
+#define OS_ID_MODEM 1
+#define OS_ID_ANDROID 2
+#define OS_ID_SECVM 3
+#define OS_ID_ACRN 0xFFFF
+
+#define PERF_HW_VER4 (5)
+
+#define INITIAL_BASE_NUM_EVENTS 2000
+#define INITIAL_BASE_NUM_MATRIX_EVENTS 100
+#define NUM_EVENTS_MULTIPLY_FACTOR 2
+
+/*
+ * Memory allocation and deallocation macros
+ */
+
+/* Checks that ptr is not NULL and deallocates it; logs in verbose mode */
+#ifdef SEP_FREE
+#undef SEP_FREE
+#endif
+
+#define SEP_FREE(loc) \
+	if ((loc)) { \
+		free(loc); \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_VERBOSE, "%s:%d Memory free: %d bytes\n", \
+		       __FUNCTION__, __LINE__, sizeof(loc))); \
+		loc = NULL; \
+	}
+
+/*
+ * Allocation macros. In verbose mode, they log the number of bytes requested.
+ * Each allocates the requested size, checks whether the return value is NULL,
+ * and logs an error message on failure.
+ * They do no further error handling; please use the "Memory management error
+ * handling macros" in error_reporting_utils.h for that.
+ */
+#define SEP_MALLOC(loc, size, type) \
+	if (!((loc) = (type *)malloc(size))) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Malloc failed\n", __FUNCTION__, __LINE__)); \
+	} else { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_VERBOSE, \
+		       "%s:%d Memory allocation: %d bytes\n", __FUNCTION__, \
+		       __LINE__, size)); \
+	}
+
+#define SEP_CALLOC(loc, num, size, type) \
+	if (!((loc) = (type *)calloc(num, size))) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Calloc failed\n", __FUNCTION__, __LINE__)); \
+	} else { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_VERBOSE, \
+		       "%s:%d Memory allocation: %d bytes\n", __FUNCTION__, \
+		       __LINE__, size)); \
+	}
+
+#define SEP_REALLOC(new_loc, old_loc, size, type) \
+	if (!((new_loc) = (type *)realloc(old_loc, size))) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Realloc failed\n", __FUNCTION__, __LINE__)); \
+	} else { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_VERBOSE, \
+		       "%s:%d Memory reallocation: %d -> %d bytes\n", \
+		       __FUNCTION__, __LINE__, sizeof(old_loc), size)); \
+	}
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h
new file mode 100644
index 00000000000000..efefffab4d0ba0
--- /dev/null
+++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ecb.h
@@ -0,0 +1,1317 @@
+/*
+ * Copyright (C) 2007 Intel Corporation
+ *
+ * This software and the related documents are Intel copyrighted materials, and your use of them
+ * is governed by the express license under which they were provided to you ("License").
Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _LWPMUDRV_ECB_UTILS_H_ +#define _LWPMUDRV_ECB_UTILS_H_ + +#if defined(DRV_OS_WINDOWS) +#pragma warning(disable : 4200) +#pragma warning(disable : 4214) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +// control register types +#define CCCR 1 // counter configuration control register +#define ESCR 2 // event selection control register +#define DATA 4 // collected as snapshot of current value +#define DATA_RO_DELTA 8 // read-only counter collected as current-previous +#define DATA_RO_SS \ + 16 // read-only counter collected as snapshot of current value +#define METRICS 32 // hardware metrics + +// event multiplexing modes +#define EM_DISABLED -1 +#define EM_TIMER_BASED 0 +#define EM_EVENT_BASED_PROFILING 1 +#define EM_TRIGGER_BASED 2 + +// *************************************************************************** + +/*!\struct EVENT_DESC_NODE + * \var sample_size - size of buffer in bytes to hold the sample + extras + * \var max_gp_events - max number of General Purpose events per EM group + * \var pebs_offset - offset in the sample to locate the pebs capture information + * \var lbr_offset - offset in the sample to locate the lbr information + * \var lbr_num_regs - offset in the sample to locate the number of lbr register information + * \var latency_offset_in_sample - offset in the sample to locate the latency information + * \var latency_size_in_sample - size of latency records in the sample + * \var latency_size_from_pebs_record - size of the latency data from pebs record in the sample + * \var latency_offset_in_pebs_record - offset in the sample to locate the latency information + * in pebs record + * \var power_offset_in_sample - offset in the sample to locate the power information + * \var ebc_offset - offset in the sample to locate the ebc count information + * \var uncore_ebc_offset - offset in the sample to locate the uncore ebc count information + * + * \var ro_offset - offset of RO data in the sample + * \var ro_count - total number of RO entries (including all of IEAR/DEAR/BTB/IPEAR) + * \var iear_offset - offset into RO data at which IEAR entries begin + * \var dear_offset - offset into RO data at which DEAR entries begin + * \var btb_offset - offset into RO data at which BTB entries begin (these use the same PMDs) + * \var ipear_offset - offset into RO data at which IPEAR entries begin (these use the same PMDs) + * \var iear_count - number of IEAR entries + * \var dear_count - number of DEAR entries + * \var btb_count - number of BTB entries + * \var ipear_count - number of IPEAR entries + * + * \var pwr_offset - offset in the sample to locate the pwr count information + * \var p_state_offset - offset in the sample to locate the p_state information (APERF/MPERF) + * + * \brief Data structure to describe the events and the mode + * + */ + +typedef struct EVENT_DESC_NODE_S EVENT_DESC_NODE; +typedef EVENT_DESC_NODE *EVENT_DESC; + +struct EVENT_DESC_NODE_S { + U32 sample_size; + U32 pebs_offset; + U32 pebs_size; + U32 lbr_offset; + U32 lbr_num_regs; + U32 latency_offset_in_sample; + U32 latency_size_in_sample; + U32 latency_size_from_pebs_record; + U32 latency_offset_in_pebs_record; + U32 
power_offset_in_sample; + U32 ebc_offset; + U32 uncore_ebc_offset; + U32 eventing_ip_offset; + U32 hle_offset; + U32 pwr_offset; + U32 callstack_offset; + U32 callstack_size; + U32 p_state_offset; + U32 pebs_tsc_offset; + U32 perfmetrics_offset; + U32 perfmetrics_size; + /* ----------ADAPTIVE PEBS FIELDS --------- */ + U16 applicable_counters_offset; + U16 gpr_info_offset; + U16 gpr_info_size; + U16 xmm_info_offset; + U16 xmm_info_size; + U16 lbr_info_size; + /*------------------------------------------*/ + U32 reserved2; + U64 reserved3; +}; + +// +// Accessor macros for EVENT_DESC node +// +#define EVENT_DESC_sample_size(ec) ((ec)->sample_size) +#define EVENT_DESC_pebs_offset(ec) ((ec)->pebs_offset) +#define EVENT_DESC_pebs_size(ec) ((ec)->pebs_size) +#define EVENT_DESC_lbr_offset(ec) ((ec)->lbr_offset) +#define EVENT_DESC_lbr_num_regs(ec) ((ec)->lbr_num_regs) +#define EVENT_DESC_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) +#define EVENT_DESC_latency_size_from_pebs_record(ec) \ + ((ec)->latency_size_from_pebs_record) +#define EVENT_DESC_latency_offset_in_pebs_record(ec) \ + ((ec)->latency_offset_in_pebs_record) +#define EVENT_DESC_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) +#define EVENT_DESC_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) +#define EVENT_DESC_ebc_offset(ec) ((ec)->ebc_offset) +#define EVENT_DESC_uncore_ebc_offset(ec) ((ec)->uncore_ebc_offset) +#define EVENT_DESC_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) +#define EVENT_DESC_hle_offset(ec) ((ec)->hle_offset) +#define EVENT_DESC_pwr_offset(ec) ((ec)->pwr_offset) +#define EVENT_DESC_callstack_offset(ec) ((ec)->callstack_offset) +#define EVENT_DESC_callstack_size(ec) ((ec)->callstack_size) +#define EVENT_DESC_perfmetrics_offset(ec) ((ec)->perfmetrics_offset) +#define EVENT_DESC_perfmetrics_size(ec) ((ec)->perfmetrics_size) +#define EVENT_DESC_p_state_offset(ec) ((ec)->p_state_offset) +#define EVENT_DESC_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) +#define EVENT_DESC_applicable_counters_offset(ec) \ + ((ec)->applicable_counters_offset) +#define EVENT_DESC_gpr_info_offset(ec) ((ec)->gpr_info_offset) +#define EVENT_DESC_gpr_info_size(ec) ((ec)->gpr_info_size) +#define EVENT_DESC_xmm_info_offset(ec) ((ec)->xmm_info_offset) +#define EVENT_DESC_xmm_info_size(ec) ((ec)->xmm_info_size) +#define EVENT_DESC_lbr_info_size(ec) ((ec)->lbr_info_size) + +// *************************************************************************** + +/*!\struct EVENT_CONFIG_NODE + * \var num_groups - The number of groups being programmed + * \var em_mode - Is EM valid? If so how? + * \var em_time_slice - EM valid? 
time slice in milliseconds + * \var sample_size - size of buffer in bytes to hold the sample + extras + * \var max_gp_events - Max number of General Purpose events per EM group + * \var pebs_offset - offset in the sample to locate the pebs capture information + * \var lbr_offset - offset in the sample to locate the lbr information + * \var lbr_num_regs - offset in the sample to locate the lbr information + * \var latency_offset_in_sample - offset in the sample to locate the latency information + * \var latency_size_in_sample - size of latency records in the sample + * \var latency_size_from_pebs_record - offset in the sample to locate the latency + * size from pebs record + * \var latency_offset_in_pebs_record - offset in the sample to locate the latency information + * in pebs record + * \var power_offset_in_sample - offset in the sample to locate the power information + * \var ebc_offset - offset in the sample to locate the ebc count information + * + * \var pwr_offset - offset in the sample to locate the pwr count information + * \var p_state_offset - offset in the sample to locate the p_state information (APERF/MPERF) + * + * \brief Data structure to describe the events and the mode + * + */ + +typedef struct EVENT_CONFIG_NODE_S EVENT_CONFIG_NODE; +typedef EVENT_CONFIG_NODE *EVENT_CONFIG; + +struct EVENT_CONFIG_NODE_S { + U32 num_groups; + S32 em_mode; + S32 em_factor; + S32 em_event_num; + U32 sample_size; + U32 max_gp_events; + U32 max_fixed_counters; + U32 max_ro_counters; // maximum read-only counters + U32 pebs_offset; + U32 pebs_size; + U32 lbr_offset; + U32 lbr_num_regs; + U32 latency_offset_in_sample; + U32 latency_size_in_sample; + U32 latency_size_from_pebs_record; + U32 latency_offset_in_pebs_record; + U32 power_offset_in_sample; + U32 ebc_offset; + U32 num_groups_unc; + U32 ebc_offset_unc; + U32 sample_size_unc; + U32 eventing_ip_offset; + U32 hle_offset; + U32 pwr_offset; + U32 callstack_offset; + U32 callstack_size; + U32 p_state_offset; + U32 pebs_tsc_offset; + U64 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +// +// Accessor macros for EVENT_CONFIG node +// +#define EVENT_CONFIG_num_groups(ec) ((ec)->num_groups) +#define EVENT_CONFIG_mode(ec) ((ec)->em_mode) +#define EVENT_CONFIG_em_factor(ec) ((ec)->em_factor) +#define EVENT_CONFIG_em_event_num(ec) ((ec)->em_event_num) +#define EVENT_CONFIG_sample_size(ec) ((ec)->sample_size) +#define EVENT_CONFIG_max_gp_events(ec) ((ec)->max_gp_events) +#define EVENT_CONFIG_max_fixed_counters(ec) ((ec)->max_fixed_counters) +#define EVENT_CONFIG_max_ro_counters(ec) ((ec)->max_ro_counters) +#define EVENT_CONFIG_pebs_offset(ec) ((ec)->pebs_offset) +#define EVENT_CONFIG_pebs_size(ec) ((ec)->pebs_size) +#define EVENT_CONFIG_lbr_offset(ec) ((ec)->lbr_offset) +#define EVENT_CONFIG_lbr_num_regs(ec) ((ec)->lbr_num_regs) +#define EVENT_CONFIG_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) +#define EVENT_CONFIG_latency_size_from_pebs_record(ec) \ + ((ec)->latency_size_from_pebs_record) +#define EVENT_CONFIG_latency_offset_in_pebs_record(ec) \ + ((ec)->latency_offset_in_pebs_record) +#define EVENT_CONFIG_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) +#define EVENT_CONFIG_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) +#define EVENT_CONFIG_ebc_offset(ec) ((ec)->ebc_offset) +#define EVENT_CONFIG_num_groups_unc(ec) ((ec)->num_groups_unc) +#define EVENT_CONFIG_ebc_offset_unc(ec) ((ec)->ebc_offset_unc) +#define EVENT_CONFIG_sample_size_unc(ec) ((ec)->sample_size_unc) +#define 
EVENT_CONFIG_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) +#define EVENT_CONFIG_hle_offset(ec) ((ec)->hle_offset) +#define EVENT_CONFIG_pwr_offset(ec) ((ec)->pwr_offset) +#define EVENT_CONFIG_callstack_offset(ec) ((ec)->callstack_offset) +#define EVENT_CONFIG_callstack_size(ec) ((ec)->callstack_size) +#define EVENT_CONFIG_p_state_offset(ec) ((ec)->p_state_offset) +#define EVENT_CONFIG_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) + +typedef enum { UNC_MUX = 1, UNC_COUNTER } UNC_SA_PROG_TYPE; + +typedef enum { + UNC_PCICFG = 1, + UNC_MMIO, + UNC_STOP, + UNC_MEMORY, + UNC_STATUS +} UNC_SA_CONFIG_TYPE; + +typedef enum { + UNC_MCHBAR = 1, + UNC_DMIBAR, + UNC_PCIEXBAR, + UNC_GTTMMADR, + UNC_GDXCBAR, + UNC_CHAPADR, + UNC_SOCPCI, + UNC_NPKBAR +} UNC_SA_BAR_TYPE; + +typedef enum { UNC_OP_READ = 1, UNC_OP_WRITE, UNC_OP_RMW } UNC_SA_OPERATION; + +typedef enum { + STATIC_COUNTER = 1, + FREERUN_COUNTER, + PROG_FREERUN_COUNTER, + PROGRAMMABLE_COUNTER +} COUNTER_TYPES; + +typedef enum { + PACKAGE_EVENT = 1, + MODULE_EVENT, + THREAD_EVENT, + SYSTEM_EVENT +} EVENT_SCOPE_TYPES; + +typedef enum { + DEVICE_CORE = 1, // CORE DEVICE + DEVICE_HETERO, + DEVICE_UNC_CBO = 10, // UNCORE DEVICES START + DEVICE_UNC_HA, + DEVICE_UNC_IMC, + DEVICE_UNC_IRP, + DEVICE_UNC_NCU, + DEVICE_UNC_PCU, + DEVICE_UNC_POWER, + DEVICE_UNC_QPI, + DEVICE_UNC_R2PCIE, + DEVICE_UNC_R3QPI, + DEVICE_UNC_SBOX, + DEVICE_UNC_GT, + DEVICE_UNC_UBOX, + DEVICE_UNC_WBOX, + DEVICE_UNC_COREI7, + DEVICE_UNC_CHA, + DEVICE_UNC_EDC, + DEVICE_UNC_IIO, + DEVICE_UNC_M2M, + DEVICE_UNC_EDRAM, + DEVICE_UNC_FPGA_CACHE, + DEVICE_UNC_FPGA_FAB, + DEVICE_UNC_FPGA_THERMAL, + DEVICE_UNC_FPGA_POWER, + DEVICE_UNC_FPGA_GB, + DEVICE_UNC_MDF, + DEVICE_UNC_RDT, + DEVICE_UNC_PMT, // Core Telemetry + DEVICE_UNC_M2HBM, + DEVICE_UNC_MCHBM, + DEVICE_UNC_CXLCM, + DEVICE_UNC_CXLDP, + DEVICE_UNC_NOC, + DEVICE_UNC_TELEMETRY = 150, // TELEMETRY DEVICE + DEVICE_UNC_CHAP = 200, // CHIPSET DEVICES START + DEVICE_UNC_GMCH, + DEVICE_UNC_GFX, + DEVICE_UNC_SOCPERF = 300, // UNCORE VISA DEVICES START + DEVICE_UNC_HFI_RXE = 400, // STL HFI + DEVICE_UNC_HFI_TXE, + DEVICE_UNC_1 = 450, + DEVICE_UNC_2, + DEVICE_UNC_GFX_PMT = 500, // Graphics Cards + DEVICE_UNC_CSTATE_PKG = 600, + DEVICE_UNC_CSTATE_CORE, + DEVICE_PMEM_FC, + DEVICE_PMEM_MC +} DEVICE_TYPES; + +typedef enum { + LBR_ENTRY_TOS = 0, + LBR_ENTRY_FROM_IP, + LBR_ENTRY_TO_IP, + LBR_ENTRY_INFO, + LER_ENTRY_FROM_IP, + LER_ENTRY_TO_IP, + LER_ENTRY_INFO +} LBR_ENTRY_TYPE; + +// *************************************************************************** + +/*!\struct EVENT_REG_ID_NODE + * \var reg_id - MSR index to r/w + * \var pci_id - PCI based register and its details to operate on + */ +typedef struct EVENT_REG_ID_NODE_S EVENT_REG_ID_NODE; +typedef EVENT_REG_ID_NODE *EVENT_REG_ID; + +struct EVENT_REG_ID_NODE_S { + U32 reg_id; + U32 pci_bus_no; + U32 pci_dev_no; + U32 pci_func_no; + U32 data_size; + U32 bar_index; // Points to the index (MMIO_INDEX_LIST) + // of bar memory map list to be used in mmio_bar_list of ECB + U32 reserved1; + U32 reserved2; + U64 phys_addr; // 64 bit register address +}; + +// *************************************************************************** + +typedef enum { + PMU_REG_RW_READ = 1, + PMU_REG_RW_WRITE, + PMU_REG_RW_READ_WRITE, + PMU_REG_RW_READ_MASK_WRITE, + PMU_REG_RW_READ_VALIDATE_MASK, + PMU_REG_RW_READ_MERGE_READ, +} PMU_REG_RW_TYPES; + +typedef enum { + PMU_REG_PROG_MSR = 1, + PMU_REG_PROG_PCI, + PMU_REG_PROG_MMIO, +} PMU_REG_PROG_TYPES; + +typedef enum { + PMU_REG_GLOBAL_CTRL = 1, + 
PMU_REG_UNIT_CTRL,
+	PMU_REG_UNIT_STATUS,
+	PMU_REG_DATA,
+	PMU_REG_EVENT_SELECT,
+	PMU_REG_FILTER,
+	PMU_REG_FIXED_CTRL,
+	PMU_REG_DISCOVERY_BASE,
+} PMU_REG_TYPES;
+
+/*!\struct EVENT_REG_NODE
+ * \var reg_type - register type
+ * \var event_id_index - event ID index
+ * \var event_reg_id - register ID/pci register details
+ * \var desc_id - desc ID
+ * \var flags - flags
+ * \var reg_value - register value
+ * \var max_bits - max bits
+ * \var scheduled - boolean to specify if this event node has been scheduled already
+ * \var bus_no - PCI bus number
+ * \var dev_no - PCI device number
+ * \var func_no - PCI function number
+ * \var counter_type - Event counter type - static/freerun
+ * \var event_scope - Event scope - package/module/thread
+ * \var reg_prog_type - Register Programming type
+ * \var reg_rw_type - Register Read/Write type
+ * \var reg_order - Register order in the programming sequence
+ *
+ * \brief Data structure to describe the event registers
+ *
+ */
+
+typedef struct EVENT_REG_NODE_S EVENT_REG_NODE;
+typedef EVENT_REG_NODE *EVENT_REG;
+
+struct EVENT_REG_NODE_S {
+	U8 reg_type;
+	U8 unit_id;
+	U16 event_id_index;
+	U16 counter_event_offset;
+	U8 driverless_dup_event; // identify event as duplicate
+	U8 reg_package_id;
+	EVENT_REG_ID_NODE event_reg_id;
+	U64 reg_value;
+	U16 desc_id;
+	U16 flags;
+	U32 reserved2;
+	U64 max_bits;
+	U8 scheduled;
+	S8 secondary_pci_offset_shift;
+	U16 secondary_pci_offset_offset; // offset of the offset
+	U32 counter_type;
+	U32 event_scope;
+	U8 reg_prog_type;
+	U8 reg_rw_type;
+	U8 reg_order;
+	U8 bit_position;
+	U64 secondary_pci_offset_mask;
+	U32 core_event_id;
+	U32 uncore_buffer_offset_in_package;
+	U32 uncore_buffer_offset_in_system;
+	U32 aux_reg_id_to_read;
+	U64 aux_read_mask;
+	U32 device_subtype;
+	U32 reserved3;
+	U64 reserved4;
+};
+
+//
+// Accessor macros for EVENT_REG node
+// Note: the flags field is not directly addressable to prevent hackery
+//
+#define EVENT_REG_reg_type(x, i) ((x)[(i)].reg_type)
+#define EVENT_REG_event_id_index(x, i) ((x)[(i)].event_id_index)
+#define EVENT_REG_unit_id(x, i) ((x)[(i)].unit_id)
+#define EVENT_REG_reg_package_id(x, i) ((x)[(i)].reg_package_id)
+#define EVENT_REG_counter_event_offset(x, i) ((x)[(i)].counter_event_offset)
+#define EVENT_REG_reg_id(x, i) ((x)[(i)].event_reg_id.reg_id)
+#define EVENT_REG_bus_no(x, i) ((x)[(i)].event_reg_id.pci_bus_no)
+#define EVENT_REG_dev_no(x, i) ((x)[(i)].event_reg_id.pci_dev_no)
+#define EVENT_REG_func_no(x, i) ((x)[(i)].event_reg_id.pci_func_no)
+
+// points to the reg_id
+#define EVENT_REG_offset(x, i) ((x)[(i)].event_reg_id.reg_id)
+#define EVENT_REG_data_size(x, i) ((x)[(i)].event_reg_id.data_size)
+#define EVENT_REG_bar_index(x, i) ((x)[(i)].event_reg_id.bar_index)
+#define EVENT_REG_phys_addr(x, i) ((x)[(i)].event_reg_id.phys_addr)
+#define EVENT_REG_desc_id(x, i) ((x)[(i)].desc_id)
+#define EVENT_REG_flags(x, i) ((x)[(i)].flags)
+#define EVENT_REG_reg_value(x, i) ((x)[(i)].reg_value)
+#define EVENT_REG_max_bits(x, i) ((x)[(i)].max_bits)
+#define EVENT_REG_scheduled(x, i) ((x)[(i)].scheduled)
+#define EVENT_REG_secondary_pci_offset_shift(x, i) \
+	((x)[(i)].secondary_pci_offset_shift)
+#define EVENT_REG_secondary_pci_offset_offset(x, i) \
+	((x)[(i)].secondary_pci_offset_offset)
+#define EVENT_REG_secondary_pci_offset_mask(x, i) \
+	((x)[(i)].secondary_pci_offset_mask)
+
+#define EVENT_REG_counter_type(x, i) ((x)[(i)].counter_type)
+#define EVENT_REG_event_scope(x, i) ((x)[(i)].event_scope)
+#define EVENT_REG_reg_prog_type(x, i)
((x)[(i)].reg_prog_type) +#define EVENT_REG_reg_rw_type(x, i) ((x)[(i)].reg_rw_type) +#define EVENT_REG_reg_order(x, i) ((x)[(i)].reg_order) +#define EVENT_REG_bit_position(x, i) ((x)[(i)].bit_position) + +#define EVENT_REG_core_event_id(x, i) ((x)[(i)].core_event_id) +#define EVENT_REG_uncore_buffer_offset_in_package(x, i) \ + ((x)[(i)].uncore_buffer_offset_in_package) +#define EVENT_REG_uncore_buffer_offset_in_system(x, i) \ + ((x)[(i)].uncore_buffer_offset_in_system) +#define EVENT_REG_aux_reg_id_to_read(x, i) ((x)[(i)].aux_reg_id_to_read) +#define EVENT_REG_aux_read_mask(x, i) ((x)[(i)].aux_read_mask) +#define EVENT_REG_aux_shift_index(x, i) ((x)[(i)].bit_position) // Alias +#define EVENT_REG_device_subtype(x, i) ((x)[(i)].device_subtype) +#define EVENT_REG_driverless_dup_event(x, i) ((x)[(i)].driverless_dup_event) + +// +// Config bits +// +#define EVENT_REG_precise_bit 0x00000001 +#define EVENT_REG_global_bit 0x00000002 +#define EVENT_REG_uncore_bit 0x00000004 +#define EVENT_REG_uncore_q_rst_bit 0x00000008 +#define EVENT_REG_latency_bit 0x00000010 +#define EVENT_REG_is_gp_reg_bit 0x00000020 +#define EVENT_REG_clean_up_bit 0x00000040 +#define EVENT_REG_em_trigger_bit 0x00000080 +#define EVENT_REG_lbr_value_bit 0x00000100 +#define EVENT_REG_fixed_reg_bit 0x00000200 +#define EVENT_REG_unc_evt_intr_read_bit 0x00000400 +#define EVENT_REG_generic_status_bit 0x00000800 +#define EVENT_REG_multi_pkg_evt_bit 0x00001000 +#define EVENT_REG_branch_evt_bit 0x00002000 +#define EVENT_REG_ebc_sampling_evt_bit 0x00004000 +#define EVENT_REG_collect_on_ctx_sw 0x00008000 +// +// Accessor macros for config bits +// +#define EVENT_REG_precise_get(x, i) ((x)[(i)].flags & EVENT_REG_precise_bit) +#define EVENT_REG_precise_set(x, i) ((x)[(i)].flags |= EVENT_REG_precise_bit) +#define EVENT_REG_precise_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_precise_bit) + +#define EVENT_REG_global_get(x, i) ((x)[(i)].flags & EVENT_REG_global_bit) +#define EVENT_REG_global_set(x, i) ((x)[(i)].flags |= EVENT_REG_global_bit) +#define EVENT_REG_global_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_global_bit) + +#define EVENT_REG_uncore_get(x, i) ((x)[(i)].flags & EVENT_REG_uncore_bit) +#define EVENT_REG_uncore_set(x, i) ((x)[(i)].flags |= EVENT_REG_uncore_bit) +#define EVENT_REG_uncore_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_uncore_bit) + +#define EVENT_REG_uncore_q_rst_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_uncore_q_rst_bit) +#define EVENT_REG_uncore_q_rst_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_uncore_q_rst_bit) +#define EVENT_REG_uncore_q_rst_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_uncore_q_rst_bit) + +#define EVENT_REG_latency_get(x, i) ((x)[(i)].flags & EVENT_REG_latency_bit) +#define EVENT_REG_latency_set(x, i) ((x)[(i)].flags |= EVENT_REG_latency_bit) +#define EVENT_REG_latency_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_latency_bit) + +#define EVENT_REG_is_gp_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_is_gp_reg_bit) +#define EVENT_REG_is_gp_reg_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_is_gp_reg_bit) +#define EVENT_REG_is_gp_reg_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_is_gp_reg_bit) + +#define EVENT_REG_lbr_value_get(x, i) ((x)[(i)].flags & EVENT_REG_lbr_value_bit) +#define EVENT_REG_lbr_value_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_lbr_value_bit) +#define EVENT_REG_lbr_value_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_lbr_value_bit) + +#define EVENT_REG_fixed_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_fixed_reg_bit) +#define EVENT_REG_fixed_reg_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_fixed_reg_bit) 
+#define EVENT_REG_fixed_reg_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_fixed_reg_bit) + +#define EVENT_REG_multi_pkg_evt_bit_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_multi_pkg_evt_bit) +#define EVENT_REG_multi_pkg_evt_bit_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_multi_pkg_evt_bit) +#define EVENT_REG_multi_pkg_evt_bit_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_multi_pkg_evt_bit) + +#define EVENT_REG_clean_up_get(x, i) ((x)[(i)].flags & EVENT_REG_clean_up_bit) +#define EVENT_REG_clean_up_set(x, i) ((x)[(i)].flags |= EVENT_REG_clean_up_bit) +#define EVENT_REG_clean_up_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_clean_up_bit) + +#define EVENT_REG_em_trigger_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_em_trigger_bit) +#define EVENT_REG_em_trigger_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_em_trigger_bit) +#define EVENT_REG_em_trigger_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_em_trigger_bit) + +#define EVENT_REG_branch_evt_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_branch_evt_bit) +#define EVENT_REG_branch_evt_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_branch_evt_bit) +#define EVENT_REG_branch_evt_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_branch_evt_bit) + +#define EVENT_REG_ebc_sampling_evt_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_ebc_sampling_evt_bit) +#define EVENT_REG_ebc_sampling_evt_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_ebc_sampling_evt_bit) +#define EVENT_REG_ebc_sampling_evt_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_ebc_sampling_evt_bit) + +#define EVENT_REG_collect_on_ctx_sw_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_collect_on_ctx_sw) +#define EVENT_REG_collect_on_ctx_sw_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_collect_on_ctx_sw) +#define EVENT_REG_collect_on_ctx_sw_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_collect_on_ctx_sw) + +#define EVENT_REG_unc_evt_intr_read_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_unc_evt_intr_read_bit) +#define EVENT_REG_unc_evt_intr_read_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_unc_evt_intr_read_bit) +#define EVENT_REG_unc_evt_intr_read_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_unc_evt_intr_read_bit) + +#define EVENT_REG_generic_status_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_generic_status_bit) +#define EVENT_REG_generic_status_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_generic_status_bit) +#define EVENT_REG_generic_status_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_generic_status_bit) + +// *************************************************************************** + +/*!\struct DRV_PCI_DEVICE_ENTRY_NODE_S + * \var bus_no - PCI bus no to read + * \var dev_no - PCI device no to read + * \var func_no PCI device no to read + * \var bar_offset BASE Address Register offset of the PCI based PMU + * \var bit_offset Bit offset of the same + * \var size size of read/write + * \var bar_address the actual BAR present + * \var enable_offset Offset info to enable/disable + * \var enabled Status of enable/disable + * \brief Data structure to describe the PCI Device + * + */ + +typedef struct DRV_PCI_DEVICE_ENTRY_NODE_S DRV_PCI_DEVICE_ENTRY_NODE; +typedef DRV_PCI_DEVICE_ENTRY_NODE *DRV_PCI_DEVICE_ENTRY; + +struct DRV_PCI_DEVICE_ENTRY_NODE_S { + U32 bus_no; + U32 dev_no; + U32 func_no; + U32 bar_offset; + U64 bar_mask; + U32 bit_offset; + U32 size; + U64 bar_address; + U32 enable_offset; + U32 enabled; + U32 base_offset_for_mmio; + U32 operation; + U32 bar_name; + U32 prog_type; + U32 config_type; + S8 bar_shift; // positive shifts right, negative shifts left + U8 reserved0; + U16 reserved1; + U64 value; + U64 mask; + U64 
virtual_address; + U32 port_id; + U32 op_code; + U32 device_id; + U16 bar_num; + U16 feature_id; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +// +// Accessor macros for DRV_PCI_DEVICE_NODE node +// +#define DRV_PCI_DEVICE_ENTRY_bus_no(x) ((x)->bus_no) +#define DRV_PCI_DEVICE_ENTRY_dev_no(x) ((x)->dev_no) +#define DRV_PCI_DEVICE_ENTRY_func_no(x) ((x)->func_no) +#define DRV_PCI_DEVICE_ENTRY_bar_offset(x) ((x)->bar_offset) +#define DRV_PCI_DEVICE_ENTRY_bar_mask(x) ((x)->bar_mask) +#define DRV_PCI_DEVICE_ENTRY_bit_offset(x) ((x)->bit_offset) +#define DRV_PCI_DEVICE_ENTRY_size(x) ((x)->size) +#define DRV_PCI_DEVICE_ENTRY_bar_address(x) ((x)->bar_address) +#define DRV_PCI_DEVICE_ENTRY_enable_offset(x) ((x)->enable_offset) +#define DRV_PCI_DEVICE_ENTRY_enable(x) ((x)->enabled) +#define DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(x) ((x)->base_offset_for_mmio) +#define DRV_PCI_DEVICE_ENTRY_operation(x) ((x)->operation) +#define DRV_PCI_DEVICE_ENTRY_bar_name(x) ((x)->bar_name) +#define DRV_PCI_DEVICE_ENTRY_prog_type(x) ((x)->prog_type) +#define DRV_PCI_DEVICE_ENTRY_config_type(x) ((x)->config_type) +#define DRV_PCI_DEVICE_ENTRY_bar_shift(x) ((x)->bar_shift) +#define DRV_PCI_DEVICE_ENTRY_value(x) ((x)->value) +#define DRV_PCI_DEVICE_ENTRY_mask(x) ((x)->mask) +#define DRV_PCI_DEVICE_ENTRY_virtual_address(x) ((x)->virtual_address) +#define DRV_PCI_DEVICE_ENTRY_port_id(x) ((x)->port_id) +#define DRV_PCI_DEVICE_ENTRY_op_code(x) ((x)->op_code) +#define DRV_PCI_DEVICE_ENTRY_device_id(x) ((x)->device_id) +#define DRV_PCI_DEVICE_ENTRY_bar_num(x) ((x)->bar_num) +#define DRV_PCI_DEVICE_ENTRY_feature_id(x) ((x)->feature_id) + +// *************************************************************************** +typedef enum { + PMU_OPERATION_INITIALIZE = 0, + PMU_OPERATION_WRITE, + PMU_OPERATION_ENABLE, + PMU_OPERATION_DISABLE, + PMU_OPERATION_READ, + PMU_OPERATION_CLEANUP, + PMU_OPERATION_READ_LBRS, + PMU_OPERATION_GLOBAL_REGS, + PMU_OPERATION_CTRL_GP, + PMU_OPERATION_DATA_FIXED, + PMU_OPERATION_DATA_GP, + PMU_OPERATION_OCR, + PMU_OPERATION_HW_ERRATA, + PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA, + PMU_OPERATION_CHECK_OVERFLOW_ERRATA, + PMU_OPERATION_ALL_REG, + PMU_OPERATION_DATA_ALL, + PMU_OPERATION_GLOBAL_STATUS, + PMU_OPERATION_METRICS, +} PMU_OPERATION_TYPES; +#define MAX_OPERATION_TYPES 32 + +/*!\struct PMU_OPERATIONS_NODE + * \var operation_type - Type of operation from enumeration PMU_OPERATION_TYPES + * \var register_start - Start index of the registers for a specific operation + * \var register_len - Number of registers for a specific operation + * + * \brief + * Structure for defining start and end indices in the ECB entries array for + * each type of operation performed in the driver + * initialize, write, read, enable, disable, etc. 
+ */ +typedef struct PMU_OPERATIONS_NODE_S PMU_OPERATIONS_NODE; +typedef PMU_OPERATIONS_NODE *PMU_OPERATIONS; +struct PMU_OPERATIONS_NODE_S { + U32 operation_type; + U32 register_start; + U32 register_len; + U32 reserved1; + U32 reserved2; + U32 reserved3; +}; +#define PMU_OPERATIONS_operation_type(x) ((x)->operation_type) +#define PMU_OPERATIONS_register_start(x) ((x)->register_start) +#define PMU_OPERATIONS_register_len(x) ((x)->register_len) +#define PMU_OPER_operation_type(x, i) ((x)[(i)].operation_type) +#define PMU_OPER_register_start(x, i) ((x)[(i)].register_start) +#define PMU_OPER_register_len(x, i) ((x)[(i)].register_len) + +typedef enum { + ECB_MMIO_BAR1 = 1, + ECB_MMIO_BAR2 = 2, + ECB_MMIO_BAR3 = 3, + ECB_MMIO_BAR4 = 4, + ECB_MMIO_BAR5 = 5, + ECB_MMIO_BAR6 = 6, + ECB_MMIO_BAR7 = 7, + ECB_MMIO_BAR8 = 8, +} MMIO_INDEX_LIST; +#define MAX_MMIO_BARS 8 + +/*!\struct MMIO_BAR_INFO_NODE + */ +typedef struct MMIO_BAR_INFO_NODE_S MMIO_BAR_INFO_NODE; +typedef MMIO_BAR_INFO_NODE *MMIO_BAR_INFO; + +struct MMIO_BAR_INFO_NODE_S { + U32 bus_no; + U32 dev_no; + U32 func_no; + U32 addr_size; + U64 base_offset_for_mmio; + U32 map_size_for_mmio; + U32 main_bar_offset; + U64 main_bar_mask; + U8 main_bar_shift; + U8 reserved1; + U16 reserved2; + U32 reserved3; + U32 reserved4; + U32 secondary_bar_offset; + U64 secondary_bar_mask; + U8 secondary_bar_shift; + U8 reserved5; + U16 reserved6; + U32 reserved7; + U16 feature_id; + U16 reserved8; + U32 reserved9; + U64 reserved10; +}; + +#define MMIO_BAR_INFO_bus_no(x) ((x)->bus_no) +#define MMIO_BAR_INFO_dev_no(x) ((x)->dev_no) +#define MMIO_BAR_INFO_func_no(x) ((x)->func_no) +#define MMIO_BAR_INFO_addr_size(x) ((x)->addr_size) +#define MMIO_BAR_INFO_base_offset_for_mmio(x) ((x)->base_offset_for_mmio) +#define MMIO_BAR_INFO_map_size_for_mmio(x) ((x)->map_size_for_mmio) +#define MMIO_BAR_INFO_main_bar_offset(x) ((x)->main_bar_offset) +#define MMIO_BAR_INFO_main_bar_mask(x) ((x)->main_bar_mask) +#define MMIO_BAR_INFO_main_bar_shift(x) ((x)->main_bar_shift) +#define MMIO_BAR_INFO_secondary_bar_offset(x) ((x)->secondary_bar_offset) +#define MMIO_BAR_INFO_secondary_bar_mask(x) ((x)->secondary_bar_mask) +#define MMIO_BAR_INFO_secondary_bar_shift(x) ((x)->secondary_bar_shift) +#define MMIO_BAR_INFO_physical_address(x) ((x)->physical_address) +#define MMIO_BAR_INFO_virtual_address(x) ((x)->virtual_address) +#define MMIO_BAR_INFO_feature_id(x) ((x)->feature_id) + +/*!\struct ECB_NODE_S + * \var version - Version of ECB struct. + * \var num_entries - Total number of entries in "entries". + * \var group_id - Group ID. + * \var num_events - Number of events in this group. + * \var cccr_start - Starting index of counter configuration control registers in "entries". + * \var cccr_pop - Number of counter configuration control registers in "entries". + * \var escr_start - Starting index of event selection control registers in "entries". + * \var escr_pop - Number of event selection control registers in "entries". + * \var data_start - Starting index of data registers in "entries". + * \var data_pop - Number of data registers in "entries". + * \var pcidev_entry_node PCI device details for one device. + * \var metric_start Starting index of metric registers in "entries". + * \var metric_pop Number of metric registers in "entries". + * \var entries - All the register nodes required for programming. 
+ * + * \brief + */ + +typedef struct ECB_NODE_S ECB_NODE; +typedef ECB_NODE *ECB; + +struct ECB_NODE_S { + U8 version; + U8 reserved1; + U16 reserved2; + U32 num_entries; + U32 group_id; + U32 num_events; + U32 cccr_start; + U32 cccr_pop; + U32 escr_start; + U32 escr_pop; + U32 data_start; + U32 data_pop; + U16 flags; + U8 pmu_timer_interval; + U8 reserved3; + U32 size_of_allocation; + U32 group_offset; + U32 reserved4; + DRV_PCI_DEVICE_ENTRY_NODE pcidev_entry_node; + U32 num_pci_devices; + U32 pcidev_list_offset; + DRV_PCI_DEVICE_ENTRY pcidev_entry_list; +#if defined(DRV_IA32) + U32 pointer_padding; //add padding for the DRV_PCI_DEVICE_ENTRY pointer + //before this field for 32-bit mode +#endif + U32 device_type; + U32 dev_node; + PMU_OPERATIONS_NODE operations[MAX_OPERATION_TYPES]; + U32 descriptor_id; + U32 reserved5; + U32 metric_start; + U32 metric_pop; + MMIO_BAR_INFO_NODE mmio_bar_list[MAX_MMIO_BARS]; + U32 num_mmio_secondary_bar; + U32 group_id_offset_in_trigger_evt_desc; + U64 reserved7; + U64 reserved8; + EVENT_REG_NODE entries[]; +}; + +// +// Accessor macros for ECB node +// +#define ECB_version(x) ((x)->version) +#define ECB_num_entries(x) ((x)->num_entries) +#define ECB_group_id(x) ((x)->group_id) +#define ECB_num_events(x) ((x)->num_events) +#define ECB_cccr_start(x) ((x)->cccr_start) +#define ECB_cccr_pop(x) ((x)->cccr_pop) +#define ECB_escr_start(x) ((x)->escr_start) +#define ECB_escr_pop(x) ((x)->escr_pop) +#define ECB_data_start(x) ((x)->data_start) +#define ECB_data_pop(x) ((x)->data_pop) +#define ECB_metric_start(x) ((x)->metric_start) +#define ECB_metric_pop(x) ((x)->metric_pop) +#define ECB_pcidev_entry_node(x) ((x)->pcidev_entry_node) +#define ECB_num_pci_devices(x) ((x)->num_pci_devices) +#define ECB_pcidev_list_offset(x) ((x)->pcidev_list_offset) +#define ECB_pcidev_entry_list(x) ((x)->pcidev_entry_list) +#define ECB_flags(x) ((x)->flags) +#define ECB_pmu_timer_interval(x) ((x)->pmu_timer_interval) +#define ECB_size_of_allocation(x) ((x)->size_of_allocation) +#define ECB_group_offset(x) ((x)->group_offset) +#define ECB_device_type(x) ((x)->device_type) +#define ECB_dev_node(x) ((x)->dev_node) +#define ECB_operations(x) ((x)->operations) +#define ECB_descriptor_id(x) ((x)->descriptor_id) +#define ECB_entries(x) ((x)->entries) +#define ECB_num_mmio_secondary_bar(x) ((x)->num_mmio_secondary_bar) +#define ECB_mmio_bar_list(x, i) ((x)->mmio_bar_list[i]) +#define ECB_group_id_offset_in_trigger_evt_desc(x) \ + ((x)->group_id_offset_in_trigger_evt_desc) + +// for flag bit field +#define ECB_direct2core_bit 0x0001 +#define ECB_bl_bypass_bit 0x0002 +#define ECB_pci_id_offset_bit 0x0003 +#define ECB_pcu_ccst_debug 0x0004 +#define ECB_unc_evt_int_read_bit 0x0008 +#define ECB_unc_discovery_mode_bit 0x0010 + +#define ECB_unc_evt_intr_read_get(x) ((x)->flags & ECB_unc_evt_int_read_bit) +#define ECB_unc_evt_intr_read_set(x) ((x)->flags |= ECB_unc_evt_int_read_bit) +#define ECB_unc_evt_intr_read_clear(x) ((x)->flags &= ~ECB_unc_evt_int_read_bit) + +#define ECB_unc_discovery_mode_get(x) ((x)->flags & ECB_unc_discovery_mode_bit) +#define ECB_unc_discovery_mode_set(x) ((x)->flags |= ECB_unc_discovery_mode_bit) +#define ECB_unc_discovery_mode_clear(x) \ + ((x)->flags &= ~ECB_unc_discovery_mode_bit) + +#define ECB_VERSION 2 + +#define ECB_CONSTRUCT(x, num_entries, group_id, cccr_start, escr_start, \ + data_start, size_of_allocation) \ +{ \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_cccr_start((x)) = (cccr_start); \ + ECB_cccr_pop((x)) = 0; \ + 
ECB_escr_start((x)) = (escr_start); \ + ECB_escr_pop((x)) = 0; \ + ECB_data_start((x)) = (data_start); \ + ECB_data_pop((x)) = 0; \ + ECB_metric_start((x)) = 0; \ + ECB_metric_pop((x)) = 0; \ + ECB_num_pci_devices((x)) = 0; \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ +} + +#define ECB_CONSTRUCT2(x, num_entries, group_id, size_of_allocation) \ +{ \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_num_pci_devices((x)) = 0; \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ +} + +#define ECB_CONSTRUCT1(x, num_entries, group_id, cccr_start, escr_start, \ + data_start, num_pci_devices, size_of_allocation) \ +{ \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_cccr_start((x)) = (cccr_start); \ + ECB_cccr_pop((x)) = 0; \ + ECB_escr_start((x)) = (escr_start); \ + ECB_escr_pop((x)) = 0; \ + ECB_data_start((x)) = (data_start); \ + ECB_data_pop((x)) = 0; \ + ECB_metric_start((x)) = 0; \ + ECB_metric_pop((x)) = 0; \ + ECB_num_pci_devices((x)) = (num_pci_devices); \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ +} + +// +// Accessor macros for ECB node entries +// +#define ECB_entries_reg_type(x, i) EVENT_REG_reg_type((ECB_entries(x)), (i)) +#define ECB_entries_event_id_index(x, i) \ + EVENT_REG_event_id_index((ECB_entries(x)), (i)) +#define ECB_entries_unit_id(x, i) EVENT_REG_unit_id((ECB_entries(x)), (i)) +#define ECB_entries_reg_package_id(x, i) \ + EVENT_REG_reg_package_id((ECB_entries(x)), (i)) +#define ECB_entries_counter_event_offset(x, i) \ + EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) +#define ECB_entries_reg_id(x, i) EVENT_REG_reg_id((ECB_entries(x)), (i)) +#define ECB_entries_phys_addr(x, i) EVENT_REG_phys_addr((ECB_entries(x)), (i)) +#define ECB_entries_bar_index(x, i) EVENT_REG_bar_index((ECB_entries(x)), (i)) +#define ECB_entries_reg_prog_type(x, i) \ + EVENT_REG_reg_prog_type((ECB_entries(x)), (i)) +#define ECB_entries_reg_offset(x, i) EVENT_REG_offset((ECB_entries(x)), (i)) +#define ECB_entries_reg_data_size(x, i) \ + EVENT_REG_data_size((ECB_entries(x)), (i)) +#define ECB_entries_reg_bar_index(x, i) \ + EVENT_REG_bar_index((ECB_entries(x)), (i)) +#define ECB_entries_desc_id(x, i) EVENT_REG_desc_id((ECB_entries(x)), i) +#define ECB_entries_flags(x, i) EVENT_REG_flags((ECB_entries(x)), i) +#define ECB_entries_reg_order(x, i) EVENT_REG_reg_order((ECB_entries(x)), i) +#define ECB_entries_reg_value(x, i) EVENT_REG_reg_value((ECB_entries(x)), (i)) +#define ECB_entries_max_bits(x, i) EVENT_REG_max_bits((ECB_entries(x)), (i)) +#define ECB_entries_scheduled(x, i) EVENT_REG_scheduled((ECB_entries(x)), (i)) +#define ECB_entries_bit_position(x, i) \ + EVENT_REG_bit_position((ECB_entries(x)), (i)) +// PCI config-specific fields +#define ECB_entries_bus_no(x, i) EVENT_REG_bus_no((ECB_entries(x)), (i)) +#define ECB_entries_dev_no(x, i) EVENT_REG_dev_no((ECB_entries(x)), (i)) +#define ECB_entries_func_no(x, i) EVENT_REG_func_no((ECB_entries(x)), (i)) +#define ECB_entries_counter_type(x, i) \ + EVENT_REG_counter_type((ECB_entries(x)), (i)) +#define ECB_entries_event_scope(x, i) \ + EVENT_REG_event_scope((ECB_entries(x)), (i)) +#define ECB_entries_precise_get(x, i) \ + EVENT_REG_precise_get((ECB_entries(x)), (i)) +#define ECB_entries_global_get(x, i) EVENT_REG_global_get((ECB_entries(x)), (i)) +#define ECB_entries_uncore_get(x, i) EVENT_REG_uncore_get((ECB_entries(x)), (i)) +#define 
ECB_entries_uncore_q_rst_get(x, i) \ + EVENT_REG_uncore_q_rst_get((ECB_entries(x)), (i)) +#define ECB_entries_is_gp_reg_get(x, i) \ + EVENT_REG_is_gp_reg_get((ECB_entries(x)), (i)) +#define ECB_entries_lbr_value_get(x, i) \ + EVENT_REG_lbr_value_get((ECB_entries(x)), (i)) +#define ECB_entries_fixed_reg_get(x, i) \ + EVENT_REG_fixed_reg_get((ECB_entries(x)), (i)) +#define ECB_entries_is_multi_pkg_bit_set(x, i) \ + EVENT_REG_multi_pkg_evt_bit_get((ECB_entries(x)), (i)) +#define ECB_entries_clean_up_get(x, i) \ + EVENT_REG_clean_up_get((ECB_entries(x)), (i)) +#define ECB_entries_em_trigger_get(x, i) \ + EVENT_REG_em_trigger_get((ECB_entries(x)), (i)) +#define ECB_entries_branch_evt_get(x, i) \ + EVENT_REG_branch_evt_get((ECB_entries(x)), (i)) +#define ECB_entries_ebc_sampling_evt_get(x, i) \ + EVENT_REG_ebc_sampling_evt_get((ECB_entries(x)), (i)) +#define ECB_entries_unc_evt_intr_read_get(x, i) \ + EVENT_REG_unc_evt_intr_read_get((ECB_entries(x)), (i)) +#define ECB_entries_generic_status_set(x, i) \ + EVENT_REG_generic_status_set((ECB_entries(x)), (i)) +#define ECB_entries_generic_status_get(x, i) \ + EVENT_REG_generic_status_get((ECB_entries(x)), (i)) +#define ECB_entries_reg_rw_type(x, i) \ + EVENT_REG_reg_rw_type((ECB_entries(x)), (i)) +#define ECB_entries_collect_on_ctx_sw_get(x, i) \ + EVENT_REG_collect_on_ctx_sw_get((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_offset(x, i) \ + EVENT_REG_secondary_pci_offset_offset((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_shift(x, i) \ + EVENT_REG_secondary_pci_offset_shift((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_mask(x, i) \ + EVENT_REG_secondary_pci_offset_mask((ECB_entries(x)), (i)) +#define ECB_operations_operation_type(x, i) \ + PMU_OPER_operation_type((ECB_operations(x)), (i)) +#define ECB_operations_register_start(x, i) \ + PMU_OPER_register_start((ECB_operations(x)), (i)) +#define ECB_operations_register_len(x, i) \ + PMU_OPER_register_len((ECB_operations(x)), (i)) + +#define ECB_entries_core_event_id(x, i) \ + EVENT_REG_core_event_id((ECB_entries(x)), (i)) +#define ECB_entries_uncore_buffer_offset_in_package(x, i) \ + EVENT_REG_uncore_buffer_offset_in_package((ECB_entries(x)), (i)) +#define ECB_entries_uncore_buffer_offset_in_system(x, i) \ + EVENT_REG_uncore_buffer_offset_in_system((ECB_entries(x)), (i)) +#define ECB_entries_device_subtype(x, i) \ + EVENT_REG_device_subtype((ECB_entries(x)), (i)) +#define ECB_entries_driverless_dup_event(x, i) \ + EVENT_REG_driverless_dup_event((ECB_entries(x)), (i)) + +#define ECB_entries_aux_reg_id_to_read(x, i) \ + EVENT_REG_aux_reg_id_to_read((ECB_entries(x)), (i)) +#define ECB_entries_aux_read_mask(x, i) \ + EVENT_REG_aux_read_mask((ECB_entries(x)), (i)) +#define ECB_entries_aux_shift_index(x, i) \ + EVENT_REG_aux_shift_index((ECB_entries(x)), (i)) +#define ECB_SET_OPERATIONS(x, operation_type, start, len) \ + ECB_operations_operation_type(x, operation_type) = operation_type; \ + ECB_operations_register_start(x, operation_type) = start; \ + ECB_operations_register_len(x, operation_type) = len; + +#define ECB_mmio_bar_list_bus_no(x, i) \ + MMIO_BAR_bus_no(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_dev_no(x, i) \ + MMIO_BAR_dev_no(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_func_no(x, i) \ + MMIO_BAR_func_no(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_offset(x, i) \ + MMIO_BAR_offset(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_addr_size(x, i) \ + MMIO_BAR_addr_size(ECB_mmio_bar_list(x), (i)) +#define 
ECB_mmio_bar_list_map_size(x, i) \ + MMIO_BAR_map_size(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_bar_shift(x, i) \ + MMIO_BAR_bar_shift(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_bar_mask(x, i) \ + MMIO_BAR_bar_mask(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_base_mmio_offset(x, i) \ + MMIO_BAR_base_mmio_offset(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_physical_address(x, i) \ + MMIO_BAR_physical_address(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_virtual_address(x, i) \ + MMIO_BAR_virtual_address(ECB_mmio_bar_list(x), (i)) + +// *************************************************************************** + +/*!\struct LBR_ENTRY_NODE_S + * \var etype - TOS = 0; FROM = 1; TO = 2 + * \var type_index + * \var reg_id + */ + +typedef struct LBR_ENTRY_NODE_S LBR_ENTRY_NODE; +typedef LBR_ENTRY_NODE *LBR_ENTRY; + +struct LBR_ENTRY_NODE_S { + U16 etype; + U16 type_index; + U32 reg_id; +}; + +// +// Accessor macros for LBR entries +// +#define LBR_ENTRY_NODE_etype(lentry) ((lentry).etype) +#define LBR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) +#define LBR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct LBR_NODE_S + * \var size - The total size including header and entries + * \var num_entries - The number of entries + * \var entries - The entries in the list + * + * \brief Data structure to describe the LBR registers that need to be read + * + */ + +typedef struct LBR_NODE_S LBR_NODE; +typedef LBR_NODE *LBR; + +struct LBR_NODE_S { + U32 size; + U32 num_entries; + LBR_ENTRY_NODE entries[]; +}; + +// +// Accessor macros for LBR node +// +#define LBR_size(lbr) ((lbr)->size) +#define LBR_num_entries(lbr) ((lbr)->num_entries) +#define LBR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) +#define LBR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) +#define LBR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) + +// *************************************************************************** + +/*!\struct PWR_ENTRY_NODE_S + * \var etype - none as yet + * \var type_index + * \var reg_id + */ + +typedef struct PWR_ENTRY_NODE_S PWR_ENTRY_NODE; +typedef PWR_ENTRY_NODE *PWR_ENTRY; + +struct PWR_ENTRY_NODE_S { + U16 etype; + U16 type_index; + U32 reg_id; +}; + +// +// Accessor macros for PWR entries +// +#define PWR_ENTRY_NODE_etype(lentry) ((lentry).etype) +#define PWR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) +#define PWR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct PWR_NODE_S + * \var size - The total size including header and entries + * \var num_entries - The number of entries + * \var entries - The entries in the list + * + * \brief Data structure to describe the PWR registers that need to be read + * + */ + +typedef struct PWR_NODE_S PWR_NODE; +typedef PWR_NODE *PWR; + +struct PWR_NODE_S { + U32 size; + U32 num_entries; + PWR_ENTRY_NODE entries[]; +}; + +// +// Accessor macros for PWR node +// +#define PWR_size(pwr) ((pwr)->size) +#define PWR_num_entries(pwr) ((pwr)->num_entries) +#define PWR_entries_etype(pwr, idx) ((pwr)->entries[idx].etype) +#define PWR_entries_type_index(pwr, idx) ((pwr)->entries[idx].type_index) +#define PWR_entries_reg_id(pwr, idx) ((pwr)->entries[idx].reg_id) + +// ***************************************************************************
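+/*
+ * Editor's sketch (illustrative only, not part of this patch): LBR_NODE and
+ * PWR_NODE end in a flexible array member, so a list with n entries is
+ * allocated as the header plus n entry nodes, e.g. on the user side:
+ *
+ *     U32 n = 32;
+ *     LBR lbr = (LBR)malloc(sizeof(LBR_NODE) + n * sizeof(LBR_ENTRY_NODE));
+ *     LBR_size(lbr)        = sizeof(LBR_NODE) + n * sizeof(LBR_ENTRY_NODE);
+ *     LBR_num_entries(lbr) = n;
+ *     for (U32 i = 0; i < n; i++) {
+ *         LBR_entries_etype(lbr, i)  = 1;         // FROM
+ *         LBR_entries_reg_id(lbr, i) = 0x680 + i; // hypothetical MSR id
+ *     }
+ */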
+/*!\struct RO_ENTRY_NODE_S + * \var reg_id - register to be read (e.g. DEAR, IEAR, BTB) + */ + +typedef struct RO_ENTRY_NODE_S RO_ENTRY_NODE; +typedef RO_ENTRY_NODE *RO_ENTRY; + +struct RO_ENTRY_NODE_S { + U32 reg_id; +}; + +// +// Accessor macros for RO entries +// +#define RO_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct RO_NODE_S + * \var size - The total size including header and entries. + * \var num_entries - The number of entries. + * \var entries - The entries in the list. + * + * \brief Data structure to describe the RO registers that need to be read. + * + */ + +typedef struct RO_NODE_S RO_NODE; +typedef RO_NODE *RO; + +struct RO_NODE_S { + U32 size; + U32 num_entries; + RO_ENTRY_NODE entries[]; +}; + +typedef enum { + IPT_BUFFER_DEFAULT = 0, + IPT_BUFFER_SMALL = 1, + IPT_BUFFER_LARGE = 2, +} IPT_BUFFER_SIZE_TYPE; + +// +// Accessor macros for RO node +// +#define RO_size(ro) ((ro)->size) +#define RO_num_entries(ro) ((ro)->num_entries) +#define RO_entries_reg_id(ro, idx) ((ro)->entries[idx].reg_id) + +// *************************************************************************** + +/*!\struct IPT_CONFIG_NODE_S + * \var u1.switches - per-feature Intel PT trace switches + * \var u2.buf_config - output buffer configuration + * \var cr3match - CR3 filter value + */ + +typedef struct IPT_CONFIG_NODE_S IPT_CONFIG_NODE; +typedef IPT_CONFIG_NODE *IPT_CONFIG; + +struct IPT_CONFIG_NODE_S { + union { + U64 switches; + struct { + U64 branch : 1; + U64 power : 1; + U64 usr : 1; + U64 os : 1; + U64 cyc : 1; + U64 cycthresh : 4; + U64 cr3filter : 1; + U64 mtc : 1; + U64 mtcfreq : 4; + U64 tsc : 1; + U64 psbfreq : 4; + U64 ptw : 1; + U64 reserved : 43; + } s1; + } u1; + union { + U64 buf_config; + struct { + U64 ringbuf : 1; + U64 bufsize : 2; + U64 reserved : 61; + } s2; + } u2; + U64 cr3match; + U64 reserved1; + U64 reserved2; + U64 reserved3; +}; + +// +// Accessor macros for IPT config +// +#define IPT_CONFIG_branch(ptcfg) ((ptcfg)->u1.s1.branch) +#define IPT_CONFIG_power(ptcfg) ((ptcfg)->u1.s1.power) +#define IPT_CONFIG_usr(ptcfg) ((ptcfg)->u1.s1.usr) +#define IPT_CONFIG_os(ptcfg) ((ptcfg)->u1.s1.os) +#define IPT_CONFIG_cyc(ptcfg) ((ptcfg)->u1.s1.cyc) +#define IPT_CONFIG_cycthresh(ptcfg) ((ptcfg)->u1.s1.cycthresh) +#define IPT_CONFIG_cr3filter(ptcfg) ((ptcfg)->u1.s1.cr3filter) +#define IPT_CONFIG_mtc(ptcfg) ((ptcfg)->u1.s1.mtc) +#define IPT_CONFIG_mtcfreq(ptcfg) ((ptcfg)->u1.s1.mtcfreq) +#define IPT_CONFIG_tsc(ptcfg) ((ptcfg)->u1.s1.tsc) +#define IPT_CONFIG_psbfreq(ptcfg) ((ptcfg)->u1.s1.psbfreq) +#define IPT_CONFIG_ptw(ptcfg) ((ptcfg)->u1.s1.ptw) +#define IPT_CONFIG_ringbuf(ptcfg) ((ptcfg)->u2.s2.ringbuf) +#define IPT_CONFIG_bufsize(ptcfg) ((ptcfg)->u2.s2.bufsize) +#define IPT_CONFIG_cr3match(ptcfg) ((ptcfg)->cr3match) + +#if defined(__cplusplus) +} +#endif + +#endif
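Editor's note: a minimal user-side sketch of filling IPT_CONFIG_NODE_S (illustrative only; the field semantics are inferred from the bitfield names above):

    IPT_CONFIG_NODE cfg = { 0 };

    IPT_CONFIG_branch(&cfg)  = 1;                /* trace branch packets      */
    IPT_CONFIG_usr(&cfg)     = 1;                /* trace user-mode execution */
    IPT_CONFIG_tsc(&cfg)     = 1;                /* emit timestamp packets    */
    IPT_CONFIG_ringbuf(&cfg) = 1;                /* wrap the output buffer    */
    IPT_CONFIG_bufsize(&cfg) = IPT_BUFFER_LARGE; /* 2-bit size selector       */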
diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h new file mode 100644 index 00000000000000..46001fca631f03 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_gfx.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2011 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _LWPMUDRV_GFX_H_ +#define _LWPMUDRV_GFX_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +#define GFX_BASE_ADDRESS 0xFF200000 +#define GFX_BASE_NEW_OFFSET 0x00080000 +#define GFX_PERF_REG 0x040 // location of GFX counter relative to base +#define GFX_NUM_COUNTERS 9 // max number of GFX counters per counter group +#define GFX_CTR_OVF_VAL 0xFFFFFFFF // overflow value for GFX counters + +#define GFX_REG_CTR_CTRL 0x01FF +#define GFX_CTRL_DISABLE 0x1E00 + +//#define GFX_COMPUTE_DELTAS 1 // use event count deltas instead of raw counts + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h new file mode 100644 index 00000000000000..b48abd963d3b42 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_ioctl.h @@ -0,0 +1,297 @@ +/* + * Copyright (C) 2007 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _LWPMUDRV_IOCTL_H_ +#define _LWPMUDRV_IOCTL_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +// SEP driver operation defines +/* + * NOTE: the definitions must be identical across all OSes. + * Do NOT add any OS-specific compile flags. +*/ +#define DRV_OPERATION_START 1 +#define DRV_OPERATION_STOP 2 +#define DRV_OPERATION_INIT_PMU 3 +#define DRV_OPERATION_INIT 4 +#define DRV_OPERATION_EM_GROUPS 5 +#define DRV_OPERATION_SET_CPU_MASK 17 +#define DRV_OPERATION_PCI_READ 18 +#define DRV_OPERATION_PCI_WRITE 19 +#define DRV_OPERATION_READ_PCI_CONFIG 20 +#define DRV_OPERATION_FD_PHYS 21 +#define DRV_OPERATION_WRITE_PCI_CONFIG 22 +#define DRV_OPERATION_INSERT_MARKER 23 +#define DRV_OPERATION_GET_NORMALIZED_TSC 24 +#define DRV_OPERATION_EM_CONFIG_NEXT 25 +#define DRV_OPERATION_SYS_CONFIG 26 +#define DRV_OPERATION_TSC_SKEW_INFO 27 +#define DRV_OPERATION_NUM_CORES 28 +#define DRV_OPERATION_COLLECT_SYS_CONFIG 29 +#define DRV_OPERATION_GET_SYS_CONFIG 30 +#define DRV_OPERATION_PAUSE 31 +#define DRV_OPERATION_RESUME 32 +#define DRV_OPERATION_SET_ASYNC_EVENT 33 +#define DRV_OPERATION_ASYNC_STOP 34 +#define DRV_OPERATION_TERMINATE 35 +#define DRV_OPERATION_READ_MSRS 36 +#define DRV_OPERATION_LBR_INFO 37 +#define DRV_OPERATION_RESERVE 38 +#define DRV_OPERATION_MARK 39 +#define DRV_OPERATION_AWAIT_STOP 40 +#define DRV_OPERATION_SEED_NAME 41 +#define DRV_OPERATION_KERNEL_CS 42 +#define DRV_OPERATION_SET_UID 43 +#define DRV_OPERATION_VERSION 51 +#define DRV_OPERATION_CHIPSET_INIT 52 +#define DRV_OPERATION_GET_CHIPSET_DEVICE_ID 53 +#define DRV_OPERATION_SWITCH_GROUP 54 +#define DRV_OPERATION_GET_NUM_CORE_CTRS 55 +#define DRV_OPERATION_PWR_INFO 56 +#define DRV_OPERATION_NUM_DESCRIPTOR 57 +#define DRV_OPERATION_DESC_NEXT 58 +#define DRV_OPERATION_MARK_OFF 59 +#define DRV_OPERATION_CREATE_MARKER 60 +#define DRV_OPERATION_GET_DRIVER_STATE 61 +#define
DRV_OPERATION_READ_SWITCH_GROUP 62 +#define DRV_OPERATION_EM_GROUPS_UNC 63 +#define DRV_OPERATION_EM_CONFIG_NEXT_UNC 64 +#define DRV_OPERATION_INIT_UNC 65 +#define DRV_OPERATION_RO_INFO 66 +#define DRV_OPERATION_READ_MSR 67 +#define DRV_OPERATION_WRITE_MSR 68 +#define DRV_OPERATION_THREAD_SET_NAME 69 +#define DRV_OPERATION_GET_PLATFORM_INFO 70 +#define DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE 71 +#define DRV_OPERATION_READ_AND_RESET 72 +#define DRV_OPERATION_SET_CPU_TOPOLOGY 73 +#define DRV_OPERATION_INIT_NUM_DEV 74 +#define DRV_OPERATION_SET_GFX_EVENT 75 +#define DRV_OPERATION_GET_NUM_SAMPLES 76 +#define DRV_OPERATION_SET_PWR_EVENT 77 +#define DRV_OPERATION_SET_DEVICE_NUM_UNITS 78 +#define DRV_OPERATION_TIMER_TRIGGER_READ 79 +#define DRV_OPERATION_GET_INTERVAL_COUNTS 80 +#define DRV_OPERATION_FLUSH 81 +#define DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO 82 +#define DRV_OPERATION_GET_UNCORE_TOPOLOGY 83 +#define DRV_OPERATION_GET_MARKER_ID 84 +#define DRV_OPERATION_GET_SAMPLE_DROP_INFO 85 +#define DRV_OPERATION_GET_DRV_SETUP_INFO 86 +#define DRV_OPERATION_GET_PLATFORM_TOPOLOGY 87 +#define DRV_OPERATION_GET_THREAD_COUNT 88 +#define DRV_OPERATION_GET_THREAD_INFO 89 +#define DRV_OPERATION_GET_DRIVER_LOG 90 +#define DRV_OPERATION_CONTROL_DRIVER_LOG 91 +#define DRV_OPERATION_SET_OSID 92 +#define DRV_OPERATION_GET_AGENT_MODE 93 +#define DRV_OPERATION_INIT_DRIVER 94 +#define DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER 95 +#define DRV_OPERATION_GET_NUM_VM 96 +#define DRV_OPERATION_GET_VCPU_MAP 97 +#define DRV_OPERATION_GET_PERF_CAPAB 98 +#define DRV_OPERATION_PMU_STATUS 99 +#define DRV_OPERATION_SET_IPT_CONFIG 100 +#define DRV_OPERATION_GET_PMT_TOPOLOGY 101 +#define DRV_OPERATION_GET_UNCORE_DISCOVERY_TABLES 102 +// Only used by MAC OS +#define DRV_OPERATION_GET_ASLR_OFFSET 997 // this may not be needed +#define DRV_OPERATION_SET_OSX_VERSION 998 +#define DRV_OPERATION_PROVIDE_FUNCTION_PTRS 999 + +// IOCTL_SETUP + +// IOCTL_ARGS +typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; +typedef IOCTL_ARGS_NODE *IOCTL_ARGS; + +#if defined(DRV_EM64T) +struct IOCTL_ARGS_NODE_S { + // length of the driver(target) to user(host) buffer + U64 len_drv_to_usr; + // buffer sent from driver(target) to user(host); stands for the read buffer + char *buf_drv_to_usr; + // length of the user(host) to driver(target) buffer + U64 len_usr_to_drv; + // buffer sent from user(host) to driver(target); stands for the write buffer + char *buf_usr_to_drv; + U32 command; +}; +#endif +#if defined(DRV_IA32) +struct IOCTL_ARGS_NODE_S { + // length of the driver(target) to user(host) buffer + U64 len_drv_to_usr; + // buffer sent from driver(target) to user(host); stands for the read buffer + char *buf_drv_to_usr; + char *reserved1; + // length of the user(host) to driver(target) buffer + U64 len_usr_to_drv; + // buffer sent from user(host) to driver(target); stands for the write buffer + char *buf_usr_to_drv; + char *reserved2; + U32 command; +}; +#endif + +#if defined(DRV_OS_WINDOWS) + +// +// NtDeviceIoControlFile IoControlCode values for this device. +// +// Warning: Remember that the low two bits of the code specify how the +// buffers are passed to the driver! +// +// 16 bit device type.
12 bit function codes + +// values 0-32767 reserved for Microsoft +#define LWPMUDRV_IOCTL_DEVICE_TYPE 0xA000 +// values 0-2047 reserved for Microsoft +#define LWPMUDRV_IOCTL_FUNCTION 0x0A00 + +// +// Basic CTL CODE macro to reduce typographical errors +// Use for FILE_READ_ACCESS +// +#define LWPMUDRV_CTL_READ_CODE(x) \ + CTL_CODE(LWPMUDRV_IOCTL_DEVICE_TYPE, LWPMUDRV_IOCTL_FUNCTION + (x), \ + METHOD_BUFFERED, FILE_READ_ACCESS) + +/* Reference https://docs.microsoft.com/en-us/windows-hardware/drivers/kernel/defining-i-o-control-codes + CTL_CODE (DeviceType, Function, Method, Access) generates 32 bit code + -------------------------------------------------- ---------------- + | 31 | 30 ... 16 | 15 14 | 13 | 12 ... 2 | 1 0 | + ------------------------------------------------------------------- + | common | device | req access | custom | func code | transfer | + | | type | | | | type | + ------------------------------------------------------------------- +*/ +#define LWPMUDRV_DEVICE_TYPE(x) (((x) & 0xFFFF0000) >> 16) +#define LWPMUDRV_METHOD(x) ((x) & 3) +#define LWPMUDRV_FUNCTION(x) ((((x) >> 2) & 0x00000FFF) - 0x0A00) + +#define LWPMUDRV_IOCTL_CODE(x) LWPMUDRV_CTL_READ_CODE(x) + +#elif defined(SEP_ESX) + +typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; +typedef CPU_ARGS_NODE *CPU_ARGS; +struct CPU_ARGS_NODE_S { + U64 len_drv_to_usr; + char *buf_drv_to_usr; + U32 command; + U32 CPU_ID; + U32 BUCKET_ID; +}; + +// IOCTL_SETUP +#define LWPMU_IOC_MAGIC 99 +#define OS_SUCCESS 0 +#define OS_STATUS int +//#define OS_ILLEGAL_IOCTL -ENOTTY +//#define OS_NO_MEM -ENOMEM +//#define OS_FAULT -EFAULT + +#define LWPMUDRV_IOCTL_IO(x) (x) +#define LWPMUDRV_IOCTL_IOR(x) (x) +#define LWPMUDRV_IOCTL_IOW(x) (x) +#define LWPMUDRV_IOCTL_IORW(x) (x) + +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ + defined(DRV_OS_ANDROID) +// IOCTL_ARGS + +// COMPAT IOCTL_ARGS +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) +typedef struct IOCTL_COMPAT_ARGS_NODE_S IOCTL_COMPAT_ARGS_NODE; +typedef IOCTL_COMPAT_ARGS_NODE *IOCTL_COMPAT_ARGS; +struct IOCTL_COMPAT_ARGS_NODE_S { + U64 len_drv_to_usr; + compat_uptr_t buf_drv_to_usr; + U64 len_usr_to_drv; + compat_uptr_t buf_usr_to_drv; +}; +#endif + +// COMPAT IOCTL_SETUP +// +#define LWPMU_IOC_MAGIC 99 + +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) +#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) +#define LWPMUDRV_IOCTL_IOR(x) _IOR(LWPMU_IOC_MAGIC, (x), compat_uptr_t) +#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), compat_uptr_t) +#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), compat_uptr_t) +#else +#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) +#define LWPMUDRV_IOCTL_IOR(x) _IOR(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) +#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) +#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS) +#endif + +#elif defined(DRV_OS_FREEBSD) + +// IOCTL_SETUP +// +#define LWPMU_IOC_MAGIC 99 + +/* FreeBSD is very strict about IOR/IOW/IOWR specifications on IOCTLs. + * Since these IOCTLs all pass down the real read/write buffer lengths + * and addresses inside of an IOCTL_ARGS_NODE data structure, we + * need to specify all of these as _IOW so that the kernel will + * view it as userspace passing the data to the driver, rather than + * the reverse. There are also some cases where Linux is passing + * a smaller type than IOCTL_ARGS_NODE, even though it's really + * passing an IOCTL_ARGS_NODE. These needed to be fixed for FreeBSD.
+ */ +#define LWPMUDRV_IOCTL_IO(x) _IO(LWPMU_IOC_MAGIC, (x)) +#define LWPMUDRV_IOCTL_IOR(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_IOW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_IORW(x) _IOW(LWPMU_IOC_MAGIC, (x), IOCTL_ARGS_NODE) + +#elif defined(DRV_OS_MAC) + +typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; +typedef CPU_ARGS_NODE *CPU_ARGS; +struct CPU_ARGS_NODE_S { + U64 len_drv_to_usr; + char *buf_drv_to_usr; + U32 command; + U32 CPU_ID; + U32 BUCKET_ID; +}; + +// IOCTL_SETUP +// +#define LWPMU_IOC_MAGIC 99 +#define OS_SUCCESS 0 +#define OS_STATUS int +#define OS_ILLEGAL_IOCTL -ENOTTY +#define OS_NO_MEM -ENOMEM +#define OS_FAULT -EFAULT + +// Task file opcodes. +// The definitions are kept IOCTL-style, but on MAC OSX these are really +// opcodes consumed by the Execute command. + +#else +#error "unknown OS in lwpmudrv_ioctl.h" +#endif + +#if defined(__cplusplus) +} +#endif + +#endif
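Editor's note: on the Linux/FreeBSD builds the driver is reached through the _IO* codes above, with the real transfer lengths carried inside IOCTL_ARGS_NODE rather than in the ioctl size argument. A minimal user-side sketch (illustrative only; the device node path is an assumption, not taken from this patch; U32 comes from lwpmudrv_types.h):

    #include <fcntl.h>
    #include <sys/ioctl.h>

    int fd = open("/dev/sep5", O_RDWR);     /* hypothetical device node */
    U32 version = 0;
    IOCTL_ARGS_NODE args = { 0 };

    args.len_drv_to_usr = sizeof(version);  /* driver -> user length */
    args.buf_drv_to_usr = (char *)&version;
    ioctl(fd, LWPMUDRV_IOCTL_IOR(DRV_OPERATION_VERSION), &args);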
diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h new file mode 100644 index 00000000000000..0fe66bc955f070 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/lwpmudrv_struct.h @@ -0,0 +1,2768 @@ +/* + * Copyright (C) 2007 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _LWPMUDRV_STRUCT_UTILS_H_ +#define _LWPMUDRV_STRUCT_UTILS_H_ + +#if defined(DRV_OS_WINDOWS) +#pragma warning(disable : 4100) +#pragma warning(disable : 4127) +#pragma warning(disable : 4214) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +// processor execution modes +#define MODE_UNKNOWN 99 +// the following defines must start at 0 +#define MODE_64BIT 3 +#define MODE_32BIT 2 +#define MODE_16BIT 1 +#define MODE_V86 0 + +// sampling methods +#define SM_RTC 2020 // real time clock +#define SM_VTD 2021 // OS Virtual Timer Device +#define SM_NMI 2022 // non-maskable interrupt time based +#define SM_EBS 2023 // event based sampling +#define SM_EBC 2024 // event based counting + +// sampling mechanism bitmap definitions +#define INTERRUPT_RTC 0x1 +#define INTERRUPT_VTD 0x2 +#define INTERRUPT_NMI 0x4 +#define INTERRUPT_EBS 0x8 + +// Device types +#define DEV_CORE 0x01 +#define DEV_UNC 0x02 + +// eflags defines +#define EFLAGS_VM 0x00020000 // V86 mode +#define EFLAGS_IOPL0 0 +#define EFLAGS_IOPL1 0x00001000 +#define EFLAGS_IOPL2 0x00002000 +#define EFLAGS_IOPL3 0x00003000 +#define MAX_EMON_GROUPS 1000 +#define MAX_PCI_BUSNO 256 +#define MAX_DEVICES 30 +#define MAX_REGS 64 +#define MAX_PCI_DEVNO 32 +#define MAX_PCI_FUNCNO 8 +#define MAX_PCI_DEVUNIT 16 +#define MAX_TURBO_VALUES 32 +#define REG_BIT_MASK 0xFFFFFFFFFFFFFFFFULL +#define MAX_DISCOVERY_UNCORE_UNITS 1024 // Total units across all uncore devices + +extern float freq_multiplier; + +// Enumeration for invoking dispatch on multiple cpus or not +typedef enum { DRV_MULTIPLE_INSTANCE = 0, DRV_SINGLE_INSTANCE } DRV_PROG_TYPE; + +typedef struct DRV_CONFIG_NODE_S DRV_CONFIG_NODE; +typedef DRV_CONFIG_NODE *DRV_CONFIG; + +struct DRV_CONFIG_NODE_S { + U32 size; + U16 version; + U16 ipt_mode; + U32 num_events; + U32 num_chipset_events; + U32 chipset_offset; + S32 seed_name_len; + union { + S8 *seed_name; + U64 dummy1; + } u1; + union { + S8 *cpu_mask; + U64 dummy2; + } u2; + union { + U64 collection_config; + struct { + U64 start_paused : 1; + U64 counting_mode : 1; + U64 enable_chipset : 1; + U64 enable_gfx : 1; + U64 enable_pwr : 1; + U64 emon_mode : 1; + U64 debug_inject : 1; + U64 virt_phys_translation : 1; + U64 enable_p_state : 1; + U64 enable_cp_mode : 1; + U64 read_pstate_msrs : 1; + U64 use_pcl : 1; + U64 enable_ebc : 1; + U64 enable_tbc : 1; + U64 ds_area_available : 1; + U64 per_cpu_tsc : 1; + U64 mixed_ebc_available : 1; + U64 hetero_supported : 1; + U64 rdt_auto_rmid : 1; + U64 enable_ipt : 1; + U64 enable_sideband : 1; + U64 reserved_field1 : 43; + } s1; + } u3; + U64 target_pid; + U32 os_of_interest; + U16 unc_timer_interval; + U16 unc_em_factor; + S32 p_state_trigger_index; + DRV_BOOL multi_pebs_enabled; + S32 emon_timer_interval; + DRV_BOOL unc_collect_in_intr_enabled; + U32 emon_driverless_mux_interval; + U32 reserved1; + U64 reserved2; + U64 reserved3; +}; + +#define DRV_CONFIG_size(cfg) ((cfg)->size) +#define DRV_CONFIG_version(cfg) ((cfg)->version) +#define DRV_CONFIG_num_events(cfg) ((cfg)->num_events) +#define DRV_CONFIG_num_chipset_events(cfg) ((cfg)->num_chipset_events) +#define DRV_CONFIG_chipset_offset(cfg) ((cfg)->chipset_offset) + +#define DRV_CONFIG_seed_name(cfg) ((cfg)->u1.seed_name) +#define DRV_CONFIG_seed_name_len(cfg) ((cfg)->seed_name_len) +#define DRV_CONFIG_cpu_mask(cfg) ((cfg)->u2.cpu_mask) +#define DRV_CONFIG_start_paused(cfg) ((cfg)->u3.s1.start_paused) +#define DRV_CONFIG_counting_mode(cfg) ((cfg)->u3.s1.counting_mode) +#define
DRV_CONFIG_enable_chipset(cfg) ((cfg)->u3.s1.enable_chipset) +#define DRV_CONFIG_enable_gfx(cfg) ((cfg)->u3.s1.enable_gfx) +#define DRV_CONFIG_enable_pwr(cfg) ((cfg)->u3.s1.enable_pwr) +#define DRV_CONFIG_emon_mode(cfg) ((cfg)->u3.s1.emon_mode) +#define DRV_CONFIG_debug_inject(cfg) ((cfg)->u3.s1.debug_inject) +#define DRV_CONFIG_virt_phys_translation(cfg) ((cfg)->u3.s1.virt_phys_translation) +#define DRV_CONFIG_enable_p_state(cfg) ((cfg)->u3.s1.enable_p_state) +#define DRV_CONFIG_enable_cp_mode(cfg) ((cfg)->u3.s1.enable_cp_mode) +#define DRV_CONFIG_read_pstate_msrs(cfg) ((cfg)->u3.s1.read_pstate_msrs) +#define DRV_CONFIG_use_pcl(cfg) ((cfg)->u3.s1.use_pcl) +#define DRV_CONFIG_event_based_counts(cfg) ((cfg)->u3.s1.enable_ebc) +#define DRV_CONFIG_timer_based_counts(cfg) ((cfg)->u3.s1.enable_tbc) +#define DRV_CONFIG_ds_area_available(cfg) ((cfg)->u3.s1.ds_area_available) +#define DRV_CONFIG_per_cpu_tsc(cfg) ((cfg)->u3.s1.per_cpu_tsc) +#define DRV_CONFIG_mixed_ebc_available(cfg) ((cfg)->u3.s1.mixed_ebc_available) +#define DRV_CONFIG_hetero_supported(cfg) ((cfg)->u3.s1.hetero_supported) +#define DRV_CONFIG_rdt_auto_rmid(cfg) ((cfg)->u3.s1.rdt_auto_rmid) +#define DRV_CONFIG_enable_ipt(cfg) ((cfg)->u3.s1.enable_ipt) +#define DRV_CONFIG_enable_sideband(cfg) ((cfg)->u3.s1.enable_sideband) +#define DRV_CONFIG_target_pid(cfg) ((cfg)->target_pid) +#define DRV_CONFIG_os_of_interest(cfg) ((cfg)->os_of_interest) +#define DRV_CONFIG_unc_timer_interval(cfg) ((cfg)->unc_timer_interval) +#define DRV_CONFIG_unc_em_factor(cfg) ((cfg)->unc_em_factor) +#define DRV_CONFIG_p_state_trigger_index(cfg) ((cfg)->p_state_trigger_index) +#define DRV_CONFIG_multi_pebs_enabled(cfg) ((cfg)->multi_pebs_enabled) +#define DRV_CONFIG_emon_timer_interval(cfg) ((cfg)->emon_timer_interval) +#define DRV_CONFIG_unc_collect_in_intr_enabled(cfg) \ + ((cfg)->unc_collect_in_intr_enabled) +#define DRV_CONFIG_ipt_mode(cfg) ((cfg)->ipt_mode) +#define DRV_CONFIG_emon_driverless_mux_interval(cfg) \ + ((cfg)->emon_driverless_mux_interval) + +#define DRV_CONFIG_VERSION 1 + +typedef struct DEV_CONFIG_NODE_S DEV_CONFIG_NODE; +typedef DEV_CONFIG_NODE *DEV_CONFIG; + +struct DEV_CONFIG_NODE_S { + U16 size; + U16 version; + U32 dispatch_id; + U32 pebs_mode; + U32 pebs_record_num; + U32 results_offset; // this is to store the offset for this device's results + U32 max_gp_counters; + U32 device_type; + U32 core_type; + union { + U64 enable_bit_fields; + struct { + U64 pebs_capture : 1; + U64 collect_lbrs : 1; + U64 collect_callstacks : 1; + U64 collect_kernel_callstacks : 1; + U64 latency_capture : 1; + U64 power_capture : 1; + U64 htoff_mode : 1; + U64 eventing_ip_capture : 1; + U64 hle_capture : 1; + U64 precise_ip_lbrs : 1; + U64 store_lbrs : 1; + U64 tsc_capture : 1; + U64 enable_perf_metrics : 1; + U64 enable_adaptive_pebs : 1; + U64 apebs_collect_mem_info : 1; + U64 apebs_collect_gpr : 1; + U64 apebs_collect_xmm : 1; + U64 apebs_collect_lbrs : 1; + U64 collect_fixed_counter_pebs : 1; + U64 collect_os_callstacks : 1; + U64 enable_arch_lbrs : 1; + U64 reserved_field1 : 43; + } s1; + } u1; + U32 emon_unc_offset[MAX_EMON_GROUPS]; + U32 ebc_group_id_offset; + U8 num_perf_metrics; + U8 num_lbr_entries; + U16 emon_perf_metrics_offset; + U32 device_scope; + U32 num_events; + U16 device_index; + U16 reserved1; + U32 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DEV_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) +#define DEV_CONFIG_pebs_mode(cfg) ((cfg)->pebs_mode) +#define DEV_CONFIG_pebs_record_num(cfg) ((cfg)->pebs_record_num) +#define 
DEV_CONFIG_results_offset(cfg) ((cfg)->results_offset) +#define DEV_CONFIG_max_gp_counters(cfg) ((cfg)->max_gp_counters) + +#define DEV_CONFIG_device_type(cfg) ((cfg)->device_type) +#define DEV_CONFIG_core_type(cfg) ((cfg)->core_type) + +#define DEV_CONFIG_pebs_capture(cfg) ((cfg)->u1.s1.pebs_capture) +#define DEV_CONFIG_collect_lbrs(cfg) ((cfg)->u1.s1.collect_lbrs) +#define DEV_CONFIG_collect_callstacks(cfg) ((cfg)->u1.s1.collect_callstacks) +#define DEV_CONFIG_collect_kernel_callstacks(cfg) \ + ((cfg)->u1.s1.collect_kernel_callstacks) +#define DEV_CONFIG_latency_capture(cfg) ((cfg)->u1.s1.latency_capture) +#define DEV_CONFIG_power_capture(cfg) ((cfg)->u1.s1.power_capture) +#define DEV_CONFIG_htoff_mode(cfg) ((cfg)->u1.s1.htoff_mode) +#define DEV_CONFIG_eventing_ip_capture(cfg) ((cfg)->u1.s1.eventing_ip_capture) +#define DEV_CONFIG_hle_capture(cfg) ((cfg)->u1.s1.hle_capture) +#define DEV_CONFIG_precise_ip_lbrs(cfg) ((cfg)->u1.s1.precise_ip_lbrs) +#define DEV_CONFIG_store_lbrs(cfg) ((cfg)->u1.s1.store_lbrs) +#define DEV_CONFIG_tsc_capture(cfg) ((cfg)->u1.s1.tsc_capture) +#define DEV_CONFIG_enable_perf_metrics(cfg) ((cfg)->u1.s1.enable_perf_metrics) +#define DEV_CONFIG_enable_adaptive_pebs(cfg) ((cfg)->u1.s1.enable_adaptive_pebs) +#define DEV_CONFIG_apebs_collect_mem_info(cfg) \ + ((cfg)->u1.s1.apebs_collect_mem_info) +#define DEV_CONFIG_apebs_collect_gpr(cfg) ((cfg)->u1.s1.apebs_collect_gpr) +#define DEV_CONFIG_apebs_collect_xmm(cfg) ((cfg)->u1.s1.apebs_collect_xmm) +#define DEV_CONFIG_apebs_collect_lbrs(cfg) ((cfg)->u1.s1.apebs_collect_lbrs) +#define DEV_CONFIG_collect_fixed_counter_pebs(cfg) \ + ((cfg)->u1.s1.collect_fixed_counter_pebs) +#define DEV_CONFIG_collect_os_callstacks(cfg) ((cfg)->u1.s1.collect_os_callstacks) +#define DEV_CONFIG_enable_arch_lbrs(cfg) ((cfg)->u1.s1.enable_arch_lbrs) +#define DEV_CONFIG_enable_bit_fields(cfg) ((cfg)->u1.enable_bit_fields) +#define DEV_CONFIG_emon_unc_offset(cfg, grp_num) ((cfg)->emon_unc_offset[grp_num]) +#define DEV_CONFIG_ebc_group_id_offset(cfg) ((cfg)->ebc_group_id_offset) +#define DEV_CONFIG_num_perf_metrics(cfg) ((cfg)->num_perf_metrics) +#define DEV_CONFIG_num_lbr_entries(cfg) ((cfg)->num_lbr_entries) +#define DEV_CONFIG_emon_perf_metrics_offset(cfg) ((cfg)->emon_perf_metrics_offset) +#define DEV_CONFIG_device_scope(cfg) ((cfg)->device_scope) +#define DEV_CONFIG_num_events(cfg) ((cfg)->num_events) +#define DEV_CONFIG_device_index(cfg) ((cfg)->device_index) + +typedef struct DEV_UNC_CONFIG_NODE_S DEV_UNC_CONFIG_NODE; +typedef DEV_UNC_CONFIG_NODE *DEV_UNC_CONFIG; + +struct DEV_UNC_CONFIG_NODE_S { + U16 size; + U16 version; + U32 dispatch_id; + U32 results_offset; + U32 device_type; + U32 device_scope; + U32 num_events; + U32 emon_unc_offset[MAX_EMON_GROUPS]; + U16 device_index; + U16 num_units; + U32 device_with_intr_events; // contains event(s) to read at the sampling trigger event + U32 num_module_events; + U32 reserved2; + U64 reserved3; +}; + +#define DEV_UNC_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) +#define DEV_UNC_CONFIG_results_offset(cfg) ((cfg)->results_offset) +#define DEV_UNC_CONFIG_emon_unc_offset(cfg, grp_num) \ + ((cfg)->emon_unc_offset[grp_num]) +#define DEV_UNC_CONFIG_device_type(cfg) ((cfg)->device_type) +#define DEV_UNC_CONFIG_device_scope(cfg) ((cfg)->device_scope) +#define DEV_UNC_CONFIG_num_events(cfg) ((cfg)->num_events) +#define DEV_UNC_CONFIG_device_index(cfg) ((cfg)->device_index) +#define DEV_UNC_CONFIG_num_units(cfg) ((cfg)->num_units) +#define DEV_UNC_CONFIG_device_with_intr_events(cfg) \ + ((cfg)->device_with_intr_events)
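+/*
+ * Editor's note (illustrative, not part of this patch): the accessor macros
+ * above expand to plain struct member references, so each one can be used
+ * both as an rvalue and as an lvalue, e.g.:
+ *
+ *     DEV_UNC_CONFIG_num_units(cfg) = 4;                 // write
+ *     if (DEV_UNC_CONFIG_device_type(cfg) == DEV_UNC) {  // read
+ *         ...
+ *     }
+ */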
+/* + * X86 processor code descriptor + */ +typedef struct CodeDescriptor_s { + union { + U32 lowWord; // low dword of descriptor + struct { // low broken out by fields + U16 limitLow; // segment limit 15:00 + U16 baseLow; // segment base 15:00 + } s1; + } u1; + union { + U32 highWord; // high word of descriptor + struct { // high broken out by bit fields + U32 baseMid : 8; // base 23:16 + U32 accessed : 1; // accessed + U32 readable : 1; // readable + U32 conforming : 1; // conforming code segment + U32 oneOne : 2; // always 11 + U32 dpl : 2; // Dpl + U32 pres : 1; // present bit + U32 limitHi : 4; // limit 19:16 + U32 sys : 1; // available for use by system + U32 reserved_0 : 1; // reserved, always 0 + U32 default_size : 1; // default operation size (1=32bit, 0=16bit) + U32 granularity : 1; // granularity (1=32 bit, 0=20 bit) + U32 baseHi : 8; // base hi 31:24 + } s2; + } u2; +} CodeDescriptor; + +/* + * Module record. These are emitted whenever a DLL or EXE is loaded or unloaded. + * The filename fields may be 0 on an unload. The records represent a module for a + * certain span of time, delineated by the load / unload samplecounts. + * Note: + * The structure contains 64 bit fields which may cause the compiler to pad the + * length of the structure to an 8 byte boundary. + */ +typedef struct ModuleRecord_s { + U16 recLength; // total length of this record (including this length, + // always U32 multiple); output from the sampler is variable + // length (pathname at end of record); the sampfile builder moves + // path names to a separate "literal pool" area + // so that these records become fixed length, and can be treated + // as an array; see modrecFixedLen in header + + U16 segmentType : 2; // V86, 16, 32, 64 (see MODE_ defines); may be inaccurate for Win95 + // .. a 16 bit module may become a 32 bit module, inferred by + // ..looking at 1st sample record that matches the module selector + U16 loadEvent : 1; // 0 for load, 1 for unload + U16 processed : 1; // set once the record has been processed + U16 reserved0 : 12; + + U16 selector; // code selector or V86 segment + U16 segmentNameLength; // length of the segment name if the segmentNameSet bit is set + U32 segmentNumber; // segment number, Win95 (and now Java) can have multiple pieces for one module + union { + U32 flags; // all the flags as one dword + struct { + U32 exe : 1; // this module is an exe + U32 globalModule : 1; // globally loaded module. There may be multiple + // module records for a global module, but the samples + // will only point to the 1st one, the others will be + // ignored. NT's Kernel32 is an example of this. + // REVISIT this?? + U32 bogusWin95 : 1; // "bogus" win95 module. By bogus, we mean a + // module that has a pid of 0, no length and no base. + // Selector actually used as a 32 bit module.
+ U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid + U32 sampleFound : 1; // at least one sample referenced this module + U32 tscUsed : 1; // tsc set when record written + U32 duplicate : 1; // 1st pass analysis has determined this is a + // duplicate load + U32 globalModuleTB5 : 1; // module mapped into all processes on system + U32 segmentNameSet : 1; // set if the segment name was collected + // (initially done for xbox collections) + U32 firstModuleRecInProcess : 1; // if the pidCreatesTrackedInModuleRecs flag is set + // in the SampleHeaderEx struct and this flag + // is set, the associated module indicates + // the beginning of a new process + U32 source : 1; // 0 for path in target system, 1 for path in host system (offloaded) + U32 unknownLoadAddress : 1; // 0 for valid loadAddr64 value, 1 for invalid loadAddr64 value + U32 reserved1 : 20; + } s1; + } u2; + U64 length64; // module length + U64 loadAddr64; // load address + U32 pidRecIndex; // process ID rec index (index into start of pid record section). + // .. (see pidRecIndexRaw). If pidRecIndex == 0 and pidRecIndexRaw == 1 + // ..then this is a kernel or global module. Can validly + // ..be 0 if not raw (array index). Use ReturnPid() to access this + // ..field + U32 osid; // OS identifier + U64 unloadTsc; // TSC collected on an unload event + U32 path; // module path name (section offset on disk) + // ..when initially written by the sampler, the name is at the end of this + // ..struct; when merged with the main file, names are pooled at the end + // ..of the ModuleRecord Section so ModuleRecords can be + // ..fixed length + U16 pathLength; // path name length (includes terminating \0) + U16 filenameOffset; // offset into path name of base filename + U32 segmentName; // offset to the segmentName from the beginning of the + // module section in a processed module section + // (s/b 0 in a raw module record) + // in a raw module record, the segment name will follow the + // module name and the module name's terminating NULL char + U32 page_offset_high; + U64 tsc; // time stamp counter module event occurred + U32 parent_pid; // Parent PID of the process + U32 page_offset_low; +} ModuleRecord; + +#define MR_unloadTscSet(x, y) ((x)->unloadTsc = (y)) +#define MR_unloadTscGet(x) ((x)->unloadTsc) + +#define MR_page_offset_Set(x, y) \ + { \ + (x)->page_offset_low = (y)&0xFFFFFFFF; \ + (x)->page_offset_high = ((y) >> 32) & 0xFFFFFFFF; \ + } +#define MR_page_offset_Get(x) \ + ((((U64)(x)->page_offset_high) << 32) | (x)->page_offset_low) + +// Accessor macros for ModuleRecord +#define MODULE_RECORD_rec_length(x) ((x)->recLength) +#define MODULE_RECORD_segment_type(x) ((x)->segmentType) +#define MODULE_RECORD_load_event(x) ((x)->loadEvent) +#define MODULE_RECORD_processed(x) ((x)->processed) +#define MODULE_RECORD_selector(x) ((x)->selector) +#define MODULE_RECORD_segment_name_length(x) ((x)->segmentNameLength) +#define MODULE_RECORD_segment_number(x) ((x)->segmentNumber) +#define MODULE_RECORD_flags(x) ((x)->u2.flags) +#define MODULE_RECORD_exe(x) ((x)->u2.s1.exe) +#define MODULE_RECORD_global_module(x) ((x)->u2.s1.globalModule) +#define MODULE_RECORD_bogus_win95(x) ((x)->u2.s1.bogusWin95) +#define MODULE_RECORD_pid_rec_index_raw(x) ((x)->u2.s1.pidRecIndexRaw) +#define MODULE_RECORD_sample_found(x) ((x)->u2.s1.sampleFound) +#define MODULE_RECORD_tsc_used(x) ((x)->u2.s1.tscUsed) +#define MODULE_RECORD_duplicate(x) ((x)->u2.s1.duplicate) +#define MODULE_RECORD_global_module_tb5(x) ((x)->u2.s1.globalModuleTB5) +#define MODULE_RECORD_segment_name_set(x)
((x)->u2.s1.segmentNameSet) +#define MODULE_RECORD_first_module_rec_in_process(x) \ + ((x)->u2.s1.firstModuleRecInProcess) +#define MODULE_RECORD_source(x) ((x)->u2.s1.source) +#define MODULE_RECORD_unknown_load_address(x) ((x)->u2.s1.unknownLoadAddress) +#define MODULE_RECORD_length64(x) ((x)->length64) +#define MODULE_RECORD_load_addr64(x) ((x)->loadAddr64) +#define MODULE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) +#define MODULE_RECORD_unload_tsc(x) ((x)->unloadTsc) +#define MODULE_RECORD_path(x) ((x)->path) +#define MODULE_RECORD_path_length(x) ((x)->pathLength) +#define MODULE_RECORD_filename_offset(x) ((x)->filenameOffset) +#define MODULE_RECORD_segment_name(x) ((x)->segmentName) +#define MODULE_RECORD_tsc(x) ((x)->tsc) +#define MODULE_RECORD_parent_pid(x) ((x)->parent_pid) +#define MODULE_RECORD_osid(x) ((x)->osid) + +/* + * Sample record. Size can be determined by looking at the header record. + * There can be up to 3 sections. The SampleFileHeader defines the presence + * of sections and their offsets. Within a sample file, all of the sample + * records have the same number of sections and the same size. However, + * different sample record sections and sizes can exist in different + * sample files. Since recording counters and the time stamp counter for + * each sample can be space consuming, the user can determine whether or not + * this information is kept at sample collection time. + */ + +typedef struct SampleRecordPC_s { // Program Counter section + U32 descriptor_id; + U32 osid; // OS identifier + union { + struct { + U64 iip; // IA64 interrupt instruction pointer + U64 ipsr; // IA64 interrupt processor status register + } s1; + struct { + U32 eip; // IA32 instruction pointer + U32 eflags; // IA32 eflags + CodeDescriptor csd; // IA32 code seg descriptor (8 bytes) + } s2; + } u1; + U16 cs; // IA32 cs (0 for IA64) + union { + U16 cpuAndOS; // cpu and OS info as one word + struct { // cpu and OS info broken out + U16 cpuNum : 12; // cpu number (0 - 4095) + U16 notVmid0 : 1; // win95, vmid0 flag (1 means NOT vmid 0) + U16 codeMode : 2; // processor mode, see MODE_ defines + U16 uncore_valid : 1; // identifies if the uncore count is valid + } s3; + } u2; + U32 tid; // OS thread ID (may get reused, see tidIsRaw) + U32 pidRecIndex; // process ID rec index (index into start of pid + // record section) .. can validly be 0 if not raw + // (array index). Use ReturnPid() to + // ..access this field .. (see pidRecIndexRaw) + union { + U32 bitFields2; + struct { + U32 mrIndex : 20; // module record index (index into start of + // module rec section) ..
(see mrIndexNone) + U32 eventIndex : 8; // index into the Events section + U32 tidIsRaw : 1; // tid is raw OS tid + U32 IA64PC : 1; // TRUE=this is a IA64 PC sample record + U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid + U32 mrIndexNone : 1; // no mrIndex (unknown module) + } s4; + } u3; + U64 tsc; // processor timestamp counter +} SampleRecordPC, *PSampleRecordPC; + +#define SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) +#define SAMPLE_RECORD_osid(x) ((x)->osid) +#define SAMPLE_RECORD_iip(x) ((x)->u1.s1.iip) +#define SAMPLE_RECORD_ipsr(x) ((x)->u1.s1.ipsr) +#define SAMPLE_RECORD_eip(x) ((x)->u1.s2.eip) +#define SAMPLE_RECORD_eflags(x) ((x)->u1.s2.eflags) +#define SAMPLE_RECORD_csd(x) ((x)->u1.s2.csd) +#define SAMPLE_RECORD_cs(x) ((x)->cs) +#define SAMPLE_RECORD_cpu_and_os(x) ((x)->u2.cpuAndOS) +#define SAMPLE_RECORD_cpu_num(x) ((x)->u2.s3.cpuNum) +#define SAMPLE_RECORD_uncore_valid(x) ((x)->u2.s3.uncore_valid) +#define SAMPLE_RECORD_not_vmid0(x) ((x)->u2.s3.notVmid0) +#define SAMPLE_RECORD_code_mode(x) ((x)->u2.s3.codeMode) +#define SAMPLE_RECORD_tid(x) ((x)->tid) +#define SAMPLE_RECORD_pid_rec_index(x) ((x)->pidRecIndex) +#define SAMPLE_RECORD_bit_fields2(x) ((x)->u3.bitFields2) +#define SAMPLE_RECORD_mr_index(x) ((x)->u3.s4.mrIndex) +#define SAMPLE_RECORD_event_index(x) ((x)->u3.s4.eventIndex) +#define SAMPLE_RECORD_tid_is_raw(x) ((x)->u3.s4.tidIsRaw) +#define SAMPLE_RECORD_ia64_pc(x) ((x)->u3.s4.IA64PC) +#define SAMPLE_RECORD_pid_rec_index_raw(x) ((x)->u3.s4.pidRecIndexRaw) +#define SAMPLE_RECORD_mr_index_none(x) ((x)->u3.s4.mrIndexNone) +#define SAMPLE_RECORD_tsc(x) ((x)->tsc) + +// end of SampleRecord sections + +/* Uncore Sample Record definition. This is a skinny sample record used by uncore boxes + to record samples. The sample record consists of a descriptor id, cpu info and timestamp. */ + +typedef struct UncoreSampleRecordPC_s { + U32 descriptor_id; + U32 osid; + U16 cpuNum; + U16 pkgNum; + union { + U32 flags; + struct { + U32 uncore_valid : 1; // identifies if the uncore count is valid + U32 reserved1 : 31; + } s1; + } u1; + U64 reserved2; + U64 tsc; // processor timestamp counter +} UncoreSampleRecordPC, *PUncoreSampleRecordPC; + +#define UNCORE_SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id) +#define UNCORE_SAMPLE_RECORD_osid(x) ((x)->osid) +#define UNCORE_SAMPLE_RECORD_cpu_num(x) ((x)->cpuNum) +#define UNCORE_SAMPLE_RECORD_pkg_num(x) ((x)->pkgNum) +#define UNCORE_SAMPLE_RECORD_uncore_valid(x) ((x)->u1.s1.uncore_valid) +#define UNCORE_SAMPLE_RECORD_tsc(x) ((x)->tsc) + +// end of UncoreSampleRecord section
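+/*
+ * Editor's note (illustrative, not part of this patch): the cpuAndOS word in
+ * SampleRecordPC packs several bitfields; the accessors above hide that
+ * packing when a record is decoded, e.g.:
+ *
+ *     SampleRecordPC *rec = ...;                // one record from the buffer
+ *     U16 cpu  = SAMPLE_RECORD_cpu_num(rec);    // 12-bit cpu number
+ *     U16 mode = SAMPLE_RECORD_code_mode(rec);  // MODE_64BIT, MODE_32BIT, ...
+ *     if (mode == MODE_32BIT)
+ *         resolve_ip(SAMPLE_RECORD_eip(rec));   // hypothetical helper
+ */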
+// Definitions for user markers data +// The instances of these structures will be written to the user markers temp file. + +#define MARKER_DEFAULT_TYPE "Default_Marker" +#define MARKER_DEFAULT_ID 0 +#define MAX_MARKER_LENGTH 136 + +#define MARK_ID 4 +#define MARK_DATA 2 +#define THREAD_INFO 8 + +/* do not use at this moment +typedef enum { + SMRK_USER_DEFINED = 0, + SMRK_THREAD_NAME, + SMRK_WALLCLOCK, + SMRK_TEXT, + SMRK_TYPE_ID +} SMRK_TYPE; +*/ + +/* + * Common Register descriptions + */ + +/* + * Bits used in the debug control register + */ +#define DEBUG_CTL_LBR 0x0000001 +#define DEBUG_CTL_BTF 0x0000002 +#define DEBUG_CTL_TR 0x0000040 +#define DEBUG_CTL_BTS 0x0000080 +#define DEBUG_CTL_BTINT 0x0000100 +#define DEBUG_CTL_BTS_OFF_OS 0x0000200 +#define DEBUG_CTL_BTS_OFF_USR 0x0000400 +#define DEBUG_CTL_FRZ_LBR_ON_PMI 0x0000800 +#define DEBUG_CTL_FRZ_PMON_ON_PMI 0x0001000 +#define DEBUG_CTL_ENABLE_UNCORE_PMI 0x0002000 + +#define DEBUG_CTL_NODE_lbr_get(reg) ((reg) & DEBUG_CTL_LBR) +#define DEBUG_CTL_NODE_lbr_set(reg) ((reg) |= DEBUG_CTL_LBR) +#define DEBUG_CTL_NODE_lbr_clear(reg) ((reg) &= ~DEBUG_CTL_LBR) + +#define DEBUG_CTL_NODE_btf_get(reg) ((reg) & DEBUG_CTL_BTF) +#define DEBUG_CTL_NODE_btf_set(reg) ((reg) |= DEBUG_CTL_BTF) +#define DEBUG_CTL_NODE_btf_clear(reg) ((reg) &= ~DEBUG_CTL_BTF) + +#define DEBUG_CTL_NODE_tr_get(reg) ((reg) & DEBUG_CTL_TR) +#define DEBUG_CTL_NODE_tr_set(reg) ((reg) |= DEBUG_CTL_TR) +#define DEBUG_CTL_NODE_tr_clear(reg) ((reg) &= ~DEBUG_CTL_TR) + +#define DEBUG_CTL_NODE_bts_get(reg) ((reg) & DEBUG_CTL_BTS) +#define DEBUG_CTL_NODE_bts_set(reg) ((reg) |= DEBUG_CTL_BTS) +#define DEBUG_CTL_NODE_bts_clear(reg) ((reg) &= ~DEBUG_CTL_BTS) + +#define DEBUG_CTL_NODE_btint_get(reg) ((reg) & DEBUG_CTL_BTINT) +#define DEBUG_CTL_NODE_btint_set(reg) ((reg) |= DEBUG_CTL_BTINT) +#define DEBUG_CTL_NODE_btint_clear(reg) ((reg) &= ~DEBUG_CTL_BTINT) + +#define DEBUG_CTL_NODE_bts_off_os_get(reg) ((reg) & DEBUG_CTL_BTS_OFF_OS) +#define DEBUG_CTL_NODE_bts_off_os_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_OS) +#define DEBUG_CTL_NODE_bts_off_os_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_OS) + +#define DEBUG_CTL_NODE_bts_off_usr_get(reg) ((reg) & DEBUG_CTL_BTS_OFF_USR) +#define DEBUG_CTL_NODE_bts_off_usr_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_USR) +#define DEBUG_CTL_NODE_bts_off_usr_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_USR) + +#define DEBUG_CTL_NODE_frz_lbr_on_pmi_get(reg) ((reg) & DEBUG_CTL_FRZ_LBR_ON_PMI) +#define DEBUG_CTL_NODE_frz_lbr_on_pmi_set(reg) ((reg) |= DEBUG_CTL_FRZ_LBR_ON_PMI) +#define DEBUG_CTL_NODE_frz_lbr_on_pmi_clear(reg) \ + ((reg) &= ~DEBUG_CTL_FRZ_LBR_ON_PMI) + +#define DEBUG_CTL_NODE_frz_pmon_on_pmi_get(reg) \ + ((reg) & DEBUG_CTL_FRZ_PMON_ON_PMI) +#define DEBUG_CTL_NODE_frz_pmon_on_pmi_set(reg) \ + ((reg) |= DEBUG_CTL_FRZ_PMON_ON_PMI) +#define DEBUG_CTL_NODE_frz_pmon_on_pmi_clear(reg) \ + ((reg) &= ~DEBUG_CTL_FRZ_PMON_ON_PMI) + +#define DEBUG_CTL_NODE_enable_uncore_pmi_get(reg) \ + ((reg) & DEBUG_CTL_ENABLE_UNCORE_PMI) +#define DEBUG_CTL_NODE_enable_uncore_pmi_set(reg) \ + ((reg) |= DEBUG_CTL_ENABLE_UNCORE_PMI) +#define DEBUG_CTL_NODE_enable_uncore_pmi_clear(reg) \ + ((reg) &= ~DEBUG_CTL_ENABLE_UNCORE_PMI) + +/* + * Bits used in the LBR control register (Arch LBRs) + */ +#define LBR_CTL_EN 0x0000001 +#define LBR_CTL_OS 0x0000002 +#define LBR_CTL_USR 0x0000004 +#define LBR_CTL_EN_ALL 0x0000007 +#define LBR_CTL_CALL_STACK 0x0000008 +/* LBR filter bits */ +#define LBR_CTL_FILTER_JCC 0x0010000 +#define LBR_CTL_FILTER_NEAR_REL_JMP 0x0020000 +#define LBR_CTL_FILTER_NEAR_IND_JMP 0x0040000 +#define LBR_CTL_FILTER_NEAR_REL_CALL 0x0080000 +#define LBR_CTL_FILTER_NEAR_IND_CALL 0x0100000 +#define
LBR_CTL_FILTER_NEAR_RET 0x0200000 +#define LBR_CTL_FILTER_OTHER_BR 0x0400000 +#define LBR_CTL_FILTER_ALL_BR_TYPES 0x07F0000 + +#define LBR_CTL_NODE_lbr_en_get(reg) ((reg) & LBR_CTL_EN) +#define LBR_CTL_NODE_lbr_en_set(reg) ((reg) |= LBR_CTL_EN) +#define LBR_CTL_NODE_lbr_en_clear(reg) ((reg) &= ~LBR_CTL_EN) + +#define LBR_CTL_NODE_lbr_en_all_get(reg) ((reg) & LBR_CTL_EN_ALL) +#define LBR_CTL_NODE_lbr_en_all_set(reg) ((reg) |= LBR_CTL_EN_ALL) +#define LBR_CTL_NODE_lbr_en_all_clear(reg) ((reg) &= ~LBR_CTL_EN_ALL) + +#define LBR_CTL_NODE_filter_all_br_get(reg) ((reg) & LBR_CTL_FILTER_ALL_BR_TYPES) +#define LBR_CTL_NODE_filter_all_br_set(reg) ((reg) |= LBR_CTL_FILTER_ALL_BR_TYPES) +#define LBR_CTL_NODE_filter_all_br_clear(reg) \ + ((reg) &= ~LBR_CTL_FILTER_ALL_BR_TYPES) + +/* + * @macro SEP_VERSION_NODE_S + * @brief + * This structure supports versioning in SEP. The field major indicates the major version, + * minor indicates the minor version, and api indicates the api version for the current + * SEP build. This structure is initialized at the time when the driver is loaded. + */ + +typedef struct SEP_VERSION_NODE_S SEP_VERSION_NODE; +typedef SEP_VERSION_NODE *SEP_VERSION; + +struct SEP_VERSION_NODE_S { + union { + U32 sep_version; + struct { + S32 major : 8; + S32 minor : 8; + S32 api : 8; + S32 update : 8; + } s1; + } u1; +}; + +#define SEP_VERSION_NODE_sep_version(version) ((version)->u1.sep_version) +#define SEP_VERSION_NODE_major(version) ((version)->u1.s1.major) +#define SEP_VERSION_NODE_minor(version) ((version)->u1.s1.minor) +#define SEP_VERSION_NODE_api(version) ((version)->u1.s1.api) +#define SEP_VERSION_NODE_update(version) ((version)->u1.s1.update) + +#define MAX_RELEASE_STRING 256 +typedef struct LINUX_VERSION_NODE_S LINUX_VERSION_NODE; +typedef LINUX_VERSION_NODE *LINUX_VERSION; +struct LINUX_VERSION_NODE_S { + S32 major; + S32 minor; + S32 revision; + S8 patch[MAX_RELEASE_STRING]; +}; + +#define LINUX_VERSION_NODE_major(version) ((version).major) +#define LINUX_VERSION_NODE_minor(version) ((version).minor) +#define LINUX_VERSION_NODE_revision(version) ((version).revision) +#define LINUX_VERSION_NODE_patch(version) ((version).patch) + +/* + * The VTSA_SYS_INFO_STRUCT information is shared across kernel-mode + * and user-mode code, very specifically for tb5 file generation + */ + +typedef enum { + GT_UNK = 0, + GT_PER_CPU, + GT_PER_CHIPSET, + GT_CPUID, + GT_NODE, + GT_SYSTEM, + GT_SAMPLE_RECORD_INFO +} GEN_ENTRY_TYPES; + +typedef enum { + GST_UNK = 0, + GST_X86, + GST_ITANIUM, + GST_SA, // StrongARM + GST_XSC, + GST_EM64T, + GST_CS860 +} GEN_ENTRY_SUBTYPES; + +typedef struct __fixed_size_pointer { + union { + U64 fs_force_alignment; + struct { + U32 fs_unused; + U32 is_ptr : 1; + } s1; + } u1; + union { + U64 fs_offset; + void *fs_ptr; + } u2; +} VTSA_FIXED_SIZE_PTR; + +#define VTSA_FIXED_SIZE_PTR_is_ptr(fsp) ((fsp)->u1.s1.is_ptr) +#define VTSA_FIXED_SIZE_PTR_fs_offset(fsp) ((fsp)->u2.fs_offset) +#define VTSA_FIXED_SIZE_PTR_fs_ptr(fsp) ((fsp)->u2.fs_ptr) + +typedef struct __generic_array_header { + // + // Information related to the generic header + // + U32 hdr_size; // size of this generic header + // (for versioning; real data starts + // after the header) + + U32 next_field_hdr_padding; // make sure the next field is 8-byte aligned + + // + // VTSA_FIXED_SIZE_PTR should always be on an 8-byte boundary...
+ // + // pointer to the next generic header if there is one + // + VTSA_FIXED_SIZE_PTR hdr_next_gen_hdr; + + U32 hdr_reserved[7]; // padding for future use - force to 64 bytes... + + // + // Information related to the array this header is describing + // + U32 array_num_entries; + U32 array_entry_size; + U16 array_type; // from the GEN_ENTRY_TYPES enumeration + U16 array_subtype; // from the GEN_ENTRY_SUBTYPES enumeration +} VTSA_GEN_ARRAY_HDR; + +#define VTSA_GEN_ARRAY_HDR_hdr_size(gah) ((gah)->hdr_size) +#define VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gah) ((gah)->hdr_next_gen_hdr) +#define VTSA_GEN_ARRAY_HDR_array_num_entries(gah) ((gah)->array_num_entries) +#define VTSA_GEN_ARRAY_HDR_array_entry_size(gah) ((gah)->array_entry_size) +#define VTSA_GEN_ARRAY_HDR_array_type(gah) ((gah)->array_type) +#define VTSA_GEN_ARRAY_HDR_array_subtype(gah) ((gah)->array_subtype) + +typedef struct __cpuid_x86 { + U32 cpuid_eax_input; + U32 cpuid_eax; + U32 cpuid_ebx; + U32 cpuid_ecx; + U32 cpuid_edx; +} VTSA_CPUID_X86; + +#define VTSA_CPUID_X86_cpuid_eax_input(cid) ((cid)->cpuid_eax_input) +#define VTSA_CPUID_X86_cpuid_eax(cid) ((cid)->cpuid_eax) +#define VTSA_CPUID_X86_cpuid_ebx(cid) ((cid)->cpuid_ebx) +#define VTSA_CPUID_X86_cpuid_ecx(cid) ((cid)->cpuid_ecx) +#define VTSA_CPUID_X86_cpuid_edx(cid) ((cid)->cpuid_edx) + +typedef struct __cpuid_ipf { + U64 cpuid_select; + U64 cpuid_val; +} VTSA_CPUID_IPF; + +#define VTSA_CPUID_IPF_cpuid_select(cid) ((cid)->cpuid_select) +#define VTSA_CPUID_IPF_cpuid_val(cid) ((cid)->cpuid_val) + +typedef struct __generic_per_cpu { + // + // per cpu information + // + U32 cpu_number; // cpu number (as defined by the OS) + U32 cpu_speed_mhz; // cpu speed (in MHz) + U32 cpu_fsb_mhz; // Front Side Bus speed (in MHz) (if known) + U32 cpu_cache_L2; // ??? USER: cpu L2 (marketing definition) cache size (if known) + + // + // And pointer to other structures. Keep this on an 8-byte boundary + // + // "pointer" to generic array header that should contain + // cpuid information for this cpu + // + VTSA_FIXED_SIZE_PTR cpu_cpuid_array; + + S64 cpu_tsc_offset; // TSC offset from CPU 0 computed as (TSC CPU N - TSC CPU 0) + // + // intel processor number (from marketing). + // Currently 3 decimal digits (3xx, 5xx and 7xx) + // + U32 cpu_intel_processor_number; + + U32 cpu_cache_L3; // ??? USER: cpu L3 (marketing definition) cache size (if known) + + U64 platform_id; + + // + // package/mapping information + // + // The hierarchy for uniquely identifying a logical processor + // in a system is node number/id (from the node structure), + // package number, core number, and thread number. + // Core number is for identifying a core within a package. + // + // Actually, on Itanium getting all this information is + // pretty involved, requiring a complicated algorithm using PAL calls. + // I don't know how important all this stuff is to the user. + // Maybe we can just have the place holder now and figure out + // how to fill them later.
+ // + U16 cpu_package_num; // package number for this cpu (if known) + U16 cpu_core_num; // core number (if known) + U16 cpu_hw_thread_num; // hw thread number inside the core (if known) + + U16 cpu_threads_per_core; // total number of h/w threads per core (if known) + U16 cpu_module_id; // Processor module number + U16 cpu_num_modules; // Number of processor modules + U32 cpu_core_type; // Core type for hetero + U32 arch_perfmon_ver; + U32 num_gp_counters; + U32 num_fixed_counters; + U32 reserved1; + U64 reserved2; + U64 reserved3; + +} VTSA_GEN_PER_CPU; + +#define VTSA_GEN_PER_CPU_cpu_number(p_cpu) ((p_cpu)->cpu_number) +#define VTSA_GEN_PER_CPU_cpu_speed_mhz(p_cpu) ((p_cpu)->cpu_speed_mhz) +#define VTSA_GEN_PER_CPU_cpu_fsb_mhz(p_cpu) ((p_cpu)->cpu_fsb_mhz) +#define VTSA_GEN_PER_CPU_cpu_cache_L2(p_cpu) ((p_cpu)->cpu_cache_L2) +#define VTSA_GEN_PER_CPU_cpu_cpuid_array(p_cpu) ((p_cpu)->cpu_cpuid_array) +#define VTSA_GEN_PER_CPU_cpu_tsc_offset(p_cpu) ((p_cpu)->cpu_tsc_offset) +#define VTSA_GEN_PER_CPU_cpu_intel_processor_number(p_cpu) \ + ((p_cpu)->cpu_intel_processor_number) +#define VTSA_GEN_PER_CPU_cpu_cache_L3(p_cpu) ((p_cpu)->cpu_cache_L3) +#define VTSA_GEN_PER_CPU_platform_id(p_cpu) ((p_cpu)->platform_id) +#define VTSA_GEN_PER_CPU_cpu_package_num(p_cpu) ((p_cpu)->cpu_package_num) +#define VTSA_GEN_PER_CPU_cpu_core_num(p_cpu) ((p_cpu)->cpu_core_num) +#define VTSA_GEN_PER_CPU_cpu_hw_thread_num(p_cpu) ((p_cpu)->cpu_hw_thread_num) +#define VTSA_GEN_PER_CPU_cpu_threads_per_core(p_cpu) \ + ((p_cpu)->cpu_threads_per_core) +#define VTSA_GEN_PER_CPU_cpu_module_num(p_cpu) ((p_cpu)->cpu_module_id) +#define VTSA_GEN_PER_CPU_cpu_num_modules(p_cpu) ((p_cpu)->cpu_num_modules) +#define VTSA_GEN_PER_CPU_cpu_core_type(p_cpu) ((p_cpu)->cpu_core_type) +#define VTSA_GEN_PER_CPU_arch_perfmon_ver(p_cpu) ((p_cpu)->arch_perfmon_ver) +#define VTSA_GEN_PER_CPU_num_gp_counters(p_cpu) ((p_cpu)->num_gp_counters) +#define VTSA_GEN_PER_CPU_num_fixed_counters(p_cpu) ((p_cpu)->num_fixed_counters) + +typedef struct __node_info { + U32 node_type_from_shell; + U32 node_id; // The node number/id (if known) + + U32 node_num_available; // total number of cpus on this node + U32 node_num_used; // USER: number used based on cpu mask at time of run + + U64 node_physical_memory; // amount of physical memory (bytes) on this node + + // + // pointer to the first generic header that + // contains the per-cpu information + // + // Keep the VTSA_FIXED_SIZE_PTR on an 8-byte boundary...
+ // + VTSA_FIXED_SIZE_PTR node_percpu_array; + + U32 node_reserved[2]; // leave some space + +} VTSA_NODE_INFO; + +#define VTSA_NODE_INFO_node_type_from_shell(vni) ((vni)->node_type_from_shell) +#define VTSA_NODE_INFO_node_id(vni) ((vni)->node_id) +#define VTSA_NODE_INFO_node_num_available(vni) ((vni)->node_num_available) +#define VTSA_NODE_INFO_node_num_used(vni) ((vni)->node_num_used) +#define VTSA_NODE_INFO_node_physical_memory(vni) ((vni)->node_physical_memory) +#define VTSA_NODE_INFO_node_percpu_array(vni) ((vni)->node_percpu_array) + +typedef struct __sys_info { + // + // Keep this on an 8-byte boundary + // + VTSA_FIXED_SIZE_PTR node_array; // the per-node information + + U64 min_app_address; // USER: lower allowed user space address (if known) + U64 max_app_address; // USER: upper allowed user space address (if known) + U32 page_size; // Current page size + U32 allocation_granularity; // USER: Granularity of allocation requests (if known) + U32 reserved1; // added for future fields + U32 reserved2; // alignment purpose + U64 reserved3[3]; // added for future fields + +} VTSA_SYS_INFO; + +#define VTSA_SYS_INFO_node_array(sys_info) ((sys_info)->node_array) +#define VTSA_SYS_INFO_min_app_address(sys_info) ((sys_info)->min_app_address) +#define VTSA_SYS_INFO_max_app_address(sys_info) ((sys_info)->max_app_address) +#define VTSA_SYS_INFO_page_size(sys_info) ((sys_info)->page_size) +#define VTSA_SYS_INFO_allocation_granularity(sys_info) \ + ((sys_info)->allocation_granularity) + +typedef struct DRV_TOPOLOGY_INFO_NODE_S DRV_TOPOLOGY_INFO_NODE; +typedef DRV_TOPOLOGY_INFO_NODE *DRV_TOPOLOGY_INFO; + +struct DRV_TOPOLOGY_INFO_NODE_S { + U32 cpu_number; // cpu number (as defined by the OS) + U16 cpu_package_num; // package number for this cpu (if known) + U16 cpu_core_num; // core number (if known) + U16 cpu_hw_thread_num; // T0 or T1 if HT enabled + U16 reserved1; + S32 socket_master; + S32 core_master; + S32 thr_master; + U32 cpu_module_num; + U32 cpu_module_master; + U32 cpu_num_modules; + U32 cpu_core_type; + U32 arch_perfmon_ver; + U32 num_gp_counters; + U32 num_fixed_counters; + U32 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DRV_TOPOLOGY_INFO_cpu_number(dti) ((dti)->cpu_number) +#define DRV_TOPOLOGY_INFO_cpu_package_num(dti) ((dti)->cpu_package_num) +#define DRV_TOPOLOGY_INFO_cpu_core_num(dti) ((dti)->cpu_core_num) +#define DRV_TOPOLOGY_INFO_socket_master(dti) ((dti)->socket_master) +#define DRV_TOPOLOGY_INFO_core_master(dti) ((dti)->core_master) +#define DRV_TOPOLOGY_INFO_thr_master(dti) ((dti)->thr_master) +#define DRV_TOPOLOGY_INFO_cpu_hw_thread_num(dti) ((dti)->cpu_hw_thread_num) +#define DRV_TOPOLOGY_INFO_cpu_module_num(dti) ((dti)->cpu_module_num) +#define DRV_TOPOLOGY_INFO_cpu_module_master(dti) ((dti)->cpu_module_master) +#define DRV_TOPOLOGY_INFO_cpu_num_modules(dti) ((dti)->cpu_num_modules) +#define DRV_TOPOLOGY_INFO_cpu_core_type(dti) ((dti)->cpu_core_type) +#define DRV_TOPOLOGY_INFO_arch_perfmon_ver(dti) ((dti)->arch_perfmon_ver) +#define DRV_TOPOLOGY_INFO_num_gp_counters(dti) ((dti)->num_gp_counters) +#define DRV_TOPOLOGY_INFO_num_fixed_counters(dti) ((dti)->num_fixed_counters) + +#define VALUE_TO_BE_DISCOVERED 0 + +// dimm information +typedef struct DRV_DIMM_INFO_NODE_S DRV_DIMM_INFO_NODE; +typedef DRV_DIMM_INFO_NODE *DRV_DIMM_INFO; + +struct DRV_DIMM_INFO_NODE_S { + U32 platform_id; + U32 channel_num; + U32 rank_num; + U32 value; + U8 mc_num; + U8 dimm_valid; + U8 valid_value; + U8 rank_value; + U8 density_value; + U8 width_value; + U16 socket_num; + U64 
reserved1; + U64 reserved2; +}; + +#define DRV_DIMM_INFO_platform_id(di) ((di)->platform_id) +#define DRV_DIMM_INFO_channel_num(di) ((di)->channel_num) +#define DRV_DIMM_INFO_rank_num(di) ((di)->rank_num) +#define DRV_DIMM_INFO_value(di) ((di)->value) +#define DRV_DIMM_INFO_mc_num(di) ((di)->mc_num) +#define DRV_DIMM_INFO_dimm_valid(di) ((di)->dimm_valid) +#define DRV_DIMM_INFO_valid_value(di) ((di)->valid_value) +#define DRV_DIMM_INFO_rank_value(di) ((di)->rank_value) +#define DRV_DIMM_INFO_density_value(di) ((di)->density_value) +#define DRV_DIMM_INFO_width_value(di) ((di)->width_value) +#define DRV_DIMM_INFO_socket_num(di) ((di)->socket_num) + +//platform information. need to get from driver +#define MAX_PACKAGES 16 +#define MAX_CHANNELS 8 +#define MAX_RANKS 3 + +typedef struct DRV_PLATFORM_INFO_NODE_S DRV_PLATFORM_INFO_NODE; +typedef DRV_PLATFORM_INFO_NODE *DRV_PLATFORM_INFO; + +struct DRV_PLATFORM_INFO_NODE_S { + U64 info; // platform info + U64 ddr_freq_index; // freq table index + U8 misc_valid; // misc enabled valid bit + U8 reserved1; // added for alignment purpose + U16 reserved2; + U32 vmm_timer_freq; // timer frequency from VMM on SoFIA (in HZ) + U64 misc_info; // misc enabled info + U64 ufs_freq; // ufs frequency (HSX only) + DRV_DIMM_INFO_NODE dimm_info[MAX_PACKAGES * MAX_CHANNELS * MAX_RANKS]; + U64 energy_multiplier; // Value of energy multiplier + U64 reserved3; + U64 reserved4; + U64 reserved5; + U64 reserved6; +}; + +#define DRV_PLATFORM_INFO_info(data) ((data)->info) +#define DRV_PLATFORM_INFO_ddr_freq_index(data) ((data)->ddr_freq_index) +#define DRV_PLATFORM_INFO_misc_valid(data) ((data)->misc_valid) +#define DRV_PLATFORM_INFO_misc_info(data) ((data)->misc_info) +#define DRV_PLATFORM_INFO_ufs_freq(data) ((data)->ufs_freq) +#define DRV_PLATFORM_INFO_dimm_info(data) ((data)->dimm_info) +#define DRV_PLATFORM_INFO_energy_multiplier(data) ((data)->energy_multiplier) +#define DRV_PLATFORM_INFO_vmm_timer_freq(data) ((data)->vmm_timer_freq) + +//platform information. 
need to get from Platform picker +typedef struct PLATFORM_FREQ_INFO_NODE_S PLATFORM_FREQ_INFO_NODE; +typedef PLATFORM_FREQ_INFO_NODE *PLATFORM_FREQ_INFO; + +struct PLATFORM_FREQ_INFO_NODE_S { + float multiplier; // freq multiplier + double *table; // freq table + U32 table_size; // freq table size + U32 default_core_crystal_clock_freq; + U64 reserved1; + U64 reserved2; + U64 reserved3; +}; +#define PLATFORM_FREQ_INFO_multiplier(data) ((data)->multiplier) +#define PLATFORM_FREQ_INFO_table(data) ((data)->table) +#define PLATFORM_FREQ_INFO_table_size(data) ((data)->table_size) +#define PLATFORM_FREQ_INFO_default_core_crystal_clock_freq(data) \ + ((data)->default_core_crystal_clock_freq) + +/************************************************************** + * PMT DEVICE TOPOLOGY STRUCTS START +***************************************************************/ +#define MAX_PMT_DEVICES 32 +#define MAX_PMT_TILES 16 + +typedef struct PMT_DEVICE_TILE_NODE_S PMT_DEVICE_TILE_NODE; +struct PMT_DEVICE_TILE_NODE_S { + U32 tile_id; + U32 domain; + U32 bus; + U32 device; + U32 function; + U32 pmt_valid; + U32 oobmsm_deviceid; + U32 found; + U32 unit_id; + U64 pmt_endpoint_guid; + U64 reserved1; + U64 reserved2; + U64 reserved3; +}; + +typedef struct PMT_TOPOLOGY_DISCOVERY_NODE_S PMT_TOPOLOGY_DISCOVERY_NODE; +struct PMT_TOPOLOGY_DISCOVERY_NODE_S { + U32 id; + U32 domain; + U32 bus; + U32 device; + U32 function; + U32 deviceid; + U32 num_tiles; + U32 num_entries_found; + U32 device_scan; + U64 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; + PMT_DEVICE_TILE_NODE pmt_tiles[MAX_PMT_TILES]; +}; + +typedef struct PMT_TOPOLOGY_DISCOVERY_LIST_NODE_S PMT_TOPOLOGY_DISCOVERY_LIST_NODE; +typedef PMT_TOPOLOGY_DISCOVERY_LIST_NODE *PMT_TOPOLOGY_DISCOVERY_LIST; +struct PMT_TOPOLOGY_DISCOVERY_LIST_NODE_S { + U32 num_pmt_devices; + U32 topology_detected; + PMT_TOPOLOGY_DISCOVERY_NODE pmt_device[MAX_PMT_DEVICES]; +}; + +#define PMT_TOPOLOGY_DISCOVERY_LIST_num_pmt_devices(x) ((x)->num_pmt_devices) +#define PMT_TOPOLOGY_DISCOVERY_LIST_topology_detected(x) ((x)->topology_detected) +#define PMT_TOPOLOGY_DISCOVERY_LIST_pmt_device(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index]) +#define PMT_TOPOLOGY_DISCOVERY_LIST_id(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].id) +#define PMT_TOPOLOGY_DISCOVERY_LIST_domain(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].domain) +#define PMT_TOPOLOGY_DISCOVERY_LIST_bus(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].bus) +#define PMT_TOPOLOGY_DISCOVERY_LIST_device(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].device) +#define PMT_TOPOLOGY_DISCOVERY_LIST_function(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].function) +#define PMT_TOPOLOGY_DISCOVERY_LIST_deviceid(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].deviceid) +#define PMT_TOPOLOGY_DISCOVERY_LIST_num_tiles(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].num_tiles) +#define PMT_TOPOLOGY_DISCOVERY_LIST_num_entries_found(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].num_entries_found) +#define PMT_TOPOLOGY_DISCOVERY_LIST_device_scan(x, pmt_dev_index) \ + ((x)->pmt_device[pmt_dev_index].device_scan) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_id(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].tile_id) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_domain(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].domain) +#define 
PMT_TOPOLOGY_DISCOVERY_LIST_tile_bus(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].bus) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_device(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].device) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_function(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].function) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_found(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].found) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_unit_id(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].unit_id) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_oobmsm_deviceid(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].oobmsm_deviceid) +#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_pmt_endpoint_guid(x, pmt_dev_index, \ + pmt_dev_tile_index) \ + ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].pmt_endpoint_guid) + +typedef struct PMT_DEVICE_INFO_NODE_S PMT_DEVICE_INFO_NODE; +typedef PMT_DEVICE_INFO_NODE *PMT_DEVICE_INFO; +struct PMT_DEVICE_INFO_NODE_S { + U32 device_id; + U32 domain; + U32 bus; + U32 device; + U32 function; + U32 pmt_index; + U64 guid; + U64 reserved1; + U64 reserved2; +}; +/************************************************************** + * PMT DEVICE TOPOLOGY STRUCTS END +***************************************************************/ + +/****************************************************************************************** + * DEVICE INFO STRUCTS START (used in platform picker for detecting each core/uncore device) +*******************************************************************************************/ +typedef struct DEVICE_INFO_NODE_S DEVICE_INFO_NODE; +typedef DEVICE_INFO_NODE *DEVICE_INFO; //NEEDED in PP + +struct DEVICE_INFO_NODE_S { + S8 *dll_name; + PVOID dll_handle; + S8 *cpu_name; + S8 *pmu_name; + DRV_STCHAR *event_db_file_name; + //PLATFORM_IDENTITY plat_identity; // this is undefined right now. Please take this as structure containing U64 + U32 plat_type; // device type (e.g., DEVICE_INFO_CORE, etc. ... see enum below) + U32 plat_sub_type; // cti_type (e.g., CTI_Sandybridge, etc., ... see env_info_types.h) + S32 dispatch_id; // this will be set in user mode dlls and will be unique across all IPF, IA32 (including MIDS). 
+ ECB *ecb; + EVENT_CONFIG ec; + DEV_CONFIG pcfg; + DEV_UNC_CONFIG pcfg_unc; + U32 num_of_groups; + U32 size_of_alloc; // size of each event control block + PVOID drv_event; + U32 num_events; + U32 event_id_index; // event id index of device (basically how many events processed before this device) + U32 num_counters; + U32 group_index; + U32 num_packages; + U32 num_units; + U32 device_type; + U32 core_type; + U32 pmu_clone_id; // cti_type of platform to impersonate in device DLLs + U32 device_scope; + U32 num_subunits; + U64 reserved1; + U64 reserved2; + PMT_DEVICE_INFO_NODE pmt_device; +}; + +#define MAX_EVENT_NAME_LENGTH 256 + +#define DEVICE_INFO_dll_name(pdev) ((pdev)->dll_name) +#define DEVICE_INFO_dll_handle(pdev) ((pdev)->dll_handle) +#define DEVICE_INFO_cpu_name(pdev) ((pdev)->cpu_name) +#define DEVICE_INFO_pmu_name(pdev) ((pdev)->pmu_name) +#define DEVICE_INFO_event_db_file_name(pdev) ((pdev)->event_db_file_name) +#define DEVICE_INFO_plat_type(pdev) ((pdev)->plat_type) +#define DEVICE_INFO_plat_sub_type(pdev) ((pdev)->plat_sub_type) +#define DEVICE_INFO_pmu_clone_id(pdev) ((pdev)->pmu_clone_id) +#define DEVICE_INFO_dispatch_id(pdev) ((pdev)->dispatch_id) +#define DEVICE_INFO_ecb(pdev) ((pdev)->ecb) +#define DEVICE_INFO_ec(pdev) ((pdev)->ec) +#define DEVICE_INFO_pcfg(pdev) ((pdev)->pcfg) +#define DEVICE_INFO_pcfg_unc(pdev) ((pdev)->pcfg_unc) +#define DEVICE_INFO_num_groups(pdev) ((pdev)->num_of_groups) +#define DEVICE_INFO_size_of_alloc(pdev) ((pdev)->size_of_alloc) +#define DEVICE_INFO_drv_event(pdev) ((pdev)->drv_event) +#define DEVICE_INFO_num_events(pdev) ((pdev)->num_events) +#define DEVICE_INFO_event_id_index(pdev) ((pdev)->event_id_index) +#define DEVICE_INFO_num_counters(pdev) ((pdev)->num_counters) +#define DEVICE_INFO_group_index(pdev) ((pdev)->group_index) +#define DEVICE_INFO_num_packages(pdev) ((pdev)->num_packages) +#define DEVICE_INFO_num_units(pdev) ((pdev)->num_units) +#define DEVICE_INFO_device_type(pdev) ((pdev)->device_type) +#define DEVICE_INFO_core_type(pdev) ((pdev)->core_type) +#define DEVICE_INFO_device_scope(pdev) ((pdev)->device_scope) +#define DEVICE_INFO_num_subunits(pdev) ((pdev)->num_subunits) +#define DEVICE_INFO_pmt_device(pdev) ((pdev)->pmt_device) +#define DEVICE_INFO_pmt_device_id(pdev) ((pdev)->pmt_device.device_id) +#define DEVICE_INFO_pmt_device_domain(pdev) ((pdev)->pmt_device.domain) +#define DEVICE_INFO_pmt_device_bus(pdev) ((pdev)->pmt_device.bus) +#define DEVICE_INFO_pmt_device_device(pdev) ((pdev)->pmt_device.device) +#define DEVICE_INFO_pmt_device_function(pdev) ((pdev)->pmt_device.function) +#define DEVICE_INFO_pmt_device_index(pdev) ((pdev)->pmt_device.pmt_index) + +typedef struct DEVICE_INFO_DATA_NODE_S DEVICE_INFO_DATA_NODE; +typedef DEVICE_INFO_DATA_NODE *DEVICE_INFO_DATA; //NEEDED in PP + +struct DEVICE_INFO_DATA_NODE_S { + DEVICE_INFO pdev_info; + U32 num_elements; + U32 num_allocated; + U64 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DEVICE_INFO_DATA_pdev_info(d) ((d)->pdev_info) +#define DEVICE_INFO_DATA_num_elements(d) ((d)->num_elements) +#define DEVICE_INFO_DATA_num_allocated(d) ((d)->num_allocated) + +typedef enum { + DEVICE_INFO_CORE = 0, + DEVICE_INFO_UNCORE = 1, + DEVICE_INFO_CHIPSET = 2, + DEVICE_INFO_GFX = 3, + DEVICE_INFO_PWR = 4, + DEVICE_INFO_TELEMETRY = 5 +} DEVICE_INFO_TYPE; + +typedef enum { + INVALID_TERMINATE_TYPE = 0, + STOP_TERMINATE, + CANCEL_TERMINATE +} ABNORMAL_TERMINATE_TYPE; + +typedef enum { + DEVICE_SCOPE_PACKAGE = 0, + DEVICE_SCOPE_SYSTEM = 1 +} DEVICE_SCOPE_TYPE; + 
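Since this header exposes every field only through accessor macros, a short usage sketch may help; it is illustrative only, and the helper below is hypothetical, not part of the patch:

/* Illustrative sketch only -- count_core_devices() is a hypothetical
 * consumer, not part of this header. It walks a DEVICE_INFO_DATA
 * collection through the accessor macros defined above. */
static U32
count_core_devices(DEVICE_INFO_DATA dev_data)
{
	U32 i;
	U32 num_core = 0;

	for (i = 0; i < DEVICE_INFO_DATA_num_elements(dev_data); i++) {
		DEVICE_INFO pdev = &DEVICE_INFO_DATA_pdev_info(dev_data)[i];

		if (DEVICE_INFO_device_type(pdev) == DEVICE_INFO_CORE) {
			num_core++;
		}
	}

	return num_core;
}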
+/**************************************************************
+ * DEVICE_INFO STRUCTS END
+***************************************************************/
+
+/**************************************************************
+ * UNCORE PMU TOPOLOGY STRUCTS START
+***************************************************************/
+typedef struct PCIFUNC_INFO_NODE_S PCIFUNC_INFO_NODE;
+typedef PCIFUNC_INFO_NODE *PCIFUNC_INFO;
+
+struct PCIFUNC_INFO_NODE_S {
+	U32 valid;
+	U32 num_entries; // the number of entries found with the same device/function numbers but a different bus_no.
+	U64 deviceId;
+	U64 reserved1;
+	U64 reserved2;
+};
+
+#define PCIFUNC_INFO_NODE_valid(x) ((x)->valid)
+#define PCIFUNC_INFO_NODE_deviceId(x) ((x)->deviceId)
+#define PCIFUNC_INFO_NODE_num_entries(x) ((x)->num_entries)
+
+typedef struct PCIDEV_INFO_NODE_S PCIDEV_INFO_NODE;
+typedef PCIDEV_INFO_NODE *PCIDEV_INFO;
+
+struct PCIDEV_INFO_NODE_S {
+	PCIFUNC_INFO_NODE func_info[MAX_PCI_FUNCNO];
+	U32 valid;
+	U32 dispatch_id;
+	U64 reserved1;
+	U64 reserved2;
+};
+
+#define PCIDEV_INFO_NODE_func_info(x, i) ((x).func_info[i])
+#define PCIDEV_INFO_NODE_valid(x) ((x).valid)
+
+typedef struct UNCORE_PCIDEV_NODE_S UNCORE_PCIDEV_NODE;
+
+struct UNCORE_PCIDEV_NODE_S {
+	PCIDEV_INFO_NODE pcidev[MAX_PCI_DEVNO];
+	U32 dispatch_id;
+	U32 scan;
+	U32 num_uncore_units;
+	U32 num_deviceid_entries;
+	U8 dimm_device1;
+	U8 dimm_device2;
+	U16 reserved1;
+	U32 reserved2;
+	U64 reserved3;
+	U64 reserved4;
+	U32 deviceid_list[MAX_PCI_DEVNO];
+};
+
+// Structure used to perform uncore device discovery using PCI device scan
+
+typedef struct UNCORE_TOPOLOGY_INFO_NODE_S UNCORE_TOPOLOGY_INFO_NODE;
+typedef UNCORE_TOPOLOGY_INFO_NODE *UNCORE_TOPOLOGY_INFO;
+
+struct UNCORE_TOPOLOGY_INFO_NODE_S {
+	UNCORE_PCIDEV_NODE device[MAX_DEVICES];
+};
+
+#define UNCORE_TOPOLOGY_INFO_device(x, dev_index) ((x)->device[dev_index])
+#define UNCORE_TOPOLOGY_INFO_device_dispatch_id(x, dev_index) \
+	((x)->device[dev_index].dispatch_id)
+#define UNCORE_TOPOLOGY_INFO_device_scan(x, dev_index) \
+	((x)->device[dev_index].scan)
+#define UNCORE_TOPOLOGY_INFO_pcidev_valid(x, dev_index, devno) \
+	((x)->device[dev_index].pcidev[devno].valid)
+#define UNCORE_TOPOLOGY_INFO_pcidev_dispatch_id(x, dev_index, devno) \
+	((x)->device[dev_index].pcidev[devno].dispatch_id)
+#define UNCORE_TOPOLOGY_INFO_pcidev(x, dev_index, devno) \
+	((x)->device[dev_index].pcidev[devno])
+#define UNCORE_TOPOLOGY_INFO_num_uncore_units(x, dev_index) \
+	((x)->device[dev_index].num_uncore_units)
+#define UNCORE_TOPOLOGY_INFO_num_deviceid_entries(x, dev_index) \
+	((x)->device[dev_index].num_deviceid_entries)
+#define UNCORE_TOPOLOGY_INFO_dimm_device1(x, dev_index) \
+	((x)->device[dev_index].dimm_device1)
+#define UNCORE_TOPOLOGY_INFO_dimm_device2(x, dev_index) \
+	((x)->device[dev_index].dimm_device2)
+#define UNCORE_TOPOLOGY_INFO_deviceid(x, dev_index, deviceid_idx) \
+	((x)->device[dev_index].deviceid_list[deviceid_idx])
+#define UNCORE_TOPOLOGY_INFO_pcidev_set_funcno_valid(x, dev_index, devno, \
+						     funcno) \
+	((x)->device[dev_index].pcidev[devno].func_info[funcno].valid = 1)
+#define UNCORE_TOPOLOGY_INFO_pcidev_is_found_in_platform(x, dev_index, devno, \
+							 funcno) \
+	((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries)
+#define UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(x, dev_index, devno, \
+							  funcno) \
+	((x)->device[dev_index].pcidev[devno].func_info[funcno].valid ?
TRUE : FALSE) +#define UNCORE_TOPOLOGY_INFO_pcidev_is_device_found(x, dev_index, devno, \ + funcno) \ + ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries > 0) +#define UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(x, dev_index, devno, \ + funcno) \ + ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries) + +// Structures used to store platform uncore discovery table + +typedef enum { + DISCOVERY_UNIT_CHA = 0, + DISCOVERY_UNIT_IIO, + DISCOVERY_UNIT_IRP, + DISCOVERY_UNIT_M2PCIE, + DISCOVERY_UNIT_PCU, // 4 + DISCOVERY_UNIT_UBOX, + DISCOVERY_UNIT_MC, + DISCOVERY_UNIT_M2M, + DISCOVERY_UNIT_UPI, // 8 + DISCOVERY_UNIT_M3UPI, + DISCOVERY_UNIT_PCIE, + DISCOVERY_UNIT_MDF, + DISCOVERY_UNIT_CXLCM, + DISCOVERY_UNIT_CXLDP, + DISCOVERY_UNIT_HBM, // 14 +} UNCORE_DISCOVERY_TYPE_IDS; + +typedef struct UNCORE_DISCOVERY_UNIT_NODE_S UNCORE_DISCOVERY_UNIT_NODE; +typedef UNCORE_DISCOVERY_UNIT_NODE *UNCORE_DISCOVERY_UNIT; + +struct UNCORE_DISCOVERY_UNIT_NODE_S { + union { + struct { + U64 num_gp_ctrs : 8; + U64 evtsel0_offset : 8; + U64 ctr_width : 8; + U64 ctr0_offset : 8; + U64 unit_status_offset : 8; + U64 rsvd : 21; + U64 pmu_access_type : 3; + } bits; + U64 bit_field_0; + } u1; + U64 unit_ctrl_addr; + union { + struct { + U64 unit_type : 16; + U64 unit_id : 16; + U64 rsvd : 32; + } bits; + U64 bit_field_1; + } u2; + U64 trace_info; +}; + +#define UNCORE_DISCOVERY_UNIT_num_gp_ctrs(x) ((x)->u1.bits.num_gp_ctrs) +#define UNCORE_DISCOVERY_UNIT_evtsel0_offset(x) ((x)->u1.bits.evtsel0_offset) +#define UNCORE_DISCOVERY_UNIT_ctr_width(x) ((x)->u1.bits.ctr_width) +#define UNCORE_DISCOVERY_UNIT_ctr0_offset(x) ((x)->u1.bits.ctr0_offset) +#define UNCORE_DISCOVERY_UNIT_unit_status_offset(x) \ + ((x)->u1.bits.unit_status_offset) +#define UNCORE_DISCOVERY_UNIT_pmu_access_type(x) ((x)->u1.bits.pmu_access_type) +#define UNCORE_DISCOVERY_UNIT_bit_field_0(x) ((x)->u1.bit_field_0) +#define UNCORE_DISCOVERY_UNIT_unit_ctrl_addr(x) ((x)->unit_ctrl_addr) +#define UNCORE_DISCOVERY_UNIT_unit_type(x) ((x)->u2.bits.unit_type) +#define UNCORE_DISCOVERY_UNIT_unit_id(x) ((x)->u2.bits.unit_id) +#define UNCORE_DISCOVERY_UNIT_bit_field_1(x) ((x)->u2.bit_field_1) +#define UNCORE_DISCOVERY_UNIT_trace_info(x) ((x)->trace_info) + +typedef struct UNCORE_DISCOVERY_GLOBAL_NODE_S UNCORE_DISCOVERY_GLOBAL_NODE; +typedef UNCORE_DISCOVERY_GLOBAL_NODE *UNCORE_DISCOVERY_GLOBAL; + +struct UNCORE_DISCOVERY_GLOBAL_NODE_S { + union { + struct { + U64 unit_type : 8; + U64 block_stride : 8; + U64 max_blocks : 10; + U64 rsvd : 36; + U64 access_type : 2; + } bits; + U64 bit_field_0; + } u1; + U64 global_ctrl_addr; + union { + struct { + U64 gbl_ctr_status_offset : 8; + U64 num_block_status_bits : 16; + U64 rsvd : 40; + } bits; + U64 bit_field_1; + } u2; + U64 trace_info; +}; + +#define UNCORE_DISCOVERY_GLOBAL_unit_type(x) ((x)->u1.bits.unit_type) +#define UNCORE_DISCOVERY_GLOBAL_block_stride(x) ((x)->u1.bits.block_stride) +#define UNCORE_DISCOVERY_GLOBAL_max_blocks(x) ((x)->u1.bits.max_blocks) +#define UNCORE_DISCOVERY_GLOBAL_access_type(x) ((x)->u1.bits.access_type) +#define UNCORE_DISCOVERY_GLOBAL_bit_field_0(x) ((x)->u1.bit_field_0) +#define UNCORE_DISCOVERY_GLOBAL_global_ctrl_addr(x) ((x)->global_ctrl_addr) +#define UNCORE_DISCOVERY_GLOBAL_gbl_ctr_status_offset(x) \ + ((x)->u2.bits.gbl_ctr_status_offset) +#define UNCORE_DISCOVERY_GLOBAL_num_block_status_bits(x) \ + ((x)->u2.bits.num_block_status_bits) +#define UNCORE_DISCOVERY_GLOBAL_bit_field_1(x) ((x)->u2.bit_field_1) +#define UNCORE_DISCOVERY_GLOBAL_trace_info(x) 
((x)->trace_info) +#define UNCORE_DISCOVERY_GLOBAL_global_node(x) ((x)->global_node) + +typedef struct UNCORE_DISCOVERY_TABLE_NODE_S UNCORE_DISCOVERY_TABLE_NODE; +typedef UNCORE_DISCOVERY_TABLE_NODE *UNCORE_DISCOVERY_TABLE; + +struct UNCORE_DISCOVERY_TABLE_NODE_S { + DRV_PCI_DEVICE_ENTRY_NODE discovery_entry_node; + UNCORE_DISCOVERY_GLOBAL_NODE discovery_global_node; + UNCORE_DISCOVERY_UNIT_NODE + discovery_unit_node[MAX_DISCOVERY_UNCORE_UNITS]; +}; + +#define UNCORE_DISCOVERY_TABLE_discovery_entry_node(x) ((x)->discovery_entry_node) +#define UNCORE_DISCOVERY_TABLE_discovery_entry_bus_no(x) \ + ((x)->discovery_entry_node.bus_no) +#define UNCORE_DISCOVERY_TABLE_discovery_entry_dev_no(x) \ + ((x)->discovery_entry_node.dev_no) +#define UNCORE_DISCOVERY_TABLE_discovery_entry_func_no(x) \ + ((x)->discovery_entry_node.func_no) +#define UNCORE_DISCOVERY_TABLE_discovery_entry_bar_address(x) \ + ((x)->discovery_entry_node.bar_address) +#define UNCORE_DISCOVERY_TABLE_discovery_global_node(x) \ + ((x)->discovery_global_node) +#define UNCORE_DISCOVERY_TABLE_discovery_unit_node(x) ((x)->discovery_unit_node) +#define UNCORE_DISCOVERY_TABLE_discovery_unit_node_entry(x, unit_id) \ + ((x)->discovery_unit_node[unit_id]) + +typedef struct UNCORE_DISCOVERY_DVSEC_CONFIG_NODE_S UNCORE_DISCOVERY_DVSEC_CONFIG_NODE; +typedef UNCORE_DISCOVERY_DVSEC_CONFIG_NODE *UNCORE_DISCOVERY_DVSEC_CONFIG; + +struct UNCORE_DISCOVERY_DVSEC_CONFIG_NODE_S { + U32 pci_ext_cap_id; // Capability ID + U32 dvsec_offset; // DVSEC offset + U32 dvsec_id_mask; // DVSEC_ID mask + U32 dvsec_id_pmon; // Type ID for discovery + U32 dvsec_bir_mask; // BIR mask + U32 map_size; // map size +}; + +#define UNCORE_DISCOVERY_DVSEC_pci_ext_cap_id(x) ((x)->pci_ext_cap_id) +#define UNCORE_DISCOVERY_DVSEC_dvsec_offset(x) ((x)->dvsec_offset) +#define UNCORE_DISCOVERY_DVSEC_dvsec_id_mask(x) ((x)->dvsec_id_mask) +#define UNCORE_DISCOVERY_DVSEC_dvsec_id_pmon(x) ((x)->dvsec_id_pmon) +#define UNCORE_DISCOVERY_DVSEC_dvsec_bir_mask(x) ((x)->dvsec_bir_mask) +#define UNCORE_DISCOVERY_DVSEC_map_size(x) ((x)->map_size) + +typedef struct UNCORE_DISCOVERY_TABLE_LIST_NODE_S UNCORE_DISCOVERY_TABLE_LIST_NODE; +typedef UNCORE_DISCOVERY_TABLE_LIST_NODE *UNCORE_DISCOVERY_TABLE_LIST; + +struct UNCORE_DISCOVERY_TABLE_LIST_NODE_S { + UNCORE_DISCOVERY_DVSEC_CONFIG_NODE dvsec_config_node; + UNCORE_DISCOVERY_TABLE_NODE discovery_table_list[MAX_PACKAGES]; +}; + +#define UNCORE_DISCOVERY_dvsec_config_node(x) ((x)->dvsec_config_node) +#define UNCORE_DISCOVERY_pci_ext_cap_id(x) ((x)->dvsec_config_node.pci_ext_cap_id) +#define UNCORE_DISCOVERY_dvsec_offset(x) ((x)->dvsec_config_node.dvsec_offset) +#define UNCORE_DISCOVERY_dvsec_id_mask(x) ((x)->dvsec_config_node.dvsec_id_mask) +#define UNCORE_DISCOVERY_dvsec_id_pmon(x) ((x)->dvsec_config_node.dvsec_id_pmon) +#define UNCORE_DISCOVERY_dvsec_bir_mask(x) ((x)->dvsec_config_node.dvsec_bir_mask) +#define UNCORE_DISCOVERY_map_size(x) ((x)->dvsec_config_node.map_size) +#define UNCORE_DISCOVERY_table_list(x) ((x)->discovery_table_list) +#define UNCORE_DISCOVERY_table_list_entry(x, i) ((x)->discovery_table_list[i]) + +typedef enum { + CORE_TOPOLOGY_NODE = 0, + UNCORE_TOPOLOGY_NODE_IMC = 1, + UNCORE_TOPOLOGY_NODE_UBOX = 2, + UNCORE_TOPOLOGY_NODE_QPI = 3, + UNCORE_TOPOLOGY_NODE_IIO = 4, + UNCORE_TOPOLOGY_NODE_MCHBM = 5, + UNCORE_TOPOLOGY_NODE_PMEM = 6, + MAX_TOPOLOGY_DEV = 16, +} UNCORE_TOPOLOGY_NODE_INDEX_TYPE; + +/************************************************************** + * UNCORE PMU TOPOLOGY STRUCTS END 
+***************************************************************/ + +/************************************************************** + * OLD PLATFORM TOPOLOGY STRUCTS (for backward compatibility) + * New struct definitions are below after the old block +***************************************************************/ +//Define to support backward compatibility (changed max value for the new struct definitions) +#define MAX_TOPOLOGY_DEV_OLD 4 + +typedef struct PLATFORM_TOPOLOGY_REG_NODE_S PLATFORM_TOPOLOGY_REG_NODE; +typedef PLATFORM_TOPOLOGY_REG_NODE *PLATFORM_TOPOLOGY_REG; + +struct PLATFORM_TOPOLOGY_REG_NODE_S { + U32 bus; + U32 device; + U32 function; + U32 reg_id; + U64 reg_mask; + U64 reg_value[MAX_PACKAGES]; + U8 reg_type; + U8 device_valid; + U16 reserved1; + U32 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define PLATFORM_TOPOLOGY_REG_bus(x, i) ((x)[(i)].bus) +#define PLATFORM_TOPOLOGY_REG_device(x, i) ((x)[(i)].device) +#define PLATFORM_TOPOLOGY_REG_function(x, i) ((x)[(i)].function) +#define PLATFORM_TOPOLOGY_REG_reg_id(x, i) ((x)[(i)].reg_id) +#define PLATFORM_TOPOLOGY_REG_reg_mask(x, i) ((x)[(i)].reg_mask) +#define PLATFORM_TOPOLOGY_REG_reg_type(x, i) ((x)[(i)].reg_type) +#define PLATFORM_TOPOLOGY_REG_device_valid(x, i) ((x)[(i)].device_valid) +#define PLATFORM_TOPOLOGY_REG_reg_value(x, i, package_no) \ + ((x)[(i)].reg_value[package_no]) + +typedef struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S PLATFORM_TOPOLOGY_DISCOVERY_NODE; +typedef PLATFORM_TOPOLOGY_DISCOVERY_NODE *PLATFORM_TOPOLOGY_DISCOVERY; + +struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S { + U32 device_index; + U32 device_id; + U32 num_registers; + U8 scope; + U8 prog_valid; + U16 reserved2; + U64 reserved3; + U64 reserved4; + U64 reserved5; + PLATFORM_TOPOLOGY_REG_NODE topology_regs[MAX_REGS]; +}; +/************************************************************** + * OLD PLATFORM TOPOLOGY STRUCTS END +***************************************************************/ +/************************************************************** + * NEW PLATFORM TOPOLOGY STRUCTS + * (modified to query more package specific values from driver and support MMIO) + * (Increased the number of devices to be supported) +***************************************************************/ +typedef struct PLATFORM_TOPOLOGY_REG_VALUE_NODE_S PLATFORM_TOPOLOGY_REG_VALUE_NODE; +typedef PLATFORM_TOPOLOGY_REG_VALUE_NODE *PLATFORM_TOPOLOGY_REG_VALUE; + +struct PLATFORM_TOPOLOGY_REG_VALUE_NODE_S { + U64 reg_value; + U16 bus_number; + U16 domain_number; + U32 reserved1; + U64 reserved2; + U64 reserved3; +}; + +typedef struct PLATFORM_TOPOLOGY_REG_NODE_EXT_S PLATFORM_TOPOLOGY_REG_NODE_EXT; +typedef PLATFORM_TOPOLOGY_REG_NODE_EXT *PLATFORM_TOPOLOGY_REG_EXT; + +struct PLATFORM_TOPOLOGY_REG_NODE_EXT_S { + U32 bus; + U32 device; + U32 function; + U32 reg_id; + U64 reg_mask; + U8 reg_type; + U8 device_valid; + U16 unit_index; + + //MMIO specific regs + U32 base_mmio_offset; + U32 mmio_map_size; + U32 main_bar_offset; + U32 secondary_bar_offset; + U8 main_bar_shift; + U8 secondary_bar_shift; + U16 reserved1; + U64 main_bar_mask; + U64 secondary_bar_mask; + + U32 device_id; + U32 reserved2; + U64 reserved3; + U64 reserved4; + //Package specific values + PLATFORM_TOPOLOGY_REG_VALUE_NODE reg_value_list[MAX_PACKAGES]; +}; + +#define PLATFORM_TOPOLOGY_REG_EXT_bus(x, i) ((x)[(i)].bus) +#define PLATFORM_TOPOLOGY_REG_EXT_device(x, i) ((x)[(i)].device) +#define PLATFORM_TOPOLOGY_REG_EXT_function(x, i) ((x)[(i)].function) +#define PLATFORM_TOPOLOGY_REG_EXT_reg_id(x, i) 
((x)[(i)].reg_id) +#define PLATFORM_TOPOLOGY_REG_EXT_reg_mask(x, i) ((x)[(i)].reg_mask) +#define PLATFORM_TOPOLOGY_REG_EXT_reg_type(x, i) ((x)[(i)].reg_type) +#define PLATFORM_TOPOLOGY_REG_EXT_device_valid(x, i) ((x)[(i)].device_valid) +#define PLATFORM_TOPOLOGY_REG_EXT_unit_index(x, i) ((x)[(i)].unit_index) +#define PLATFORM_TOPOLOGY_REG_EXT_base_mmio_offset(x, i) \ + ((x)[(i)].base_mmio_offset) +#define PLATFORM_TOPOLOGY_REG_EXT_mmio_map_size(x, i) ((x)[(i)].mmio_map_size) +#define PLATFORM_TOPOLOGY_REG_EXT_main_bar_offset(x, i) ((x)[(i)].main_bar_offset) +#define PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_offset(x, i) \ + ((x)[(i)].secondary_bar_offset) +#define PLATFORM_TOPOLOGY_REG_EXT_main_bar_shift(x, i) ((x)[(i)].main_bar_shift) +#define PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_shift(x, i) \ + ((x)[(i)].secondary_bar_shift) +#define PLATFORM_TOPOLOGY_REG_EXT_main_bar_mask(x, i) ((x)[(i)].main_bar_mask) +#define PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_mask(x, i) \ + ((x)[(i)].secondary_bar_mask) +#define PLATFORM_TOPOLOGY_REG_EXT_device_id(x, i) ((x)[(i)].device_id) +#define PLATFORM_TOPOLOGY_REG_EXT_reg_value(x, i, package_no) \ + ((x)[(i)].reg_value_list[package_no].reg_value) +#define PLATFORM_TOPOLOGY_REG_EXT_bus_number(x, i, package_no) \ + ((x)[(i)].reg_value_list[package_no].bus_number) +#define PLATFORM_TOPOLOGY_REG_EXT_domain_number(x, i, package_no) \ + ((x)[(i)].reg_value_list[package_no].domain_number) + +typedef struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT_S PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT; +typedef PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT *PLATFORM_TOPOLOGY_DISCOVERY_EXT; + +struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT_S { + U32 device_index; + U32 device_id; + U32 num_registers; + U8 scope; + U8 prog_valid; + U8 distinct_buses; + U8 reserved2; + U64 reserved3; + U64 reserved4; + U64 reserved5; + PLATFORM_TOPOLOGY_REG_NODE_EXT topology_regs[MAX_REGS]; +}; +//Structure used to discover the uncore device topology_device +typedef struct PLATFORM_TOPOLOGY_PROG_NODE_S PLATFORM_TOPOLOGY_PROG_NODE; +typedef PLATFORM_TOPOLOGY_PROG_NODE *PLATFORM_TOPOLOGY_PROG; + +struct PLATFORM_TOPOLOGY_PROG_NODE_S { + U32 num_devices; + PLATFORM_TOPOLOGY_DISCOVERY_NODE topology_device + [MAX_TOPOLOGY_DEV_OLD]; //Leaving this for backward compatibility + PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT + topology_device_ext[MAX_TOPOLOGY_DEV]; //New topology struct array +}; + +// OLD PLATFORM TOPOLOGY STRUCT (for backward compatibility) +typedef struct PLATFORM_TOPOLOGY_PROG_OLD_NODE_S PLATFORM_TOPOLOGY_PROG_OLD_NODE; +typedef PLATFORM_TOPOLOGY_PROG_OLD_NODE *PLATFORM_TOPOLOGY_PROG_OLD; +struct PLATFORM_TOPOLOGY_PROG_OLD_NODE_S { + U32 num_devices; + PLATFORM_TOPOLOGY_DISCOVERY_NODE topology_device[MAX_TOPOLOGY_DEV_OLD]; +}; + +#define PLATFORM_TOPOLOGY_PROG_num_devices(x) ((x)->num_devices) +#define PLATFORM_TOPOLOGY_PROG_topology_device(x, dev_index) \ + ((x)->topology_device[dev_index]) +#define PLATFORM_TOPOLOGY_PROG_topology_device_device_index(x, dev_index) \ + ((x)->topology_device[dev_index].device_index) +#define PLATFORM_TOPOLOGY_PROG_topology_device_device_id(x, dev_index) \ + ((x)->topology_device[dev_index].device_id) +#define PLATFORM_TOPOLOGY_PROG_topology_device_scope(x, dev_index) \ + ((x)->topology_device[dev_index].scope) +#define PLATFORM_TOPOLOGY_PROG_topology_device_num_registers(x, dev_index) \ + ((x)->topology_device[dev_index].num_registers) +#define PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid(x, dev_index) \ + ((x)->topology_device[dev_index].prog_valid) +#define 
PLATFORM_TOPOLOGY_PROG_topology_topology_regs(x, dev_index) \ + ((x)->topology_device[dev_index].topology_regs) + +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device(x, dev_index) \ + ((x)->topology_device_ext[dev_index]) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_device_index(x, dev_index) \ + ((x)->topology_device_ext[dev_index].device_index) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_device_id(x, dev_index) \ + ((x)->topology_device_ext[dev_index].device_id) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_scope(x, dev_index) \ + ((x)->topology_device_ext[dev_index].scope) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_num_registers(x, dev_index) \ + ((x)->topology_device_ext[dev_index].num_registers) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_prog_valid(x, dev_index) \ + ((x)->topology_device_ext[dev_index].prog_valid) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_distinct_buses(x, dev_index) \ + ((x)->topology_device_ext[dev_index].distinct_buses) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_topology_regs(x, dev_index) \ + ((x)->topology_device_ext[dev_index].topology_regs) + +/************************************************************** + * NEW PLATFORM TOPOLOGY STRUCTS END +***************************************************************/ + +/************************************************************** + * FPGA TOPOLOGY STRUCTS START +***************************************************************/ +typedef struct FPGA_GB_DISCOVERY_NODE_S FPGA_GB_DISCOVERY_NODE; + +struct FPGA_GB_DISCOVERY_NODE_S { + U16 bar_num; + U16 feature_id; + U32 device_id; + U64 afu_id_l; + U64 afu_id_h; + U32 feature_offset; + U32 feature_len; + U8 scan; + U8 valid; + U16 reserved1; + U32 reserved2; +}; + +typedef struct FPGA_GB_DEV_NODE_S FPGA_GB_DEV_NODE; +typedef FPGA_GB_DEV_NODE *FPGA_GB_DEV; + +struct FPGA_GB_DEV_NODE_S { + U32 num_devices; + FPGA_GB_DISCOVERY_NODE fpga_gb_device[MAX_DEVICES]; +}; + +#define FPGA_GB_DEV_num_devices(x) ((x)->num_devices) +#define FPGA_GB_DEV_device(x, dev_index) ((x)->fpga_gb_device[dev_index]) +#define FPGA_GB_DEV_bar_num(x, dev_index) ((x)->fpga_gb_device[dev_index].bar_num) +#define FPGA_GB_DEV_feature_id(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_id) +#define FPGA_GB_DEV_device_id(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].device_id) +#define FPGA_GB_DEV_afu_id_low(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].afu_id_l) +#define FPGA_GB_DEV_afu_id_high(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].afu_id_h) +#define FPGA_GB_DEV_feature_offset(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_offset) +#define FPGA_GB_DEV_feature_len(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_len) +#define FPGA_GB_DEV_scan(x, dev_index) ((x)->fpga_gb_device[dev_index].scan) +#define FPGA_GB_DEV_valid(x, dev_index) ((x)->fpga_gb_device[dev_index].valid) + +/************************************************************** + * FPGA TOPOLOGY STRUCTS END +***************************************************************/ + +typedef enum { + UNCORE_TOPOLOGY_INFO_NODE_IMC = 0, + UNCORE_TOPOLOGY_INFO_NODE_QPILL = 1, + UNCORE_TOPOLOGY_INFO_NODE_HA = 2, + UNCORE_TOPOLOGY_INFO_NODE_R3 = 3, + UNCORE_TOPOLOGY_INFO_NODE_R2 = 4, + UNCORE_TOPOLOGY_INFO_NODE_IRP = 5, + UNCORE_TOPOLOGY_INFO_NODE_IMC_UCLK = 6, + UNCORE_TOPOLOGY_INFO_NODE_EDC_ECLK = 7, + UNCORE_TOPOLOGY_INFO_NODE_EDC_UCLK = 8, + UNCORE_TOPOLOGY_INFO_NODE_M2M = 9, + UNCORE_TOPOLOGY_INFO_NODE_HFI_RXE = 10, + UNCORE_TOPOLOGY_INFO_NODE_HFI_TXE = 11, 
+ UNCORE_TOPOLOGY_INFO_NODE_FPGA_CACHE = 12, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_FAB = 13, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_THERMAL = 14, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_POWER = 15, + UNCORE_TOPOLOGY_INFO_NODE_SOC_SA = 16, + UNCORE_TOPOLOGY_INFO_NODE_EDRAM = 17, + UNCORE_TOPOLOGY_INFO_NODE_IIO = 18, + UNCORE_TOPOLOGY_INFO_NODE_PMT = 19, + UNCORE_TOPOLOGY_INFO_NODE_MCHBM = 20, + UNCORE_TOPOLOGY_INFO_NODE_M2HBM = 21, + UNCORE_TOPOLOGY_INFO_NODE_PMEM_FC = 22, + UNCORE_TOPOLOGY_INFO_NODE_PMEM_MC = 23, + UNCORE_TOPOLOGY_INFO_NODE_CXLCM = 24, + UNCORE_TOPOLOGY_INFO_NODE_CXLDP = 25, + UNCORE_TOPOLOGY_INFO_NODE_OOBMSM = 26 +} UNCORE_TOPOLOGY_INFO_NODE_INDEX_TYPE; + +/************************************************************** + * SIDEBAND INFO STRUCTS START +***************************************************************/ +typedef struct SIDEBAND_INFO_NODE_S SIDEBAND_INFO_NODE; +typedef SIDEBAND_INFO_NODE *SIDEBAND_INFO; + +struct SIDEBAND_INFO_NODE_S { + U32 tid; + U32 pid; + U64 tsc; +}; + +#define SIDEBAND_INFO_pid(x) ((x)->pid) +#define SIDEBAND_INFO_tid(x) ((x)->tid) +#define SIDEBAND_INFO_tsc(x) ((x)->tsc) + +typedef struct SIDEBAND_INFO_EXT_NODE_S SIDEBAND_INFO_EXT_NODE; +typedef SIDEBAND_INFO_EXT_NODE *SIDEBAND_INFO_EXT; + +struct SIDEBAND_INFO_EXT_NODE_S { + U32 tid; + U32 pid; + U64 tsc; + U32 osid; + U32 cpuid; + U16 old_thread_state; + U16 reason; + U32 old_tid; + U64 reserved1; +}; + +#define SIDEBAND_INFO_EXT_pid(x) ((x)->pid) +#define SIDEBAND_INFO_EXT_tid(x) ((x)->tid) +#define SIDEBAND_INFO_EXT_tsc(x) ((x)->tsc) +#define SIDEBAND_INFO_EXT_osid(x) ((x)->osid) +#define SIDEBAND_INFO_EXT_cpuid(x) ((x)->cpuid) +#define SIDEBAND_INFO_EXT_old_thread_state(x) ((x)->old_thread_state) +#define SIDEBAND_INFO_EXT_reason(x) ((x)->reason) +#define SIDEBAND_INFO_EXT_old_tid(x) ((x)->old_tid) + +/************************************************************** + * SIDEBAND INFO STRUCTS END +***************************************************************/ + +/************************************************************** + * SAMPLE DROP INFO STRUCTS START +***************************************************************/ +typedef struct SAMPLE_DROP_NODE_S SAMPLE_DROP_NODE; +typedef SAMPLE_DROP_NODE *SAMPLE_DROP; + +struct SAMPLE_DROP_NODE_S { + U32 os_id; + U32 cpu_id; + U32 sampled; + U32 dropped; +}; + +#define SAMPLE_DROP_os_id(x) ((x)->os_id) +#define SAMPLE_DROP_cpu_id(x) ((x)->cpu_id) +#define SAMPLE_DROP_sampled(x) ((x)->sampled) +#define SAMPLE_DROP_dropped(x) ((x)->dropped) + +#define MAX_SAMPLE_DROP_NODES 20 + +typedef struct SAMPLE_DROP_INFO_NODE_S SAMPLE_DROP_INFO_NODE; +typedef SAMPLE_DROP_INFO_NODE *SAMPLE_DROP_INFO; + +struct SAMPLE_DROP_INFO_NODE_S { + U32 size; + SAMPLE_DROP_NODE drop_info[MAX_SAMPLE_DROP_NODES]; +}; + +#define SAMPLE_DROP_INFO_size(x) ((x)->size) +#define SAMPLE_DROP_INFO_drop_info(x, index) ((x)->drop_info[index]) + +/************************************************************** + * SAMPLE DROP INFO STRUCTS END +***************************************************************/ + +#define IS_PEBS_SAMPLE_RECORD(sample_record) \ + ((SAMPLE_RECORD_pid_rec_index(sample_record) == (U32)-1) && \ + (SAMPLE_RECORD_tid(sample_record) == (U32)-1)) + +/* + * VMM vendor information + */ +#define KVM_SIGNATURE "KVMKVMKVM\0\0\0" +#define XEN_SIGNATURE "XenVMMXenVMM" +#define VMWARE_SIGNATURE "VMwareVMware" +#define HYPERV_SIGNATURE "Microsoft Hv" + +#define DRV_VMM_UNKNOWN 0 +#define DRV_VMM_MOBILEVISOR 1 +#define DRV_VMM_KVM 2 +#define DRV_VMM_XEN 3 +#define DRV_VMM_HYPERV 4 
+#define DRV_VMM_VMWARE 5 +#define DRV_VMM_ACRN 6 + +#define DRV_TYPE_UNKNOWN 0 +#define DRV_TYPE_PUBLIC 1 +#define DRV_TYPE_NDA 2 +#define DRV_TYPE_PRIVATE 3 + +/* + * @macro DRV_SETUP_INFO_NODE_S + * @brief + * This structure supports driver information such as NMI profiling mode. + */ + +typedef struct DRV_SETUP_INFO_NODE_S DRV_SETUP_INFO_NODE; +typedef DRV_SETUP_INFO_NODE *DRV_SETUP_INFO; + +struct DRV_SETUP_INFO_NODE_S { + union { + U64 modes; + struct { + U64 nmi_mode : 1; + U64 vmm_mode : 1; + U64 vmm_vendor : 8; + U64 vmm_guest_vm : 1; + U64 pebs_accessible : 1; + U64 cpu_hotplug_mode : 1; + U64 matrix_inaccessible : 1; + U64 page_table_isolation : 2; + U64 pebs_ignored_by_pti : 1; + U64 core_event_mux_unavailable : 1; + U64 profiling_version_unsupported : 1; + U64 tracepoints_available : 1; + U64 register_allowlist_detected : 1; + U64 drv_type : 3; + U64 non_arch_msr_blocked : 1; + U64 symbol_lookup_unavailable : 1; + U64 process_exit_hook_unavailable : 1; + U64 munmap_hook_unavailable : 1; + U64 sched_switch_hook_unavailable : 1; + U64 reserved1 : 35; + } s1; + } u1; + U32 vmm_version; + U32 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DRV_SETUP_INFO_modes(info) ((info)->u1.modes) +#define DRV_SETUP_INFO_nmi_mode(info) ((info)->u1.s1.nmi_mode) +#define DRV_SETUP_INFO_vmm_mode(info) ((info)->u1.s1.vmm_mode) +#define DRV_SETUP_INFO_vmm_vendor(info) ((info)->u1.s1.vmm_vendor) +#define DRV_SETUP_INFO_vmm_guest_vm(info) ((info)->u1.s1.vmm_guest_vm) +#define DRV_SETUP_INFO_pebs_accessible(info) ((info)->u1.s1.pebs_accessible) +#define DRV_SETUP_INFO_cpu_hotplug_mode(info) ((info)->u1.s1.cpu_hotplug_mode) +#define DRV_SETUP_INFO_matrix_inaccessible(info) \ + ((info)->u1.s1.matrix_inaccessible) +#define DRV_SETUP_INFO_page_table_isolation(info) \ + ((info)->u1.s1.page_table_isolation) +#define DRV_SETUP_INFO_pebs_ignored_by_pti(info) \ + ((info)->u1.s1.pebs_ignored_by_pti) +#define DRV_SETUP_INFO_core_event_mux_unavailable(info) \ + ((info)->u1.s1.core_event_mux_unavailable) +#define DRV_SETUP_INFO_profiling_version_unsupported(info) \ + ((info)->u1.s1.profiling_version_unsupported) +#define DRV_SETUP_INFO_tracepoints_available(info) \ + ((info)->u1.s1.tracepoints_available) +#define DRV_SETUP_INFO_register_allowlist_detected(info) \ + ((info)->u1.s1.register_allowlist_detected) +#define DRV_SETUP_INFO_drv_type(info) ((info)->u1.s1.drv_type) +#define DRV_SETUP_INFO_non_arch_msr_blocked(info) \ + ((info)->u1.s1.non_arch_msr_blocked) +#define DRV_SETUP_INFO_symbol_lookup_unavailable(info) \ + ((info)->u1.s1.symbol_lookup_unavailable) +#define DRV_SETUP_INFO_process_exit_hook_unavailable(info) \ + ((info)->u1.s1.process_exit_hook_unavailable) +#define DRV_SETUP_INFO_munmap_hook_unavailable(info) \ + ((info)->u1.s1.munmap_hook_unavailable) +#define DRV_SETUP_INFO_sched_switch_hook_unavailable(info) \ + ((info)->u1.s1.sched_switch_hook_unavailable) +#define DRV_SETUP_INFO_vmm_version(info) ((info)->vmm_version) + +#define DRV_SETUP_INFO_PTI_DISABLED 0 +#define DRV_SETUP_INFO_PTI_KPTI 1 +#define DRV_SETUP_INFO_PTI_KAISER 2 +#define DRV_SETUP_INFO_PTI_VA_SHADOW 3 +#define DRV_SETUP_INFO_PTI_UNKNOWN 4 + +/* + Type: task_info_t + Description: + Represents the equivalent of a Linux Thread. + Fields: + o id: A unique identifier. May be `NULL_TASK_ID`. + o name: Human-readable name for this task + o executable_name: Literal path to the binary elf that this task's + entry point is executing from. + o address_space_id: The unique ID for the address space this task is + running in. 
+ */ +struct task_info_node_s { + U64 id; + char name[32]; + U64 address_space_id; +}; + +/* + Type: REMOTE_SWITCH + Description: + Collection switch set on target +*/ +typedef struct REMOTE_SWITCH_NODE_S REMOTE_SWITCH_NODE; +typedef REMOTE_SWITCH_NODE *REMOTE_SWITCH; + +struct REMOTE_SWITCH_NODE_S { + U32 auto_mode : 1; + U32 adv_hotspot : 1; + U32 lbr_callstack : 2; + U32 full_pebs : 1; + U32 uncore_supported : 1; + U32 agent_mode : 2; + U32 sched_switch_enabled : 1; + U32 data_transfer_mode : 1; + U32 reserved1 : 22; + U32 reserved2; +}; + +#define REMOTE_SWITCH_auto_mode(x) ((x).auto_mode) +#define REMOTE_SWITCH_adv_hotspot(x) ((x).adv_hotspot) +#define REMOTE_SWITCH_lbr_callstack(x) ((x).lbr_callstack) +#define REMOTE_SWITCH_full_pebs(x) ((x).full_pebs) +#define REMOTE_SWITCH_uncore_supported(x) ((x).uncore_supported) +#define REMOTE_SWITCH_agent_mode(x) ((x).agent_mode) +#define REMOTE_SWITCH_sched_switch_enabled(x) ((x).sched_switch_enabled) +#define REMOTE_SWITCH_data_transfer_mode(x) ((x).data_transfer_mode) + +/* + Type: REMOTE_OS_INFO + Description: + Remote target OS system information +*/ +#define OSINFOLEN 256 +typedef struct REMOTE_OS_INFO_NODE_S REMOTE_OS_INFO_NODE; +typedef REMOTE_OS_INFO_NODE *REMOTE_OS_INFO; + +struct REMOTE_OS_INFO_NODE_S { + U32 os_family; + U32 reserved1; + S8 sysname[OSINFOLEN]; + S8 release[OSINFOLEN]; + S8 version[OSINFOLEN]; +}; + +#define REMOTE_OS_INFO_os_family(x) ((x).os_family) +#define REMOTE_OS_INFO_sysname(x) ((x).sysname) +#define REMOTE_OS_INFO_release(x) ((x).release) +#define REMOTE_OS_INFO_version(x) ((x).version) + +/* + Type: REMOTE_HARDWARE_INFO + Description: + Remote target hardware information +*/ +typedef struct REMOTE_HARDWARE_INFO_NODE_S REMOTE_HARDWARE_INFO_NODE; +typedef REMOTE_HARDWARE_INFO_NODE *REMOTE_HARDWARE_INFO; + +struct REMOTE_HARDWARE_INFO_NODE_S { + U32 num_cpus; + U32 family; + U32 model; + U32 stepping; + U64 tsc_freq; + U64 reserved2; + U64 reserved3; +}; + +#define REMOTE_HARDWARE_INFO_num_cpus(x) ((x).num_cpus) +#define REMOTE_HARDWARE_INFO_family(x) ((x).family) +#define REMOTE_HARDWARE_INFO_model(x) ((x).model) +#define REMOTE_HARDWARE_INFO_stepping(x) ((x).stepping) +#define REMOTE_HARDWARE_INFO_tsc_frequency(x) ((x).tsc_freq) + +/* + Type: SEP_AGENT_MODE + Description: + SEP mode on target agent +*/ +typedef enum { + NATIVE_AGENT = 0, + HOST_VM_AGENT, // Service OS in ACRN + GUEST_VM_AGENT // User OS in ACRN +} SEP_AGENT_MODE; + +/* + Type: DATA_TRANSFER_MODE + Description: + Data transfer mode from target agent to remote host +*/ +typedef enum { + IMMEDIATE_TRANSFER = 0, + DELAYED_TRANSFER // Send after collection is done +} DATA_TRANSFER_MODE; + +#define MAX_NUM_OS_ALLOWED 6 +#define TARGET_IP_NAMELEN 64 + +typedef struct TARGET_INFO_NODE_S TARGET_INFO_NODE; +typedef TARGET_INFO_NODE *TARGET_INFO; + +struct TARGET_INFO_NODE_S { + U32 num_of_agents; + U32 reserved; + U32 os_id[MAX_NUM_OS_ALLOWED]; + S8 ip_address[MAX_NUM_OS_ALLOWED][TARGET_IP_NAMELEN]; + REMOTE_OS_INFO_NODE os_info[MAX_NUM_OS_ALLOWED]; + REMOTE_HARDWARE_INFO_NODE hardware_info[MAX_NUM_OS_ALLOWED]; + REMOTE_SWITCH_NODE remote_switch[MAX_NUM_OS_ALLOWED]; +}; + +#define TARGET_INFO_num_of_agents(x) ((x)->num_of_agents) +#define TARGET_INFO_os_id(x, i) ((x)->os_id[i]) +#define TARGET_INFO_os_info(x, i) ((x)->os_info[i]) +#define TARGET_INFO_ip_address(x, i) ((x)->ip_address[i]) +#define TARGET_INFO_hardware_info(x, i) ((x)->hardware_info[i]) +#define TARGET_INFO_remote_switch(x, i) ((x)->remote_switch[i]) + +typedef struct 
CPU_MAP_TRACE_NODE_S CPU_MAP_TRACE_NODE;
+typedef CPU_MAP_TRACE_NODE *CPU_MAP_TRACE;
+
+struct CPU_MAP_TRACE_NODE_S {
+	U64 tsc;
+	U32 os_id;
+	U32 vcpu_id;
+	U32 pcpu_id;
+	U8 is_static : 1;
+	U8 initial : 1;
+	U8 reserved1 : 6;
+	U8 reserved2;
+	U16 reserved3;
+	U64 tsc_offset;
+};
+
+#define CPU_MAP_TRACE_tsc(x) ((x)->tsc)
+#define CPU_MAP_TRACE_os_id(x) ((x)->os_id)
+#define CPU_MAP_TRACE_vcpu_id(x) ((x)->vcpu_id)
+#define CPU_MAP_TRACE_pcpu_id(x) ((x)->pcpu_id)
+#define CPU_MAP_TRACE_is_static(x) ((x)->is_static)
+#define CPU_MAP_TRACE_initial(x) ((x)->initial)
+#define CPU_MAP_TRACE_tsc_offset(x) ((x)->tsc_offset)
+
+#define MAX_NUM_VCPU 64
+#define MAX_NUM_VM 16
+typedef struct CPU_MAP_TRACE_LIST_NODE_S CPU_MAP_TRACE_LIST_NODE;
+typedef CPU_MAP_TRACE_LIST_NODE *CPU_MAP_TRACE_LIST;
+
+struct CPU_MAP_TRACE_LIST_NODE_S {
+	U32 osid;
+	U8 num_entries;
+	U8 reserved1;
+	U16 reserved2;
+	CPU_MAP_TRACE_NODE entries[MAX_NUM_VCPU];
+};
+
+typedef struct VM_OSID_MAP_NODE_S VM_OSID_MAP_NODE;
+typedef VM_OSID_MAP_NODE *VM_OSID_MAP;
+struct VM_OSID_MAP_NODE_S {
+	U32 num_vms;
+	U32 reserved1;
+	U32 osid[MAX_NUM_VM];
+};
+
+typedef struct VM_SWITCH_TRACE_NODE_S VM_SWITCH_TRACE_NODE;
+typedef VM_SWITCH_TRACE_NODE *VM_SWITCH_TRACE;
+
+struct VM_SWITCH_TRACE_NODE_S {
+	U64 tsc;
+	U32 from_os_id;
+	U32 to_os_id;
+	U64 reason;
+	U64 reserved1;
+	U64 reserved2;
+};
+
+#define VM_SWITCH_TRACE_tsc(x) ((x)->tsc)
+#define VM_SWITCH_TRACE_from_os_id(x) ((x)->from_os_id)
+#define VM_SWITCH_TRACE_to_os_id(x) ((x)->to_os_id)
+#define VM_SWITCH_TRACE_reason(x) ((x)->reason)
+
+typedef struct EMON_BUFFER_DRIVER_HELPER_NODE_S EMON_BUFFER_DRIVER_HELPER_NODE;
+typedef EMON_BUFFER_DRIVER_HELPER_NODE *EMON_BUFFER_DRIVER_HELPER;
+
+struct EMON_BUFFER_DRIVER_HELPER_NODE_S {
+	U32 num_entries_per_package;
+	U32 num_cpu;
+	U32 power_num_package_events;
+	U32 power_num_module_events;
+	U32 power_num_thread_events;
+	U32 power_device_offset_in_package;
+	U32 core_num_events;
+	U32 core_index_to_thread_offset_map[];
+};
+
+#define EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(x) \
+	((x)->num_entries_per_package)
+#define EMON_BUFFER_DRIVER_HELPER_num_cpu(x) ((x)->num_cpu)
+#define EMON_BUFFER_DRIVER_HELPER_power_num_package_events(x) \
+	((x)->power_num_package_events)
+#define EMON_BUFFER_DRIVER_HELPER_power_num_module_events(x) \
+	((x)->power_num_module_events)
+#define EMON_BUFFER_DRIVER_HELPER_power_num_thread_events(x) \
+	((x)->power_num_thread_events)
+#define EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package(x) \
+	((x)->power_device_offset_in_package)
+#define EMON_BUFFER_DRIVER_HELPER_core_num_events(x) ((x)->core_num_events)
+#define EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(x) \
+	((x)->core_index_to_thread_offset_map)
+
+// The EMON counts buffer follows this hardware topology: package -> device -> unit/thread -> event
+
+// Calculate the CORE thread offset
+// Used during initialization to calculate the cpu_index_to_thread_offset_map in emon_Create_Emon_Buffer_Descriptor()
+// EMON_BUFFER_CORE_THREAD_OFFSET =
+//     package_id * num_entries_per_package +   //package offset
+//     device_offset_in_package +               //device base offset
+//     (module_enum_supported * module_id * threads_per_module + core_id * threads_per_core + thread_id) * num_core_events //thread offset
+#define EMON_BUFFER_CORE_THREAD_OFFSET(package_id, \
+	num_entries_per_package, device_offset_in_package, \
+	module_enum_supported, module_id, \
+	threads_per_module, core_id, \
+	threads_per_core, thread_id, num_core_events) \
+	(package_id * 
num_entries_per_package + device_offset_in_package + \
+	(module_enum_supported * module_id * threads_per_module + core_id * threads_per_core + thread_id) \
+	* num_core_events)
+
+
+// Take cpu_index and cpu_index_to_thread_offset_map to get thread_offset, and calculate the CORE event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_CORE_EVENT_OFFSET =
+//     cpu_index_to_thread_offset +   //thread offset
+//     core_event_id                  //event_offset
+#define EMON_BUFFER_CORE_EVENT_OFFSET(cpu_index_to_thread_offset, \
+	core_event_id) \
+	(cpu_index_to_thread_offset + core_event_id)
+
+// Calculate the device level to UNCORE event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE =
+//     device_offset_in_package +          //device_offset_in_package
+//     device_unit_id * num_unit_events +  //unit_offset
+//     device_event_id                     //event_offset
+#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE( \
+	device_offset_in_package, device_unit_id, num_unit_events, \
+	device_event_id) \
+	(device_offset_in_package + device_unit_id * num_unit_events + \
+	 device_event_id)
+
+// Take 'device level to UNCORE event offset' and package_id, calculate the UNCORE package level event offset
+// Used by the emon_output.c printing function
+// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET =
+//     package_id * num_entries_per_package +  //package_offset
+//     uncore_offset_in_package;               //offset_in_package
+#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( \
+	package_id, num_entries_per_package, uncore_offset_in_package) \
+	(package_id * num_entries_per_package + uncore_offset_in_package)
+
+// Take 'device level to UNCORE event offset', calculate the UNCORE system level event offset
+// Used by the emon_output.c printing function
+// EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET =
+//     device_offset_in_system +             //device_offset_in_system
+//     device_unit_id * num_system_events +  //device_unit_offset
+//     device_event_id                       //event_offset
+#define EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET(device_offset_in_system, \
+	device_unit_id, \
+	num_system_events, \
+	device_event_id) \
+	(device_offset_in_system + device_unit_id * num_system_events + \
+	 device_event_id)
+
+// Calculate the package level power event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET =
+//     package_id * num_entries_per_package +  //package offset
+//     device_offset_in_package +              //device offset
+//     package_event_offset                    //power package event offset
+#define EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( \
+	package_id, num_entries_per_package, device_offset_in_package, \
+	device_event_offset) \
+	(package_id * num_entries_per_package + device_offset_in_package + \
+	 device_event_offset)
+
+// Calculate the module level power event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET =
+//     package_id * num_entries_per_package +  //package offset
+//     device_offset_in_package +              //device offset
+//     num_package_events +                    //package event offset
+//     module_id * num_module_events +         //module offset
+//     module_event_offset                     //power module event offset
+#define EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( \
+	package_id, num_entries_per_package, device_offset_in_package, \
+	num_package_events, module_id, num_module_events, device_event_offset) \
+	(package_id * num_entries_per_package + device_offset_in_package + \
+	 num_package_events + module_id * num_module_events + \
+	 device_event_offset)
+
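To make the offset arithmetic above concrete, here is a worked example under an assumed topology (all numbers hypothetical: 2 packages, no module enumeration, 4 cores per package, 2 threads per core, 10 core events, core device at offset 0 in each package's slice, so num_entries_per_package = 8 threads * 10 events = 80):

/* Worked example with hypothetical topology numbers (not from this patch). */
static void
emon_offset_example(void)
{
	U32 thread_offset;
	U32 event_offset;

	/* Package 1, core 2, thread 1:
	 * 1 * 80 + 0 + (0 * 0 * 8 + 2 * 2 + 1) * 10 = 130 */
	thread_offset = EMON_BUFFER_CORE_THREAD_OFFSET(1, 80, 0, 0, 0, 8, 2, 2, 1, 10);

	/* Event 3 of that thread: 130 + 3 = 133 */
	event_offset = EMON_BUFFER_CORE_EVENT_OFFSET(thread_offset, 3);

	(void)event_offset;
}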
+// Calculate the thread level power event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET =
+//     package_id * num_entries_per_package +         //package offset
+//     device_offset_in_package +                     //device offset
+//     num_package_events +                           //package event offset
+//     num_modules_per_package * num_module_events +  //module offset
+//     (module_enum_supported * module_id * threads_per_module + core_id * threads_per_core + thread_id) * num_unit_events + //thread offset
+//     thread_event_offset                            //power thread event offset
+#define EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( \
+	package_id, num_entries_per_package, device_offset_in_package, \
+	num_package_events, num_modules_per_package, num_module_events, \
+	module_enum_supported, module_id, threads_per_module, core_id, \
+	threads_per_core, thread_id, num_unit_events, device_event_offset) \
+	(package_id * num_entries_per_package + device_offset_in_package + \
+	 num_package_events + num_modules_per_package * \
+	 num_module_events + (module_enum_supported * module_id * \
+	 threads_per_module + core_id * threads_per_core + thread_id) \
+	 * num_unit_events + device_event_offset)
+
+// Take cpu_index and cpu_index_to_thread_offset_map to get thread_offset, move to the end of core_events
+// to reach pmu_metrics
+// EMON_BUFFER_CORE_PERF_METRIC_OFFSET =
+//     cpu_index_to_thread_offset +  //thread offset
+//     num_core_events +             //end of core events
+//     core_pmu_metrics_id           //index of the metric of interest
+#define EMON_BUFFER_CORE_PERF_METRIC_OFFSET( \
+	cpu_index_to_thread_offset, num_core_events, core_pmu_metrics_id) \
+	(cpu_index_to_thread_offset + num_core_events + core_pmu_metrics_id)
+
+/*
+ ************************************
+ *   DRIVER LOG BUFFER DECLARATIONS  *
+ ************************************
+ */
+
+#define DRV_MAX_NB_LOG_CATEGORIES 256 // Must be a multiple of 8
+#define DRV_NB_LOG_CATEGORIES 14
+#define DRV_LOG_CATEGORY_LOAD 0
+#define DRV_LOG_CATEGORY_INIT 1
+#define DRV_LOG_CATEGORY_DETECTION 2
+#define DRV_LOG_CATEGORY_ERROR 3
+#define DRV_LOG_CATEGORY_STATE_CHANGE 4
+#define DRV_LOG_CATEGORY_MARK 5
+#define DRV_LOG_CATEGORY_DEBUG 6
+#define DRV_LOG_CATEGORY_FLOW 7
+#define DRV_LOG_CATEGORY_ALLOC 8
+#define DRV_LOG_CATEGORY_INTERRUPT 9
+#define DRV_LOG_CATEGORY_TRACE 10
+#define DRV_LOG_CATEGORY_REGISTER 11
+#define DRV_LOG_CATEGORY_NOTIFICATION 12
+#define DRV_LOG_CATEGORY_WARNING 13
+
+#define LOG_VERBOSITY_UNSET 0xFF
+#define LOG_VERBOSITY_DEFAULT 0xFE
+#define LOG_VERBOSITY_NONE 0
+
+#define LOG_CHANNEL_MEMLOG 0x1
+#define LOG_CHANNEL_AUXMEMLOG 0x2
+#define LOG_CHANNEL_PRINTK 0x4
+#define LOG_CHANNEL_TRACEK 0x8
+#define LOG_CHANNEL_MOSTWHERE \
+	(LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK)
+#define LOG_CHANNEL_EVERYWHERE \
+	(LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK | \
+	 LOG_CHANNEL_TRACEK)
+#define LOG_CHANNEL_MASK LOG_CATEGORY_VERBOSITY_EVERYWHERE
+
+#define LOG_CONTEXT_REGULAR 0x10
+#define LOG_CONTEXT_INTERRUPT 0x20
+#define LOG_CONTEXT_NOTIFICATION 0x40
+#define LOG_CONTEXT_ALL \
+	(LOG_CONTEXT_REGULAR | LOG_CONTEXT_INTERRUPT | LOG_CONTEXT_NOTIFICATION)
+#define LOG_CONTEXT_MASK LOG_CONTEXT_ALL
+#define LOG_CONTEXT_SHIFT 4
+
+#define DRV_LOG_NOTHING 0
+#define DRV_LOG_FLOW_IN 1
+#define DRV_LOG_FLOW_OUT 2
+
+/*
+ * @macro DRV_LOG_ENTRY_NODE_S
+ * @brief
+ *     This structure is used to store a log message from the driver.
+ */
+
+#define DRV_LOG_MESSAGE_LENGTH 64
+#define DRV_LOG_FUNCTION_NAME_LENGTH 32
+
+typedef struct DRV_LOG_ENTRY_NODE_S DRV_LOG_ENTRY_NODE;
+typedef DRV_LOG_ENTRY_NODE *DRV_LOG_ENTRY;
+struct DRV_LOG_ENTRY_NODE_S {
+	char function_name[DRV_LOG_FUNCTION_NAME_LENGTH];
+	char message[DRV_LOG_MESSAGE_LENGTH];
+
+	U16 temporal_tag;
+	U16 integrity_tag;
+
+	U8 category;
+	U8 secondary_info; // Secondary attribute:
+			   //   former driver state for STATE category
+			   //   'ENTER' or 'LEAVE' for FLOW and TRACE categories
+	U16 processor_id; // NB: not guaranteed to be accurate (due to preemption / core migration)
+
+	U64 tsc;
+
+	U16 nb_active_interrupts; // never 100% accurate, merely indicative
+	U8 active_drv_operation; // only 100% accurate for IOCTL-called functions
+	U8 driver_state;
+
+	U16 line_number; // as per the __LINE__ macro
+
+	U16 nb_active_notifications;
+
+	U64 reserved; // need padding to reach 128 bytes
+}; // this structure should be exactly 128 bytes long
+
+#define DRV_LOG_ENTRY_temporal_tag(ent) ((ent)->temporal_tag)
+#define DRV_LOG_ENTRY_integrity_tag(ent) ((ent)->integrity_tag)
+#define DRV_LOG_ENTRY_category(ent) ((ent)->category)
+#define DRV_LOG_ENTRY_secondary_info(ent) ((ent)->secondary_info)
+#define DRV_LOG_ENTRY_processor_id(ent) ((ent)->processor_id)
+#define DRV_LOG_ENTRY_tsc(ent) ((ent)->tsc)
+#define DRV_LOG_ENTRY_driver_state(ent) ((ent)->driver_state)
+#define DRV_LOG_ENTRY_active_drv_operation(ent) ((ent)->active_drv_operation)
+#define DRV_LOG_ENTRY_nb_active_interrupts(ent) ((ent)->nb_active_interrupts)
+#define DRV_LOG_ENTRY_nb_active_notifications(ent) \
+	((ent)->nb_active_notifications)
+#define DRV_LOG_ENTRY_line_number(ent) ((ent)->line_number)
+#define DRV_LOG_ENTRY_message(ent) ((ent)->message)
+#define DRV_LOG_ENTRY_function_name(ent) ((ent)->function_name)
+
+/*
+ * @macro DRV_LOG_BUFFER_NODE_S
+ * @brief
+ *     Circular buffer structure storing the latest DRV_LOG_MAX_NB_ENTRIES driver messages
+ */
+
+#define DRV_LOG_SIGNATURE_SIZE 8 // Must be a multiple of 8
+#define DRV_LOG_SIGNATURE_0 'S'
+#define DRV_LOG_SIGNATURE_1 'e'
+#define DRV_LOG_SIGNATURE_2 'P'
+#define DRV_LOG_SIGNATURE_3 'd'
+#define DRV_LOG_SIGNATURE_4 'R'
+#define DRV_LOG_SIGNATURE_5 'v'
+#define DRV_LOG_SIGNATURE_6 '5'
+#define DRV_LOG_SIGNATURE_7 '\0'
+// The signature is "SePdRv5"; not declared as a string on purpose to avoid false positives when trying to identify the log buffer in a crash dump
+
+#define DRV_LOG_VERSION 1
+#define DRV_LOG_FILLER_BYTE 1
+
+#define DRV_LOG_DRIVER_VERSION_SIZE 64 // Must be a multiple of 8
+// 2MB buffer [*HAS TO BE* a power of 2!] [8192 entries = 1 MB]
+#define DRV_LOG_MAX_NB_PRI_ENTRIES (8192 * 2)
+// 1MB buffer [*HAS TO BE* a power of 2!]
+#define DRV_LOG_MAX_NB_AUX_ENTRIES (8192)
+#define DRV_LOG_MAX_NB_ENTRIES \
+	(DRV_LOG_MAX_NB_PRI_ENTRIES + DRV_LOG_MAX_NB_AUX_ENTRIES)
+
+
+typedef struct DRV_LOG_BUFFER_NODE_S DRV_LOG_BUFFER_NODE;
+typedef DRV_LOG_BUFFER_NODE *DRV_LOG_BUFFER;
+struct DRV_LOG_BUFFER_NODE_S {
+	// some signature to be able to locate the log even without -g; ASCII would help
+	// should we change the signature for each log's version instead of keeping it in a
+	// dedicated field?
+	char header_signature[DRV_LOG_SIGNATURE_SIZE];
+
+	U32 log_size; // filled with sizeof(this structure) at init.
+	U32 max_nb_pri_entries; // filled with the driver's "DRV_LOG_MAX_NB_PRI_ENTRIES" at init.
+
+	U32 max_nb_aux_entries; // filled with the driver's "DRV_LOG_MAX_NB_AUX_ENTRIES" at init.
+ U32 reserved1; + + U64 init_time; // primary log disambiguator + + U32 disambiguator; // used to differentiate the driver's version of the log when a full memory dump can contain some from userland + U32 log_version; // 0 at first, increase when format changes? + + U32 pri_entry_index; // should be incremented *atomically* as a means to (re)allocate the next primary log entry. + U32 aux_entry_index; // should be incremented *atomically* as a means to (re)allocate the next auxiliary log entry. + + char driver_version[DRV_LOG_DRIVER_VERSION_SIZE]; + + U8 driver_state; + U8 active_drv_operation; + U16 reserved2; + U32 nb_drv_operations; + + U32 nb_interrupts; + U16 nb_active_interrupts; + U16 nb_active_notifications; + + U32 nb_notifications; + U32 nb_driver_state_transitions; + + U8 contiguous_physical_memory; + U8 reserved3; + U16 reserved4; + U32 reserved5; + + U8 verbosities[DRV_MAX_NB_LOG_CATEGORIES]; + + DRV_LOG_ENTRY_NODE entries[DRV_LOG_MAX_NB_ENTRIES]; + + char footer_signature[DRV_LOG_SIGNATURE_SIZE]; +}; + +#define DRV_LOG_BUFFER_pri_entry_index(log) ((log)->pri_entry_index) +#define DRV_LOG_BUFFER_aux_entry_index(log) ((log)->aux_entry_index) +#define DRV_LOG_BUFFER_header_signature(log) ((log)->header_signature) +#define DRV_LOG_BUFFER_footer_signature(log) ((log)->footer_signature) +#define DRV_LOG_BUFFER_log_size(log) ((log)->log_size) +#define DRV_LOG_BUFFER_driver_version(log) ((log)->driver_version) +#define DRV_LOG_BUFFER_driver_state(log) ((log)->driver_state) +#define DRV_LOG_BUFFER_active_drv_operation(log) ((log)->active_drv_operation) +#define DRV_LOG_BUFFER_nb_interrupts(log) ((log)->nb_interrupts) +#define DRV_LOG_BUFFER_nb_active_interrupts(log) ((log)->nb_active_interrupts) +#define DRV_LOG_BUFFER_nb_notifications(log) ((log)->nb_notifications) +#define DRV_LOG_BUFFER_nb_active_notifications(log) \ + ((log)->nb_active_notifications) +#define DRV_LOG_BUFFER_nb_driver_state_transitions(log) \ + ((log)->nb_driver_state_transitions) +#define DRV_LOG_BUFFER_nb_drv_operations(log) ((log)->nb_drv_operations) +#define DRV_LOG_BUFFER_max_nb_pri_entries(log) ((log)->max_nb_pri_entries) +#define DRV_LOG_BUFFER_max_nb_aux_entries(log) ((log)->max_nb_aux_entries) +#define DRV_LOG_BUFFER_init_time(log) ((log)->init_time) +#define DRV_LOG_BUFFER_disambiguator(log) ((log)->disambiguator) +#define DRV_LOG_BUFFER_log_version(log) ((log)->log_version) +#define DRV_LOG_BUFFER_entries(log) ((log)->entries) +#define DRV_LOG_BUFFER_contiguous_physical_memory(log) \ + ((log)->contiguous_physical_memory) +#define DRV_LOG_BUFFER_verbosities(log) ((log)->verbosities) + +#define DRV_LOG_CONTROL_MAX_DATA_SIZE \ + DRV_MAX_NB_LOG_CATEGORIES // Must be a multiple of 8 + +typedef struct DRV_LOG_CONTROL_NODE_S DRV_LOG_CONTROL_NODE; +typedef DRV_LOG_CONTROL_NODE *DRV_LOG_CONTROL; + +struct DRV_LOG_CONTROL_NODE_S { + U32 command; + U32 reserved1; + // only DRV_NB_LOG_CATEGORIES elements will be used, but let's plan for backwards compatibility + // if LOG_CATEGORY_UNSET, then READ instead of WRITE + U8 data[DRV_LOG_CONTROL_MAX_DATA_SIZE]; + + U64 reserved2; // may later want to add support for resizing the buffer, or only log 100 first interrupts, etc. + U64 reserved3; + U64 reserved4; + U64 reserved5; +}; + +#define DRV_LOG_CONTROL_command(x) ((x)->command) +#define DRV_LOG_CONTROL_verbosities(x) ((x)->data) +// Userland 'MARK' messages use the 'data' field too. 
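+// As an illustrative sketch (not part of this patch), a userland caller
+// could stage a MARK message in the control node before submitting it to
+// the driver through the corresponding log-control IOCTL (presumably
+// declared in lwpmudrv_ioctl.h):
+//
+//	DRV_LOG_CONTROL_NODE ctl;
+//	memset(&ctl, 0, sizeof(ctl));
+//	DRV_LOG_CONTROL_command(&ctl) = DRV_LOG_CONTROL_COMMAND_MARK;
+//	strncpy((char *)DRV_LOG_CONTROL_message(&ctl), "phase 1 start",
+//		DRV_LOG_CONTROL_MAX_DATA_SIZE - 1);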
+#define DRV_LOG_CONTROL_message(x) ((x)->data)
+#define DRV_LOG_CONTROL_log_size(x) (*((U32 *)((x)->data)))
+
+#define DRV_LOG_CONTROL_COMMAND_NONE 0
+#define DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY 1
+#define DRV_LOG_CONTROL_COMMAND_MARK 2
+#define DRV_LOG_CONTROL_COMMAND_QUERY_SIZE 3
+#define DRV_LOG_CONTROL_COMMAND_BENCHMARK 4
+
+typedef struct DRV_IOCTL_STATUS_NODE_S DRV_IOCTL_STATUS_NODE;
+typedef DRV_IOCTL_STATUS_NODE *DRV_IOCTL_STATUS;
+
+struct DRV_IOCTL_STATUS_NODE_S {
+	U32 drv_status;
+	U8 reg_prog_type;
+	U8 reserved1;
+	U16 reserved2;
+	union {
+		struct {
+			U64 bus : 8;
+			U64 dev : 5;
+			U64 func : 3;
+			U64 offset : 16;
+			U64 rsvd : 32;
+		} s;
+		U64 reg_key1;
+	} u;
+	U32 reg_key2;
+	U64 counter_mask;
+	U32 reserved4;
+	U64 reserved5;
+	U64 reserved6;
+};
+
+#define DRV_IOCTL_STATUS_drv_status(x) ((x)->drv_status)
+#define DRV_IOCTL_STATUS_reg_prog_type(x) ((x)->reg_prog_type)
+#define DRV_IOCTL_STATUS_bus(x) ((x)->u.s.bus)
+#define DRV_IOCTL_STATUS_dev(x) ((x)->u.s.dev)
+#define DRV_IOCTL_STATUS_func(x) ((x)->u.s.func)
+#define DRV_IOCTL_STATUS_offset(x) ((x)->u.s.offset)
+#define DRV_IOCTL_STATUS_reg_key1(x) ((x)->u.reg_key1)
+#define DRV_IOCTL_STATUS_reg_key2(x) ((x)->reg_key2)
+#define DRV_IOCTL_STATUS_counter_mask(x) ((x)->counter_mask)
+
+/* common perf structures for the platform picker and sampling components */
+#define MAXNAMELEN 256
+
+typedef struct PERF_DEVICE_CONFIG_INFO_NODE_S PERF_DEVICE_CONFIG_INFO_NODE;
+typedef PERF_DEVICE_CONFIG_INFO_NODE *PERF_DEVICE_CONFIG_INFO;
+
+struct PERF_DEVICE_CONFIG_INFO_NODE_S {
+	S8 name[MAXNAMELEN]; // unit name
+	U32 device_type; // device which the unit belongs to
+	U32 unit_scope; // maps to socket level / thread level / system level { enum EVENT_SCOPE_TYPES }
+	U32 unc_unit_type; // freerun / programmable counters
+	U32 type_config_value; // type value in /sys/bus/event_source/devices/*/type
+	U32 mux_value; // Multiplex value
+	U32 unc_unit_id; // Index to identify different units
+	S8 cpu_mask[1024]; // cpumask value in /sys/bus/event_source/devices/*/cpumask
+		// dependent on unit_scope: {socket (entry for each socket) / thread level (no entry) / system level (one entry)}
+		// We don't expect cpu_mask to overflow the specified size; in the event of an overflow it spills into the reserved fields
+};
+
+#define PERF_DEVICE_CONFIG_INFO_name(x) ((x)->name)
+#define PERF_DEVICE_CONFIG_INFO_device_type(x) ((x)->device_type)
+#define PERF_DEVICE_CONFIG_INFO_unit_scope(x) ((x)->unit_scope)
+#define PERF_DEVICE_CONFIG_INFO_unc_unit_type(x) ((x)->unc_unit_type)
+#define PERF_DEVICE_CONFIG_INFO_type_config_value(x) ((x)->type_config_value)
+#define PERF_DEVICE_CONFIG_INFO_mux_value(x) ((x)->mux_value)
+#define PERF_DEVICE_CONFIG_INFO_cpu_mask(x) ((x)->cpu_mask)
+#define PERF_DEVICE_CONFIG_INFO_unc_unit_id(x) ((x)->unc_unit_id)
+
+typedef struct PERF_SYS_DEVICES_NODE_S PERF_SYS_DEVICES_NODE;
+typedef PERF_SYS_DEVICES_NODE *PERF_SYS_DEVICES;
+struct PERF_SYS_DEVICES_NODE_S {
+	U32 num_sys_devices;
+	U32 size_of_alloc;
+	PERF_DEVICE_CONFIG_INFO_NODE device_list[];
+};
+
+#define PERF_SYS_DEVICES_num_sys_devices(x) ((x)->num_sys_devices)
+#define PERF_SYS_DEVICES_size_of_alloc(x) ((x)->size_of_alloc)
+#define PERF_SYS_DEVICES_device_list(x) ((x)->device_list)
+#define PERF_SYS_DEVICES_device_config(x, i) ((x)->device_list[i])
+
+typedef enum {
+	IPT_BUFFER_CAPTURE = 0,
+	IPT_BUFFER_DROP,
+} IPT_INFO_DESCRIPTOR;
+
+typedef struct DRV_IPT_DROP_INFO_NODE_S DRV_IPT_DROP_INFO_NODE;
+typedef DRV_IPT_DROP_INFO_NODE *DRV_IPT_DROP_INFO;
+struct DRV_IPT_DROP_INFO_NODE_S {
+	U16 length;
+	U16 descriptor_id;
+	U32 drop_size;
+	U64 offset;
+	U64 reserved2;
+	U64 reserved3;
+};
+
+#define DRV_IPT_DROP_INFO_length(x) ((x).length)
+#define DRV_IPT_DROP_INFO_descriptor_id(x) ((x).descriptor_id)
+#define DRV_IPT_DROP_INFO_drop_size(x) ((x).drop_size)
+#define DRV_IPT_DROP_INFO_offset(x) ((x).offset)
+
+typedef struct DRV_MSR_OP_NODE_S DRV_MSR_OP_NODE;
+typedef DRV_MSR_OP_NODE *DRV_MSR_OP;
+
+struct DRV_MSR_OP_NODE_S {
+	U32 reg_id;
+	S32 status;
+	U64 reg_read_val;
+	U64 reg_write_val;
+	U64 reserved;
+};
+
+#define DRV_MSR_OP_reg_id(x) ((x)->reg_id)
+#define DRV_MSR_OP_status(x) ((x)->status)
+#define DRV_MSR_OP_reg_read_val(x) ((x)->reg_read_val)
+#define DRV_MSR_OP_reg_write_val(x) ((x)->reg_write_val)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_types.h b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h
new file mode 100644
index 00000000000000..6f28de389e67df
--- /dev/null
+++ b/drivers/platform/x86/sepdk/include/lwpmudrv_types.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2007 Intel Corporation
+ *
+ * This software and the related documents are Intel copyrighted materials, and your use of them
+ * is governed by the express license under which they were provided to you ("License"). Unless
+ * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose
+ * or transmit this software or the related documents without Intel's prior written permission.
+ *
+ * This software and the related documents are provided as is, with no express or implied
+ * warranties, other than those that are expressly stated in the License.
+*/
+
+#ifndef _LWPMUDRV_TYPES_H_
+#define _LWPMUDRV_TYPES_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(BUILD_DRV_ESX)
+//SR: added size_t def
+typedef unsigned long size_t;
+typedef unsigned long ssize_t;
+#endif
+
+typedef unsigned char U8;
+typedef char S8;
+typedef short S16;
+typedef unsigned short U16;
+typedef unsigned int U32;
+typedef int S32;
+#if defined(DRV_OS_WINDOWS)
+typedef unsigned __int64 U64;
+typedef __int64 S64;
+#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \
+	defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || \
+	defined(DRV_OS_FREEBSD)
+typedef unsigned long long U64;
+typedef long long S64;
+typedef unsigned long ULONG;
+typedef void VOID;
+typedef void * LPVOID;
+
+#if defined(BUILD_DRV_ESX)
+//SR: added UWORD64 def
+typedef union _UWORD64 {
+	struct {
+		U32 low;
+		S32 hi;
+	} c;
+	S64 qword;
+} UWORD64, *PWORD64;
+#endif
+#else
+#error "Undefined OS"
+#endif
+
+#if defined(DRV_IA32)
+typedef S32 SIOP;
+typedef U32 UIOP;
+#elif defined(DRV_EM64T)
+typedef S64 SIOP;
+typedef U64 UIOP;
+#else
+#error "Unexpected architecture seen"
+#endif
+
+typedef U32 DRV_BOOL;
+typedef void *PVOID;
+
+#if !defined(__DEFINE_STCHAR__)
+#define __DEFINE_STCHAR__
+#if defined(UNICODE)
+typedef wchar_t STCHAR;
+#define VTSA_T(x) L##x
+#else
+typedef char STCHAR;
+#define VTSA_T(x) x
+#endif
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+#include <wchar.h>
+typedef wchar_t DRV_STCHAR;
+typedef wchar_t VTSA_CHAR;
+#else
+typedef char DRV_STCHAR;
+#endif
+
+//
+// Handy Defines
+//
+typedef U32 DRV_STATUS;
+
+#define MAX_STRING_LENGTH 1024
+#define MAXNAMELEN 256
+#define MAX_PATH_BUFFER_LENGTH 1100
+#define MAX_PATH_USER_LENGTH 1024
+
+#if defined(DRV_OS_WINDOWS)
+#define WIN_MAX_RELATIVE_PATH_LENGTH 200
+#define WIN_VOLUME_LABEL_INDICATOR L":\\"
+#define WIN_PATH_LENGTH_EXTEND_PREFIX L"\\\\?\\"
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+#define UNLINK _unlink
+#define RENAME rename
+#define WCSDUP _wcsdup
+#endif
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || \
+	defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define UNLINK unlink
+#define RENAME rename
+#endif
+
+#if defined(DRV_OS_SOLARIS) && !defined(_KERNEL)
+//wcsdup is missing on Solaris
+#include <stdlib.h>
+#include <wchar.h>
+
+static inline wchar_t *
+solaris_wcsdup(const wchar_t *wc)
+{
+	wchar_t *tmp = (wchar_t *)malloc((wcslen(wc) + 1) * sizeof(wchar_t));
+	wcscpy(tmp, wc);
+	return tmp;
+}
+#define WCSDUP solaris_wcsdup
+#endif
+
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC)
+#define WCSDUP wcsdup
+#endif
+
+#if !defined(_WCHAR_T_DEFINED)
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || defined(DRV_OS_SOLARIS)
+#if !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+#endif
+#endif
+
+#if (defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID)) && !defined(__KERNEL__)
+#include <wchar.h>
+typedef wchar_t VTSA_CHAR;
+#endif
+
+#if (defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) || \
+	defined(DRV_OS_SOLARIS)) && \
+	!defined(_KERNEL)
+#include <wchar.h>
+typedef wchar_t VTSA_CHAR;
+#endif
+
+#define TRUE 1
+#define FALSE 0
+
+#define ALIGN_4(x) (((x) + 3) & ~3)
+#define ALIGN_8(x) (((x) + 7) & ~7)
+#define ALIGN_16(x) (((x) + 15) & ~15)
+#define ALIGN_32(x) (((x) + 31) & ~31)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/sepdk/include/lwpmudrv_version.h b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h
new file mode 100644
index 00000000000000..2cddab7c20300c
--- /dev/null
+++ b/drivers/platform/x86/sepdk/include/lwpmudrv_version.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * This software and the related documents are Intel copyrighted materials, and your use of them
+ * is governed by the express license under which they were provided to you ("License"). Unless
+ * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose
+ * or transmit this software or the related documents without Intel's prior written permission.
+ *
+ * This software and the related documents are provided as is, with no express or implied
+ * warranties, other than those that are expressly stated in the License.
+*/ + +/* + * File : lwpmudrv_version.h + */ + +#ifndef _LWPMUDRV_VERSION_H_ +#define _LWPMUDRV_VERSION_H_ + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) +#define _STRINGIFY_W(x) L#x +#define STRINGIFY_W(x) _STRINGIFY_W(x) + +#define SEP_MAJOR_VERSION 5 +#define SEP_MINOR_VERSION 37 +// API version is independent of major/minor and tracks driver version +#define SEP_API_VERSION 12 + +#define SEP_PREV_MAJOR_VERSION 5 +#define EMON_PREV_MAJOR_VERSION 11 + +#if defined(BUILD_SCOPE_RELEASE) +#define SEP_RELEASE_STRING "" +#else +#define SEP_RELEASE_STRING "Beta" +#endif + +#define EMON_MAJOR_VERSION 11 +#define EMON_MINOR_VERSION SEP_MINOR_VERSION +#define EMON_PRODUCT_RELEASE_STRING SEP_RELEASE_STRING + +#if defined(SEP_ENABLE_PRIVATE_CPUS) +#define PRODUCT_TYPE "private" +#define SEP_NAME "sepint" +#define SEP_NAME_W L"sepint" +#else +#if defined(SEP_ENABLE_NDA_CPUS) +#define PRODUCT_TYPE "NDA" +#else +#define PRODUCT_TYPE "public" +#endif +#define SEP_NAME "sep" +#define SEP_NAME_W L"sep" +#endif + +#if !defined(PRODUCT_BUILDER) +#define PRODUCT_BUILDER unknown +#endif + +#define TB_FILE_EXT ".tb7" +#define TB_FILE_EXT_W L".tb7" + +#define IPT_FILE_EXT ".ipt" +#define IPT_FILE_EXT_W L".ipt" + +#define IPT_INFO_FILE_EXT ".pif" +#define IPT_INFO_FILE_EXT_W L".pif" + +#define SEP_PRODUCT_NAME "Sampling Enabling Product" +#define EMON_PRODUCT_NAME "EMON" + +#define PRODUCT_VERSION_DATE __DATE__ " at " __TIME__ + +#define SEP_PRODUCT_COPYRIGHT \ + "Copyright(C) 2007-2020 Intel Corporation. All rights reserved." +#define EMON_PRODUCT_COPYRIGHT \ + "Copyright(C) 1993-2020 Intel Corporation. All rights reserved." + +#define PRODUCT_DISCLAIMER \ + "Warning: This computer program is protected under U.S. and international\ncopyright laws, and may only be used or copied in accordance with the terms\nof the license agreement. Except as permitted by such license, no part\nof this computer program may be reproduced, stored in a retrieval system,\nor transmitted in any form or by any means without the express written consent\nof Intel Corporation." +#define PRODUCT_VERSION \ + STRINGIFY(SEP_MAJOR_VERSION) "." STRINGIFY(SEP_MINOR_VERSION) + +#define SEP_MSG_PREFIX \ + SEP_NAME STRINGIFY(SEP_MAJOR_VERSION) "_" STRINGIFY( \ + SEP_MINOR_VERSION) ":" +#define SEP_VERSION_STR \ + STRINGIFY(SEP_MAJOR_VERSION) "." 
STRINGIFY(SEP_MINOR_VERSION) + +#if defined(DRV_OS_WINDOWS) + +#define SEP_DRIVER_NAME SEP_NAME "drv" STRINGIFY(SEP_MAJOR_VERSION) +#define SEP_DRIVER_NAME_W SEP_NAME_W L"drv" STRINGIFY_W(SEP_MAJOR_VERSION) +#define SEP_DEVICE_NAME SEP_DRIVER_NAME + +#define SEP_PREV_DRIVER_NAME SEP_NAME "drv" STRINGIFY(SEP_PREV_MAJOR_VERSION) +#define SEP_PREV_DRIVER_NAME_W \ + SEP_NAME_W L"drv" STRINGIFY_W(SEP_PREV_MAJOR_VERSION) +#define SEP_PREV_DEVICE_NAME SEP_PREV_DRIVER_NAME + +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \ + defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) + +#define SEP_DRIVER_NAME SEP_NAME STRINGIFY(SEP_MAJOR_VERSION) +#define SEP_SAMPLES_NAME SEP_DRIVER_NAME "_s" +#define SEP_UNCORE_NAME SEP_DRIVER_NAME "_u" +#define SEP_SIDEBAND_NAME SEP_DRIVER_NAME "_b" +#define SEP_EMON_NAME SEP_DRIVER_NAME "_e" +#define SEP_IPT_NAME SEP_DRIVER_NAME "_pt" +#define SEP_IPT_INFO_NAME SEP_DRIVER_NAME "_ptinfo" +#define SEP_DEVICE_NAME "/dev/" SEP_DRIVER_NAME + +#define SEP_PREV_DRIVER_NAME SEP_NAME STRINGIFY(SEP_PREV_MAJOR_VERSION) +#define SEP_PREV_SAMPLES_NAME SEP_PREV_DRIVER_NAME "_s" +#define SEP_PREV_UNCORE_NAME SEP_PREV_DRIVER_NAME "_u" +#define SEP_PREV_SIDEBAND_NAME SEP_PREV_DRIVER_NAME "_b" +#define SEP_PREV_DEVICE_NAME "/dev/" SEP_PREV_DRIVER_NAME + +#endif + +#if defined(DRV_OS_MAC) + +#define SEP_DRIVER_NAME SEP_NAME STRINGIFY(SEP_MAJOR_VERSION) +#define SEP_SAMPLES_NAME SEP_DRIVER_NAME "_s" +#define SEP_DEVICE_NAME SEP_DRIVER_NAME + +#define SEP_PREV_DRIVER_NAME SEP_NAME STRINGIFY(SEP_PREV_MAJOR_VERSION) +#define SEP_PREV_SAMPLES_NAME SEP_PREV_DRIVER_NAME "_s" +#define SEP_PREV_DEVICE_NAME SEP_PREV_DRIVER_NAME + +#endif + +#endif diff --git a/drivers/platform/x86/sepdk/include/pax_shared.h b/drivers/platform/x86/sepdk/include/pax_shared.h new file mode 100644 index 00000000000000..844c65458ed497 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/pax_shared.h @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2009 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +/* + * + * Description: types and definitions shared between PAX kernel + * and user modes + * + * NOTE: alignment on page boundaries is required on 64-bit platforms! + * +*/ + +#ifndef _PAX_SHARED_H_ +#define _PAX_SHARED_H_ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) + +// PAX versioning + +// major version (increment only when PAX driver is incompatible with previous versions) +#define PAX_MAJOR_VERSION 1 +// minor version (increment only when new APIs are added, but driver remains backwards compatible) +#define PAX_MINOR_VERSION 0 +// bugfix version (increment only for bug fixes that don't affect usermode/driver compatibility) +#define PAX_BUGFIX_VERSION 2 + +#define PAX_VERSION_STR \ + STRINGIFY(PAX_MAJOR_VERSION) \ + "." STRINGIFY(PAX_MINOR_VERSION) "." 
STRINGIFY(PAX_BUGFIX_VERSION) + +// PAX device name + +#if defined(DRV_OS_WINDOWS) +#define PAX_NAME "sepdal" +#define PAX_NAME_W L"sepdal" +#else +#define PAX_NAME "pax" +#endif + +// PAX PMU reservation states + +#define PAX_PMU_RESERVED 1 +#define PAX_PMU_UNRESERVED 0 + +#define PAX_GUID_UNINITIALIZED 0 + +// PAX_IOCTL definitions + +#if defined(DRV_OS_WINDOWS) + +// +// The name of the device as seen by the driver +// +#define LSTRING(x) L#x +#define PAX_OBJECT_DEVICE_NAME L"\\Device\\sepdal" // LSTRING(PAX_NAME) +#define PAX_OBJECT_LINK_NAME L"\\DosDevices\\sepdal" // LSTRING(PAX_NAME) + +#define PAX_DEVICE_NAME PAX_NAME // for CreateFile called by app + +#define PAX_IOCTL_DEVICE_TYPE 0xA000 // values 0-32768 reserved for Microsoft +#define PAX_IOCTL_FUNCTION 0xA00 // values 0-2047 reserved for Microsoft + +// +// Basic CTL CODE macro to reduce typographical errors +// +#define PAX_CTL_READ_CODE(x) \ + CTL_CODE(PAX_IOCTL_DEVICE_TYPE, PAX_IOCTL_FUNCTION + (x), \ + METHOD_BUFFERED, FILE_READ_ACCESS) + +#define PAX_IOCTL_INFO PAX_CTL_READ_CODE(1) +#define PAX_IOCTL_STATUS PAX_CTL_READ_CODE(2) +#define PAX_IOCTL_RESERVE_ALL PAX_CTL_READ_CODE(3) +#define PAX_IOCTL_UNRESERVE PAX_CTL_READ_CODE(4) + +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || \ + defined(DRV_OS_SOLARIS) + +#define PAX_DEVICE_NAME "/dev/" PAX_NAME + +#define PAX_IOC_MAGIC 100 +#define PAX_IOCTL_INFO _IOW(PAX_IOC_MAGIC, 1, IOCTL_ARGS) +#define PAX_IOCTL_STATUS _IOW(PAX_IOC_MAGIC, 2, IOCTL_ARGS) +#define PAX_IOCTL_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) +#define PAX_IOCTL_UNRESERVE _IO(PAX_IOC_MAGIC, 4) + +#elif defined(DRV_OS_FREEBSD) + +#define PAX_DEVICE_NAME "/dev/" PAX_NAME + +#define PAX_IOC_MAGIC 100 +#define PAX_IOCTL_INFO _IOW(PAX_IOC_MAGIC, 1, IOCTL_ARGS_NODE) +#define PAX_IOCTL_STATUS _IOW(PAX_IOC_MAGIC, 2, IOCTL_ARGS_NODE) +#define PAX_IOCTL_RESERVE_ALL _IO(PAX_IOC_MAGIC, 3) +#define PAX_IOCTL_UNRESERVE _IO(PAX_IOC_MAGIC, 4) + +#elif defined(DRV_OS_MAC) + +// OSX driver names are always in reverse DNS form. +#define PAXDriverClassName com_intel_driver_PAX +#define kPAXDriverClassName "com_intel_driver_PAX" +#define PAX_DEVICE_NAME "com.intel.driver.PAX" + +// User client method dispatch selectors. 
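+// (These selectors parallel the reserve/unreserve/status/info operations
+// exposed through the PAX IOCTLs defined for the other OSes above.)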
+enum { + kPAXUserClientOpen, + kPAXUserClientClose, + kPAXReserveAll, + kPAXUnreserve, + kPAXGetStatus, + kPAXGetInfo, + kPAXDataIO, + kNumberOfMethods // Must be last +}; + +#else +#warning "unknown OS in pax_shared.h" +#endif + +// data for PAX_IOCTL_INFO call + +struct PAX_INFO_NODE_S { + volatile U64 managed_by; // entity managing PAX + volatile U32 version; // PAX version number + volatile U64 reserved1; // force 8-byte alignment + volatile U32 reserved2; // unreserved +}; + +typedef struct PAX_INFO_NODE_S PAX_INFO_NODE; +typedef PAX_INFO_NODE *PAX_INFO; + +// data for PAX_IOCTL_STATUS call + +struct PAX_STATUS_NODE_S { + volatile U64 guid; // reservation ID (globally unique identifier) + volatile DRV_FILE_DESC pid; // pid of process that has the reservation + volatile U64 start_time; // reservation start time + volatile U32 is_reserved; // 1 if there is a reservation, 0 otherwise +}; + +typedef struct PAX_STATUS_NODE_S PAX_STATUS_NODE; +typedef PAX_STATUS_NODE *PAX_STATUS; + +struct PAX_VERSION_NODE_S { + union { + U32 version; + struct { + U32 major : 8; + U32 minor : 8; + U32 bugfix : 16; + } s1; + } u1; +}; + +typedef struct PAX_VERSION_NODE_S PAX_VERSION_NODE; +typedef PAX_VERSION_NODE *PAX_VERSION; + +#define PAX_VERSION_NODE_version(v) ((v)->u1.version) +#define PAX_VERSION_NODE_major(v) ((v)->u1.s1.major) +#define PAX_VERSION_NODE_minor(v) ((v)->u1.s1.minor) +#define PAX_VERSION_NODE_bugfix(v) ((v)->u1.s1.bugfix) + +#endif diff --git a/drivers/platform/x86/sepdk/include/rise_errors.h b/drivers/platform/x86/sepdk/include/rise_errors.h new file mode 100644 index 00000000000000..79d5ec4f7ad5d0 --- /dev/null +++ b/drivers/platform/x86/sepdk/include/rise_errors.h @@ -0,0 +1,350 @@ +/* + * Copyright (C) 2004 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _RISE_ERRORS_H_ +#define _RISE_ERRORS_H_ + +// +// NOTE: +// +// 1) Before adding an error code, first make sure the error code doesn't +// already exist. If it does, use that, don't create a new one just because... +// +// 2) When adding an error code, add it to the end of the list. Don't insert +// error numbers in the middle of the list! For backwards compatibility, +// we don't want the numbers changing unless we really need them +// to for some reason (like we want to switch to negative error numbers) +// +// 3) Change the VT_LAST_ERROR_CODE macro to point to the (newly added) +// last error. This is done so SW can verify the number of error codes +// possible matches the number of error strings it has +// +// 4) Don't forget to update the error string table to include your +// error code (rise.c). Since the goal is something human readable +// you don't need to use abbreviations in there (ie. 
don't say "bad param", +// say "bad parameter" or "illegal parameter passed in") +// +// 5) Compile and run the test_rise app (in the test_rise directory) to +// verify things are still working +// +// + +#define VT_SUCCESS 0 +#define VT_FAILURE -1 + +/*************************************************************/ + +#define VT_INVALID_MAX_SAMP 1 +#define VT_INVALID_SAMP_PER_BUFF 2 +#define VT_INVALID_SAMP_INTERVAL 3 +#define VT_INVALID_PATH 4 +#define VT_TB5_IN_USE 5 +#define VT_INVALID_NUM_EVENTS 6 +#define VT_INTERNAL_ERROR 8 +#define VT_BAD_EVENT_NAME 9 +#define VT_NO_SAMP_SESSION 10 +#define VT_NO_EVENTS 11 +#define VT_MULTIPLE_RUNS 12 +#define VT_NO_SAM_PARAMS 13 +#define VT_SDB_ALREADY_EXISTS 14 +#define VT_SAMPLING_ALREADY_STARTED 15 +#define VT_TBS_NOT_SUPPORTED 16 +#define VT_INVALID_SAMPARAMS_SIZE 17 +#define VT_INVALID_EVENT_SIZE 18 +#define VT_ALREADY_PROCESSES 19 +#define VT_INVALID_EVENTS_PATH 20 +#define VT_INVALID_LICENSE 21 + +/******************************************************/ +//SEP error codes + +#define VT_SAM_ERROR 22 +#define VT_SAMPLE_FILE_ALREADY_MAPPED 23 +#define VT_INVALID_SAMPLE_FILE 24 +#define VT_UNKNOWN_SECTION_NUMBER 25 +#define VT_NO_MEMORY 26 +#define VT_ENV_VAR_NOT_FOUND 27 +#define VT_SAMPLE_FILE_NOT_MAPPED 28 +#define VT_BUFFER_OVERFLOW 29 +#define VT_USER_OP_COMPLETED 30 +#define VT_BINARY_NOT_FOUND 31 +#define VT_ISM_NOT_INITIALIZED 32 +#define VT_NO_SYMBOLS 33 +#define VT_SAMPLE_FILE_MAPPING_ERROR 34 +#define VT_BUFFER_NULL 35 +#define VT_UNEXPECTED_NULL_PTR 36 +#define VT_BINARY_LOAD_FAILED 37 +#define VT_FUNCTION_NOT_FOUND_IN_BINARY 38 +#define VT_ENTRY_NOT_FOUND 39 +#define VT_SEP_SYNTAX_ERROR 40 +#define VT_SEP_OPTIONS_ERROR 41 +#define VT_BAD_EVENT_MODIFIER 42 +#define VT_INCOMPATIBLE_PARAMS 43 +#define VT_FILE_OPEN_FAILED 44 +#define VT_EARLY_EXIT 45 +#define VT_TIMEOUT_RETURN 46 +#define VT_NO_CHILD_PROCESS 47 +#define VT_DRIVER_RUNNING 48 +#define VT_DRIVER_STOPPED 49 +#define VT_MULTIPLE_RUNS_NEEDED 50 +#define VT_QUIT_IMMEDIATE 51 +#define VT_DRIVER_INIT_FAILED 52 +#define VT_NO_TB5_CREATED 53 +#define VT_NO_WRITE_PERMISSION 54 +#define VT_DSA_INIT_FAILED 55 +#define VT_INVALID_CPU_MASK 56 +#define VT_SAMP_IN_RUNNING_STATE 57 +#define VT_SAMP_IN_PAUSE_STATE 58 +#define VT_SAMP_IN_STOP_STATE 59 +#define VT_SAMP_NO_SESSION 60 +#define VT_NOT_CONFIGURED 61 +#define VT_LAUNCH_BUILD64_FAILED 62 +#define VT_BAD_PARAMETER 63 +#define VT_ISM_INIT_FAILED 64 +#define VT_INVALID_STATE_TRANS 65 +#define VT_EARLY_EXIT_N_CANCEL 66 +#define VT_EVT_MGR_NOT_INIT 67 +#define VT_ISM_SECTION_ENUM_FAILED 68 +#define VT_VG_PARSER_ERROR 69 +#define VT_MISSING_VALUE_FOR_TOKEN 70 +#define VT_EMPTY_SAMPLE_FILE_NAME 71 +#define VT_UNEXPECTED_VALUE 72 +#define VT_NOT_IMPLEMENTED 73 +#define VT_MISSING_COL_DEPNDNCIES 74 +#define VT_DEP_COL_NOT_LIB_DEFINED 75 +#define VT_COL_NOT_REG_WITH_LIB 76 +#define VT_SECTION_ALREADY_IN_USE 77 +#define VT_SECTION_NOT_EXIST 78 +#define VT_STREAM_NOT_EXIST 79 +#define VT_INVALID_STREAM 80 +#define VT_STREAM_ALREADY_IN_USE 81 +#define VT_DATA_DESC_NOT_EXIST 82 +#define VT_INVALID_ERROR_CODE 83 +#define VT_INCOMPATIBLE_VERSION 84 +#define VT_LEGACY_DATA_NOT_EXIST 85 +#define VT_INVALID_READ_START 86 +#define VT_DRIVER_OPEN_FAILED 87 +#define VT_DRIVER_IOCTL_FAILED 88 +#define VT_SAMP_FILE_CREATE_FAILED 89 +#define VT_MODULE_FILE_CREATE_FAILED 90 +#define VT_INVALID_SAMPLE_FILE_NAME 91 +#define VT_INVALID_MODULE_FILE_NAME 92 +#define VT_FORK_CHILD_PROCESS_FAILED 93 +#define VT_UNEXPECTED_MISMATCH_IN_STRING_TYPES 94 +#define 
VT_INCOMPLETE_TB5_ENCOUNTERED 95 +#define VT_ERR_CONVERSION_FROM_STRING_2_NUMBER 96 +#define VT_INVALID_STRING 97 +#define VT_UNSUPPORTED_DATA_SIZE 98 +#define VT_TBRW_INIT_FAILED 99 +#define VT_PLUGIN_UNLOAD 100 +#define VT_PLUGIN_ENTRY_NULL 101 +#define VT_UNKNOWN_PLUGIN 102 +#define VT_BUFFER_TOO_SMALL 103 +#define VT_CANNOT_MODIFY_COLUMN 104 +#define VT_MULT_FILTERS_NOT_ALLOWED 105 +#define VT_ADDRESS_IN_USE 106 +#define VT_NO_MORE_MMAPS 107 +#define VT_MAX_PAGES_IN_DS_EXCEEDED 108 +#define VT_INVALID_COL_TYPE_IN_GROUP_INFO 109 +#define VT_AGG_FN_ON_VARCHAR_NOT_SUPP 110 +#define VT_INVALID_ACCESS_PERMS 111 +#define VT_NO_DATA_TO_DISPLAY 112 +#define VT_TB5_IS_NOT_BOUND 113 +#define VT_MISSING_GROUP_BY_COLUMN 114 +#define VT_SMRK_MAX_STREAMS_EXCEEDED 115 +#define VT_SMRK_STREAM_NOT_CREATED 116 +#define VT_SMRK_NOT_IMPL 117 +#define VT_SMRK_TYPE_NOT_IMPL 118 +#define VT_SMRK_TYPE_ALREADY_SET 119 +#define VT_SMRK_NO_STREAM 120 +#define VT_SMRK_INVALID_STREAM_TYPE 121 +#define VT_SMRK_STREAM_NOT_FOUND 122 +#define VT_SMRK_FAIL 123 +#define VT_SECTION_NOT_READABLE 124 +#define VT_SECTION_NOT_WRITEABLE 125 +#define VT_GLOBAL_SECTION_NOT_CLOSED 126 +#define VT_STREAM_SECTION_NOT_CLOSED 127 +#define VT_STREAM_NOT_CLOSED 128 +#define VT_STREAM_NOT_BOUND 129 +#define VT_NO_COLS_SPECIFIED 130 +#define VT_NOT_ALL_SECTIONS_CLOSED 131 +#define VT_SMRK_INVALID_PTR 132 +#define VT_UNEXPECTED_BIND_MISMATCH 133 +#define VT_WIN_TIMER_ERROR 134 +#define VT_ONLY_SNGL_DEPNDT_COL_ALLWD 135 +#define VT_BAD_MODULE 136 +#define VT_INPUT_SOURCE_INFO_NOT_SET 137 +#define VT_UNSUPPORTED_TIME_GRAN 138 +#define VT_NO_SAMPLES_COLLECTED 139 +#define VT_INVALID_CPU_TYPE_VERSION 140 +#define VT_BIND_UNEXPECTED_1STMODREC 141 +#define VT_BIND_MODULES_NOT_SORTED 142 +#define VT_UNEXPECTED_NUM_CPUIDS 143 +#define VT_UNSUPPORTED_ARCH_TYPE 144 +#define VT_NO_DATA_TO_WRITE 145 +#define VT_EM_TIME_SLICE_TOO_SMALL 146 +#define VT_EM_TOO_MANY_EVENT_GROUPS 147 +#define VT_EM_ZERO_GROUPS 148 +#define VT_EM_NOT_SUPPORTED 149 +#define VT_PMU_IN_USE 150 +#define VT_TOO_MANY_INTERRUPTS 151 +#define VT_MAX_SAMPLES_REACHED 152 +#define VT_MODULE_COLLECTION_FAILED 153 +#define VT_INCOMPATIBLE_DRIVER 154 +#define VT_UNABLE_LOCATE_TRIGGER_EVENT 155 +#define VT_COMMAND_NOT_HANDLED 156 +#define VT_DRIVER_VERSION_MISMATCH 157 +#define VT_MAX_MARKERS 158 +#define VT_DRIVER_COMM_FAILED 159 +#define VT_CHIPSET_CONFIG_FAILED 160 +#define VT_BAD_DATA_BASE 161 +#define VT_PAX_SERVICE_NOT_CONNECTED 162 +#define VT_PAX_SERVICE_ERROR 163 +#define VT_PAX_PMU_RESERVE_FAILED 164 +#define VT_INVALID_CPU_INFO_TYPE 165 +#define VT_CACHE_DOESNT_EXIST 166 +#define VT_UNSUPPORTED_UNCORE_ARCH_TYPE 167 +#define VT_EXCEEDED_MAX_EVENTS 168 +#define VT_MARKER_TIMER_FAILED 169 +#define VT_PAX_PMU_UNRESERVE_FAILED 170 +#define VT_MULTIPLE_PROCESSES_FOUND 171 +#define VT_NO_SUCH_PROCESS_FOUND 172 +#define VT_PCL_NOT_ENABLED 173 +#define VT_PCL_UID_CHECK 174 +#define VT_DEL_RESULTS_DIR_FAILED 175 +#define VT_NO_VALID_EVENTS 176 +#define VT_INVALID_EVENT 177 +#define VT_EVENTS_COUNTED 178 +#define VT_EVENTS_COLLECTED 179 +#define VT_UNSUPPORTED_GFX_ARCH_TYPE 180 +#define VT_GFX_CONFIG_FAILED 181 +#define VT_UNSUPPORTED_NON_NATIVE_MODE 182 +#define VT_INVALID_DEVICE 183 +#define VT_ENV_SETUP_FAILED 184 +#define VT_RESUME_NOT_RECEIVED 185 +#define VT_UNSUPPORTED_PWR_ARCH_TYPE 186 +#define VT_PWR_CONFIG_FAILED 187 +#define VT_NMI_WATCHDOG_FOUND 188 +#define VT_NO_PMU_RESOURCES 189 +#define VT_MIC_CARD_NOT_ONLINE 190 +#define VT_FREEZE_ON_PMI_NOT_AVAIL 191 +#define 
VT_FLUSH_FAILED 192 +#define VT_FLUSH_SUCCESS 193 +#define VT_WRITE_ERROR 194 +#define VT_NO_SPACE 195 +#define VT_MSR_ACCESS_ERROR 196 +#define VT_PEBS_NOT_SUPPORTED 197 +#define VT_LUA_PARSE_ERROR 198 +#define VT_COMM_CONNECTION_CLOSED_BY_REMOTE 199 +#define VT_COMM_LISTEN_ERROR 200 +#define VT_COMM_BIND_ERROR 201 +#define VT_COMM_ACCEPT_ERROR 202 +#define VT_COMM_SEND_ERROR 203 +#define VT_COMM_RECV_ERROR 204 +#define VT_COMM_SOCKET_ERROR 205 +#define VT_COMM_CONNECT_ERROR 206 +#define VT_TARGET_COLLECTION_MISMATCH 207 +#define VT_INVALID_SEP_DRIVER_LOG 208 +#define VT_COMM_PROTOCOL_VER_NOT_SUPPORTED 209 +#define VT_SAMP_IN_UNEXPECTED_STATE 210 +#define VT_COMM_RECV_BUF_RESIZE_ERROR 211 +#define VT_COMM_SEND_BUF_RESIZE_ERROR 212 +#define VT_NO_ADMIN_PRIVILEGE 213 +#define VT_CORE_EVT_MUX_NOT_SUPPORTED 214 +#define VT_OS_VERSION_NOT_SUPPORTED 215 +#define VT_COMM_RECV_TIMEOUT_SET_ERROR 216 +#define VT_COMM_RECV_TIMEOUT 217 +#define VT_COMM_NOT_COMPATIBLE 218 +#define VT_COMM_DATA_CHANNEL_UNAVAILABLE 219 +#define VT_COMM_INVALID_TARGET_OS_INFO 220 +#define VT_COMM_INVALID_TARGET_HARDWARE_INFO 221 +#define VT_COMM_INVALID_TARGET_COLLECT_SWITCH 222 +#define VT_NO_OFFCORE_SUPPORT 223 +#define VT_INVALID_EMON_STATIC_EVENT 224 +#define VT_ACRN_VMM_VERSION_MISMATCH 225 +#define VT_COMM_INVALID_NUM_OF_CORE_EVENTS 226 +#define VT_EVENT_NEEDS_EXPERIMENTAL_FLAG 227 +#define VT_INVALID_PROG_INFO 228 +#define VT_UNSUPPORTED_CPU_TOPOLOGY 229 +#define VT_IPT_NOT_SUPPORTED 230 +#define VT_EXCEED_OPEN_FILE_LIMIT 231 +#define VT_DRIVERLESS_SUCCESS 232 +#define VT_FEATURE_NOT_SUPPORTED_IN_DRIVER 232 +#define VT_UNCORE_DISCOVERY_NOT_SUPPORTED 233 +#define VT_UNCORE_DISCOVERY_DATA_INVALID 234 +#define VT_INCORRECT_COUNTER_MASK 235 +/* + * define error code for checking on async marker request + */ +#define VT_INVALID_MARKER_ID -1 + +/* + * ************************************************************ + * NOTE: after adding new error code(s), remember to also + * update the following: + * 1) VT_LAST_ERROR_CODE below + * 2) /src/rise.c + * 3) /driver/qnx/sepdk/include/rise_errors.h + * + * ************************************************************ + */ + +// +// To make error checking easier, the special VT_LAST_ERROR_CODE +// should be set to whatever is the last error on the list above +// +#define VT_LAST_ERROR_CODE VT_INCORRECT_COUNTER_MASK + +// +// Define a macro to determine success or failure. 
Users of this +// error header file should use the macros instead of direct +// checks so that we can change the error numbers in the future +// (such as making negative numbers be an error indication and positive +// numbers being a success with a value indication) +// +#define VTSA_SUCCESS(x) ((x) == VT_SUCCESS) +#define VTSA_FAILED(x) (!VTSA_SUCCESS(x)) + +// +// These should be deprecated, but we'll keep them here just in case +// +#define SEP_IS_SUCCESS(x) VTSA_SUCCESS(x) +#define SEP_IS_FAILED(x) VTSA_FAILED(x) + +/************************************************************* + * API Error Codes + *************************************************************/ +#define VTAPI_INVALID_MAX_SAMP VT_INVALID_MAX_SAMP +#define VTAPI_INVALID_SAMP_PER_BUFF VT_INVALID_SAMP_PER_BUFF +#define VTAPI_INVALID_SAMP_INTERVAL VT_INVALID_SAMP_INTERVAL +#define VTAPI_INVALID_PATH VT_INVALID_PATH +#define VTAPI_TB5_IN_USE VT_TB5_IN_USE +#define VTAPI_INVALID_NUM_EVENTS VT_INVALID_NUM_EVENTS +#define VTAPI_INTERNAL_ERROR VT_INTERNAL_ERROR +#define VTAPI_BAD_EVENT_NAME VT_BAD_EVENT_NAME +#define VTAPI_NO_SAMP_SESSION VT_NO_SAMP_SESSION +#define VTAPI_NO_EVENTS VT_NO_EVENTS +#define VTAPI_MULTIPLE_RUNS VT_MULTIPLE_RUNS +#define VTAPI_NO_SAM_PARAMS VT_NO_SAM_PARAMS +#define VTAPI_SDB_ALREADY_EXISTS VT_SDB_ALREADY_EXISTS +#define VTAPI_SAMPLING_ALREADY_STARTED VT_SAMPLING_ALREADY_STARTED +#define VTAPI_TBS_NOT_SUPPORTED VT_TBS_NOT_SUPPORTED +#define VTAPI_INVALID_SAMPARAMS_SIZE VT_INVALID_SAMPARAMS_SIZE +#define VTAPI_INVALID_EVENT_SIZE VT_INVALID_EVENT_SIZE +#define VTAPI_ALREADY_PROCESSES VT_ALREADY_PROCESSES +#define VTAPI_INVALID_EVENTS_PATH VT_INVALID_EVENTS_PATH +#define VTAPI_INVALID_LICENSE VT_INVALID_LICENSE + +typedef int RISE_ERROR; +typedef void *RISE_PTR; + +#endif diff --git a/drivers/platform/x86/sepdk/pax/Makefile b/drivers/platform/x86/sepdk/pax/Makefile new file mode 100644 index 00000000000000..f0d7e33f8dd8fc --- /dev/null +++ b/drivers/platform/x86/sepdk/pax/Makefile @@ -0,0 +1,36 @@ +# +# Version: 1.5 +# +# Copyright (C) 2009 Intel Corporation. All Rights Reserved. +# +# This file is part of SEP Development Kit +# +# SEP Development Kit is free software; you can redistribute it +# and/or modify it under the terms of the GNU General Public License +# version 2 as published by the Free Software Foundation. +# +# SEP Development Kit is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# As a special exception, you may use this file as part of a free software +# library without restriction. Specifically, if other files instantiate +# templates or use macros or inline functions from this file, or you compile +# this file and link it with other files to produce an executable, this +# file does not by itself cause the resulting executable to be covered by +# the GNU General Public License. This exception does not however +# invalidate any other reasons why the executable file might be covered by +# the GNU General Public License. 
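+#
+# Illustrative build note (an assumption, not part of the original file):
+# as a kbuild module list, this Makefile would typically be driven against
+# the installed kernel headers for an out-of-tree build, e.g.
+#   make -C /lib/modules/$(uname -r)/build M=$PWD modules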
+# + +# -------------------- user configurable options ------------------------ + +# base name of driver +DRIVER_NAME = pax + +ccflags-y := -I$(srctree)/drivers/platform/x86/sepdk/include -I$(srctree)/drivers/platform/x86/sepdk/inc + +obj-m := $(DRIVER_NAME).o + + diff --git a/drivers/platform/x86/sepdk/pax/pax.c b/drivers/platform/x86/sepdk/pax/pax.c new file mode 100644 index 00000000000000..5cf047600c146f --- /dev/null +++ b/drivers/platform/x86/sepdk/pax/pax.c @@ -0,0 +1,968 @@ +/**** + Copyright (C) 2009 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_HARDLOCKUP_DETECTOR) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#include +#include +#include +#endif + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv.h" +#include "lwpmudrv_ioctl.h" + +#include "control.h" +#include "pax_shared.h" + +MODULE_AUTHOR("Copyright (C) 2009-2021 Intel Corporation"); +MODULE_VERSION(PAX_NAME"_"PAX_VERSION_STR); +MODULE_LICENSE("Dual BSD/GPL"); + +typedef struct PAX_DEV_NODE_S PAX_DEV_NODE; +typedef PAX_DEV_NODE *PAX_DEV; + +struct PAX_DEV_NODE_S { + long buffer; + struct semaphore sem; + struct cdev cdev; +}; + +#define PAX_DEV_buffer(dev) ((dev)->buffer) +#define PAX_DEV_sem(dev) ((dev)->sem) +#define PAX_DEV_cdev(dev) ((dev)->cdev) + +// global variables for the PAX driver + +PAX_DEV pax_control; // main control +static dev_t pax_devnum; // the major char device number for PAX +static PAX_VERSION_NODE pax_version; // version of PAX +static PAX_INFO_NODE pax_info; // information on PAX +static PAX_STATUS_NODE pax_status; // PAX reservation status + +#if !defined(DRV_UDEV_UNAVAILABLE) +static struct class *pax_class; +#endif + +#define NMI_WATCHDOG_PATH "/proc/sys/kernel/nmi_watchdog" +S8 nmi_watchdog_restore = '0'; + +struct proc_dir_entry *pax_version_file; + +static int pax_version_proc_read(struct seq_file *, void *); +static int pax_version_proc_open(struct inode *, struct file *); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0) +static struct proc_ops pax_version_ops = { + .proc_open = pax_version_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; +#else +static struct file_operations pax_version_ops = { + .owner = THIS_MODULE, + .open = pax_version_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; 
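+// NB: procfs switched from file_operations to the dedicated struct proc_ops
+// in kernel 5.6, hence the two definitions above.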
+#endif
+
+// Print macros for kernel debugging
+
+#if defined(DEBUG)
+#define PAX_PRINT_DEBUG(fmt, args...) \
+	{ \
+		printk(KERN_INFO "PAX: [DEBUG] " fmt, ##args); \
+	}
+#else
+#define PAX_PRINT_DEBUG(fmt, args...) \
+	{ \
+		; \
+	}
+#endif
+#define PAX_PRINT(fmt, args...) \
+	{ \
+		printk(KERN_INFO "PAX: " fmt, ##args); \
+	}
+#define PAX_PRINT_WARNING(fmt, args...) \
+	{ \
+		printk(KERN_ALERT "PAX: [Warning] " fmt, ##args); \
+	}
+#define PAX_PRINT_ERROR(fmt, args...) \
+	{ \
+		printk(KERN_CRIT "PAX: [ERROR] " fmt, ##args); \
+	}
+
+// various other useful macros
+// (no trailing semicolons here, so the macros can be used in expressions)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
+#define PAX_FIND_TASK_BY_PID(pid) find_task_by_pid(pid)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
+#define PAX_FIND_TASK_BY_PID(pid) \
+	pid_task(find_pid_ns(pid, &init_pid_ns), PIDTYPE_PID)
+#else
+#define PAX_FIND_TASK_BY_PID(pid) pid_task(find_get_pid(pid), PIDTYPE_PID)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+#define PAX_TASKLIST_READ_LOCK() read_lock(&tasklist_lock)
+#define PAX_TASKLIST_READ_UNLOCK() read_unlock(&tasklist_lock)
+#else
+#define PAX_TASKLIST_READ_LOCK() rcu_read_lock()
+#define PAX_TASKLIST_READ_UNLOCK() rcu_read_unlock()
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) && \
+	LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+
+struct task_struct *pax_Enable_NMIWatchdog_Thread;
+struct semaphore pax_Enable_NMIWatchdog_Sem;
+struct task_struct *pax_Disable_NMIWatchdog_Thread;
+struct semaphore pax_Disable_NMIWatchdog_Sem;
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn S32 pax_Disable_NMIWatchdog(PVOID data)
+ *
+ * @param data - Pointer to data
+ *
+ * @return S32
+ *
+ * @brief Disable the NMI watchdog
+ *
+ * Special Notes
+ */
+S32
+pax_Disable_NMIWatchdog(PVOID data)
+{
+	struct file *fd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+	mm_segment_t old_fs;
+#endif
+	struct cred *kcred;
+	loff_t pos = 0;
+	S8 new_val = '0';
+
+	up(&pax_Disable_NMIWatchdog_Sem);
+
+	kcred = prepare_kernel_cred(NULL);
+	if (kcred) {
+		commit_creds(kcred);
+	} else {
+		PAX_PRINT_ERROR(
+			"pax_Disable_NMIWatchdog: prepare_kernel_cred returns NULL\n");
+	}
+
+	fd = filp_open(NMI_WATCHDOG_PATH, O_RDWR, 0);
+
+	/* filp_open() returns an ERR_PTR() on failure, not NULL */
+	if (!IS_ERR_OR_NULL(fd)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+		fd->f_op->read(fd, &nmi_watchdog_restore, 1, &fd->f_pos);
+#else
+		kernel_read(fd, &nmi_watchdog_restore, 1, &fd->f_pos);
+#endif
+		PAX_PRINT_DEBUG("Existing nmi_watchdog value = %c\n",
+				nmi_watchdog_restore);
+		if (nmi_watchdog_restore != '0') {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			fd->f_op->write(fd, &new_val, 1, &pos);
+			set_fs(old_fs);
+#else
+			kernel_write(fd, &new_val, 1, &pos);
+#endif
+		} else {
+			PAX_PRINT_DEBUG(
+				"pax_Disable_NMIWatchdog: NMI watchdog already disabled!\n");
+		}
+
+		filp_close(fd, NULL);
+	} else {
+		PAX_PRINT_ERROR(
+			"pax_Disable_NMIWatchdog: filp_open failed on %s\n",
+			NMI_WATCHDOG_PATH);
+	}
+
+	while (!kthread_should_stop()) {
+		schedule();
+	}
+
+	return 0;
+}
+
+/*!
+ * @fn S32 pax_Enable_NMIWatchdog(PVOID data)
+ *
+ * @param data - Pointer to data
+ *
+ * @return S32
+ *
+ * @brief Enable the NMI watchdog
+ *
+ * Special Notes
+ */
+S32
+pax_Enable_NMIWatchdog(PVOID data)
+{
+	struct file *fd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+	mm_segment_t old_fs;
+#endif
+	struct cred *kcred;
+	loff_t pos = 0;
+	S8 new_val = '1';
+
+	up(&pax_Enable_NMIWatchdog_Sem);
+
+	kcred = prepare_kernel_cred(NULL);
+	if (kcred) {
+		commit_creds(kcred);
+	} else {
+		PAX_PRINT_ERROR(
+			"pax_Enable_NMIWatchdog: prepare_kernel_cred returns NULL!\n");
+	}
+
+	fd = filp_open(NMI_WATCHDOG_PATH, O_WRONLY, 0);
+
+	/* filp_open() returns an ERR_PTR() on failure, not NULL */
+	if (!IS_ERR_OR_NULL(fd)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+		old_fs = get_fs();
+		set_fs(KERNEL_DS);
+		fd->f_op->write(fd, &new_val, 1, &pos);
+		set_fs(old_fs);
+#else
+		kernel_write(fd, &new_val, 1, &pos);
+#endif
+		filp_close(fd, NULL);
+	} else {
+		PAX_PRINT_ERROR(
+			"pax_Enable_NMIWatchdog: filp_open failed on %s!\n",
+			NMI_WATCHDOG_PATH);
+	}
+
+	while (!kthread_should_stop()) {
+		schedule();
+	}
+
+	return 0;
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void pax_Init()
+ *
+ * @param none
+ *
+ * @return none
+ *
+ * @brief Initialize the PAX system
+ *
+ * Special Notes
+ */
+static void
+pax_Init(VOID)
+{
+	//
+	// Initialize PAX driver version (done once at driver load time)
+	//
+	PAX_VERSION_NODE_major(&pax_version) = PAX_MAJOR_VERSION;
+	PAX_VERSION_NODE_minor(&pax_version) = PAX_MINOR_VERSION;
+	PAX_VERSION_NODE_bugfix(&pax_version) = PAX_BUGFIX_VERSION;
+
+	// initialize PAX_Info
+	pax_info.version = PAX_VERSION_NODE_version(&pax_version);
+	pax_info.managed_by = 1; // THIS_MODULE->name;
+
+	// initialize PAX_Status
+	pax_status.guid = PAX_GUID_UNINITIALIZED;
+	pax_status.pid = 0;
+	pax_status.start_time = 0;
+	pax_status.is_reserved = PAX_PMU_UNRESERVED;
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void pax_Cleanup()
+ *
+ * @param none
+ *
+ * @return none
+ *
+ * @brief Uninitialize the PAX system
+ *
+ * Special Notes
+ */
+static void
+pax_Cleanup(VOID)
+{
+	// uninitialize PAX_Info
+	pax_info.managed_by = 0;
+
+	// uninitialize PAX_Status
+	pax_status.guid = PAX_GUID_UNINITIALIZED;
+	pax_status.pid = 0;
+	pax_status.start_time = 0;
+	pax_status.is_reserved = PAX_PMU_UNRESERVED;
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U32 pax_Process_Valid()
+ *
+ * @param U32 pid - process ID
+ *
+ * @return TRUE or FALSE
+ *
+ * @brief Check whether the process with pid still exists and, if so,
+ * whether it is still "alive". If so, the process is deemed
+ * valid. Otherwise, the process is deemed invalid.
+ *
+ * Special Notes
+ */
+static U32
+pax_Process_Valid(U32 pid)
+{
+	struct task_struct *process_task;
+	U32 valid_process;
+
+	//
+	// There doesn't seem to be a way to force the process_task to continue
+	// to exist after the read_lock is released (an SMP system could delete
+	// the process on another processor after the lock is released), so we
+	// need to do all the work with the lock held... There are routines on
+	// later 2.6 kernels (get_task_struct() and put_task_struct()) which
+	// seem to do what we want, but the code behind the macro calls a
+	// function that isn't EXPORT'ed, so we can't use it in a device driver...
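+	// (A possible alternative on current kernels: get_task_struct() and
+	// put_task_struct() appear usable from modules, since put_task_struct()
+	// relies on the exported __put_task_struct(); that would allow pinning
+	// the task beyond the lock. Not relied upon here.)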
+ // + PAX_TASKLIST_READ_LOCK(); + process_task = PAX_FIND_TASK_BY_PID(pax_status.pid); + if ((process_task == NULL) || + (process_task->exit_state == EXIT_ZOMBIE) || + (process_task->exit_state == EXIT_DEAD)) { + // not a valid process + valid_process = FALSE; + } else { + // process is "alive", so assume it is still valid ... + valid_process = TRUE; + } + PAX_TASKLIST_READ_UNLOCK(); + + return valid_process; +} + +// ************************************************************************** +// +// below are PAX Open/Read/Write device functions (appears in /proc/kallsyms) +// +// ************************************************************************** + +/* ------------------------------------------------------------------------- */ +/*! + * @fn int pax_Open() + * + * @param struct inode *inode + * @param struct file *filp + * + * @return int (TODO: check for open failure) + * + * @brief This function is called when doing an open(/dev/pax) + * + * Special Notes + */ +static int +pax_Open(struct inode *inode, struct file *filp) +{ + PAX_PRINT_DEBUG("open called on maj:%d, min:%d\n", imajor(inode), + iminor(inode)); + filp->private_data = container_of(inode->i_cdev, PAX_DEV_NODE, cdev); + + return 0; +} + +// ************************************************************************** +// +// below are PAX IOCTL function handlers +// +// ************************************************************************** + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS pax_Get_Info() + * + * @param IOCTL_ARGS arg - pointer to the output buffer + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_INFO call + * Returns static information related to PAX (e.g., version) + * + * Special Notes + */ +static OS_STATUS +pax_Get_Info(IOCTL_ARGS arg) +{ + int error; + error = copy_to_user((PAX_INFO_NODE *)(arg->buf_usr_to_drv), + &pax_info, sizeof(PAX_INFO_NODE)); + if (error) { + PAX_PRINT_ERROR( + "pax_Get_Info: unable to copy to user (error=%d)!\n", + error); + return OS_FAULT; + } + + PAX_PRINT_DEBUG("pax_Get_Info: sending PAX info (%ld bytes):\n", + sizeof(PAX_INFO_NODE)); + PAX_PRINT_DEBUG("pax_Get_Info: raw_version = %u (0x%x)\n", + pax_info.version, pax_info.version); + PAX_PRINT_DEBUG("pax_Get_Info: major = %u\n", + PAX_VERSION_NODE_major(&pax_version)); + PAX_PRINT_DEBUG("pax_Get_Info: minor = %u\n", + PAX_VERSION_NODE_minor(&pax_version)); + PAX_PRINT_DEBUG("pax_Get_Info: bugfix = %u\n", + PAX_VERSION_NODE_bugfix(&pax_version)); + PAX_PRINT_DEBUG("pax_Get_Info: managed_by = 0x%lu\n", + (long unsigned int)pax_info.managed_by); + PAX_PRINT_DEBUG("pax_Get_Info: information sent.\n"); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS pax_Get_Status() + * + * @param IOCTL_ARGS arg - pointer to the output buffer + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_STATUS call + * Returns status of the reservation (e.g., who owns) + * + * Special Notes + */ +static OS_STATUS +pax_Get_Status(IOCTL_ARGS arg) +{ + int error; + error = copy_to_user((PAX_STATUS_NODE *)(arg->buf_usr_to_drv), + &pax_status, sizeof(PAX_STATUS_NODE)); + if (error) { + PAX_PRINT_ERROR( + "pax_Get_Status: unable to copy to user (error=%d)!\n", + error); + return OS_FAULT; + } + + PAX_PRINT_DEBUG("pax_Get_Status: sending PAX status (%ld bytes):\n", + sizeof(PAX_STATUS_NODE)); + PAX_PRINT_DEBUG("pax_Get_Status: guid = 0x%lu\n", + (long unsigned int)pax_status.guid); + PAX_PRINT_DEBUG("pax_Get_Status: pid = %lu\n", + (long unsigned int)pax_status.pid); + PAX_PRINT_DEBUG("pax_Get_Status: start_time = %lu\n", + (long unsigned int)pax_status.start_time); + PAX_PRINT_DEBUG("pax_Get_Status: is_reserved = %u\n", + pax_status.is_reserved); + PAX_PRINT_DEBUG("pax_Get_Status: status sent.\n"); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS pax_Unreserve() + * + * @param none + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_UNRESERVE call + * Returns OS_SUCCESS if PMU unreservation succeeded, otherwise failure + * + * Special Notes + */ +static OS_STATUS +pax_Unreserve(VOID) +{ + // if no reservation is currently held, then return success + if (pax_status.is_reserved == PAX_PMU_UNRESERVED) { + PAX_PRINT_DEBUG("pax_Unreserve: currently unreserved\n"); + return OS_SUCCESS; + } + + // otherwise, there is a reservation ... + // allow the process which started the reservation to unreserve + // or if that process is invalid, then any other process can unreserve + if ((pax_status.pid == current->pid) || + (!pax_Process_Valid(pax_status.pid))) { + S32 reservation = -1; + PAX_PRINT_DEBUG( + "pax_Unreserve: pid %d attempting to unreserve PMU held by pid %d\n", + (U32)current->pid, (U32)pax_status.pid); + +#if !defined(DRV_ANDROID) && !defined(DRV_CHROMEOS) && \ + defined(CONFIG_HARDLOCKUP_DETECTOR) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) + if (nmi_watchdog_restore != '0') { + PAX_PRINT_DEBUG( + "Attempting to enable NMI watchdog...\n"); + + sema_init(&pax_Enable_NMIWatchdog_Sem, 0); + + pax_Enable_NMIWatchdog_Thread = + kthread_run(&pax_Enable_NMIWatchdog, NULL, + "pax_enable_nmi_watchdog"); + if (!pax_Enable_NMIWatchdog_Thread || + pax_Enable_NMIWatchdog_Thread == ERR_PTR(-ENOMEM)) { + PAX_PRINT_ERROR( + "pax_Unreserve: could not create pax_enable_nmi_watchdog kthread."); + } else { + down(&pax_Enable_NMIWatchdog_Sem); + kthread_stop(pax_Enable_NMIWatchdog_Thread); + } + pax_Enable_NMIWatchdog_Thread = NULL; + nmi_watchdog_restore = '0'; + } +#endif + + reservation = cmpxchg(&pax_status.is_reserved, PAX_PMU_RESERVED, + PAX_PMU_UNRESERVED); + if (reservation < 0) { + // no-op ... eliminates "variable not used" compiler warning + } + PAX_PRINT_DEBUG("pax_Unreserve: reserve=%d, is_reserved=%d\n", + reservation, pax_status.is_reserved); + // unreserve but keep track of last PID/GUID that had reservation + } + + PAX_PRINT_DEBUG("pax_Unreserve: pid %d unreserve status: %d\n", + current->pid, pax_status.is_reserved); + + return ((pax_status.is_reserved == PAX_PMU_UNRESERVED) ? OS_SUCCESS : + OS_FAULT); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS pax_Reserve_All() + * + * @param none + * + * @return OS_STATUS + * + * @brief Local function that handles the PAX_IOCTL_RESERVE_ALL call + * Returns OS_SUCCESS if PMU reservation succeeded, otherwise failure + * + * Special Notes + */ +static OS_STATUS +pax_Reserve_All(VOID) +{ + S32 reservation = -1; // previous reservation state (initially, unknown) + + // check if PMU can be unreserved + if (pax_status.is_reserved == PAX_PMU_RESERVED) { + OS_STATUS unreserve_err = pax_Unreserve(); + if (unreserve_err != OS_SUCCESS) { + return unreserve_err; // attempt to unreserve failed, so return error + } + } + + PAX_PRINT_DEBUG("pax_Reserve_All: pid %d attempting to reserve PMU\n", + current->pid); + + // at this point, there is no reservation, so commence race to reserve ... + reservation = cmpxchg(&pax_status.is_reserved, PAX_PMU_UNRESERVED, + PAX_PMU_RESERVED); + + // only one request to reserve will succeed, and when it does, update status + // information with the successful request + if ((reservation == PAX_PMU_UNRESERVED) && + (pax_status.is_reserved == PAX_PMU_RESERVED)) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + *(&pax_status.start_time) = rdtsc_ordered(); +#else + rdtscll(*(&pax_status.start_time)); +#endif + pax_status.pid = current->pid; + +#if !defined(DRV_ANDROID) && !defined(DRV_CHROMEOS) && \ + defined(CONFIG_HARDLOCKUP_DETECTOR) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) + sema_init(&pax_Disable_NMIWatchdog_Sem, 0); + pax_Disable_NMIWatchdog_Thread = + kthread_run(&pax_Disable_NMIWatchdog, NULL, + "pax_disable_nmi_watchdog"); + if (!pax_Disable_NMIWatchdog_Thread || + pax_Disable_NMIWatchdog_Thread == ERR_PTR(-ENOMEM)) { + PAX_PRINT_ERROR( + "pax_Reserve_All: could not create pax_disable_nmi_watchdog kthread."); + } else { + down(&pax_Disable_NMIWatchdog_Sem); + kthread_stop(pax_Disable_NMIWatchdog_Thread); + } + pax_Disable_NMIWatchdog_Thread = NULL; +#endif + + return OS_SUCCESS; + } + + return OS_FAULT; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS pax_Service_IOCTL() + * + * @param inode - pointer to the device object + * @param filp - pointer to the file object + * @param cmd - ioctl value (defined in lwpmu_ioctl.h) + * @param arg - arg or arg pointer + * + * @return OS_STATUS + * + * @brief Worker function that handles IOCTL requests from the user mode + * + * Special Notes + */ +extern IOCTL_OP_TYPE +pax_Service_IOCTL(IOCTL_USE_INODE struct file *filp, + unsigned int cmd, + IOCTL_ARGS_NODE local_args) +{ + int status = OS_SUCCESS; + + // dispatch to appropriate PAX IOCTL function + switch (cmd) { + case PAX_IOCTL_INFO: + PAX_PRINT_DEBUG("PAX_IOCTL_INFO\n"); + status = pax_Get_Info(&local_args); + break; + + case PAX_IOCTL_STATUS: + PAX_PRINT_DEBUG("PAX_IOCTL_STATUS\n"); + status = pax_Get_Status(&local_args); + break; + + case PAX_IOCTL_RESERVE_ALL: + PAX_PRINT_DEBUG("PAX_IOCTL_RESERVE_ALL\n"); + status = pax_Reserve_All(); + break; + + case PAX_IOCTL_UNRESERVE: + PAX_PRINT_DEBUG("PAX_IOCTL_UNRESERVE\n"); + status = pax_Unreserve(); + break; + + default: + PAX_PRINT_ERROR("unknown IOCTL cmd: %d magic:%d number:%d\n", + cmd, _IOC_TYPE(cmd), _IOC_NR(cmd)); + status = OS_ILLEGAL_IOCTL; + break; + } + + return status; +} + +extern long +pax_Device_Control(IOCTL_USE_INODE struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int status = OS_SUCCESS; + IOCTL_ARGS_NODE local_args; + + if (arg) { + status = copy_from_user(&local_args, (IOCTL_ARGS)arg, + sizeof(IOCTL_ARGS_NODE)); + } + + status = pax_Service_IOCTL(IOCTL_USE_INODE filp, cmd, local_args); + return status; +} + +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) +extern IOCTL_OP_TYPE +pax_Device_Control_Compat(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int status = OS_SUCCESS; + IOCTL_COMPAT_ARGS_NODE local_args_compat; + IOCTL_ARGS_NODE local_args; + + memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE)); + if (arg) { + status = copy_from_user(&local_args_compat, + (IOCTL_COMPAT_ARGS)arg, + sizeof(IOCTL_COMPAT_ARGS_NODE)); + } + + local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr; + local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv; + local_args.buf_drv_to_usr = + (char *)compat_ptr(local_args_compat.buf_drv_to_usr); + local_args.buf_usr_to_drv = + (char *)compat_ptr(local_args_compat.buf_usr_to_drv); + + status = pax_Service_IOCTL(filp, cmd, local_args); + + return status; +} +#endif + +// ************************************************************************** +// +// PAX device file operation definitions (required by kernel) +// +// ************************************************************************** + +/* + * Structure that declares the usual file access functions + * First one is for pax, the control functions + */ +static struct file_operations pax_Fops = { + .owner = THIS_MODULE, + IOCTL_OP = pax_Device_Control, +#if defined(CONFIG_COMPAT) && defined(DRV_EM64T) + .compat_ioctl = pax_Device_Control_Compat, +#endif + .read = NULL, + .write = NULL, + .open = pax_Open, + .release = NULL, + .llseek = NULL, +}; + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int pax_Setup_Cdev()
+ *
+ * @param dev    - pointer to the device object
+ * @param fops   - pointer to the file operations struct
+ * @param devnum - major/minor device number
+ *
+ * @return int
+ *
+ * @brief Set up functions to be handled by PAX device
+ *
+ * Special Notes
+ */
+static int
+pax_Setup_Cdev(PAX_DEV dev, struct file_operations *fops, dev_t devnum)
+{
+	cdev_init(&PAX_DEV_cdev(dev), fops);
+	PAX_DEV_cdev(dev).owner = THIS_MODULE;
+	PAX_DEV_cdev(dev).ops = fops;
+
+	return cdev_add(&PAX_DEV_cdev(dev), devnum, 1);
+}
+
+static int
+pax_version_proc_read(struct seq_file *file, void *v)
+{
+	seq_printf(file, "%u", PAX_VERSION_NODE_version(&pax_version));
+
+	return 0;
+}
+
+static int
+pax_version_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pax_version_proc_read, NULL);
+}
+
+// **************************************************************************
+//
+// Exported PAX functions (see pax.h) ; will appear under /proc/kallsyms
+//
+// **************************************************************************
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn int pax_Load()
+ *
+ * @param none
+ *
+ * @return int
+ *
+ * @brief Load the PAX subsystem
+ *
+ * Special Notes
+ */
+extern int
+pax_Load(VOID)
+{
+	int result;
+
+	PAX_PRINT_DEBUG("checking for %s interface...\n", PAX_NAME);
+
+	/* If the PAX interface does not exist, create it */
+	pax_devnum = MKDEV(0, 0);
+	/* allocate character device region */
+	result = alloc_chrdev_region(&pax_devnum, 0, 1, PAX_NAME);
+	if (result < 0) {
+		PAX_PRINT_ERROR("unable to alloc chrdev_region for %s!\n",
+				PAX_NAME);
+		return result;
+	}
+	PAX_PRINT_DEBUG("got major device %d\n", MAJOR(pax_devnum));
+
+#if !defined(DRV_UDEV_UNAVAILABLE)
+	pax_class = class_create(THIS_MODULE, "pax");
+	if (IS_ERR(pax_class)) {
+		PAX_PRINT_ERROR("Error registering pax class\n");
+	}
+	device_create(pax_class, NULL, pax_devnum, NULL, "pax");
+#endif
+
+	PAX_PRINT_DEBUG("%s major number is %d\n", PAX_NAME, MAJOR(pax_devnum));
+	/* Allocate memory for the PAX control device */
+	pax_control = (PVOID)kmalloc(sizeof(PAX_DEV_NODE), GFP_KERNEL);
+	if (!pax_control) {
+		PAX_PRINT_ERROR("Unable to allocate memory for %s device\n",
+				PAX_NAME);
+		return OS_NO_MEM;
+	}
+	/* Initialize memory for the PAX control device */
+	memset(pax_control, '\0', sizeof(PAX_DEV_NODE));
+	/* Register PAX file operations with the OS */
+	result = pax_Setup_Cdev(pax_control, &pax_Fops, pax_devnum);
+	if (result) {
+		PAX_PRINT_ERROR("Unable to add %s as char device (error=%d)\n",
+				PAX_NAME, result);
+		return result;
+	}
+
+	pax_Init();
+
+	pax_version_file =
+		proc_create("pax_version", 0, NULL, &pax_version_ops);
+	if (pax_version_file == NULL) {
+		PAX_PRINT_ERROR("Unable to create the pax_version proc file\n");
+	}
+
+	//
+	// Display driver version information
+	//
+	PAX_PRINT("PMU arbitration service v%d.%d.%d has been started.\n",
+		  PAX_VERSION_NODE_major(&pax_version),
+		  PAX_VERSION_NODE_minor(&pax_version),
+		  PAX_VERSION_NODE_bugfix(&pax_version));
+
+	return result;
+}
+
+EXPORT_SYMBOL(pax_Load);
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pax_Unload()
+ *
+ * @param none
+ *
+ * @return none
+ *
+ * @brief Unload the PAX subsystem
+ *
+ * Special Notes
+ */
+extern VOID
+pax_Unload(VOID)
+{
+	// warn if unable to unreserve
+	if (pax_Unreserve() != OS_SUCCESS) {
+		PAX_PRINT_WARNING(
+			"Unloading driver with existing reservation ....");
+		PAX_PRINT_WARNING("  guid = 0x%lx\n",
+				  (long unsigned int)pax_status.guid);
+		PAX_PRINT_WARNING("  pid = %ld\n",
+				  (long int)pax_status.pid);
+		PAX_PRINT_WARNING("  start_time = %lu\n",
+				  (long unsigned int)pax_status.start_time);
+		PAX_PRINT_WARNING("  is_reserved = %u\n",
+				  pax_status.is_reserved);
+	}
+
+	// unregister PAX device
+#if !defined(DRV_UDEV_UNAVAILABLE)
+	unregister_chrdev(MAJOR(pax_devnum), "pax");
+	device_destroy(pax_class, pax_devnum);
+	class_destroy(pax_class);
+#endif
+
+	if (pax_control != NULL) {
+		cdev_del(&PAX_DEV_cdev(pax_control));
+		kfree(pax_control);
+	}
+	unregister_chrdev_region(pax_devnum, 1);
+
+	remove_proc_entry("pax_version", NULL);
+
+	//
+	// Display driver version information
+	//
+	PAX_PRINT("PMU arbitration service v%d.%d.%d has been stopped.\n",
+		  PAX_VERSION_NODE_major(&pax_version),
+		  PAX_VERSION_NODE_minor(&pax_version),
+		  PAX_VERSION_NODE_bugfix(&pax_version));
+
+	// clean up resources used by PAX
+	pax_Cleanup();
+
+	return;
+}
+
+EXPORT_SYMBOL(pax_Unload);
+
+/* Declaration of the init and exit functions */
+module_init(pax_Load);
+module_exit(pax_Unload);
+
diff --git a/drivers/platform/x86/sepdk/pax/pax.h b/drivers/platform/x86/sepdk/pax/pax.h
new file mode 100644
index 00000000000000..798d2e9a02d476
--- /dev/null
+++ b/drivers/platform/x86/sepdk/pax/pax.h
@@ -0,0 +1,41 @@
+/****
+    Copyright (C) 2009 Intel Corporation. All Rights Reserved.
+
+    This file is part of SEP Development Kit.
+
+    SEP Development Kit is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    SEP Development Kit is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction. Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License. This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#ifndef _PAX_H_
+#define _PAX_H_
+
+extern int pax_Load(VOID);
+
+extern VOID pax_Unload(VOID);
+
+#endif
+
diff --git a/drivers/platform/x86/sepdk/sep/Makefile b/drivers/platform/x86/sepdk/sep/Makefile
new file mode 100644
index 00000000000000..5d034b05646a52
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/Makefile
@@ -0,0 +1,112 @@
+#
+# Version: 1.6
+#
+# Copyright (C) 2008 Intel Corporation. All Rights Reserved.
+#
+# This file is part of SEP Development Kit
+#
+# SEP Development Kit is free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# SEP Development Kit is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# As a special exception, you may use this file as part of a free software
+# library without restriction. Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License. This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+#
+
+# -------------------- user configurable options ------------------------
+
+# base name of SEP driver (no quotes: make would treat them as literal
+# characters in the object file name)
+DRIVER_NAME = sep5
+
+ccflags-y := -I$(srctree)/drivers/platform/x86/sepdk/include -I$(srctree)/drivers/platform/x86/sepdk/inc -I$(srctree)/drivers/platform/x86/sepdk
+
+ccflags-y += -DSEP_CONFIG_MODULE_LAYOUT
+
+ifeq ($(shell test -e $(srctree)/include/linux/kaiser.h && echo -n YES),YES)
+    ccflags-y += -DKAISER_HEADER_PRESENT
+endif
+
+obj-m := $(DRIVER_NAME).o
+
+ifdef CONFIG_X86_64
+    arch-objs := core2.o \
+                 perfver4.o \
+                 sys64.o \
+                 silvermont.o \
+                 pci.o \
+                 apic.o \
+                 pebs.o \
+                 ipt.o \
+                 unc_gt.o \
+                 unc_mmio.o \
+                 unc_msr.o \
+                 unc_common.o \
+                 unc_pci.o \
+                 sepdrv_p_state.o
+else
+    arch-objs := core2.o \
+                 perfver4.o \
+                 sys32.o \
+                 silvermont.o \
+                 pci.o \
+                 apic.o \
+                 pebs.o \
+                 ipt.o \
+                 unc_gt.o \
+                 unc_mmio.o \
+                 unc_msr.o \
+                 unc_common.o \
+                 unc_pci.o \
+                 sepdrv_p_state.o
+endif
+
+ifdef CONFIG_SEP_PER_USER_MODE
+    ccflags-y += -DSECURE_SEP
+endif
+
+ccflags-y += -DDRV_CPU_HOTPLUG -DDRV_USE_TASKLET_WORKAROUND -DDRV_SAFE_MSR -DDRV_USE_RDPMC
+
+ifdef CONFIG_SEP_BUILD_PMT
+    ccflags-y += -DDRV_PMT_ENABLE
+    pmt-objs := unc_pmt.o
+endif
+
+
+ifdef CONFIG_SEP_MINLOG_MODE
+    ccflags-y += -DDRV_MINIMAL_LOGGING
+else
+    ifdef CONFIG_SEP_MAXLOG_MODE
+        ccflags-y += -DDRV_MAXIMAL_LOGGING
+    endif
+endif
+
+
+$(DRIVER_NAME)-y := \
+	lwpmudrv.o \
+	control.o \
+	cpumon.o \
+	eventmux.o \
+	linuxos.o \
+	output.o \
+	pmi.o \
+	sys_info.o \
+	utility.o \
+	valleyview_sochap.o \
+	unc_power.o \
+	pmu_list.o \
+	unc_sa.o \
+	$(pmt-objs) \
+	$(arch-objs)
+
+
diff --git a/drivers/platform/x86/sepdk/sep/apic.c b/drivers/platform/x86/sepdk/sep/apic.c
new file mode 100644
index 00000000000000..08b6705738d514
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/apic.c
@@ -0,0 +1,212 @@
+/****
+    Copyright (C) 2005 Intel Corporation. All Rights Reserved.
+
+    This file is part of SEP Development Kit.
+
+    SEP Development Kit is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    SEP Development Kit is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction. Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License. This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <asm/apic.h>
+#if defined(CONFIG_XEN) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+#include <xen/xen.h>
+#endif
+#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+#endif
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "apic.h"
+#include "lwpmudrv.h"
+#include "control.h"
+#include "utility.h"
+
+static DEFINE_PER_CPU(unsigned long, saved_apic_lvtpc);
+
+/*!
+ * @fn VOID apic_Get_APIC_ID(S32 cpu)
+ *
+ * @brief Obtain APIC ID
+ *
+ * @param cpu - cpu index
+ *
+ * @return None (the APIC ID is stored in the per-cpu state)
+ */
+VOID
+apic_Get_APIC_ID(S32 cpu)
+{
+	U32 apic_id = 0;
+	CPU_STATE pcpu;
+
+	SEP_DRV_LOG_TRACE_IN("CPU: %d.", cpu);
+	pcpu = &pcb[cpu];
+
+#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
+	if (xen_initial_domain()) {
+		S32 ret = 0;
+		struct xen_platform_op op = {
+			.cmd = XENPF_get_cpuinfo,
+			.interface_version = XENPF_INTERFACE_VERSION,
+			.u.pcpu_info.xen_cpuid = cpu,
+		};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
+		ret = HYPERVISOR_platform_op(&op);
+#else
+		ret = HYPERVISOR_dom0_op(&op);
+#endif
+		if (ret) {
+			SEP_DRV_LOG_ERROR("apic_Get_APIC_ID: Error in reading APIC ID on Xen PV.");
+			apic_id = 0;
+		} else {
+			apic_id = op.u.pcpu_info.apic_id;
+		}
+	} else {
+#endif
+		apic_id = read_apic_id();
+#if defined(CONFIG_XEN_DOM0) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)
+	}
+#endif
+
+	CPU_STATE_apic_id(pcpu) = apic_id;
+
+	SEP_DRV_LOG_TRACE_OUT("Apic_id[%d] is %d.", cpu,
+			      CPU_STATE_apic_id(pcpu));
+}
+
+/*!
+ * @fn extern VOID APIC_Init(param)
+ *
+ * @brief initialize the local APIC
+ *
+ * @param param - dummy parameter which is not used
+ *
+ * @return None
+ *
+ * Special Notes:
+ *         This routine is expected to be called via the CONTROL_Parallel routine
+ */
+extern VOID
+APIC_Init(PVOID param)
+{
+	int me;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	preempt_disable();
+	me = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	apic_Get_APIC_ID(me);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/*!
+ * @fn extern VOID APIC_Install_Interrupt_Handler(param)
+ *
+ * @brief Install the interrupt handler
+ *
+ * @param param - dummy parameter which is not used
+ *
+ * @return None
+ *
+ * Special Notes:
+ *         The linear address is necessary if the LAPIC is used. If X2APIC is
+ *         used the linear address is not necessary.
+ */
+extern VOID
+APIC_Install_Interrupt_Handler(PVOID param)
+{
+	U32 this_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	per_cpu(saved_apic_lvtpc, this_cpu) = apic_read(APIC_LVTPC);
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/*!
+ * @fn extern VOID APIC_Enable_Pmi(void)
+ *
+ * @brief Enable the PMU interrupt
+ *
+ * @param None
+ *
+ * @return None
+ *
+ * Special Notes:
+ *
+ */
+extern VOID
+APIC_Enable_Pmi(VOID)
+{
+	SEP_DRV_LOG_TRACE_IN("");
+
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/*!
+ * @fn extern VOID APIC_Restore_LVTPC(param)
+ *
+ * @brief Restore APIC LVTPC value
+ *
+ * @param param - dummy parameter which is not used
+ *
+ * @return None
+ *
+ * Special Notes:
+ *
+ */
+extern VOID
+APIC_Restore_LVTPC(PVOID param)
+{
+	U32 this_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("");
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	apic_write(APIC_LVTPC, per_cpu(saved_apic_lvtpc, this_cpu));
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
diff --git a/drivers/platform/x86/sepdk/sep/control.c b/drivers/platform/x86/sepdk/sep/control.c
new file mode 100644
index 00000000000000..da8829c2753256
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/control.c
@@ -0,0 +1,885 @@
+/****
+    Copyright (C) 2005 Intel Corporation. All Rights Reserved.
+
+    This file is part of SEP Development Kit.
+
+    SEP Development Kit is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    SEP Development Kit is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction. Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License. This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv.h"
+#include "control.h"
+#include "utility.h"
+#include <asm/page.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \
+	smp_call_function((func), (ctx), (wait))
+#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \
+	smp_call_function_single((cpuid), (func), (ctx), (wait))
+#define ON_EACH_CPU(func, ctx, retry, wait) on_each_cpu((func), (ctx), (wait))
+#else
+#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \
+	smp_call_function((func), (ctx), (retry), (wait))
+#define SMP_CALL_FUNCTION_SINGLE(cpuid, func, ctx, retry, wait) \
+	smp_call_function_single((cpuid), (func), (ctx), (retry), (wait))
+#define ON_EACH_CPU(func, ctx, retry, wait) \
+	on_each_cpu((func), (ctx), (retry), (wait))
+#endif
+
+GLOBAL_STATE_NODE driver_state;
+MSR_DATA msr_data = NULL;
+MEM_TRACKER mem_tr_head = NULL; // start of the mem tracker list
+MEM_TRACKER mem_tr_tail = NULL; // end of the mem tracker list
+spinlock_t mem_tr_lock; // spinlock for the mem tracker list
+static unsigned long flags; // IRQ flags word used with mem_tr_lock
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID CONTROL_Invoke_Cpu (cpu_idx, func, ctx)
+ *
+ * @brief Invoke the named function on the specified CPU
+ *
+ * @param IN cpu_idx - the core id to dispatch this function to
+ *        IN func    - function to be invoked by the specified core(s)
+ *        IN ctx     - pointer to the parameter block for each function
+ *                     invocation
+ *
+ * @return None
+ *
+ * Special Notes:
+ *
+ */
+extern VOID
+CONTROL_Invoke_Cpu(int cpu_idx, VOID (*func)(PVOID), PVOID ctx)
+{
+	SEP_DRV_LOG_TRACE_IN("CPU: %d, function: %p, ctx: %p.", cpu_idx, func,
+			     ctx);
+	SMP_CALL_FUNCTION_SINGLE(cpu_idx, func, ctx, 0, 1);
+	SEP_DRV_LOG_TRACE_OUT("");
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID CONTROL_Invoke_Parallel_Service(func, ctx, blocking, exclude)
+ *
+ * @param func     - function to be invoked by each core in the system
+ * @param ctx      - pointer to the parameter block for each function invocation
+ * @param blocking - wait for the invoked function to complete
+ * @param exclude  - exclude the current core from executing the code
+ *
+ * @returns None
+ *
+ * @brief Service routine to handle all kinds of parallel invoke on all CPU calls
+ *
+ * Special Notes:
+ *         Invoke the function provided in parallel in either a blocking or
+ *         non-blocking mode. The current core may be excluded if desired.
+ *         NOTE - Do not call this function directly from source code.
+ *         Use the aliases CONTROL_Invoke_Parallel(), CONTROL_Invoke_Parallel_NB(),
+ *         or CONTROL_Invoke_Parallel_XS().
+ *
+ */
+extern VOID
+CONTROL_Invoke_Parallel_Service(VOID (*func)(PVOID),
+				PVOID ctx,
+				int blocking,
+				int exclude)
+{
+	SEP_DRV_LOG_TRACE_IN("Fn: %p, ctx: %p, block: %d, excl: %d.", func, ctx,
+			     blocking, exclude);
+
+	GLOBAL_STATE_cpu_count(driver_state) = 0;
+	GLOBAL_STATE_dpc_count(driver_state) = 0;
+
+	if (GLOBAL_STATE_num_cpus(driver_state) == 1) {
+		if (!exclude) {
+			func(ctx);
+		}
+		SEP_DRV_LOG_TRACE_OUT("");
+		return;
+	}
+	if (!exclude) {
+		ON_EACH_CPU(func, ctx, 0, blocking);
+		SEP_DRV_LOG_TRACE_OUT("");
+		return;
+	}
+
+	preempt_disable();
+	SMP_CALL_FUNCTION(func, ctx, 0, blocking);
+	preempt_enable();
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID control_Memory_Tracker_Delete_Node(mem_tr)
+ *
+ * @param IN mem_tr - memory tracker node to delete
+ *
+ * @returns None
+ *
+ * @brief Delete specified node in the memory tracker
+ *
+ * Special Notes:
+ *         Assumes mem_tr_lock is already held while calling this function!
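+ *         E.g. (illustrative): deleting the middle node of
+ *         head <-> mid <-> tail relinks the list as head <-> tail before
+ *         mid's element array and the node itself are freed.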
+ */
+static VOID
+control_Memory_Tracker_Delete_Node(MEM_TRACKER mem_tr)
+{
+	MEM_TRACKER prev_tr = NULL;
+	MEM_TRACKER next_tr = NULL;
+	U32 size = 0;
+
+	SEP_DRV_LOG_ALLOC_IN("");
+
+	if (!mem_tr) {
+		SEP_DRV_LOG_ALLOC_OUT("mem_tr is NULL!");
+		return;
+	}
+	size = MEM_TRACKER_max_size(mem_tr) * sizeof(MEM_EL_NODE);
+	// update the linked list
+	prev_tr = MEM_TRACKER_prev(mem_tr);
+	next_tr = MEM_TRACKER_next(mem_tr);
+	if (prev_tr) {
+		MEM_TRACKER_next(prev_tr) = next_tr;
+	}
+	if (next_tr) {
+		MEM_TRACKER_prev(next_tr) = prev_tr;
+	}
+
+	// free the allocated mem_el array (if any)
+	if (MEM_TRACKER_mem(mem_tr)) {
+		if (MEM_TRACKER_array_vmalloc(mem_tr)) {
+			vfree(MEM_TRACKER_mem(mem_tr));
+		} else {
+			if (size < MAX_KMALLOC_SIZE) {
+				kfree(MEM_TRACKER_mem(mem_tr));
+			} else {
+				free_pages(
+					(unsigned long)MEM_TRACKER_mem(mem_tr),
+					get_order(size));
+			}
+		}
+	}
+
+	// free the mem_tracker node
+	if (MEM_TRACKER_node_vmalloc(mem_tr)) {
+		vfree(mem_tr);
+	} else {
+		kfree(mem_tr);
+	}
+	SEP_DRV_LOG_ALLOC_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn U32 control_Memory_Tracker_Create_Node(void)
+ *
+ * @param None
+ *
+ * @returns OS_SUCCESS if successful, otherwise error
+ *
+ * @brief Initialize the memory tracker
+ *
+ * Special Notes:
+ *         Assumes mem_tr_lock is already held while calling this function!
+ *
+ *         Since this function can be called within either GFP_KERNEL or
+ *         GFP_ATOMIC contexts, the most restrictive allocation is used
+ *         (viz., GFP_ATOMIC).
+ */
+static U32
+control_Memory_Tracker_Create_Node(void)
+{
+	U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE);
+	PVOID location = NULL;
+	MEM_TRACKER mem_tr = NULL;
+
+	SEP_DRV_LOG_ALLOC_IN("");
+
+	// create a mem tracker node
+	mem_tr = (MEM_TRACKER)kmalloc(sizeof(MEM_TRACKER_NODE), GFP_ATOMIC);
+	if (!mem_tr) {
+		mem_tr = (MEM_TRACKER)vmalloc(sizeof(MEM_TRACKER_NODE));
+		if (mem_tr) {
+			MEM_TRACKER_node_vmalloc(mem_tr) = 1;
+		} else {
+			SEP_DRV_LOG_ERROR_ALLOC_OUT("Failed to allocate mem tracker node.");
+			return OS_FAULT;
+		}
+	} else {
+		MEM_TRACKER_node_vmalloc(mem_tr) = 0;
+	}
+	SEP_DRV_LOG_TRACE("Node %p, vmalloc %d.", mem_tr,
+			  MEM_TRACKER_node_vmalloc(mem_tr));
+
+	// create an initial array of mem_el's inside the mem tracker node
+	MEM_TRACKER_array_vmalloc(mem_tr) = 0;
+	if (size < MAX_KMALLOC_SIZE) {
+		location = (PVOID)kmalloc(size, GFP_ATOMIC);
+		SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d).",
+				  location, (S32)size);
+	} else {
+		location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size));
+		SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d).",
+				  location, (S32)size);
+	}
+	if (!location) {
+		location = (PVOID)vmalloc(size);
+		if (location) {
+			MEM_TRACKER_array_vmalloc(mem_tr) = 1;
+			SEP_DRV_LOG_ALLOC("Allocated memory (vmalloc) (0x%p, %d).",
+					  location, (S32)size);
+		} else {
+			if (MEM_TRACKER_node_vmalloc(mem_tr)) {
+				vfree(mem_tr);
+			} else {
+				kfree(mem_tr);
+			}
+			SEP_DRV_LOG_ERROR_ALLOC_OUT("Failed to allocate mem_el array... deleting node.");
+			return OS_FAULT;
+		}
+	}
+
+	// initialize new mem tracker node
+	MEM_TRACKER_mem(mem_tr) = location;
+	MEM_TRACKER_prev(mem_tr) = NULL;
+	MEM_TRACKER_next(mem_tr) = NULL;
+
+	// initialize mem_tracker's mem_el array
+	MEM_TRACKER_max_size(mem_tr) = MEM_EL_MAX_ARRAY_SIZE;
+	MEM_TRACKER_elements(mem_tr) = 0;
+	memset(MEM_TRACKER_mem(mem_tr), 0, size);
+
+	// update the linked list
+	if (!mem_tr_head) {
+		mem_tr_head = mem_tr;
+	} else {
+		MEM_TRACKER_prev(mem_tr) = mem_tr_tail;
+		MEM_TRACKER_next(mem_tr_tail) = mem_tr;
+	}
+	mem_tr_tail = mem_tr;
+
+	SEP_DRV_LOG_ALLOC_OUT("Allocated node=0x%p, max_elements=%d, size=%d.",
+			      MEM_TRACKER_mem(mem_tr_tail),
+			      MEM_EL_MAX_ARRAY_SIZE, size);
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn U32 control_Memory_Tracker_Add(location, size, vmalloc_flag)
+ *
+ * @param IN location     - memory location
+ * @param IN size         - size of the memory to allocate
+ * @param IN vmalloc_flag - flag that indicates if the allocation was done with vmalloc
+ *
+ * @returns OS_SUCCESS if the allocation was tracked, otherwise error
+ *
+ * @brief Keep track of allocated memory with memory tracker
+ *
+ * Special Notes:
+ *         Starting from the first mem_tracker node, the algorithm
+ *         finds the first "hole" in the mem_tracker list and
+ *         tracks the memory allocation there.
+ */
+static U32
+control_Memory_Tracker_Add(PVOID location, ssize_t size, DRV_BOOL vmalloc_flag)
+{
+	S32 i, n;
+	U32 status;
+	DRV_BOOL found;
+	MEM_TRACKER mem_tr;
+
+	SEP_DRV_LOG_ALLOC_IN("Location: %p, size: %u, flag: %u.", location,
+			     (U32)size, vmalloc_flag);
+
+	spin_lock_irqsave(&mem_tr_lock, flags);
+
+	// check if there is space in ANY of mem_tracker's nodes for the memory item
+	mem_tr = mem_tr_head;
+	found = FALSE;
+	status = OS_SUCCESS;
+	i = n = 0;
+	while (mem_tr && (!found)) {
+		if (MEM_TRACKER_elements(mem_tr) <
+		    MEM_TRACKER_max_size(mem_tr)) {
+			for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) {
+				if (!MEM_TRACKER_mem_address(mem_tr, i)) {
+					SEP_DRV_LOG_ALLOC("Found index %d of %d available.",
+							  i,
+							  MEM_TRACKER_max_size(
+								  mem_tr) -
+								  1);
+					n = i;
+					found = TRUE;
+					break;
+				}
+			}
+		}
+		if (!found) {
+			mem_tr = MEM_TRACKER_next(mem_tr);
+		}
+	}
+
+	if (!found) {
+		// extend into (i.e., create a new) mem_tracker node ...
+		status = control_Memory_Tracker_Create_Node();
+		if (status != OS_SUCCESS) {
+			SEP_DRV_LOG_ERROR("Unable to create mem tracker node.");
+			goto finish_add;
+		}
+		// use the mem tracker tail node and the first available entry in its mem_el array
+		mem_tr = mem_tr_tail;
+		n = 0;
+	}
+
+	// we now have a location in mem tracker to keep track of the memory item
+	MEM_TRACKER_mem_address(mem_tr, n) = location;
+	MEM_TRACKER_mem_size(mem_tr, n) = size;
+	MEM_TRACKER_mem_vmalloc(mem_tr, n) = vmalloc_flag;
+	MEM_TRACKER_elements(mem_tr)++;
+	SEP_DRV_LOG_ALLOC("Tracking (0x%p, %d) in node %d of %d.", location,
+			  (S32)size, n, MEM_TRACKER_max_size(mem_tr) - 1);
+
+finish_add:
+	spin_unlock_irqrestore(&mem_tr_lock, flags);
+
+	SEP_DRV_LOG_ALLOC_OUT("Result: %u.", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID CONTROL_Memory_Tracker_Init(void)
+ *
+ * @param None
+ *
+ * @returns None
+ *
+ * @brief Initializes Memory Tracker
+ *
+ * Special Notes:
+ *         This should only be called when the driver is being loaded.
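+ *         A typical lifecycle (sketch, using only functions from this
+ *         file): CONTROL_Memory_Tracker_Init() at driver load,
+ *         CONTROL_Allocate_Memory()/CONTROL_Free_Memory() during
+ *         collection, CONTROL_Memory_Tracker_Compaction() at safe
+ *         sync points, and CONTROL_Memory_Tracker_Free() at driver
+ *         unload.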
+ */ +extern VOID +CONTROL_Memory_Tracker_Init(VOID) +{ + SEP_DRV_LOG_ALLOC_IN("Initializing mem tracker."); + + mem_tr_head = NULL; + mem_tr_tail = NULL; + + spin_lock_init(&mem_tr_lock); + + SEP_DRV_LOG_ALLOC_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID CONTROL_Memory_Tracker_Free(void) + * + * @param None + * + * @returns None + * + * @brief Frees memory used by Memory Tracker + * + * Special Notes: + * This should only be called when the driver is being unloaded. + */ +extern VOID +CONTROL_Memory_Tracker_Free(VOID) +{ + S32 i; + MEM_TRACKER temp; + + SEP_DRV_LOG_ALLOC_IN("Destroying mem tracker."); + + spin_lock_irqsave(&mem_tr_lock, flags); + + // check for any memory that was not freed, and free it + while (mem_tr_head) { + if (MEM_TRACKER_elements(mem_tr_head)) { + for (i = 0; i < MEM_TRACKER_max_size(mem_tr_head); + i++) { + if (MEM_TRACKER_mem_address(mem_tr_head, i)) { + SEP_DRV_LOG_WARNING( + "Index %d of %d, not freed (0x%p, %d) ... freeing now.", + i, + MEM_TRACKER_max_size( + mem_tr_head) - + 1, + MEM_TRACKER_mem_address( + mem_tr_head, i), + MEM_TRACKER_mem_size( + mem_tr_head, i)); + if (MEM_TRACKER_mem_vmalloc(mem_tr_head, + i)) { + vfree(MEM_TRACKER_mem_address( + mem_tr_head, i)); + } else { + free_pages( + (unsigned long) + MEM_TRACKER_mem_address( + mem_tr_head, + i), + get_order(MEM_TRACKER_mem_size( + mem_tr_head, + i))); + } + MEM_TRACKER_mem_address(mem_tr_head, + i) = NULL; + MEM_TRACKER_mem_size(mem_tr_head, i) = + 0; + MEM_TRACKER_mem_vmalloc(mem_tr_head, + i) = 0; + } + } + } + temp = mem_tr_head; + mem_tr_head = MEM_TRACKER_next(mem_tr_head); + control_Memory_Tracker_Delete_Node(temp); + } + + mem_tr_tail = NULL; + + spin_unlock_irqrestore(&mem_tr_lock, flags); + + SEP_DRV_LOG_ALLOC_OUT("Mem tracker destruction complete."); + return; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID CONTROL_Memory_Tracker_Compaction(void) + * + * @param None + * + * @returns None + * + * @brief Compacts the memory allocator if holes are detected + * + * Special Notes: + * The algorithm compacts mem_tracker nodes such that + * node entries are full starting from mem_tr_head + * up until the first empty node is detected, after + * which nodes up to mem_tr_tail will be empty. + * At end of collection (or at other safe sync point), + * we reclaim/compact space used by mem tracker. + */ +extern VOID +CONTROL_Memory_Tracker_Compaction(void) +{ + S32 i, j, n, m, c, d; + DRV_BOOL found, overlap; + MEM_TRACKER mem_tr1, mem_tr2, empty_tr; + + SEP_DRV_LOG_FLOW_IN(""); + + spin_lock_irqsave(&mem_tr_lock, flags); + + mem_tr1 = mem_tr_head; + + i = j = n = c = d = 0; + + /* + * step1: free up the track node which does not contain any elements. 
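+	 *        e.g. (illustrative): with nodes holding [A,-,B] and [-,-,-],
+	 *        step1 unlinks and frees the second, element-less node.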
+	 */
+	while (mem_tr1) {
+		SEP_DRV_LOG_ALLOC("Node %p, index %d, elements %d.", mem_tr1, n,
+				  MEM_TRACKER_elements(mem_tr1));
+		if (MEM_TRACKER_elements(mem_tr1)) {
+			mem_tr1 = MEM_TRACKER_next(mem_tr1);
+		} else {
+			empty_tr = mem_tr1;
+			mem_tr1 = MEM_TRACKER_next(mem_tr1);
+			if (empty_tr == mem_tr_head) {
+				mem_tr_head = mem_tr1;
+			}
+			if (empty_tr == mem_tr_tail) {
+				mem_tr_tail = MEM_TRACKER_prev(empty_tr);
+			}
+			SEP_DRV_LOG_ALLOC("Deleting node %p.", empty_tr);
+			control_Memory_Tracker_Delete_Node(empty_tr);
+			d++;
+		}
+	}
+
+	mem_tr1 = mem_tr_head;
+	mem_tr2 = mem_tr_tail;
+
+	/*
+	 * there is no need to compact if the memory tracker was never used,
+	 * or if it has only one tracker node
+	 */
+	overlap = (mem_tr1 == mem_tr2);
+	if (!mem_tr1 || !mem_tr2 || overlap) {
+		goto finish_compact;
+	}
+
+	/*
+	 * step2: there are at least two tracker nodes.
+	 *        starting from the head node, find an empty element slot in a node;
+	 *        if there is no empty slot or the node is the tail, the compaction is done.
+	 *        find an element in the tail node, and move it into the empty slot found above.
+	 *        if the tail node is empty after the move, free it up.
+	 *        repeat until only one node is left.
+	 */
+	m = MEM_TRACKER_max_size(mem_tr2) - 1;
+	while (!overlap) {
+		// find an empty slot, scanning forward from the head node
+		found = FALSE;
+		while (!found && !overlap && mem_tr1) {
+			SEP_DRV_LOG_TRACE("Looking at mem_tr1 0x%p, index=%d, elements %d.",
+					  mem_tr1, n,
+					  MEM_TRACKER_elements(mem_tr1));
+			if (MEM_TRACKER_elements(mem_tr1) <
+			    MEM_TRACKER_max_size(mem_tr1)) {
+				for (i = n; i < MEM_TRACKER_max_size(mem_tr1);
+				     i++) {
+					if (!MEM_TRACKER_mem_address(mem_tr1,
+								     i)) {
+						SEP_DRV_LOG_TRACE("Found index %d of %d empty.",
+								  i,
+								  MEM_TRACKER_max_size(
+									  mem_tr1) -
+									  1);
+						found = TRUE;
+						break;
+					}
+				}
+			}
+
+			// if no overlap and no empty slot was found, then advance to the next node
+			if (!found) {
+				mem_tr1 = MEM_TRACKER_next(mem_tr1);
+				// check for overlap
+				overlap = (mem_tr1 == mem_tr2);
+				n = 0;
+			}
+		}
+		// all nodes going in the forward direction are full, so exit
+		if (!found || overlap || !mem_tr1) {
+			goto finish_compact;
+		}
+
+		// find an occupied slot, scanning backward from the tail node
+		found = FALSE;
+		while (!found && !overlap && mem_tr2) {
+			SEP_DRV_LOG_ALLOC("Looking at mem_tr2 0x%p, index=%d, elements %d.",
+					  mem_tr2, m,
+					  MEM_TRACKER_elements(mem_tr2));
+			if (MEM_TRACKER_elements(mem_tr2)) {
+				for (j = m; j >= 0; j--) {
+					if (MEM_TRACKER_mem_address(mem_tr2,
+								    j)) {
+						SEP_DRV_LOG_ALLOC("Found index %d of %d non-empty.",
+								  j,
+								  MEM_TRACKER_max_size(
+									  mem_tr2) -
+									  1);
+						found = TRUE;
+						// stop at the highest occupied index;
+						// without this break, j would run down
+						// to -1 and the move below would index
+						// out of bounds
+						break;
+					}
+				}
+			}
+
+			// if no overlap and no occupied slot was found, then retreat to the previous node
+			if (!found) {
+				empty_tr = mem_tr2; // keep track of the empty node
+				mem_tr2 = MEM_TRACKER_prev(mem_tr2);
+				m = MEM_TRACKER_max_size(mem_tr2) - 1;
+				mem_tr_tail = mem_tr2; // keep track of the new tail
+				// reclaim the empty mem_tracker node
+				control_Memory_Tracker_Delete_Node(empty_tr);
+				// keep track of the number of node deletions performed
+				d++;
+				// check for overlap
+				overlap = (mem_tr1 == mem_tr2);
+			}
+		}
+		// all nodes going in the reverse direction are empty, so exit
+		if (!found || overlap || !mem_tr2) {
+			goto finish_compact;
+		}
+
+		// move the occupied entry into the empty slot so that "holes"
+		// get bubbled towards the end of the list
+		MEM_TRACKER_mem_address(mem_tr1, i) =
+			MEM_TRACKER_mem_address(mem_tr2, j);
+		MEM_TRACKER_mem_size(mem_tr1, i) =
+			MEM_TRACKER_mem_size(mem_tr2, j);
+		MEM_TRACKER_mem_vmalloc(mem_tr1, i) =
+			MEM_TRACKER_mem_vmalloc(mem_tr2, j);
+		MEM_TRACKER_elements(mem_tr1)++;
+
+		MEM_TRACKER_mem_address(mem_tr2, j) = NULL;
+		MEM_TRACKER_mem_size(mem_tr2, j) = 0;
+		MEM_TRACKER_mem_vmalloc(mem_tr2, j) = FALSE;
+		MEM_TRACKER_elements(mem_tr2)--;
+
+		SEP_DRV_LOG_ALLOC("Node <%p,elements %d,index %d> moved to <%p,elements %d,index %d>.",
+				  mem_tr2, MEM_TRACKER_elements(mem_tr2), j,
+				  mem_tr1, MEM_TRACKER_elements(mem_tr1), i);
+
+		// keep track of the number of memory compactions performed
+		c++;
+
+		// start new search from the next element in mem_tr1
+		n = i + 1;
+
+		// start new search from the previous element in mem_tr2
+		m = j - 1;
+	}
+
+finish_compact:
+	spin_unlock_irqrestore(&mem_tr_lock, flags);
+
+	SEP_DRV_LOG_FLOW_OUT("Number of elements compacted = %d, nodes deleted = %d.",
+			     c, d);
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID CONTROL_Allocate_Memory(size)
+ *
+ * @param IN size - size of the memory to allocate
+ *
+ * @returns char* - pointer to the allocated memory block
+ *
+ * @brief Allocate and zero memory
+ *
+ * Special Notes:
+ *         Allocate memory in the GFP_KERNEL pool.
+ *
+ *         Use this if memory is to be allocated within a context where
+ *         the allocator can block the allocation (e.g., by putting
+ *         the caller to sleep) while it tries to free up memory to
+ *         satisfy the request. Otherwise, if the allocation must
+ *         occur atomically (e.g., caller cannot sleep), then use
+ *         CONTROL_Allocate_KMemory instead.
+ */
+extern PVOID
+CONTROL_Allocate_Memory(size_t size)
+{
+	U32 status;
+	PVOID location = NULL;
+
+	SEP_DRV_LOG_ALLOC_IN("Attempting to allocate %d bytes.", (S32)size);
+
+	if (size == 0) {
+		SEP_DRV_LOG_WARNING_ALLOC_OUT("Cannot allocate 0 bytes.");
+		return NULL;
+	}
+
+	// determine whether to use mem_tracker or not
+	if (size < MAX_KMALLOC_SIZE) {
+		location = (PVOID)kmalloc(size, GFP_KERNEL);
+		SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d)", location,
+				  (S32)size);
+	}
+	if (!location) {
+		location = (PVOID)vmalloc(size);
+		if (location) {
+			status = control_Memory_Tracker_Add(location, size,
+							    TRUE);
+			SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d)",
+					  location, (S32)size);
+			if (status != OS_SUCCESS) {
+				// failed to track in mem_tracker, so free up the memory and return NULL
+				SEP_DRV_LOG_ERROR("Allocated %db; failed to track w/ MEM_TRACKER. Freeing...",
+						  (S32)size);
+				vfree(location);
+				location = NULL;
+			}
+		}
+	}
+
+	if (!location) {
+		SEP_DRV_LOG_ERROR("Failed to allocate %db.", (S32)size);
+	} else {
+		memset(location, 0, size);
+	}
+
+	SEP_DRV_LOG_ALLOC_OUT("Returning %p.", location);
+	return location;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID CONTROL_Allocate_KMemory(size)
+ *
+ * @param IN size - size of the memory to allocate
+ *
+ * @returns char* - pointer to the allocated memory block
+ *
+ * @brief Allocate and zero memory
+ *
+ * Special Notes:
+ *         Allocate memory in the GFP_ATOMIC pool.
+ *
+ *         Use this if memory is to be allocated within a context where
+ *         the allocator cannot block the allocation (e.g., by putting
+ *         the caller to sleep) as it tries to free up memory to
+ *         satisfy the request. Examples include interrupt handlers,
+ *         process context code holding locks, etc.
+ */
+extern PVOID
+CONTROL_Allocate_KMemory(size_t size)
+{
+	U32 status;
+	PVOID location;
+
+	SEP_DRV_LOG_ALLOC_IN("Attempting to allocate %d bytes.", (S32)size);
+
+	if (size == 0) {
+		SEP_DRV_LOG_ALLOC_OUT("Cannot allocate 0 bytes.");
+		return NULL;
+	}
+
+	if (size < MAX_KMALLOC_SIZE) {
+		location = (PVOID)kmalloc(size, GFP_ATOMIC);
+		SEP_DRV_LOG_ALLOC("Allocated small memory (0x%p, %d)", location,
+				  (S32)size);
+	} else {
+		location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size));
+		if (location) {
+			status = control_Memory_Tracker_Add(location, size,
+							    FALSE);
+			SEP_DRV_LOG_ALLOC("Allocated large memory (0x%p, %d)",
+					  location, (S32)size);
+			if (status != OS_SUCCESS) {
+				// failed to track in mem_tracker, so free up the memory and return NULL
+				SEP_DRV_LOG_ERROR("Allocated %db; failed to track w/ MEM_TRACKER. Freeing...",
+						  (S32)size);
+				free_pages((unsigned long)location,
+					   get_order(size));
+				location = NULL;
+			}
+		}
+	}
+
+	if (!location) {
+		SEP_DRV_LOG_ERROR("Failed to allocate %db.", (S32)size);
+	} else {
+		memset(location, 0, size);
+	}
+
+	SEP_DRV_LOG_ALLOC_OUT("Returning %p.", location);
+	return location;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID CONTROL_Free_Memory(location)
+ *
+ * @param IN location - pointer to the memory block to free
+ *
+ * @returns NULL (for assignment back to the caller's pointer)
+ *
+ * @brief Frees the given memory block
+ *
+ * Special Notes:
+ *         Does not try to free memory if fed with a NULL pointer.
+ *         Expected usage:
+ *             ptr = CONTROL_Free_Memory(ptr);
+ *         Does not do compaction ... can have "holes" in the
+ *         mem_tracker list after this operation.
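+ *         Holes left behind can be reclaimed later by calling
+ *         CONTROL_Memory_Tracker_Compaction() at a safe sync point
+ *         (e.g. at the end of a collection).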
+ */
+extern PVOID
+CONTROL_Free_Memory(PVOID location)
+{
+	S32 i;
+	DRV_BOOL found;
+	MEM_TRACKER mem_tr;
+
+	SEP_DRV_LOG_ALLOC_IN("Attempting to free %p.", location);
+
+	if (!location) {
+		SEP_DRV_LOG_ALLOC_OUT("Cannot free NULL.");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&mem_tr_lock, flags);
+
+	// scan through mem_tracker nodes for matching entry (if any)
+	mem_tr = mem_tr_head;
+	found = FALSE;
+	while (mem_tr) {
+		for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) {
+			if (location == MEM_TRACKER_mem_address(mem_tr, i)) {
+				SEP_DRV_LOG_ALLOC("Freeing large memory location 0x%p",
+						  location);
+				found = TRUE;
+				if (MEM_TRACKER_mem_vmalloc(mem_tr, i)) {
+					vfree(location);
+				} else {
+					free_pages(
+						(unsigned long)location,
+						get_order(MEM_TRACKER_mem_size(
+							mem_tr, i)));
+				}
+				MEM_TRACKER_mem_address(mem_tr, i) = NULL;
+				MEM_TRACKER_mem_size(mem_tr, i) = 0;
+				MEM_TRACKER_mem_vmalloc(mem_tr, i) = 0;
+				MEM_TRACKER_elements(mem_tr)--;
+				goto finish_free;
+			}
+		}
+		mem_tr = MEM_TRACKER_next(mem_tr);
+	}
+
+finish_free:
+	spin_unlock_irqrestore(&mem_tr_lock, flags);
+
+	// not tracked, so it must have been smaller than the size limit for
+	// mem tracker entries, i.e. a plain kmalloc allocation
+	if (!found) {
+		SEP_DRV_LOG_ALLOC("Freeing small memory location 0x%p",
+				  location);
+		kfree(location);
+	}
+
+	SEP_DRV_LOG_ALLOC_OUT("Success. Returning NULL.");
+	return NULL;
+}
+
diff --git a/drivers/platform/x86/sepdk/sep/core2.c b/drivers/platform/x86/sepdk/sep/core2.c
new file mode 100644
index 00000000000000..c1f45afca6dc0d
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/core2.c
@@ -0,0 +1,1481 @@
+/****
+    Copyright (C) 2005 Intel Corporation. All Rights Reserved.
+
+    This file is part of SEP Development Kit.
+
+    SEP Development Kit is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    SEP Development Kit is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction. Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License. This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/smp.h>
+#include <linux/preempt.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "lwpmudrv.h"
+#include "utility.h"
+#include "control.h"
+#include "output.h"
+#include "core2.h"
+#include "ecb_iterators.h"
+#include "pebs.h"
+#include "apic.h"
+
+#if !defined(DRV_ANDROID)
+#include "pci.h"
+#endif
+
+extern EVENT_CONFIG global_ec;
+extern U64 *read_counter_info;
+extern LBR lbr;
+extern DRV_CONFIG drv_cfg;
+extern DEV_CONFIG pcfg;
+extern U64 *interrupt_counts;
+extern DRV_SETUP_INFO_NODE req_drv_setup_info;
+extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
+
+static U32 restore_reg_addr[3];
+
+typedef struct SADDR_S {
+	S64 addr : CORE2_LBR_DATA_BITS;
+} SADDR;
+
+#define SADDR_addr(x) ((x).addr)
+#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void core2_Write_PMU(param)
+ *
+ * @param param dummy parameter which is not used
+ *
+ * @return None No return needed
+ *
+ * @brief Initial set up of the PMU registers
+ *
+ * Special Notes
+ *         Initial write of PMU registers.
+ *         Walk through the entries and write the value of each register accordingly.
+ *         Assumption: For CCCR registers the enable bit is set to value 0.
+ *         When current_group is 0, this is the first time this routine is called;
+ *         initialize the locks and set up the EM tables.
+ */
+static VOID
+core2_Write_PMU(VOID *param)
+{
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	ECB pecb;
+	U32 dev_idx;
+	U32 cur_grp;
+	EVENT_CONFIG ec;
+	DISPATCH dispatch;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	cur_grp = CPU_STATE_current_group(pcpu);
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+	ec = LWPMU_DEVICE_ec(&devices[dev_idx]);
+	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+
+	if (!pecb) {
+		SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group.");
+		return;
+	}
+
+	if (CPU_STATE_current_group(pcpu) == 0) {
+		if (EVENT_CONFIG_mode(ec) != EM_DISABLED) {
+			U32 index;
+			U32 st_index;
+			U32 j;
+
+			/* Save all the initialization values away into an array for Event Multiplexing. */
+			for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) {
+				CPU_STATE_current_group(pcpu) = j;
+				st_index = CPU_STATE_current_group(pcpu) *
+					   EVENT_CONFIG_max_gp_events(ec);
+				FOR_EACH_REG_CORE_OPERATION (
+					pecb, i, PMU_OPERATION_DATA_GP) {
+					index = st_index + i -
+						ECB_operations_register_start(
+							pecb,
+							PMU_OPERATION_DATA_GP);
+					CPU_STATE_em_tables(pcpu)[index] =
+						ECB_entries_reg_value(pecb, i);
+				}
+				END_FOR_EACH_REG_CORE_OPERATION;
+			}
+			/* Reset the current group to the very first one. */
+			CPU_STATE_current_group(pcpu) =
+				this_cpu % EVENT_CONFIG_num_groups(ec);
+		}
+	}
+
+	if (dispatch->hw_errata) {
+		dispatch->hw_errata();
+	}
+
+	FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_ALL_REG) {
+		/*
+		 * Writing the GLOBAL Control register enables the PMU to start counting.
+		 * So write 0 into the register to prevent any counting from starting.
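+		 * Counting is re-enabled later by core2_Enable_PMU(), once
+		 * the driver state is RUNNING.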
+ */ + if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + /* + * PEBS is enabled for this collection session + */ + if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && + i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS) && + ECB_entries_reg_value(pecb, i)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); +#if defined(MYDEBUG) + { + U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Register 0x%x: wrvalue 0x%llx, rdvalue 0x%llx.", + ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i), val); + } +#endif + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Disable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Zero out the global control register. This automatically disables the PMU counters. + * + */ +static VOID +core2_Disable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group."); + return; + } + + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { + SEP_DRV_LOG_TRACE("Driver state is not RUNNING."); + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Enable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Set the enable bit for all the Control registers + * + */ +static VOID +core2_Enable_PMU(PVOID param) +{ + /* + * Get the value from the event block + * 0 == location of the global control reg for this block. 
+	 * Generalize this location awareness when possible
+	 */
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	ECB pecb;
+	U32 dev_idx;
+	U32 cur_grp;
+	DEV_CONFIG pcfg;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	cur_grp = CPU_STATE_current_group(pcpu);
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	if (!pecb) {
+		SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group.");
+		return;
+	}
+
+	if (KVM_guest_mode) {
+		SYS_Write_MSR(ECB_entries_reg_id(
+				      pecb, ECB_SECTION_REG_INDEX(
+						    pecb, GLOBAL_CTRL_REG_INDEX,
+						    PMU_OPERATION_GLOBAL_REGS)),
+			      0LL);
+	}
+	if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) {
+		APIC_Enable_Pmi();
+		if (CPU_STATE_reset_mask(pcpu)) {
+			SEP_DRV_LOG_TRACE("Overflow reset mask %llx.",
+					  CPU_STATE_reset_mask(pcpu));
+			// Reinitialize the global overflow control register
+			SYS_Write_MSR(
+				ECB_entries_reg_id(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, GLOBAL_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)),
+				ECB_entries_reg_value(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, GLOBAL_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)));
+			SYS_Write_MSR(
+				ECB_entries_reg_id(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, DEBUG_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)),
+				ECB_entries_reg_value(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, DEBUG_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)));
+			CPU_STATE_reset_mask(pcpu) = 0LL;
+		}
+		if (CPU_STATE_group_swap(pcpu)) {
+			CPU_STATE_group_swap(pcpu) = 0;
+			SYS_Write_MSR(
+				ECB_entries_reg_id(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, GLOBAL_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)),
+				ECB_entries_reg_value(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, GLOBAL_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)));
+			if (DEV_CONFIG_pebs_mode(pcfg)) {
+				SYS_Write_MSR(
+					ECB_entries_reg_id(
+						pecb,
+						ECB_SECTION_REG_INDEX(
+							pecb,
+							PEBS_ENABLE_REG_INDEX,
+							PMU_OPERATION_GLOBAL_REGS)),
+					ECB_entries_reg_value(
+						pecb,
+						ECB_SECTION_REG_INDEX(
+							pecb,
+							PEBS_ENABLE_REG_INDEX,
+							PMU_OPERATION_GLOBAL_REGS)));
+			}
+			SYS_Write_MSR(
+				ECB_entries_reg_id(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, DEBUG_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)),
+				ECB_entries_reg_value(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, DEBUG_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)));
+#if defined(MYDEBUG)
+			{
+				U64 val;
+				val = SYS_Read_MSR(ECB_entries_reg_id(
+					pecb,
+					ECB_SECTION_REG_INDEX(
+						pecb, GLOBAL_CTRL_REG_INDEX,
+						PMU_OPERATION_GLOBAL_REGS)));
+				SEP_DRV_LOG_TRACE("Write reg 0x%x --- read 0x%llx.",
+						  ECB_entries_reg_id(
+							  pecb,
+							  ECB_SECTION_REG_INDEX(
+								  pecb,
+								  GLOBAL_CTRL_REG_INDEX,
+								  PMU_OPERATION_GLOBAL_REGS)),
+						  val);
+			}
+#endif
+		}
+	}
+	SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.",
+			  ECB_entries_reg_value(pecb, 0));
+
+	SEP_DRV_LOG_TRACE_OUT("");
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn core2_Read_PMU_Data(param, dev_idx)
+ *
+ * @param param   - pointer to the buffer that receives the counter values
+ * @param dev_idx - index of the device being read
+ *
+ * @return None No return needed
+ *
+ * @brief Read all the data MSR's into a buffer. Called by the interrupt handler.
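+ *        Each counter value is stored at a buffer offset derived from
+ *        this CPU's thread offset and the event id, e.g. (illustrative):
+ *        buffer[EMON_BUFFER_CORE_EVENT_OFFSET(thread_offset, event_id)].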
+ * + */ +static void +core2_Read_PMU_Data(PVOID param, U32 dev_idx) +{ + U32 j; + U64 *buffer = (U64 *)param; + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 cur_grp; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group."); + return; + } + + SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", + LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), + buffer); + LFENCE_SERIALIZE(); + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + j = EMON_BUFFER_CORE_EVENT_OFFSET( + EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( + emon_buffer_driver_helper)[this_cpu], + ECB_entries_core_event_id(pecb, i)); + buffer[j] = SYS_Read_PMC(ECB_entries_reg_id(pecb, i), + ECB_entries_fixed_reg_get(pecb, i)); + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, + buffer[j], this_cpu, + ECB_entries_core_event_id(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + LFENCE_SERIALIZE(); + + SEP_DRV_LOG_TRACE_OUT(""); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void core2_Check_Overflow(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. + * + */ +static void +core2_Check_Overflow(DRV_MASKS masks) +{ + U32 index; + U64 overflow_status = 0; + U32 this_cpu; + BUFFER_DESC bd; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + DISPATCH dispatch; + U64 overflow_status_clr = 0; + DRV_EVENT_MASK_NODE event_flag; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + bd = &cpu_buf[this_cpu]; + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group."); + return; + } + + // initialize masks + DRV_MASKS_masks_num(masks) = 0; + + overflow_status = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, + PMU_OPERATION_GLOBAL_STATUS))); + + if (DEV_CONFIG_pebs_mode(pcfg)) { + overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); + } + overflow_status_clr = overflow_status; + + if (dispatch->check_overflow_gp_errata) { + overflow_status = dispatch->check_overflow_gp_errata( + pecb, &overflow_status_clr); + } + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, + overflow_status); + index = 0; + BUFFER_DESC_sample_count(bd) = 0; + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + if (dispatch->check_overflow_errata) { + overflow_status = + dispatch->check_overflow_errata( + pecb, i, overflow_status); + } + } else if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + } else { + continue; + } + if (overflow_status & ((U64)1 << index)) { + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", + 
this_cpu, index); + SEP_DRV_LOG_TRACE("register 0x%x --- val 0%llx.", + ECB_entries_reg_id(pecb, i), + SYS_Read_MSR( + ECB_entries_reg_id(pecb, i))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. */ + if (interrupt_counts) { + interrupt_counts + [this_cpu * + DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (ECB_entries_fixed_reg_get(pecb, i)) { + CPU_STATE_p_state_counting(pcpu) = 1; + } + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_uncore_get(pecb, i)) { + DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + if (ECB_entries_em_trigger_get(pecb, i)) { + DRV_EVENT_MASK_trigger(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_EVENT_MASK_desc_id( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_desc_id(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR("The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("slot# %d, reg_id 0x%x, index %d.", i, + ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + // Reinitialize the global overflow control register + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + overflow_status_clr); + + SEP_DRV_LOG_TRACE("Check Overflow completed %d.", this_cpu); + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn core2_Swap_Group(restart) + * + * @param restart dummy parameter which is not used + * + * @return None No return needed + * + * @brief Perform the mechanics of swapping the event groups for event mux operations + * + * Special Notes + * Swap function for event multiplexing. + * Freeze the counting. + * Swap the groups. + * Enable the counting. 
+ * Reset the event trigger count + * + */ +static VOID +core2_Swap_Group(DRV_BOOL restart) +{ + U32 index; + U32 next_group; + U32 st_index; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + DISPATCH dispatch; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + + if (!DEV_CONFIG_num_events(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("No events for this device."); + return; + } + + st_index = + CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); + next_group = (CPU_STATE_current_group(pcpu) + 1); + if (next_group >= EVENT_CONFIG_num_groups(ec)) { + next_group = 0; + } + + SEP_DRV_LOG_TRACE("current group : 0x%x.", + CPU_STATE_current_group(pcpu)); + SEP_DRV_LOG_TRACE("next group : 0x%x.", next_group); + + // Save the counters for the current group + if (!DRV_CONFIG_event_based_counts(drv_cfg)) { + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_GP) { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + CPU_STATE_em_tables(pcpu)[index] = + SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + CPU_STATE_current_group(pcpu) = next_group; + + if (dispatch->hw_errata) { + dispatch->hw_errata(); + } + + // First write the GP control registers (eventsel) + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_CTRL_GP) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_event_based_counts(drv_cfg)) { + // In EBC mode, reset the counts for all events except for trigger event + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_event_id_index(pecb, i) != + CPU_STATE_trigger_event_num(pcpu)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + } else { + // Then write the gp count registers + st_index = CPU_STATE_current_group(pcpu) * + EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_GP) { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + SEP_DRV_LOG_TRACE("Restore value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_OCR) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + /* + * reset the em factor when a group is swapped + */ + CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); + + /* + * The enable routine needs to rewrite the control registers + */ + CPU_STATE_reset_mask(pcpu) = 0LL; + CPU_STATE_group_swap(pcpu) = 1; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn core2_Initialize(params) + * + * @param params dummy parameter which is not used + * + * @return None No return needed + * + * @brief Initialize the PMU setting up for collection + * + * Special Notes + * Saves the relevant PMU state (minimal set of MSRs required + * to avoid conflicts with other Linux tools, such as Oprofile). + * This function should be called in parallel across all CPUs + * prior to the start of sampling, before PMU state is changed. + * + */ +static VOID +core2_Initialize(VOID *param) +{ + U32 this_cpu, j; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + ECB pecb = NULL; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!DEV_CONFIG_num_events(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("No events for this device."); + return; + } + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group."); + return; + } + + pcpu = &pcb[this_cpu]; + CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3); + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to save PMU state on CPU %d.", + this_cpu); + return; + } + + // Looping through the groups to identify a non-NULL ECB as the cur_grp + // may have a NULL ECB + for (j = 0; j < LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); j++) { + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[j]; + if (!pecb) { + continue; + } else { + break; + } + } + + if (pecb == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to save PMU state on CPU %d.", + this_cpu); + return; + } + + restore_reg_addr[0] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + restore_reg_addr[1] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + restore_reg_addr[2] = ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)); + // save the original PMU state on this CPU (NOTE: must only be called ONCE per collection) + CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]); + CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]); + CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]); + + if (DRV_CONFIG_ds_area_available(drv_cfg) && + DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + + SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[0]); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[1]); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x%llx.", + CPU_STATE_pmu_state(pcpu)[2]); + +#if !defined(DRV_ANDROID) + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Not socket master."); + return; + } +#endif + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn core2_Destroy(params) + * + * @param params dummy parameter which is not used + * + * @return None No return needed + * + * @brief Reset the PMU setting up after collection + * + * Special Notes + * Restores the previously saved PMU state done in core2_Initialize. + * This function should be called in parallel across all CPUs + * after sampling collection ends/terminates. 
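+ *
+ * Illustrative pairing (a sketch based on the code in this file, not
+ * additional behavior): core2_Initialize saves the three MSRs that
+ * core2_Destroy later clears, i.e.
+ *
+ * core2_Initialize: pmu_state[i] = SYS_Read_MSR(restore_reg_addr[i]);
+ * core2_Destroy: SYS_Write_MSR(restore_reg_addr[i], 0);
+ *
+ * for IA32_DEBUG_CTRL, IA32_PERF_GLOBAL_CTRL and IA32_FIXED_CTRL.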
+ * + */ +static VOID +core2_Destroy(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN(""); + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group."); + return; + } + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to restore PMU state on CPU %d.", + this_cpu); + return; + } + + SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x0."); + + // Tentative code below (trying to avoid race conditions with the NMI watchdog). Should be evaluated in the coming few days. (2018/05/21) + SYS_Write_MSR(restore_reg_addr[0], 0); + SYS_Write_MSR(restore_reg_addr[1], 0); + SYS_Write_MSR(restore_reg_addr[2], 0); + + CPU_STATE_pmu_state(pcpu) = NULL; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * @fn corei7_Read_LBRs(buffer) + * + * @param IN buffer - pointer to the buffer to write the data into + * @return Last branch source IP address + * + * @brief Read all the LBR registers into the buffer provided and return + * + */ +static U64 +corei7_Read_LBRs(VOID *buffer) +{ + U32 i, count = 0; + U64 *lbr_buf = NULL; + U64 value = 0; + U64 tos_ip_addr = 0; + U64 tos_ptr = 0; + SADDR saddr; + U32 pairs = 0; + U32 this_cpu; + U32 dev_idx; + LBR lbr; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); + + if (lbr == NULL) { + SEP_DRV_LOG_TRACE_OUT("No lbr for this device."); + return tos_ip_addr; + } + + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf = (U64 *)buffer; + } + + if (LBR_num_entries(lbr) > 0) { + pairs = (LBR_num_entries(lbr) - 1) / 2; + } + for (i = 0; i < LBR_num_entries(lbr); i++) { + value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = value; + } + SEP_DRV_LOG_TRACE("I: %u, value: 0x%llx.", i, value); + if (i == 0) { + tos_ptr = value; + } else { + if (LBR_entries_etype(lbr, i) == + LBR_ENTRY_FROM_IP) { // LBR from register + if (tos_ptr == count) { + SADDR_addr(saddr) = + value & CORE2_LBR_BITMASK; + tos_ip_addr = (U64)SADDR_addr( + saddr); // Add signed extension + SEP_DRV_LOG_TRACE("tos_ip_addr %llu, 0x%llx.", + tos_ptr, value); + } + count++; + } + } + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf++; + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); + return tos_ip_addr; +} + +static VOID +core2_Clean_Up(VOID *param) +{ +#if !defined(DRV_ANDROID) + U32 this_cpu; + CPU_STATE pcpu; +#endif + + SEP_DRV_LOG_TRACE_IN(""); + +#if !defined(DRV_ANDROID) + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; +#endif + + FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS (pecb, i, + PMU_OPERATION_ALL_REG) { + if (ECB_entries_clean_up_get(pecb, i)) { + SEP_DRV_LOG_TRACE("clean up set --- RegId --- %x.", + ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS; + +#if !defined(DRV_ANDROID) + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Not socket master."); + return; + } +#endif + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +static VOID +corei7_Errata_Fix(void) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + 
CPU_STATE pcpu = &pcb[this_cpu];
+ ECB pecb = NULL;
+ U32 dev_idx, cur_grp;
+ DEV_CONFIG pcfg;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ dev_idx = core_to_dev_map[this_cpu];
+ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+ cur_grp = CPU_STATE_current_group(pcpu);
+ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+
+ if (!DEV_CONFIG_num_events(pcfg)) {
+ SEP_DRV_LOG_TRACE_OUT("No events for this device.");
+ return;
+ }
+
+ if (DEV_CONFIG_pebs_mode(pcfg)) {
+ SYS_Write_MSR(ECB_entries_reg_id(
+ pecb, ECB_SECTION_REG_INDEX(
+ pecb, PEBS_ENABLE_REG_INDEX,
+ PMU_OPERATION_GLOBAL_REGS)),
+ 0LL);
+ }
+
+ FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_HW_ERRATA) {
+ SYS_Write_MSR(ECB_entries_reg_id(pecb, i),
+ ECB_entries_reg_value(pecb, i));
+ }
+ END_FOR_EACH_REG_CORE_OPERATION;
+
+ SEP_DRV_LOG_TRACE_OUT("");
+
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void core2_Check_Overflow_Htoff_Mode(masks)
+ *
+ * @param masks the mask structure to populate
+ *
+ * @return None No return needed
+ *
+ * @brief Called by the data processing method to figure out which registers have overflowed.
+ *
+ */
+static void
+core2_Check_Overflow_Htoff_Mode(DRV_MASKS masks)
+{
+ U32 index;
+ U64 value = 0;
+ U64 overflow_status = 0;
+ U32 this_cpu;
+ BUFFER_DESC bd;
+ CPU_STATE pcpu;
+ U32 dev_idx;
+ U32 cur_grp;
+ DISPATCH dispatch;
+ DEV_CONFIG pcfg;
+ ECB pecb;
+ U64 overflow_status_clr = 0;
+ DRV_EVENT_MASK_NODE event_flag;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ this_cpu = CONTROL_THIS_CPU();
+ bd = &cpu_buf[this_cpu];
+ pcpu = &pcb[this_cpu];
+ dev_idx = core_to_dev_map[this_cpu];
+ cur_grp = CPU_STATE_current_group(pcpu);
+ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+ pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+
+ SEP_DRV_LOG_TRACE("");
+
+ if (!pecb) {
+ SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group.");
+ return;
+ }
+
+ // initialize masks
+ DRV_MASKS_masks_num(masks) = 0;
+
+ overflow_status = SYS_Read_MSR(ECB_entries_reg_id(
+ pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX,
+ PMU_OPERATION_GLOBAL_STATUS)));
+
+ if (DEV_CONFIG_pebs_mode(pcfg)) {
+ overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0);
+ }
+ overflow_status_clr = overflow_status;
+ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu,
+ overflow_status);
+ index = 0;
+ BUFFER_DESC_sample_count(bd) = 0;
+
+ if (dispatch->check_overflow_gp_errata) {
+ overflow_status = dispatch->check_overflow_gp_errata(
+ pecb, &overflow_status_clr);
+ }
+
+ FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) {
+ if (ECB_entries_fixed_reg_get(pecb, i)) {
+ index = i -
+ ECB_operations_register_start(
+ pecb, PMU_OPERATION_DATA_FIXED) +
+ 0x20;
+ } else if (ECB_entries_is_gp_reg_get(pecb, i) &&
+ ECB_entries_reg_value(pecb, i) != 0) {
+ index = i - ECB_operations_register_start(
+ pecb, PMU_OPERATION_DATA_GP);
+ if (i >= (ECB_operations_register_start(
+ pecb, PMU_OPERATION_DATA_GP) +
+ 4) &&
+ i <= (ECB_operations_register_start(
+ pecb, PMU_OPERATION_DATA_GP) +
+ 7)) {
+ value = SYS_Read_MSR(
+ ECB_entries_reg_id(pecb, i));
+ if (value > 0 && value <= 0x100000000LL) {
+ overflow_status |= ((U64)1 << index);
+ }
+ }
+ } else {
+ continue;
+ }
+ if (overflow_status & ((U64)1 << index)) {
+ SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.",
+ this_cpu, index);
SEP_DRV_LOG_TRACE("register 0x%x --- val 0%llx.", + ECB_entries_reg_id(pecb, i), + SYS_Read_MSR( + ECB_entries_reg_id(pecb, i))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. */ + if (interrupt_counts) { + interrupt_counts + [this_cpu * + DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (ECB_entries_fixed_reg_get(pecb, i)) { + CPU_STATE_p_state_counting(pcpu) = 1; + } + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + if (ECB_entries_em_trigger_get(pecb, i)) { + DRV_EVENT_MASK_trigger(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_EVENT_MASK_desc_id( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_desc_id(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR("The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("slot# %d, reg_id 0x%x, index %d.", i, + ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + // Reinitialize the global overflow control register + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_OVF_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + overflow_status_clr); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn core2_Read_Counts(param, id) + * + * @param param The read thread node to process + * @param id The event id for the which the sample is generated + * + * @return None No return needed + * + * @brief Read CPU event based counts for the events with reg value=0 and store into the buffer param; + * + */ +static VOID +core2_Read_Counts(PVOID param, U32 id) +{ + U64 *data; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + U32 event_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!DEV_CONFIG_num_events(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("No events for this device."); + return; + } + + if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { + // Write GroupID + data = (U64 *)((S8 *)param + + DEV_CONFIG_ebc_group_id_offset(pcfg)); + *data = CPU_STATE_current_group(pcpu) + 1; + } + + LFENCE_SERIALIZE(); + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_counter_event_offset(pecb, i) == 0) { + continue; + } + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, i)); + event_id = ECB_entries_event_id_index(pecb, i); + if (event_id == id) { + *data = ~(ECB_entries_reg_value(pecb, i) - 1) & + ECB_entries_max_bits(pecb, i); + ; + } else { + *data = SYS_Read_PMC(ECB_entries_reg_id(pecb, i), + ECB_entries_fixed_reg_get(pecb, + i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + LFENCE_SERIALIZE(); + + if (DRV_CONFIG_enable_p_state(drv_cfg)) { + CPU_STATE_p_state_counting(pcpu) = 0; + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn corei7_Check_Overflow_Errata(pecb) + * + * @param pecb: The current event control block + * @param overflow_status: current overflow mask + * + * @return Updated Event mask of the overflowed registers. + * + * @brief There is a bug where highly correlated precise events do + * not raise an indication on overflows in Core i7 and SNB. 
+ */ +static U64 +corei7_Check_Overflow_Errata(ECB pecb, U64 *overflow_status_clr) +{ + U64 index = 0, value = 0, overflow_status = 0; + + SEP_DRV_LOG_TRACE_IN("PECB: %p, overflow_status_clr: %p.", pecb, + overflow_status_clr); + + overflow_status = *overflow_status_clr; + + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_reg_value(pecb, i) == 0) { + continue; + } + if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + value = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + if (value > 0LL && value <= 0x100000000LL) { + overflow_status |= ((U64)1 << index); + *overflow_status_clr |= ((U64)1 << index); + SEP_DRV_LOG_TRACE("Counter 0x%x value 0x%llx.", + ECB_entries_reg_id(pecb, i), + value); + } + continue; + } + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + if (!(overflow_status & ((U64)1 << index))) { + value = SYS_Read_MSR( + ECB_entries_reg_id(pecb, i)); + if (ECB_entries_reg_id(pecb, i) == + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, 0, + PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA))) { + if (!(value > 0LL && + value <= 0x1000000LL) && + (*overflow_status_clr & + ((U64)1 << index))) { + //Clear it only for overflow_status so that we do not create sample records + //Please do not remove the check for MSR index + overflow_status = + overflow_status & + ~((U64)1 << index); + continue; + } + } + if (value > 0LL && value <= 0x100000000LL) { + overflow_status |= ((U64)1 << index); + *overflow_status_clr |= + ((U64)1 << index); + SEP_DRV_LOG_TRACE("counter 0x%x value 0x%llx\n", + ECB_entries_reg_id( + pecb, i), + value); + } + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT("Res = %llu.", overflow_status); + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 corei7_Read_Platform_Info + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param void + * + * @return value read from the register + * + * Special Notes: + * + */ +static VOID +corei7_Platform_Info(PVOID data) +{ + DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; + U64 value = 0; + + SEP_DRV_LOG_TRACE_IN("Data: %p.", data); + + if (!platform_data) { + SEP_DRV_LOG_TRACE_OUT("Platform_data is NULL!"); + return; + } + + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = 0; + +#define IA32_MSR_PLATFORM_INFO 0xCE + value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); + + DRV_PLATFORM_INFO_info(platform_data) = value; + DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; +#undef IA32_MSR_PLATFORM_INFO +#define IA32_MSR_MISC_ENABLE 0x1A4 + DRV_PLATFORM_INFO_misc_valid(platform_data) = 1; + value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE); + DRV_PLATFORM_INFO_misc_info(platform_data) = value; +#undef IA32_MSR_MISC_ENABLE + SEP_DRV_LOG_TRACE("Read from MSR_ENERGY_MULTIPLIER reg is %llu.", + SYS_Read_MSR(MSR_ENERGY_MULTIPLIER)); + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = + (U32)(SYS_Read_MSR(MSR_ENERGY_MULTIPLIER) & 0x00001F00) >> 8; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE corei7_dispatch = { + .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = core2_Enable_PMU, + .read_data = core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow, + .swap_group = core2_Swap_Group, + .read_lbrs = corei7_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = corei7_Errata_Fix, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = corei7_Check_Overflow_Errata, + .read_ro = NULL, + .platform_info = corei7_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; + +DISPATCH_NODE corei7_dispatch_htoff_mode = { + .init = core2_Initialize, + .fini = core2_Destroy, + .write = core2_Write_PMU, + .freeze = core2_Disable_PMU, + .restart = core2_Enable_PMU, + .read_data = core2_Read_PMU_Data, + .check_overflow = core2_Check_Overflow_Htoff_Mode, + .swap_group = core2_Swap_Group, + .read_lbrs = corei7_Read_LBRs, + .cleanup = core2_Clean_Up, + .hw_errata = corei7_Errata_Fix, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = core2_Read_Counts, + .check_overflow_gp_errata = corei7_Check_Overflow_Errata, + .read_ro = NULL, + .platform_info = corei7_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; + diff --git a/drivers/platform/x86/sepdk/sep/cpumon.c b/drivers/platform/x86/sepdk/sep/cpumon.c new file mode 100644 index 00000000000000..bfded75a12a4e3 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/cpumon.c @@ -0,0 +1,354 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. 
Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +/* + * CVS_Id="$Id$" + */ + +#include "lwpmudrv_defines.h" +#include +#include +#if defined(DRV_EM64T) +#include +#endif + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "apic.h" +#include "lwpmudrv.h" +#include "control.h" +#include "utility.h" +#include "cpumon.h" +#include "pmi.h" +#include "sys_info.h" + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) +#include +static int +cpumon_NMI_Handler(unsigned int cmd, struct pt_regs *regs) +{ + U32 captured_state = GET_DRIVER_STATE(); + + if (DRIVER_STATE_IN(captured_state, STATE_BIT_RUNNING | + STATE_BIT_PAUSING | + STATE_BIT_PREPARE_STOP | + STATE_BIT_TERMINATING)) { + if (captured_state != DRV_STATE_TERMINATING) { + PMI_Interrupt_Handler(regs); + } + return NMI_HANDLED; + } else { + return NMI_DONE; + } +} + +#define EBS_NMI_CALLBACK cpumon_NMI_Handler + +#else +#include +static int +cpumon_NMI_Handler(struct notifier_block *self, unsigned long val, void *data) +{ + struct die_args *args = (struct die_args *)data; + U32 captured_state = GET_DRIVER_STATE(); + + if (args) { + switch (val) { + case DIE_NMI: +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + case DIE_NMI_IPI: +#endif + if (DRIVER_STATE_IN(captured_state, + STATE_BIT_RUNNING | + STATE_BIT_PAUSING | + STATE_BIT_PREPARE_STOP | + STATE_BIT_TERMINATING)) { + if (captured_state != DRV_STATE_TERMINATING) { + PMI_Interrupt_Handler(args->regs); + } + return NOTIFY_STOP; + } + } + } + return NOTIFY_DONE; +} + +static struct notifier_block cpumon_notifier = { + .notifier_call = cpumon_NMI_Handler, + .next = NULL, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)) + .priority = 2 +#else + .priority = NMI_LOCAL_LOW_PRIOR, +#endif +}; +#endif + +static volatile S32 cpuhook_installed = 0; + +/* + * CPU Monitoring Functionality + */ + +/* + * General per-processor initialization + */ +#if defined(DRV_CPU_HOTPLUG) +/* ------------------------------------------------------------------------- */ +/*! + * @fn DRV_BOOL CPUMON_is_Online_Allowed() + * + * @param None + * + * @return DRV_BOOL TRUE if cpu is allowed to go Online, else FALSE + * + * @brief Checks if the cpu is allowed to go online during the + * @brief current driver state + * + */ +extern DRV_BOOL +CPUMON_is_Online_Allowed() +{ + U32 cur_driver_state; + DRV_BOOL is_allowed = FALSE; + + SEP_DRV_LOG_TRACE_IN(""); + + cur_driver_state = GET_DRIVER_STATE(); + + switch (cur_driver_state) { + case DRV_STATE_IDLE: + case DRV_STATE_PAUSED: + case DRV_STATE_RUNNING: + case DRV_STATE_PAUSING: + is_allowed = TRUE; + break; + default: + SEP_DRV_LOG_TRACE("CPU is prohibited to online in driver state %d.", + cur_driver_state); + break; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", is_allowed); + return is_allowed; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn DRV_BOOL CPUMON_is_Offline_Allowed() + * + * @param None + * + * @return DRV_BOOL TRUE if cpu is allowed to go Offline, else FALSE + * + * @brief Checks if the cpu is allowed to go offline during the + * @brief current driver state + * + */ +extern DRV_BOOL +CPUMON_is_Offline_Allowed() +{ + U32 cur_driver_state; + DRV_BOOL is_allowed = FALSE; + + SEP_DRV_LOG_TRACE_IN(""); + + cur_driver_state = GET_DRIVER_STATE(); + + switch (cur_driver_state) { + case DRV_STATE_PAUSED: + case DRV_STATE_RUNNING: + case DRV_STATE_PAUSING: + is_allowed = TRUE; + break; + default: + SEP_DRV_LOG_TRACE("CPU is prohibited to offline in driver state %d.", + cur_driver_state); + break; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", is_allowed); + return is_allowed; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID CPUMON_Online_Cpu( + * PVOID param) + * + * @param PVOID parm + * + * @return None + * + * @brief Sets a cpu online, initialize APIC on it, + * @brief Build the sys_info for this cpu + * + */ +extern VOID +CPUMON_Online_Cpu(PVOID parm) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Dummy parm: %p.", parm); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + if (pcpu == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to set CPU %d online!", + this_cpu); + return; + } + SEP_DRV_LOG_INIT("Setting CPU %d online, PCPU = %p.", this_cpu, pcpu); + CPU_STATE_offlined(pcpu) = FALSE; + CPU_STATE_accept_interrupt(pcpu) = 1; + CPU_STATE_initial_mask(pcpu) = 1; + CPU_STATE_group_swap(pcpu) = 1; + APIC_Init(NULL); + APIC_Install_Interrupt_Handler(NULL); + + SYS_INFO_Build_Cpu(NULL); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID CPUMON_Offline_Cpu( + * PVOID param) + * + * @param PVOID parm + * + * @return None + * + * @brief Sets a cpu offline + * + */ +extern VOID +CPUMON_Offline_Cpu(PVOID parm) +{ + U32 cpu_idx; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Dummy parm: %p.", parm); + + cpu_idx = *(U32 *)parm; + pcpu = &pcb[cpu_idx]; + + if (pcpu == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to set CPU %d offline.", + cpu_idx); + return; + } + SEP_DRV_LOG_INIT("Setting CPU %d offline.", cpu_idx); + CPU_STATE_offlined(pcpu) = TRUE; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void CPUMON_Install_Cpuhooks(void) + * + * @param None + * + * @return None No return needed + * + * @brief set up the interrupt handler (on a per-processor basis) + * @brief Initialize the APIC in two phases (current CPU, then others) + * + */ +extern VOID +CPUMON_Install_Cpuhooks(void) +{ + S32 me = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + if (cpuhook_installed) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Cpuhook already installed."); + return; + } + + CONTROL_Invoke_Parallel(APIC_Init, NULL); + CONTROL_Invoke_Parallel(APIC_Install_Interrupt_Handler, + (PVOID)(size_t)me); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + register_nmi_handler(NMI_LOCAL, EBS_NMI_CALLBACK, 0, "sep_pmi"); +#else + register_die_notifier(&cpumon_notifier); +#endif + + cpuhook_installed = 1; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern void CPUMON_Remove_Cpuhooks(void)
+ *
+ * @param None
+ *
+ * @return None No return needed
+ *
+ * @brief De-Initialize the APIC in phases
+ * @brief clean up the interrupt handler (on a per-processor basis)
+ *
+ */
+extern VOID
+CPUMON_Remove_Cpuhooks(void)
+{
+ SEP_DRV_LOG_TRACE_IN("");
+ CONTROL_Invoke_Parallel(APIC_Restore_LVTPC, NULL);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ unregister_nmi_handler(NMI_LOCAL, "sep_pmi");
+#else
+ unregister_die_notifier(&cpumon_notifier);
+#endif
+
+ cpuhook_installed = 0;
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
diff --git a/drivers/platform/x86/sepdk/sep/eventmux.c b/drivers/platform/x86/sepdk/sep/eventmux.c
new file mode 100644
index 00000000000000..6ef2e8af11f9d3
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/eventmux.c
@@ -0,0 +1,486 @@
+/****
+ Copyright (C) 2005 Intel Corporation. All Rights Reserved.
+
+ This file is part of SEP Development Kit.
+
+ SEP Development Kit is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation.
+
+ SEP Development Kit is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ As a special exception, you may use this file as part of a free software
+ library without restriction. Specifically, if other files instantiate
+ templates or use macros or inline functions from this file, or you compile
+ this file and link it with other files to produce an executable, this
+ file does not by itself cause the resulting executable to be covered by
+ the GNU General Public License. This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include
+#include
+#include
+#include
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+#include "lwpmudrv.h"
+#include "control.h"
+#include "utility.h"
+
+static PVOID em_tables = NULL;
+static size_t em_tables_size = 0;
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID eventmux_Allocate_Groups (
+ * VOID *params
+ * )
+ *
+ * @brief Allocate the memory needed to support event multiplexing
+ *
+ * @param params - pointer to a S32 that holds the size of buffer to allocate
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ * Allocate the memory needed to save different group counters
+ * Called via the parallel control mechanism
+ */
+static VOID
+eventmux_Allocate_Groups(PVOID params)
+{
+ U32 this_cpu;
+ CPU_STATE cpu_state;
+ U32 dev_idx;
+ EVENT_CONFIG ec;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ preempt_disable();
+ this_cpu = CONTROL_THIS_CPU();
+ cpu_state = &pcb[this_cpu];
+ dev_idx = core_to_dev_map[this_cpu];
+ ec = LWPMU_DEVICE_ec(&devices[dev_idx]);
+ preempt_enable();
+
+ if (ec == NULL) {
+ return;
+ }
+
+ if (EVENT_CONFIG_mode(ec) == EM_DISABLED ||
+ EVENT_CONFIG_num_groups(ec) == 1) {
+ return;
+ }
+
+ CPU_STATE_em_tables(cpu_state) =
+ em_tables + CPU_STATE_em_table_offset(cpu_state);
+
+ SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID eventmux_Deallocate_Groups (
+ * VOID *params
+ * )
+ *
+ * @brief Free the scratch memory needed to support event multiplexing
+ *
+ * @param params - pointer to NULL
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ * Free the memory needed to save different group counters
+ * Called via the parallel control mechanism
+ */
+static VOID
+eventmux_Deallocate_Groups(PVOID params)
+{
+ U32 this_cpu;
+ CPU_STATE cpu_state;
+ U32 dev_idx;
+ EVENT_CONFIG ec;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ preempt_disable();
+ this_cpu = CONTROL_THIS_CPU();
+ cpu_state = &pcb[this_cpu];
+ dev_idx = core_to_dev_map[this_cpu];
+ ec = LWPMU_DEVICE_ec(&devices[dev_idx]);
+ preempt_enable();
+
+ if (ec == NULL) {
+ return;
+ }
+
+ if (EVENT_CONFIG_mode(ec) == EM_DISABLED ||
+ EVENT_CONFIG_num_groups(ec) == 1) {
+ return;
+ }
+
+ CPU_STATE_em_tables(cpu_state) = NULL;
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID eventmux_Timer_Callback_Thread (
+ * )
+ *
+ * @brief Timer callback that swaps the event group on this CPU
+ *
+ * @param none
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ * timer routine - The event multiplexing happens here.
+ */
+static VOID
+eventmux_Timer_Callback_Thread(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *tl
+#else
+ unsigned long arg
+#endif
+)
+{
+ U32 this_cpu;
+ CPU_STATE pcpu;
+ U32 dev_idx;
+ DISPATCH dispatch;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ SEP_DRV_LOG_TRACE_IN("");
+#else
+ SEP_DRV_LOG_TRACE_IN("Arg: %u.", (U32)arg);
+#endif
+
+ preempt_disable();
+ this_cpu = CONTROL_THIS_CPU();
+ pcpu = &pcb[this_cpu];
+ dev_idx = core_to_dev_map[this_cpu];
+ dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+ preempt_enable();
+
+ if (CPU_STATE_em_tables(pcpu) == NULL) {
+ SEP_DRV_LOG_ERROR_TRACE_OUT("Em_tables is NULL!");
+ return;
+ }
+
+ dispatch->swap_group(TRUE);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ mod_timer(CPU_STATE_em_timer(pcpu),
+ jiffies + CPU_STATE_em_timer_delay(pcpu));
+#else
+ CPU_STATE_em_timer(pcpu)->expires = jiffies + arg;
+ add_timer(CPU_STATE_em_timer(pcpu));
+#endif
+
+ SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID eventmux_Prepare_Timer_Threads (
+ * VOID
+ * )
+ *
+ * @brief Set up the per-cpu timers used for event multiplexing
+ *
+ * @param NONE
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ * Set up the timer threads to prepare for event multiplexing.
+ * Do not start the threads as yet
+ */
+static VOID
+eventmux_Prepare_Timer_Threads(PVOID arg)
+{
+ U32 this_cpu;
+ CPU_STATE pcpu;
+ U32 dev_idx;
+ EVENT_CONFIG ec;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ // initialize and set up the timer for all cpus
+ // Do not start the timer as yet.
+ preempt_disable();
+ this_cpu = CONTROL_THIS_CPU();
+ pcpu = &pcb[this_cpu];
+ dev_idx = core_to_dev_map[this_cpu];
+ ec = LWPMU_DEVICE_ec(&devices[dev_idx]);
+ preempt_enable();
+
+ if (ec == NULL) {
+ return;
+ }
+
+ if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED) {
+ return;
+ }
+
+ CPU_STATE_em_timer(pcpu) = (struct timer_list *)CONTROL_Allocate_Memory(
+ sizeof(struct timer_list));
+
+ if (CPU_STATE_em_timer(pcpu) == NULL) {
+ SEP_DRV_LOG_ERROR_TRACE_OUT("Timer allocation failed!");
+ return;
+ }
+
+ SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
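+ * Kernel-version note (summarizing the callback above, nothing new): on
+ * kernels >= 4.15 the callback re-arms itself with
+ *
+ *     mod_timer(CPU_STATE_em_timer(pcpu),
+ *               jiffies + CPU_STATE_em_timer_delay(pcpu));
+ *
+ * whereas older kernels set 'expires' and call add_timer() again.
+ */
+/*!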
+ * @fn VOID eventmux_Cancel_Timers ( + * VOID + * ) + * + * @brief Stop all the timer threads and terminate them + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Cancel all the timer threads that have been started + */ +static VOID +eventmux_Cancel_Timers(VOID) +{ + CPU_STATE pcpu; + S32 i; + U32 dev_idx; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + /* + * Cancel the timer for all active CPUs + */ + for (i = 0; i < GLOBAL_STATE_active_cpus(driver_state); i++) { + pcpu = &pcb[i]; + dev_idx = core_to_dev_map[i]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + if (ec == NULL) { + continue; + } + if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED) { + continue; + } + del_timer_sync(CPU_STATE_em_timer(pcpu)); + CPU_STATE_em_timer(pcpu) = + (struct timer_list *)CONTROL_Free_Memory( + CPU_STATE_em_timer(pcpu)); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID eventmux_Start_Timers ( + * long unsigned arg + * ) + * + * @brief Start the timer on a single cpu + * + * @param delay interval time in jiffies + * + * @return NONE + * + * Special Notes: + * start the timer on a single cpu + * Call from each cpu to get cpu affinity for Timer_Callback_Thread + */ +static VOID +eventmux_Start_Timers(PVOID arg) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + EVENT_CONFIG ec; + unsigned long delay; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + preempt_enable(); + + if (ec == NULL) { + return; + } + + if (EVENT_CONFIG_mode(ec) != EM_TIMER_BASED || + EVENT_CONFIG_num_groups(ec) == 1) { + return; + } + + /* + * notice we want to use group 0's time slice for the initial timer + */ + delay = msecs_to_jiffies(EVENT_CONFIG_em_factor(ec)); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + CPU_STATE_em_timer_delay(pcpu) = delay; + timer_setup(CPU_STATE_em_timer(pcpu), eventmux_Timer_Callback_Thread, + 0); + mod_timer(CPU_STATE_em_timer(pcpu), + jiffies + CPU_STATE_em_timer_delay(pcpu)); +#else + init_timer(CPU_STATE_em_timer(pcpu)); + CPU_STATE_em_timer(pcpu)->function = eventmux_Timer_Callback_Thread; + CPU_STATE_em_timer(pcpu)->data = delay; + CPU_STATE_em_timer(pcpu)->expires = jiffies + delay; + add_timer(CPU_STATE_em_timer(pcpu)); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID EVENTMUX_Start ( + * VOID + * ) + * + * @brief Start the timers and enable all the threads + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * if event multiplexing has been enabled, set up the time slices and + * start the timer threads for all the timers + */ +extern VOID +EVENTMUX_Start(VOID) +{ + SEP_DRV_LOG_TRACE_IN(""); + + /* + * Start the timer for all cpus + */ + CONTROL_Invoke_Parallel(eventmux_Start_Timers, NULL); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
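+ * Sizing sketch (mirrors EVENTMUX_Initialize below): each CPU
+ * contributes
+ *
+ *     EVENT_CONFIG_num_groups(ec) * EVENT_CONFIG_max_gp_events(ec) *
+ *     sizeof(S64)
+ *
+ * bytes to em_tables; CPU_STATE_em_table_offset records where each
+ * CPU's save area begins within that single allocation.
+ */
+/*!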
+ * @fn VOID EVENTMUX_Initialize ( + * VOID + * ) + * + * @brief Initialize the event multiplexing module + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * if event multiplexing has been enabled, + * then allocate the memory needed to save and restore all the counter data + * set up the timers needed, but do not start them + */ +extern VOID +EVENTMUX_Initialize(VOID) +{ + S32 size_of_vector; + S32 cpu_num; + CPU_STATE pcpu; + U32 dev_idx; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN(""); + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + pcpu = &pcb[cpu_num]; + dev_idx = core_to_dev_map[cpu_num]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + if (ec == NULL) { + continue; + } + if (EVENT_CONFIG_mode(ec) == EM_DISABLED || + EVENT_CONFIG_num_groups(ec) == 1) { + continue; + } + size_of_vector = EVENT_CONFIG_num_groups(ec) * + EVENT_CONFIG_max_gp_events(ec) * sizeof(S64); + CPU_STATE_em_table_offset(pcpu) = em_tables_size; + em_tables_size += size_of_vector; + } + + if (em_tables_size) { + em_tables = CONTROL_Allocate_Memory(em_tables_size); + } + CONTROL_Invoke_Parallel(eventmux_Allocate_Groups, NULL); + + CONTROL_Invoke_Parallel(eventmux_Prepare_Timer_Threads, + (VOID *)(size_t)0); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID EVENTMUX_Destroy ( + * VOID + * ) + * + * @brief Clean up the event multiplexing threads + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * if event multiplexing has been enabled, then stop and cancel all the timers + * free up all the memory that is associated with EM + */ +extern VOID +EVENTMUX_Destroy(VOID) +{ + SEP_DRV_LOG_TRACE_IN(""); + + eventmux_Cancel_Timers(); + + if (em_tables) { + em_tables = CONTROL_Free_Memory(em_tables); + em_tables_size = 0; + } + CONTROL_Invoke_Parallel(eventmux_Deallocate_Groups, (VOID *)(size_t)0); + + SEP_DRV_LOG_TRACE_OUT(""); +} + diff --git a/drivers/platform/x86/sepdk/sep/ipt.c b/drivers/platform/x86/sepdk/sep/ipt.c new file mode 100644 index 00000000000000..4afca800b809f3 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/ipt.c @@ -0,0 +1,650 @@ +/**** + Copyright (C) 2012 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/
+
+
+
+
+
+
+
+
+
+#include "inc/master.h"
+
+#define ipt_node_on_cpu(cpu) CPU_STATE_ipt_node(&pcb[cpu])
+#define ipt_node_mine CPU_STATE_ipt_node(&pcb[this_cpu])
+
+extern IPT_CONFIG ipt_config;
+static IPT_DISPATCH ipt_dispatch = NULL;
+static U64 ipt_enable_value = 0ULL;
+static U32 ipt_buffer_num = 0;
+static U64 cr3match_value = 0ULL;
+static U64 ipt_buffer_size_mask = 0ULL;
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID ipt_Initialize (PVOID)
+ *
+ * @brief Initialize IPT
+ *
+ * @param param - current cpu id
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ */
+static VOID
+ipt_Initialize(PVOID param)
+{
+ S32 i;
+ U64 value;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ CPU_STATE_ipt_collected_count(&pcb[i]) = 0;
+ CPU_STATE_ipt_dropped_count(&pcb[i]) = 0;
+ CPU_STATE_ipt_dropped_packets(&pcb[i]) = 0;
+ CPU_STATE_ipt_data_offset(&pcb[i]) = 0;
+ }
+
+ SEP_DRV_LOG_TRACE("Reset IPT MSRs to clear existing settings");
+ value = SYS_Read_MSR(IA32_RTIT_CTL);
+ SYS_Write_MSR(IA32_RTIT_CTL, value & ~IA32_RTIT_CTL_TRACEEN);
+ SYS_Write_MSR(IA32_RTIT_CTL, 0ULL);
+ SYS_Write_MSR(IA32_RTIT_STATUS, 0ULL);
+ SYS_Write_MSR(IA32_RTIT_OUTPUT_BASE, 0ULL);
+ SYS_Write_MSR(IA32_RTIT_OUTPUT_MASK_PTRS, 0ULL);
+
+ SEP_DRV_LOG_TRACE("VMX on IPT supported: %llu",
+ (SYS_Read_MSR(IA32_VMX_MISC) >> 14) & 0x1ULL);
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID ipt_Finalize (PVOID)
+ *
+ * @brief Finalize IPT
+ *
+ * @param param - current cpu id
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ */
+static VOID
+ipt_Finalize(PVOID param)
+{
+ U64 value;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ SEP_DRV_LOG_TRACE("Reset IPT MSRs to clear existing settings");
+ value = SYS_Read_MSR(IA32_RTIT_CTL);
+ SYS_Write_MSR(IA32_RTIT_CTL, value & ~IA32_RTIT_CTL_TRACEEN);
+ SYS_Write_MSR(IA32_RTIT_CTL, 0ULL);
+ SYS_Write_MSR(IA32_RTIT_STATUS, 0ULL);
+ SYS_Write_MSR(IA32_RTIT_OUTPUT_BASE, 0ULL);
+ SYS_Write_MSR(IA32_RTIT_OUTPUT_MASK_PTRS, 0ULL);
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
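+ * ToPA table sketch (an illustration of the programming done in
+ * ipt_Enable below; the attribute bits follow the SDM ToPA entry
+ * format):
+ *
+ *     topa[j]             = outbuf_phys[j];      output regions
+ *     topa[num / 4 * 3]  |= 0x04;                INT: interrupt threshold
+ *     topa[num - 1]      |= 0x10;                STOP: stop on last buffer
+ *     topa[num]           = topa_pa | 0x1;       END: loop back to table
+ */
+/*!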
+ * @fn VOID ipt_Enable (S32) + * + * @brief Enable IPT + * + * @param this_cpu current cpu id + * + * @return NONE + * + * Special Notes: + */ +static VOID +ipt_Enable(S32 this_cpu) +{ + U64 value, collection = 0ULL; + + SEP_DRV_LOG_TRACE_IN(""); + + if (ipt_node_mine.topa_virt_address) { + value = SYS_Read_MSR(IA32_RTIT_CTL); + SYS_Write_MSR(IA32_RTIT_CTL, value & ~IA32_RTIT_CTL_TRACEEN); + + // Disable LBRs and BTS because IPT can't be enabled with LBR on hardware + SYS_Write_MSR(IA32_DEBUG_CTRL, 0ULL); + + // ToPA interrupt threshold + ((U64 *)ipt_node_mine + .topa_virt_address)[ipt_buffer_num / 4 * 3] |= 0x04; + // ToPA stop buffering + ((U64 *)ipt_node_mine.topa_virt_address)[ipt_buffer_num - 1] |= + 0x10; + // End of buffer + ((U64 *)ipt_node_mine.topa_virt_address)[ipt_buffer_num] = + ipt_node_mine.topa_phys_address | 0x1; + + SYS_Write_MSR(IA32_RTIT_STATUS, 0ULL); + SYS_Write_MSR(IA32_RTIT_OUTPUT_BASE, + ipt_node_mine.topa_phys_address); + SYS_Write_MSR(IA32_RTIT_OUTPUT_MASK_PTRS, 0x7F); + + if (cr3match_value) { + SYS_Write_MSR(IA32_RTIT_CR3_MATCH, cr3match_value); + } + collection = IA32_RTIT_CTL_TOPA | IA32_RTIT_CTL_TSCEN | + ipt_enable_value; + SYS_Write_MSR(IA32_RTIT_CTL, collection); + SEP_DRV_LOG_TRACE("IA32_RTIT_STATUS = 0x%llx", + SYS_Read_MSR(IA32_RTIT_STATUS)); + SYS_Write_MSR(IA32_RTIT_CTL, + collection | IA32_RTIT_CTL_TRACEEN); + + SEP_DRV_LOG_TRACE("collection = %llx", collection); + } else { + SEP_DRV_LOG_ERROR("invalid ToPA buffer"); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID ipt_Disable (S32) + * + * @brief Disable IPT + * + * @param this_cpu current cpu id + * + * @return NONE + * + * Special Notes: + */ +static VOID +ipt_Disable(S32 this_cpu) +{ + U64 value; + + SEP_DRV_LOG_TRACE_IN(""); + + SEP_DRV_LOG_TRACE("disable IPT collection"); + value = SYS_Read_MSR(IA32_RTIT_CTL); + SYS_Write_MSR(IA32_RTIT_CTL, value & ~IA32_RTIT_CTL_TRACEEN); + SYS_Write_MSR(IA32_RTIT_CTL, 0ULL); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
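+ * Offset decode sketch (matching the computation in ipt_Flush below):
+ * the pending byte count comes from IA32_RTIT_OUTPUT_MASK_PTRS,
+ *
+ *     size  = (msr >> 32) & ipt_buffer_size_mask;             output offset
+ *     size += (msr & RTIT_OUTPUT_MASK_PTRS_TABLEOFFSET) << 5; table offset
+ */
+/*!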
+ * @fn VOID ipt_Flush () + * + * @brief Flush ToPA buffer to internal buffer + * + * @param this_cpu - cpu id + * + * @return NONE + * + */ +static U64 +ipt_Flush(S32 this_cpu) +{ + U32 data_size; + S32 copied_size; + DRV_IPT_DROP_INFO_NODE ipt_drop_info; + + SEP_DRV_LOG_TRACE_IN(""); + + // Copy packets to driver buffer for flushing + data_size = + (U32)(((U64)SYS_Read_MSR(IA32_RTIT_OUTPUT_MASK_PTRS) >> 32) & + ipt_buffer_size_mask); + data_size += (U32)(((U64)SYS_Read_MSR(IA32_RTIT_OUTPUT_MASK_PTRS) & + RTIT_OUTPUT_MASK_PTRS_TABLEOFFSET) + << 5); + + SEP_DRV_LOG_TRACE("IA32_RTIT_OUTPUT_MASK_PTRS = 0x%llx", + SYS_Read_MSR(IA32_RTIT_OUTPUT_MASK_PTRS)); + SEP_DRV_LOG_TRACE("IA32_RTIT_STATUS = 0x%llx", + SYS_Read_MSR(IA32_RTIT_STATUS)); + + if (data_size == 0) { + SEP_DRV_LOG_ERROR_TRACE_OUT("No IPT packet available to flush"); + return 0; + } + + copied_size = + OUTPUT_IPT_Fill(ipt_node_mine.outbuf_virt_address, data_size); + if ((U32)copied_size < data_size) { + SEP_DRV_LOG_ERROR("IPT packet loss copied_size = %d, data_size = %u", + copied_size, data_size); + CPU_STATE_ipt_dropped_count(&pcb[this_cpu])++; + CPU_STATE_ipt_dropped_packets(&pcb[this_cpu]) += + (data_size - copied_size); + + memset(&ipt_drop_info, 0, sizeof(DRV_IPT_DROP_INFO_NODE)); + DRV_IPT_DROP_INFO_length(ipt_drop_info) = + sizeof(DRV_IPT_DROP_INFO_NODE); + DRV_IPT_DROP_INFO_descriptor_id(ipt_drop_info) = + IPT_BUFFER_DROP; + DRV_IPT_DROP_INFO_drop_size(ipt_drop_info) = data_size; + DRV_IPT_DROP_INFO_offset(ipt_drop_info) = + CPU_STATE_ipt_data_offset(&pcb[this_cpu]); + + if (OUTPUT_IPT_Info_Fill((char *)&ipt_drop_info, + sizeof(DRV_IPT_DROP_INFO_NODE)) < + sizeof(DRV_IPT_DROP_INFO_NODE)) { + SEP_DRV_LOG_WARNING("Failed to flush the IPT buffer information"); + } + } + + if (copied_size) { + CPU_STATE_ipt_collected_count(&pcb[this_cpu])++; + SEP_DRV_LOG_TRACE_OUT("copied_size = %d", copied_size); + CPU_STATE_ipt_data_offset(&pcb[this_cpu]) += copied_size; + } else { + SEP_DRV_LOG_ERROR_TRACE_OUT("Failed to copy packets from HW buffer"); + } + + return copied_size; +} + +/* + * Initialize the IPT micro dispatch tables + */ +IPT_DISPATCH_NODE gen2_ipt = { + .init = ipt_Initialize, + .fini = ipt_Finalize, + .enable = ipt_Enable, + .disable = ipt_Disable, + .flush = ipt_Flush +}; + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS ipt_Allocate_Buffers (VOID)
+ *
+ * @brief Allocate memory and set up MSRs in preparation for IPT
+ *
+ * @param NONE
+ *
+ * @return OS_STATUS status
+ *
+ */
+static OS_STATUS
+ipt_Allocate_Buffers(VOID)
+{
+ S32 i;
+ U32 j;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ ipt_node_on_cpu(i).topa_virt_address =
+ CONTROL_Allocate_Memory_Aligned((size_t)IPT_BUF_SIZE,
+ TRUE);
+ if (!(ipt_node_on_cpu(i).topa_virt_address)) {
+ SEP_DRV_LOG_ERROR_TRACE_OUT("IPT buffer allocation failed!");
+ return OS_NO_MEM;
+ }
+ ipt_node_on_cpu(i).topa_phys_address =
+ VIRT_TO_PHYS_ADDR(ipt_node_on_cpu(i).topa_virt_address);
+ ipt_node_on_cpu(i).outbuf_virt_address =
+ CONTROL_Allocate_Memory_Aligned(
+ (size_t)IPT_BUF_SIZE * ipt_buffer_num, TRUE);
+ if (!(ipt_node_on_cpu(i).outbuf_virt_address)) {
+ SEP_DRV_LOG_ERROR_TRACE_OUT("IPT buffer allocation failed!");
+ return OS_NO_MEM;
+ }
+ SEP_DRV_LOG_TRACE("cpu%d topa_va=%llx, topa_pa=%llx", i,
+ ipt_node_on_cpu(i).topa_virt_address,
+ ipt_node_on_cpu(i).topa_phys_address);
+
+ for (j = 0; j < ipt_buffer_num; j++) {
+ ipt_node_on_cpu(i)
+ .outbuf_phys_address[j] = VIRT_TO_PHYS_ADDR(
+ (PVOID)((U64)(ipt_node_on_cpu(i)
+ .outbuf_virt_address) +
+ j * IPT_BUF_SIZE));
+ ((U64 *)ipt_node_on_cpu(i).topa_virt_address)[j] =
+ ipt_node_on_cpu(i).outbuf_phys_address[j];
+ SEP_DRV_LOG_TRACE("cpu%d buffer%d outbuf_pa=%llx", i, j,
+ ipt_node_on_cpu(i)
+ .outbuf_phys_address[j]);
+ }
+ }
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID ipt_Deallocate_Buffers (VOID)
+ *
+ * @brief Clean up IPT buffers
+ *
+ * @param NONE
+ *
+ * @return NONE
+ *
+ */
+static VOID
+ipt_Deallocate_Buffers(VOID)
+{
+ S32 i;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ if (ipt_node_on_cpu(i).topa_virt_address) {
+ ipt_node_on_cpu(i).topa_virt_address =
+ CONTROL_Free_Memory_Aligned(
+ ipt_node_on_cpu(i).topa_virt_address,
+ TRUE);
+ ipt_node_on_cpu(i).topa_phys_address = 0ULL;
+ }
+ if (ipt_node_on_cpu(i).outbuf_virt_address) {
+ ipt_node_on_cpu(i).outbuf_virt_address =
+ CONTROL_Free_Memory_Aligned(
+ ipt_node_on_cpu(i).outbuf_virt_address,
+ TRUE);
+ memset(ipt_node_on_cpu(i).outbuf_phys_address, 0,
+ sizeof(U64) * IPT_BUF_NUM_MAX);
+ }
+ }
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
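+ * PMI handling order (a summary of the function below, nothing new): on
+ * a ToPA PMI the driver performs
+ *
+ *     ipt_dispatch->disable(this_cpu);
+ *     data_size = ipt_dispatch->flush(this_cpu);
+ *     SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL,
+ *                   PERF_GLOBAL_STATUS_TOPA_PMI_MASK);
+ *     ipt_dispatch->enable(this_cpu);
+ */
+/*!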
+ * @fn U64 IPT_TOPA_Interrupted (this_cpu, overflow_status) + * + * @brief Figure out if the IPT ToPA interrupt occurs + * + * @param this_cpu -- the current cpu + * overflow_status -- current value of the global overflow status + * + * @return data size from IPT ToPA buffer + */ +extern U64 +IPT_TOPA_Interrupted(S32 this_cpu, U64 overflow_status) +{ + U64 data_size = 0; + + SEP_DRV_LOG_TRACE_IN("this_cpus: %d, overflow_status: %llu", this_cpu, + overflow_status); + + if (!(overflow_status & PERF_GLOBAL_STATUS_TOPA_PMI_MASK)) { + SEP_DRV_LOG_TRACE_OUT("overflow_status: %llu", overflow_status); + return 0; + } + + if (!ipt_dispatch || !ipt_dispatch->flush || !ipt_dispatch->enable || + !ipt_dispatch->disable) { + SEP_DRV_LOG_TRACE_OUT("early exit"); + return 0; + } + + SEP_DRV_LOG_TRACE("IA32_RTIT_STATUS = 0x%llx", + SYS_Read_MSR(IA32_RTIT_STATUS)); + + ipt_dispatch->disable(this_cpu); + + data_size = ipt_dispatch->flush(this_cpu); + + SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL, + PERF_GLOBAL_STATUS_TOPA_PMI_MASK); + + ipt_dispatch->enable(this_cpu); + + SEP_DRV_LOG_TRACE_OUT("data_size: %llu", data_size); + return data_size; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 IPT_Start (PVOID param) + * + * @brief Start IPT collection + * + * @param PVOID param + * + * @return NONE + */ +extern VOID +IPT_Start(PVOID param) +{ + S32 this_cpu; + + SEP_DRV_LOG_TRACE_IN(""); + + if (!ipt_dispatch || !ipt_dispatch->enable) { + SEP_DRV_LOG_TRACE_OUT("early exit"); + return; + } + + this_cpu = CONTROL_THIS_CPU(); + + ipt_dispatch->enable(this_cpu); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 IPT_TOPA_Flush (PVOID param) + * + * @brief Flush the remaining IPT ToPA buffer + * + * @param PVOID param + * + * @return NONE + */ +extern VOID +IPT_TOPA_Flush(PVOID param) +{ + S32 this_cpu; + U64 data_size = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + if (!ipt_dispatch || !ipt_dispatch->flush || !ipt_dispatch->disable) { + SEP_DRV_LOG_TRACE_OUT("early exit"); + return; + } + + this_cpu = CONTROL_THIS_CPU(); + + SEP_DRV_LOG_TRACE("IA32_RTIT_STATUS = 0x%llx", + SYS_Read_MSR(IA32_RTIT_STATUS)); + + ipt_dispatch->disable(this_cpu); + data_size = ipt_dispatch->flush(this_cpu); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
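+ * RTIT_CTL assembly sketch (an illustration using the flag macros from
+ * the function below): for example, branch tracing of user and kernel
+ * code with TSC packets would accumulate
+ *
+ *     ipt_enable_value = IA32_RTIT_CTL_BRANCHEN | IA32_RTIT_CTL_USER |
+ *                        IA32_RTIT_CTL_OS | IA32_RTIT_CTL_TSCEN;
+ */
+/*!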
+ * @fn OS_STATUS IPT_Initialize (DRV_CONFIG pcfg)
+ *
+ * @brief Initialize the IPT buffers
+ *
+ * @param pcfg - Driver Configuration
+ *
+ * @return OS_STATUS status
+ */
+extern OS_STATUS
+IPT_Initialize(DRV_CONFIG pcfg)
+{
+ OS_STATUS status = OS_SUCCESS;
+ S32 this_cpu;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ this_cpu = CONTROL_THIS_CPU();
+
+ if (IPT_CONFIG_branch(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_BRANCHEN;
+ SEP_DRV_LOG_TRACE("branch enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_power(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_PWREVTEN;
+ SEP_DRV_LOG_TRACE("power enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_usr(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_USER;
+ SEP_DRV_LOG_TRACE("user enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_os(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_OS;
+ SEP_DRV_LOG_TRACE("os enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_cyc(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_CYCEN;
+ ipt_enable_value |= IPT_CONFIG_cycthresh(ipt_config) << 19;
+ SEP_DRV_LOG_TRACE("cyc enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_mtc(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_MTCEN;
+ ipt_enable_value |= IPT_CONFIG_mtcfreq(ipt_config) << 14;
+ SEP_DRV_LOG_TRACE("mtc enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_tsc(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_TSCEN;
+ SEP_DRV_LOG_TRACE("tsc enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_psbfreq(ipt_config)) {
+ ipt_enable_value |= IPT_CONFIG_psbfreq(ipt_config) << 24;
+ SEP_DRV_LOG_TRACE("psbfreq enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_cr3filter(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_CR3FILTER;
+ cr3match_value = IPT_CONFIG_cr3match(ipt_config);
+ SEP_DRV_LOG_TRACE("cr3filter enabled 0x%llx",
+ ipt_enable_value);
+ SEP_DRV_LOG_TRACE("cr3match 0x%llx", cr3match_value);
+ }
+ if (IPT_CONFIG_ptw(ipt_config)) {
+ ipt_enable_value |= IA32_RTIT_CTL_PTWEN;
+ SEP_DRV_LOG_TRACE("ptw enabled 0x%llx", ipt_enable_value);
+ }
+ if (IPT_CONFIG_ringbuf(ipt_config)) {
+ SEP_DRV_LOG_TRACE("ringbuf enabled");
+ }
+ switch (IPT_CONFIG_bufsize(ipt_config)) {
+ case IPT_BUFFER_SMALL:
+ ipt_buffer_num = IPT_BUF_NUM_SMALL;
+ break;
+ case IPT_BUFFER_LARGE:
+ ipt_buffer_num = IPT_BUF_NUM_LARGE;
+ break;
+ default:
+ ipt_buffer_num = IPT_BUF_NUM_DEFAULT;
+ break;
+ }
+ SEP_DRV_LOG_TRACE("bufsize set to %u", ipt_buffer_num);
+ ipt_buffer_size_mask = ipt_buffer_num * 4096 - 1;
+ SEP_DRV_LOG_TRACE("buf size mask set to 0x%llx", ipt_buffer_size_mask);
+
+ SEP_DRV_LOG_TRACE("ipt_enable_value 0x%llx, ipt_buffer_num = %u",
+ ipt_enable_value, ipt_buffer_num);
+
+ if (DRV_CONFIG_ipt_mode(pcfg)) {
+ SEP_DRV_LOG_TRACE("collection mode=%u",
+ DRV_CONFIG_ipt_mode(pcfg));
+ switch (DRV_CONFIG_ipt_mode(pcfg)) {
+ case 2:
+ SEP_DRV_LOG_TRACE("Set up the 2nd gen IPT dispatch table");
+ ipt_dispatch = &gen2_ipt;
+ break;
+ default:
+ SEP_DRV_LOG_TRACE("Unknown IPT generation. Will not collect IPT information");
+ break;
+ }
+ if (ipt_dispatch) {
+ if (ipt_dispatch->init) {
+ CONTROL_Invoke_Parallel(ipt_dispatch->init,
+ (PVOID)(size_t)this_cpu);
+ }
+ status = ipt_Allocate_Buffers();
+ }
+ }
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
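+ * Lifecycle sketch (inferred from the briefs in this file, not a new
+ * guarantee): IPT_Initialize() at collection setup, IPT_Start() to begin
+ * tracing, IPT_TOPA_Flush() to drain the remaining buffers, and
+ * IPT_Destroy() at teardown.
+ */
+/*!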
+ * @fn VOID IPT_Destroy (DRV_CONFIG pcfg) + * + * @brief Clean up the IPT related buffers + * + * @param pcfg - Driver Configuration + * + * @return NONE + */ +extern VOID +IPT_Destroy(DRV_CONFIG pcfg) +{ + S32 this_cpu; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + + if (DRV_CONFIG_ipt_mode(pcfg) && ipt_dispatch) { + if (ipt_dispatch->fini) { + CONTROL_Invoke_Parallel(ipt_dispatch->fini, + (PVOID)(size_t)this_cpu); + } + + ipt_Deallocate_Buffers(); + + ipt_dispatch = NULL; + } + + ipt_enable_value = 0ULL; + cr3match_value = 0ULL; + ipt_buffer_num = 0; + ipt_buffer_size_mask = 0ULL; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + diff --git a/drivers/platform/x86/sepdk/sep/linuxos.c b/drivers/platform/x86/sepdk/sep/linuxos.c new file mode 100644 index 00000000000000..014a3019ec3c02 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/linuxos.c @@ -0,0 +1,1977 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include + +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#include +#else +#include +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) +#include +#endif +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) +#include +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) +#include +#endif + +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "msrdefs.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "ecb_iterators.h" +#include "inc/lwpmudrv.h" +#include "lwpmudrv_ioctl.h" +#include "inc/control.h" +#include "inc/utility.h" +#include "inc/cpumon.h" +#include "inc/output.h" +#include "inc/pebs.h" + +#include "inc/linuxos.h" +#include "inc/apic.h" + +extern DRV_BOOL collect_sideband; +extern uid_t uid; +extern volatile pid_t control_pid; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; + +static volatile S32 hooks_installed = 0; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) +static struct tracepoint *tp_sched_switch = NULL; +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) +static struct tracepoint *tp_sched_process_exit = NULL; +static struct kprobe *kp_munmap = NULL; +#endif +static struct kprobe *kp_guest_enter = NULL; +static struct kprobe *kp_guest_exit = NULL; + +#define HOOK_FREE 0 +#define HOOK_UNINSTALL -10000 +static atomic_t hook_state = ATOMIC_INIT(HOOK_UNINSTALL); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) && defined(DRV_CPU_HOTPLUG) +static enum cpuhp_state cpuhp_sepdrv_state; +#endif +extern wait_queue_head_t wait_exit; + +extern int +LWPMUDRV_Abnormal_Terminate(void); + +#define MY_TASK PROFILE_TASK_EXIT +#define MY_UNMAP PROFILE_MUNMAP +#ifdef CONFIG_X86_64 +#define MR_SEG_NUM 0 +#else +#define MR_SEG_NUM 2 +#endif + +#if !defined(KERNEL_IMAGE_SIZE) +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) +#endif + +#if defined(DRV_IA32) +static U16 +linuxos_Get_Exec_Mode(struct task_struct *p) +{ + return ((unsigned short)MODE_32BIT); +} +#endif + +#if defined(DRV_EM64T) +static U16 +linuxos_Get_Exec_Mode(struct task_struct *p) +{ + SEP_DRV_LOG_TRACE_IN("P: %p.", p); + + if (!p) { + SEP_DRV_LOG_ERROR_TRACE_OUT("MODE_UNKNOWN (p is NULL!)."); + return MODE_UNKNOWN; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) + if (test_tsk_thread_flag(p, TIF_ADDR32)) { + SEP_DRV_LOG_TRACE_OUT( + "Res: %u (test_tsk_thread_flag TIF_ADDR32).", + (U16)(unsigned short)MODE_32BIT); +#else + if (test_tsk_thread_flag(p, TIF_IA32)) { + SEP_DRV_LOG_TRACE_OUT( + "Res: %u (test_tsk_thread_flag TIF_IA32).", + (U16)(unsigned short)MODE_32BIT); +#endif + return ((unsigned short)MODE_32BIT); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U16)(unsigned short)MODE_64BIT); + return ((unsigned short)MODE_64BIT); +} +#endif + +static S32 +linuxos_Load_Image_Notify_Routine(char *name, + U64 base, + U32 size, + U64 page_offset, + U32 pid, + U32 parent_pid, + U32 options, + unsigned short mode, + S32 load_event, + U32 segment_num, + U32 kernel_modules) +{ + char *raw_path; + ModuleRecord *mra; + char buf[sizeof(ModuleRecord) + MAXNAMELEN + 32]; + U64 tsc_read; + S32 local_load_event = (load_event == -1) ? 
0 : load_event;
+	U64 page_offset_shift;
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(load_event == 1,
+					  "Name: '%s', pid: %d.", name, pid);
+
+	mra = (ModuleRecord *)buf;
+	memset(mra, '\0', sizeof(buf));
+	raw_path = (char *)mra + sizeof(ModuleRecord);
+
+	page_offset_shift = page_offset << PAGE_SHIFT;
+	MR_page_offset_Set(mra, page_offset_shift);
+	MODULE_RECORD_segment_type(mra) = mode;
+	MODULE_RECORD_load_addr64(mra) = (U64)(size_t)base;
+	MODULE_RECORD_length64(mra) = size;
+	MODULE_RECORD_tsc_used(mra) = 1;
+	MODULE_RECORD_first_module_rec_in_process(mra) =
+		options & LOPTS_1ST_MODREC;
+	MODULE_RECORD_segment_number(mra) = segment_num;
+	MODULE_RECORD_exe(mra) = (LOPTS_EXE & options) ? 1 : 0;
+	MODULE_RECORD_global_module_tb5(mra) =
+		(options & LOPTS_GLOBAL_MODULE) ? 1 : 0;
+	MODULE_RECORD_global_module(mra) =
+		(options & LOPTS_GLOBAL_MODULE) ? 1 : 0;
+	MODULE_RECORD_processed(mra) = 0;
+	MODULE_RECORD_parent_pid(mra) = parent_pid;
+	MODULE_RECORD_osid(mra) = OS_ID_NATIVE;
+	MODULE_RECORD_pid_rec_index(mra) = pid;
+
+	if (kernel_modules) {
+		MODULE_RECORD_tsc(mra) = 0;
+		MR_unloadTscSet(mra, 0xffffffffffffffffLL);
+	} else {
+		UTILITY_Read_TSC(&tsc_read);
+		preempt_disable();
+		tsc_read -= TSC_SKEW(CONTROL_THIS_CPU());
+		preempt_enable();
+
+		if (local_load_event) {
+			MR_unloadTscSet(mra, tsc_read);
+		} else {
+			MR_unloadTscSet(mra, (U64)(-1));
+		}
+	}
+
+	MODULE_RECORD_pid_rec_index_raw(mra) = 1; // raw pid
+#if defined(DEBUG)
+	if (total_loads_init) {
+		SEP_DRV_LOG_NOTIFICATION_TRACE(
+			load_event == 1,
+			"Setting pid_rec_index_raw pid 0x%x %s.", pid, name);
+	}
+#endif
+
+	strncpy(raw_path, name, MAXNAMELEN);
+	raw_path[MAXNAMELEN] = 0;
+	MODULE_RECORD_path_length(mra) = (U16)strlen(raw_path) + 1;
+	MODULE_RECORD_rec_length(mra) = (U16)ALIGN_8(
+		sizeof(ModuleRecord) + MODULE_RECORD_path_length(mra));
+
+#if defined(DRV_IA32)
+	MODULE_RECORD_selector(mra) = (pid == 0) ? __KERNEL_CS : __USER_CS;
+#endif
+#if defined(DRV_EM64T)
+	if (mode == MODE_64BIT) {
+		MODULE_RECORD_selector(mra) =
+			(pid == 0) ? __KERNEL_CS : __USER_CS;
+	} else if (mode == MODE_32BIT) {
+		MODULE_RECORD_selector(mra) =
+			(pid == 0) ? __KERNEL32_CS : __USER32_CS;
+	}
+#endif
+
+	OUTPUT_Module_Fill((PVOID)mra, MODULE_RECORD_rec_length(mra),
+			   load_event == 1);
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS");
+	return OS_SUCCESS;
+}
+
+#ifdef DRV_MM_EXE_FILE_PRESENT
+static DRV_BOOL
+linuxos_Equal_VM_Exe_File(struct vm_area_struct *vma)
+{
+	S8 name_vm_file[MAXNAMELEN];
+	S8 name_exe_file[MAXNAMELEN];
+	S8 *pname_vm_file = NULL;
+	S8 *pname_exe_file = NULL;
+	DRV_BOOL res;
+
+	SEP_DRV_LOG_TRACE_IN("VMA: %p.", vma);
+
+	if (vma == NULL) {
+		SEP_DRV_LOG_TRACE_OUT("FALSE (!vma).");
+		return FALSE;
+	}
+
+	if (vma->vm_file == NULL) {
+		SEP_DRV_LOG_TRACE_OUT("FALSE (!vma->vm_file).");
+		return FALSE;
+	}
+
+	if (vma->vm_mm->exe_file == NULL) {
+		SEP_DRV_LOG_TRACE_OUT("FALSE (!vma->vm_mm->exe_file).");
+		return FALSE;
+	}
+
+	pname_vm_file = D_PATH(vma->vm_file, name_vm_file, MAXNAMELEN);
+	pname_exe_file =
+		D_PATH(vma->vm_mm->exe_file, name_exe_file, MAXNAMELEN);
+	if (IS_ERR(pname_vm_file) || !pname_vm_file || IS_ERR(pname_exe_file) ||
+	    !pname_exe_file) {
+		SEP_DRV_LOG_TRACE_OUT("FALSE (invalid pname_vm_file or pname_exe_file).");
+		return FALSE;
+	}
+	res = strcmp(pname_vm_file, pname_exe_file) == 0;
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", res);
+	return res;
+}
+#endif
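+
+/*
+ * Illustrative sketch (compiled out): how a consumer of the ModuleRecord
+ * filled in above can translate a sampled virtual address back to an
+ * offset in the backing file. MR_page_offset_Get() is assumed here as the
+ * read counterpart of the MR_page_offset_Set() accessor used above;
+ * "sample_addr" is a hypothetical input.
+ */
+#if 0
+static U64
+module_record_file_offset(ModuleRecord *mr, U64 sample_addr)
+{
+	/* load_addr64 holds vma->vm_start, and the stored page offset was
+	 * already shifted left by PAGE_SHIFT, so it is a byte offset into
+	 * the mapped file.
+	 */
+	return sample_addr - MODULE_RECORD_load_addr64(mr) +
+	       MR_page_offset_Get(mr); /* hypothetical accessor */
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!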
+ * @fn linuxos_Map_Kernel_Modules (void)
+ *
+ * @brief Obtain kernel module details from modules list
+ * and map the details to the module record.
+ *
+ * @return S32 OS_SUCCESS on success
+ */
+static S32
+linuxos_Map_Kernel_Modules(void)
+{
+	struct module *current_module;
+	struct list_head *modules;
+	U16 exec_mode = MODE_UNKNOWN;
+	unsigned long long addr;
+	unsigned long long size;
+#if defined(CONFIG_RANDOMIZE_BASE)
+	unsigned long dyn_addr = 0;
+#endif
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
+	rcu_read_lock_sched();
+#else
+	mutex_lock(&module_mutex);
+#endif
+
+#if defined(DRV_EM64T)
+	addr = (unsigned long)__START_KERNEL_map;
+	exec_mode = MODE_64BIT;
+#elif defined(DRV_IA32)
+	addr = (unsigned long)PAGE_OFFSET;
+	exec_mode = MODE_32BIT;
+#endif
+
+	SEP_DRV_LOG_TRACE(
+		"      kernel module            address            size");
+	SEP_DRV_LOG_TRACE(
+		"      -------------------      ------------------ -------");
+
+	addr += (CONFIG_PHYSICAL_START + (CONFIG_PHYSICAL_ALIGN - 1)) &
+		~(CONFIG_PHYSICAL_ALIGN - 1);
+	size = (unsigned long)KERNEL_IMAGE_SIZE -
+	       ((CONFIG_PHYSICAL_START + (CONFIG_PHYSICAL_ALIGN - 1)) &
+		~(CONFIG_PHYSICAL_ALIGN - 1)) -
+	       1;
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+	if (!dyn_addr) {
+		dyn_addr = (unsigned long)UTILITY_Find_Symbol("_text");
+		if (!dyn_addr) {
+			dyn_addr = (unsigned long)UTILITY_Find_Symbol("_stext");
+		}
+
+		if (dyn_addr && dyn_addr > addr) {
+			dyn_addr &= ~(PAGE_SIZE - 1);
+			size -= (dyn_addr - addr);
+			addr = dyn_addr;
+		} else {
+			SEP_DRV_LOG_WARNING_TRACE_OUT(
+				"Could not find the kernel start address!");
+		}
+	}
+#endif
+
+	linuxos_Load_Image_Notify_Routine(
+		"vmlinux", addr, size, 0, 0, 0,
+		LOPTS_1ST_MODREC | LOPTS_GLOBAL_MODULE | LOPTS_EXE, exec_mode,
+		-1, MR_SEG_NUM, 1);
+	SEP_DRV_LOG_TRACE("kmodule: %20s 0x%llx 0x%llx.", "vmlinux", addr,
+			  size);
+
+	for (modules = THIS_MODULE->list.prev;
+	     (unsigned long)modules > MODULES_VADDR; modules = modules->prev);
+	list_for_each_entry(current_module, modules, list) {
+		char *name = current_module->name;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) ||                          \
+	defined(SEP_CONFIG_MODULE_LAYOUT)
+		addr = (unsigned long)current_module->core_layout.base;
+		size = current_module->core_layout.size;
+#else
+		addr = (unsigned long)current_module->module_core;
+		size = current_module->core_size;
+#endif
+
+		if (module_is_live(current_module)) {
+			SEP_DRV_LOG_TRACE("kmodule: %20s 0x%llx 0x%llx.",
+					  name, addr, size);
+			linuxos_Load_Image_Notify_Routine(name, addr, size, 0,
+							  0, 0,
+							  LOPTS_GLOBAL_MODULE,
+							  exec_mode, -1, 0, 1);
+		}
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
+	rcu_read_unlock_sched();
+#else
+	mutex_unlock(&module_mutex);
+#endif
+
+	SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS");
+	return OS_SUCCESS;
+}
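+
+/*
+ * Worked example of the align-up arithmetic above (illustrative values
+ * only): with CONFIG_PHYSICAL_START = 0x200000 and CONFIG_PHYSICAL_ALIGN
+ * = 0x1000000, (0x200000 + 0xFFFFFF) & ~0xFFFFFF == 0x1000000, i.e. the
+ * kernel text start is rounded up to the next CONFIG_PHYSICAL_ALIGN
+ * boundary before being reported as the "vmlinux" load address.
+ */
+
+//
+// Register the module for a process. The task_struct and mm
+// should be locked if necessary to make sure they don't change while we're
+// iterating...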
+// Used as a service routine
+//
+static S32
+linuxos_VMA_For_Process(struct task_struct *p,
+			struct vm_area_struct *vma,
+			S32 load_event,
+			U32 *first)
+{
+	U32 options = 0;
+	S8 name[MAXNAMELEN];
+	S8 *pname = NULL;
+	U32 ppid = 0;
+	U16 exec_mode;
+	U64 page_offset = 0;
+
+#if defined(DRV_ANDROID)
+	char andr_app[TASK_COMM_LEN];
+#endif
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(
+		load_event == 1, "P = %p, vma = %p, load_event: %d, first: %p.",
+		p, vma, load_event, first);
+
+	if (p == NULL) {
+		SEP_DRV_LOG_NOTIFICATION_ERROR(load_event == 1,
+					       "Skipped p=NULL.");
+		SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1,
+						   "OS_SUCCESS (!p).");
+		return OS_SUCCESS;
+	}
+
+	if (vma->vm_file) {
+		pname = D_PATH(vma->vm_file, name, MAXNAMELEN);
+	}
+
+	page_offset = vma->vm_pgoff;
+
+	if (!IS_ERR(pname) && pname != NULL) {
+		SEP_DRV_LOG_NOTIFICATION_TRACE(load_event == 1,
+					       "enum: %s, %d, %lx, %lx %llu.",
+					       pname, p->pid, vma->vm_start,
+					       (vma->vm_end - vma->vm_start),
+					       page_offset);
+
+		// if the VM_EXECUTABLE flag is set then this is the module
+		// that is being used to name the module
+		if (DRV_VM_MOD_EXECUTABLE(vma)) {
+			options |= LOPTS_EXE;
+#if defined(DRV_ANDROID)
+			if (!strcmp(pname, "/system/bin/app_process") ||
+			    !strcmp(pname, "/system/bin/app_process32") ||
+			    !strcmp(pname, "/system/bin/app_process64")) {
+				memset(andr_app, '\0', TASK_COMM_LEN);
+				strncpy(andr_app, p->comm, TASK_COMM_LEN);
+				pname = andr_app;
+			}
+#endif
+		}
+		// mark the first of the bunch...
+		if (*first == 1) {
+			options |= LOPTS_1ST_MODREC;
+			*first = 0;
+		}
+	}
+#if defined(DRV_ALLOW_VDSO)
+	else if (vma->vm_mm &&
+		 vma->vm_start == (long)vma->vm_mm->context.vdso) {
+		pname = "[vdso]";
+	}
+#endif
+#if defined(DRV_ALLOW_SYSCALL)
+	else if (vma->vm_start == VSYSCALL_START) {
+		pname = "[vsyscall]";
+	}
+#endif
+
+	if (!IS_ERR(pname) && pname != NULL) {
+		options = 0;
+		if (DRV_VM_MOD_EXECUTABLE(vma)) {
+			options |= LOPTS_EXE;
+		}
+
+		if (p->real_parent) {
+			ppid = p->real_parent->tgid;
+		}
+		exec_mode = linuxos_Get_Exec_Mode(p);
+		// record this module
+		linuxos_Load_Image_Notify_Routine(pname, vma->vm_start,
+						  (vma->vm_end - vma->vm_start),
+						  page_offset, p->pid, ppid,
+						  options, exec_mode,
+						  load_event, 1, 0);
+	}
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS.");
+	return OS_SUCCESS;
+}
+
+//
+// Common loop to enumerate all modules for a process. The task_struct and mm
+// should be locked if necessary to make sure they don't change while we're
+// iterating...
+//
+static S32
+linuxos_Enum_Modules_For_Process(struct task_struct *p,
+				 struct mm_struct *mm,
+				 S32 load_event)
+{
+	struct vm_area_struct *mmap;
+	U32 first = 1;
+
+#if defined(SECURE_SEP)
+	uid_t l_uid;
+#endif
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(load_event == 1,
+					  "P: %p, mm: %p, load_event: %d.", p,
+					  mm, load_event);
+
+#if defined(SECURE_SEP)
+	l_uid = DRV_GET_UID(p);
+	/*
+	 * Check for: same uid, or root uid
+	 */
+	if (l_uid != uid && l_uid != 0) {
+		SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(
+			load_event == 1,
+			"OS_SUCCESS (secure_sep && l_uid != uid && l_uid != 0).");
+		return OS_SUCCESS;
+	}
+#endif
+	for (mmap = mm->mmap; mmap; mmap = mmap->vm_next) {
+		/* We have 3 distinct conditions here.
+		 * 1) Is the page executable?
+		 * 2) Is it a part of the vdso area?
+		 * 3) Is it the vsyscall area?
+		 */
+		if (((mmap->vm_flags & VM_EXEC) && mmap->vm_file &&
+		     mmap->vm_file->DRV_F_DENTRY)
+#if defined(DRV_ALLOW_VDSO)
+		    || (mmap->vm_mm &&
+			mmap->vm_start == (long)mmap->vm_mm->context.vdso)
+#endif
+#if defined(DRV_ALLOW_VSYSCALL)
+		    || (mmap->vm_start == VSYSCALL_START)
+#endif
+		) {
+
+			linuxos_VMA_For_Process(p, mmap, load_event, &first);
+		}
+	}
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(load_event == 1, "OS_SUCCESS");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static int linuxos_Exec_Unmap_Notify(
+ *                struct notifier_block *self,
+ *                unsigned long val,
+ *                VOID *data)
+ *
+ * @brief this function is called whenever part of a process address space
+ *        is unmapped
+ *
+ * @param self IN - not used
+ *        val IN - not used
+ *        data IN - cast to the address being unmapped by the current task
+ *
+ * @return 0
+ *
+ * Special Notes:
+ *
+ * This notification is called from do_munmap(mm/mmap.c). This is called
+ * whenever a module is loaded or unloaded. It looks like it is called right
+ * after a module is loaded or before it is unloaded (if using
+ * dlopen/dlclose). However it is not called when a process is exiting;
+ * instead exit_mmap is called (resulting in an EXIT_MMAP notification).
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+static int
+linuxos_Exec_Unmap_Notify(struct notifier_block *self,
+			  unsigned long val,
+			  PVOID data)
+#else
+static int
+linuxos_Exec_Unmap_Notify(struct kprobe *p, struct pt_regs *regs)
+#endif
+{
+	struct mm_struct *mm;
+	struct vm_area_struct *mmap = NULL;
+	U32 first = 1;
+	U32 cur_driver_state;
+	unsigned long addr;
+	int status = 0;
+#if defined(SECURE_SEP)
+	uid_t l_uid;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+	SEP_DRV_LOG_NOTIFICATION_IN("Self: %p, val: %lu, data: %p.", self, val,
+				    data);
+	SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION,
+				       "enter: unmap: hook_state %d.",
+				       atomic_read(&hook_state));
+#else
+	SEP_DRV_LOG_NOTIFICATION_IN("regs: %p, addr: 0x%lx.", regs, regs->di);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+	addr = (unsigned long)data;
+#else
+	addr = (unsigned long)regs->di;
+#endif
+
+	cur_driver_state = GET_DRIVER_STATE();
+
+#if defined(SECURE_SEP)
+	l_uid = DRV_GET_UID(current);
+	/*
+	 * Check for: same uid, or root uid
+	 */
+	if (l_uid != uid && l_uid != 0) {
+		SEP_DRV_LOG_NOTIFICATION_OUT(
+			"Returns 0 (secure_sep && l_uid != uid && l_uid != 0).");
+		return status;
+	}
+#endif
+
+	if (!IS_COLLECTING_STATE(cur_driver_state)) {
+		SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state).");
+		return status;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+	if (!atomic_add_negative(1, &hook_state)) {
+		SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION,
+					       "unmap: hook_state %d.",
+					       atomic_read(&hook_state));
+#endif
+		mm = get_task_mm(current);
+		if (mm) {
+			UTILITY_down_read_mm(mm);
+			mmap = FIND_VMA(mm, addr);
+			if (mmap && mmap->vm_file
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+			    && (mmap->vm_flags & VM_EXEC)
+#endif
+			) {
+				linuxos_VMA_For_Process(current, mmap, TRUE,
+							&first);
+			}
+			UTILITY_up_read_mm(mm);
+			mmput(mm);
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+	}
+	atomic_dec(&hook_state);
+	SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION,
+				       "exit: unmap done: hook_state %d.",
+				       atomic_read(&hook_state));
+#endif
+
+	SEP_DRV_LOG_NOTIFICATION_OUT("");
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn int linuxos_Guest_Enter_Notify(struct kprobe *p, struct pt_regs *regs)
+ * @brief kprobe pre-handler invoked on guest VM entry
+ *
+ * @param p IN - not used
+ *        regs IN - registers at the probe point (holds the vcpu pointer)
+ *
+ * @return 0 success else error number
+ *
+ * Special Notes:
+ *
+ * None
+ */
+static int
+linuxos_Guest_Enter_Notify(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kvm_vcpu *local_vcpu = NULL;
+	int cpu;
+
+	if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) {
+		return 0;
+	}
+
+	local_vcpu = (struct kvm_vcpu *)regs->di;
+	cpu = smp_processor_id();
+
+	// Need to disable PEBS during guest VM execution
+	// because PEBS DS area on host side becomes invalid in guest context
+	if (local_vcpu->cpu == cpu) {
+		SYS_Write_MSR(IA32_PEBS_ENABLE, 0LL);
+	}
+
+	return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn int linuxos_Guest_Exit_Notify(struct kprobe *p, struct pt_regs *regs)
+ * @brief kprobe pre-handler invoked on guest VM exit
+ *
+ * @param p IN - not used
+ *        regs IN - registers at the probe point (holds the vcpu pointer)
+ *
+ * @return 0 success else error number
+ *
+ * Special Notes:
+ *
+ * None
+ */
+static int
+linuxos_Guest_Exit_Notify(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kvm_vcpu *local_vcpu = NULL;
+	int cpu;
+	CPU_STATE pcpu;
+	ECB pecb;
+	U32 dev_idx;
+	U32 cur_grp;
+
+	if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) {
+		return 0;
+	}
+
+	local_vcpu = (struct kvm_vcpu *)regs->di;
+	cpu = smp_processor_id();
+	pcpu = &pcb[cpu];
+	dev_idx = core_to_dev_map[cpu];
+	cur_grp = CPU_STATE_current_group(pcpu);
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+
+	// Need to restore PEBS when guest vm exits
+	if (local_vcpu->cpu == cpu) {
+		SYS_Write_MSR(
+			IA32_PEBS_ENABLE,
+			ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX,
+					      PMU_OPERATION_GLOBAL_REGS));
+	}
+
+	return 0;
+}
+
+#if defined(DRV_CPU_HOTPLUG)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID linuxos_Handle_Online_cpu(
+ *                PVOID param)
+ *
+ * @param PVOID param
+ *
+ * @return None
+ *
+ * @brief Callback function to set the cpu online
+ * @brief and begin collection on it
+ */
+static VOID
+linuxos_Handle_Online_cpu(PVOID param)
+{
+	U32 this_cpu;
+	U32 dev_idx;
+	DISPATCH dispatch;
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(SEP_IN_NOTIFICATION,
+					  "Dummy param: %p.", param);
+
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+	preempt_enable();
+	CPUMON_Online_Cpu((PVOID)&this_cpu);
+	if (CPU_STATE_pmu_state(&pcb[this_cpu]) == NULL) {
+		if (dispatch && dispatch->init) {
+			dispatch->init(NULL);
+		}
+	}
+	if (dispatch && dispatch->write) {
+		dispatch->write(NULL);
+	}
+	CPU_STATE_group_swap(&pcb[this_cpu]) = 1;
+	// possible race conditions with notifications. cleanup should wait
+	// until all notifications are done, and new notifications should not proceed
+	if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) {
+		if (dispatch && dispatch->restart) {
+			dispatch->restart(NULL);
+		}
+	}
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(SEP_IN_NOTIFICATION, "");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID linuxos_Handle_Offline_cpu(
+ *                PVOID param)
+ *
+ * @param PVOID param
+ *
+ * @return None
+ *
+ * @brief Callback function to set the cpu offline
+ * @brief and stop collection on it
+ */
+static VOID
+linuxos_Handle_Offline_cpu(PVOID param)
+{
+	U32 this_cpu;
+	U32 apic_lvterr;
+	U32 dev_idx;
+	DISPATCH dispatch;
+	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(SEP_IN_NOTIFICATION,
+					  "Dummy param: %p.", param);
+
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+	preempt_enable();
+	CPUMON_Offline_Cpu((PVOID)&this_cpu);
+	if (dispatch && dispatch->freeze) {
+		dispatch->freeze(NULL);
+	}
+	apic_lvterr = apic_read(APIC_LVTERR);
+	apic_write(APIC_LVTERR, apic_lvterr | APIC_LVT_MASKED);
+	APIC_Restore_LVTPC(NULL);
+	apic_write(APIC_LVTERR, apic_lvterr);
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(SEP_IN_NOTIFICATION, "");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn int linuxos_online_cpu(
+ *                unsigned int cpu)
+ *
+ * @param unsigned int cpu
+ *
+ * @return 0 on success
+ *
+ * @brief Invokes appropriate call back function when CPU is online
+ */
+int
+linuxos_online_cpu(unsigned int cpu)
+{
+	SEP_DRV_LOG_NOTIFICATION_IN("Cpu %d coming online.", cpu);
+
+	if (CPUMON_is_Online_Allowed()) {
+		CONTROL_Invoke_Cpu(cpu, linuxos_Handle_Online_cpu, NULL);
+		SEP_DRV_LOG_NOTIFICATION_OUT("Cpu %d came online.", cpu);
+		return 0;
+	} else {
+		SEP_DRV_LOG_WARNING_NOTIFICATION_OUT(
+			"Cpu %d is not allowed to come online!", cpu);
+		return 0;
+	}
+}
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn int linuxos_offline_cpu(
+ *                unsigned int cpu)
+ *
+ * @param unsigned int cpu
+ *
+ * @return 0 on success
+ *
+ * @brief Invokes appropriate call back function when CPU is offline
+ */
+int
+linuxos_offline_cpu(unsigned int cpu)
+{
+	SEP_DRV_LOG_NOTIFICATION_IN("Cpu %d going offline.", cpu);
+
+	if (CPUMON_is_Offline_Allowed()) {
+		CONTROL_Invoke_Cpu(cpu, linuxos_Handle_Offline_cpu, NULL);
+		SEP_DRV_LOG_NOTIFICATION_OUT("Cpu %d went offline.", cpu);
+		return 0;
+	} else {
+		SEP_DRV_LOG_WARNING_NOTIFICATION_OUT(
+			"Cpu %d is not allowed to go offline!", cpu);
+		return 0;
+	}
+}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn OS_STATUS linuxos_Hotplug_Notifier(
+ *                struct notifier_block *block, unsigned long action, void *pcpu)
+ *
+ * @param struct notifier_block *block - notifier block
+ *        unsigned long action - notifier action
+ *        void *pcpu - per cpu pcb
+ *
+ * @return NOTIFY_OK, if successful
+ *
+ * @brief Hotplug Notifier function that handles various cpu states
+ * @brief and invokes respective callback functions
+ */
+static OS_STATUS
+linuxos_Hotplug_Notifier(struct notifier_block *block,
+			 unsigned long action,
+			 void *pcpu)
+{
+	U32 cpu = (unsigned int)(unsigned long)pcpu;
+
+	// nb: will overcount number of pending notifications when using this routine
+	SEP_DRV_LOG_NOTIFICATION_IN("Cpu: %u, action: %lu.", cpu, action);
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DOWN_FAILED:
+		SEP_DRV_LOG_ERROR("SEP cpu %d offline failed!", cpu);
+		/* fall through: bring the cpu back online */
+	case CPU_ONLINE:
+		linuxos_online_cpu(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+		linuxos_offline_cpu(cpu);
+		break;
+	default:
+		SEP_DRV_LOG_WARNING(
+			"DEFAULT: cpu %d unhandled action value is %lu.", cpu,
+			action);
+		break;
+	}
+
+	SEP_DRV_LOG_NOTIFICATION_OUT("");
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_hotplug_notifier = {
+	.notifier_call = &linuxos_Hotplug_Notifier,
+};
+#endif
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID LINUXOS_Register_Hotplug(
+ *                VOID)
+ *
+ * @param None
+ *
+ * @return None
+ *
+ * @brief Registers the Hotplug Notifier
+ */
+extern VOID
+LINUXOS_Register_Hotplug(VOID)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	S32 err;
+
+	SEP_DRV_LOG_INIT_IN(
+		"Kernel version >= 4.10.0: using direct notifications.");
+
+	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "ia64/sep5:online",
+					linuxos_online_cpu,
+					linuxos_offline_cpu);
+	cpuhp_sepdrv_state = err;
+#else
+	SEP_DRV_LOG_INIT_IN("Kernel version < 4.10.0: using notification hub.");
+	register_cpu_notifier(&cpu_hotplug_notifier);
+#endif
+	SEP_DRV_LOG_INIT_OUT("Hotplug notifier registered.");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID LINUXOS_Unregister_Hotplug(
+ *                VOID)
+ *
+ * @param None
+ *
+ * @return None
+ *
+ * @brief Unregisters the Hotplug Notifier
+ */
+extern VOID
+LINUXOS_Unregister_Hotplug(VOID)
+{
+	SEP_DRV_LOG_INIT_IN("Unregistering hotplug notifier.");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+	cpuhp_remove_state_nocalls(cpuhp_sepdrv_state);
+#else
+	unregister_cpu_notifier(&cpu_hotplug_notifier);
+#endif
+	SEP_DRV_LOG_INIT_OUT("Hotplug notifier unregistered.");
+}
+#endif
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn OS_STATUS LINUXOS_Enum_Process_Modules(DRV_BOOL at_end)
+ *
+ * @brief gather all the process modules that are present.
+ *
+ * @param at_end - the collection happens at the end of the sampling run
+ *
+ * @return OS_SUCCESS
+ *
+ * Special Notes:
+ * This routine gathers all the process modules that are present
+ * in the system at this time. If at_end is set to be TRUE, then
+ * act as if all the modules are being unloaded.
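+ * Internally, at_end==FALSE passes load_event 0 (initial enumeration)
+ * and at_end==TRUE passes load_event -1, which keeps these records
+ * distinguishable from live unmap notifications (load_event 1), the
+ * only ones that stamp an unload TSC in
+ * linuxos_Load_Image_Notify_Routine().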
+ * + */ +extern OS_STATUS +LINUXOS_Enum_Process_Modules(DRV_BOOL at_end) +{ + int n = 0; + struct task_struct *p; + + SEP_DRV_LOG_TRACE_IN("At_end: %u.", at_end); + SEP_DRV_LOG_TRACE("Begin tasks."); + + if (GET_DRIVER_STATE() == DRV_STATE_TERMINATING) { + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS (TERMINATING)."); + return OS_SUCCESS; + } + + FOR_EACH_TASK(p) { + struct mm_struct *mm; + + SEP_DRV_LOG_TRACE("Looking at task %d.", n); + /* + * Call driver notification routine for each module + * that is mapped into the process created by the fork + */ + + if (p == NULL) { + SEP_DRV_LOG_TRACE("Skipped (p=NULL)."); + continue; + } + + p->comm[TASK_COMM_LEN - 1] = + 0; // making sure there is a trailing 0 + mm = get_task_mm(p); + + if (!mm) { + // Since mm is null, these notifications can be skipped but logging + // them anyway to know such process ids + // But skip the extra incorrect process notifications with null mm, + // seen with mpi launch use case (launched by kernel processes (0,1,2) as parents + // because they are conflicting with the actual process notifications + // which are received later) + if (p->parent && p->parent->tgid <= 2) { + linuxos_Load_Image_Notify_Routine( + p->comm, 0, 0, 0, p->pid, + (p->parent) ? p->parent->tgid : 0, + LOPTS_EXE | LOPTS_1ST_MODREC, + linuxos_Get_Exec_Mode(p), + 2, // '2' to trigger 'if (load_event)' conditions, but still be distinguishable from actual load events + 1, 0); + } else { + SEP_DRV_LOG_TRACE( + "Skipped (p->mm=NULL). P=0x%p, pid=%d, p->comm=%s.", + p, p->pid, p->comm); + } + continue; + } + + UTILITY_down_read_mm(mm); + linuxos_Enum_Modules_For_Process(p, mm, at_end ? -1 : 0); + UTILITY_up_read_mm(mm); + mmput(mm); + n++; + } + + SEP_DRV_LOG_TRACE("Enum_Process_Modules done with %d tasks.", n); + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS."); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static int linuxos_Exit_Task_Notify(struct notifier_block * self, + * unsigned long val, PVOID data) + * @brief this function is called whenever a task exits + * + * @param self IN - not used + * val IN - not used + * data IN - this is cast into the task_struct of the exiting task + * + * @return none + * + * Special Notes: + * this function is called whenever a task exits. It is called right before + * the virtual memory areas are freed. 
We just enumerate through all the modules + * of the task and set the unload sample count and the load event flag to 1 to + * indicate this is a module unload + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) +static int +linuxos_Exit_Task_Notify(struct notifier_block *self, + unsigned long val, + PVOID data) +#else +static void +linuxos_Exit_Task_Notify(void *ignore, struct task_struct *data) +#endif +{ + struct task_struct *p = (struct task_struct *)data; + int status = OS_SUCCESS; + U32 cur_driver_state; + struct mm_struct *mm; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) + SEP_DRV_LOG_NOTIFICATION_IN("Self: %p, val: %lu, data: %p.", self, val, + data); +#else + SEP_DRV_LOG_NOTIFICATION_IN("task_struct: %p", data); +#endif + + cur_driver_state = GET_DRIVER_STATE(); + + if (cur_driver_state == DRV_STATE_UNINITIALIZED || + cur_driver_state == DRV_STATE_TERMINATING) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state)."); + goto final; + } + SEP_DRV_LOG_TRACE("Pid = %d tgid = %d.", p->pid, p->tgid); + if (p->pid == control_pid) { + SEP_DRV_LOG_NOTIFICATION_TRACE( + SEP_IN_NOTIFICATION, + "The collector task has been terminated via an uncatchable signal."); + SEP_DRV_LOG_NOTIFICATION_WARNING(SEP_IN_NOTIFICATION, + "Sep was killed!"); + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + wake_up_interruptible(&wait_exit); + + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (pid == control_pid).", + status); + goto final; + } + + if (cur_driver_state != DRV_STATE_IDLE && + !IS_COLLECTING_STATE(cur_driver_state)) { + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (stopping collection).", + status); + goto final; + } + + mm = get_task_mm(p); + if (!mm) { + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u (!p->mm).", status); + goto final; + } + UTILITY_down_read_mm(mm); + if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { + if (!atomic_add_negative(1, &hook_state)) { + linuxos_Enum_Modules_For_Process(p, mm, 1); + } + atomic_dec(&hook_state); + } + UTILITY_up_read_mm(mm); + mmput(mm); + + SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, "Hook_state %d.", + atomic_read(&hook_state)); + +final: +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) + SEP_DRV_LOG_NOTIFICATION_OUT("Res = %u.", status); + return status; +#else + SEP_DRV_LOG_NOTIFICATION_OUT(""); + return; +#endif +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) +/* + * The notifier block. All the static entries have been defined at this point + */ +static struct notifier_block linuxos_exec_unmap_nb = { + .notifier_call = linuxos_Exec_Unmap_Notify, +}; + +static struct notifier_block linuxos_exit_task_nb = { + .notifier_call = linuxos_Exit_Task_Notify, +}; +#endif + +#if defined(CONFIG_TRACEPOINTS) +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void capture_sched_switch(VOID *) + * @brief capture current pid/tid on all cpus + * + * @param p IN - not used + * + * @return none + * + * Special Notes: + * + * None + */ +static void +capture_sched_switch(void *p) +{ + U32 this_cpu; + BUFFER_DESC bd; + SIDEBAND_INFO_EXT sideband_info; + U64 tsc; + + SEP_DRV_LOG_TRACE_IN(""); + + UTILITY_Read_TSC(&tsc); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + bd = &cpu_sideband_buf[this_cpu]; + if (bd == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Bd is NULL!"); + return; + } + + sideband_info = (SIDEBAND_INFO_EXT)OUTPUT_Reserve_Buffer_Space( + bd, sizeof(SIDEBAND_INFO_EXT_NODE), FALSE, + !SEP_IN_NOTIFICATION); + if (sideband_info == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Sideband_info is NULL!"); + return; + } + + SIDEBAND_INFO_EXT_pid(sideband_info) = current->tgid; + SIDEBAND_INFO_EXT_tid(sideband_info) = current->pid; + SIDEBAND_INFO_EXT_tsc(sideband_info) = tsc; + SIDEBAND_INFO_EXT_cpuid(sideband_info) = this_cpu; + SIDEBAND_INFO_EXT_osid(sideband_info) = OS_ID_NATIVE; + + SEP_DRV_LOG_TRACE_OUT(""); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) +/* ------------------------------------------------------------------------- */ +/*! + * @fn void record_pebs_process_info(...) + * @brief record all sched switch pid/tid info + * + * @param ignore IN - not used + * from IN + * to IN + * + * @return none + * + * Special Notes: + * + * None + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +static void +record_pebs_process_info(void *ignore, + bool preempt, + struct task_struct *from, + struct task_struct *to) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) +static void +record_pebs_process_info(void *ignore, + struct task_struct *from, + struct task_struct *to) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) +static void +record_pebs_process_info(struct rq *ignore, + struct task_struct *from, + struct task_struct *to) +#endif +{ + U32 this_cpu; + BUFFER_DESC bd; + SIDEBAND_INFO_EXT sideband_info; + U64 tsc; + U32 cur_driver_state; + + SEP_DRV_LOG_NOTIFICATION_IN("From: %p, to: %p.", from, to); + + cur_driver_state = GET_DRIVER_STATE(); + + if (cur_driver_state != DRV_STATE_IDLE && + !IS_COLLECTING_STATE(cur_driver_state)) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (driver state)."); + return; + } + + UTILITY_Read_TSC(&tsc); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + SEP_DRV_LOG_NOTIFICATION_TRACE(SEP_IN_NOTIFICATION, + "[OUT<%d:%d:%s>-IN<%d:%d:%s>].", + from->tgid, from->pid, from->comm, + to->tgid, to->pid, to->comm); + + bd = &cpu_sideband_buf[this_cpu]; + if (bd == NULL) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (!bd)."); + return; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) + sideband_info = (SIDEBAND_INFO_EXT)OUTPUT_Reserve_Buffer_Space( + bd, sizeof(SIDEBAND_INFO_EXT_NODE), TRUE, SEP_IN_NOTIFICATION); +#else + sideband_info = (SIDEBAND_INFO_EXT)OUTPUT_Reserve_Buffer_Space( + bd, sizeof(SIDEBAND_INFO_EXT_NODE), FALSE, SEP_IN_NOTIFICATION); +#endif + + if (sideband_info == NULL) { + SEP_DRV_LOG_NOTIFICATION_OUT("Early exit (!sideband_info)."); + return; + } + + SIDEBAND_INFO_EXT_pid(sideband_info) = to->tgid; + SIDEBAND_INFO_EXT_tid(sideband_info) = to->pid; + SIDEBAND_INFO_EXT_old_tid(sideband_info) = from->pid; + SIDEBAND_INFO_EXT_tsc(sideband_info) = tsc; + SIDEBAND_INFO_EXT_cpuid(sideband_info) = this_cpu; + SIDEBAND_INFO_EXT_osid(sideband_info) = OS_ID_NATIVE; +#if 
defined(get_current_state)
+	SIDEBAND_INFO_EXT_old_thread_state(sideband_info) =
+		(U16)READ_ONCE(from->__state);
+#else
+	SIDEBAND_INFO_EXT_old_thread_state(sideband_info) = (U16)from->state;
+#endif
+
+	SEP_DRV_LOG_NOTIFICATION_OUT("");
+}
+#endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void find_sched_switch_tracepoint
+ * @brief find the tracepoint for sched_switch
+ *
+ * @param tp pass in by system
+ *        param pointer of trace point
+ *
+ * @return none
+ *
+ * Special Notes:
+ *
+ * None
+ */
+static void
+find_sched_switch_tracepoint(struct tracepoint *tp, VOID *param)
+{
+	struct tracepoint **ptp = (struct tracepoint **)param;
+
+	SEP_DRV_LOG_TRACE_IN("Tp: %p, param: %p.", tp, param);
+
+	if (tp && ptp) {
+		SEP_DRV_LOG_TRACE("trace point name: %s.", tp->name);
+		if (!strcmp(tp->name, "sched_switch")) {
+			SEP_DRV_LOG_TRACE(
+				"Found trace point for sched_switch.");
+			*ptp = tp;
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void find_sched_process_exit_tracepoint
+ * @brief find tracepoint function for sched_process_exit
+ *
+ * @param tp pass in by system
+ *        param pointer of trace point
+ *
+ * @return none
+ *
+ * Special Notes:
+ *
+ * None
+ */
+static void
+find_sched_process_exit_tracepoint(struct tracepoint *tp, VOID *param)
+{
+	struct tracepoint **ptp = (struct tracepoint **)param;
+
+	SEP_DRV_LOG_TRACE_IN("Tp: %p, param: %p.", tp, param);
+
+	if (tp && ptp) {
+		SEP_DRV_LOG_TRACE("trace point name: %s.", tp->name);
+		if (!strcmp(tp->name, "sched_process_exit")) {
+			SEP_DRV_LOG_TRACE(
+				"Found trace point for sched_process_exit.");
+			*ptp = tp;
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn int install_sched_switch_callback(VOID)
+ * @brief registers sched_switch callbacks for PEBS sideband
+ *
+ * @param none
+ *
+ * @return 0 success else error number
+ *
+ * Special Notes:
+ *
+ * None
+ */
+static int
+install_sched_switch_callback(VOID)
+{
+	int err = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+	SEP_DRV_LOG_INIT("Installing PEBS linux OS Hooks.");
+
+#if defined(CONFIG_TRACEPOINTS)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+	if (!tp_sched_switch) {
+		for_each_kernel_tracepoint(&find_sched_switch_tracepoint,
+					   &tp_sched_switch);
+	}
+	if (!tp_sched_switch) {
+		err = -EIO;
+		SEP_DRV_LOG_INIT(
+			"Please check Linux is built w/ CONFIG_CONTEXT_SWITCH_TRACER.");
+	} else {
+		err = tracepoint_probe_register(
+			tp_sched_switch, (void *)record_pebs_process_info,
+			NULL);
+	}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+	err = register_trace_sched_switch(record_pebs_process_info, NULL);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
+	err = register_trace_sched_switch(record_pebs_process_info);
+#else
+	SEP_DRV_LOG_INIT(
+		"Please use Linux kernel version >= 2.6.28 to use multiple pebs.");
+	err = -1;
+#endif
+	CONTROL_Invoke_Parallel(capture_sched_switch, NULL);
+#endif
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %d.", err);
+	return err;
+}
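+
+/*
+ * Note on the kernel-version split above: since Linux 3.15 a probe is
+ * registered against a struct tracepoint * rather than a tracepoint
+ * name, which is why the driver first scans for the sched_switch
+ * tracepoint with for_each_kernel_tracepoint() and only then calls
+ * tracepoint_probe_register(); older kernels used the
+ * register_trace_sched_switch() wrapper directly.
+ */
+
+/* ------------------------------------------------------------------------- */
+/*!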
+ * @fn int install_sched_process_exit_callback(VOID)
+ * @brief registers sched_process_exit callback for process termination notification
+ *
+ * @param none
+ *
+ * @return 0 success else error number
+ *
+ * Special Notes:
+ *
+ * None
+ */
+static int
+install_sched_process_exit_callback(VOID)
+{
+	int err = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+	SEP_DRV_LOG_INIT("Installing sched_process_exit Hook.");
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+	err = profile_event_register(MY_TASK, &linuxos_exit_task_nb);
+#else
+#if defined(CONFIG_TRACEPOINTS)
+	if (!tp_sched_process_exit) {
+		for_each_kernel_tracepoint(&find_sched_process_exit_tracepoint,
+					   &tp_sched_process_exit);
+	}
+	if (!tp_sched_process_exit) {
+		err = -1;
+	} else {
+		err = tracepoint_probe_register(
+			tp_sched_process_exit, (void *)linuxos_Exit_Task_Notify,
+			NULL);
+	}
+#else
+	err = -1;
+	SEP_DRV_LOG_WARNING(
+		"This kernel does not support profiling hook for process exit.");
+	SEP_DRV_LOG_WARNING(
+		"Task termination will not be tracked during sampling session!");
+#endif
+#endif
+
+	if (err) {
+		SEP_DRV_LOG_WARNING(
+			"Failed to register the profiling hook for process termination.");
+		SEP_DRV_LOG_WARNING(
+			"Task termination will not be tracked during sampling session!");
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %d.", err);
+	return err;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn int install_sys_enter_munmap_callback(VOID)
+ * @brief registers kprobe for munmap syscall
+ *
+ * @param none
+ *
+ * @return 0 success else error number
+ *
+ * Special Notes:
+ *
+ * None
+ */
+static int
+install_sys_enter_munmap_callback(VOID)
+{
+	int err = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+	SEP_DRV_LOG_INIT("Installing sys_enter_munmap Hook.");
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+	err = profile_event_register(MY_UNMAP, &linuxos_exec_unmap_nb);
+#else
+#if defined(CONFIG_KPROBES)
+	kp_munmap = CONTROL_Allocate_Memory(sizeof(struct kprobe));
+	if (!kp_munmap) {
+		SEP_DRV_LOG_ERROR("Failed to allocate memory for kp_munmap.");
+		err = -1;
+	} else {
+		kp_munmap->symbol_name = "__vm_munmap";
+		kp_munmap->pre_handler = linuxos_Exec_Unmap_Notify;
+		err = register_kprobe(kp_munmap);
+	}
+#else
+	err = -1;
+	SEP_DRV_LOG_WARNING(
+		"This kernel does not support profiling hook for image unload.");
+	SEP_DRV_LOG_WARNING(
+		"Dynamic image load/unload will not be tracked during sampling session!");
+#endif
+#endif
+
+	if (err) {
+		SEP_DRV_LOG_WARNING(
+			"Failed to register the profiling hook for image unload.");
+		SEP_DRV_LOG_WARNING(
+			"Dynamic image load/unload will not be tracked during sampling session!");
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %d.", err);
+	return err;
+}
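+
+/*
+ * Minimal sketch of the kprobe pattern used above, for reference only
+ * (compiled out). A statically allocated struct kprobe works just as
+ * well as the CONTROL_Allocate_Memory() path used by the driver; the
+ * handler name below is illustrative.
+ */
+#if 0
+static int
+example_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	/* First syscall argument is in regs->di on x86_64. */
+	pr_debug("munmap addr: 0x%lx\n", regs->di);
+	return 0; /* 0 lets the probed instruction execute normally */
+}
+
+static struct kprobe example_kp = {
+	.symbol_name = "__vm_munmap",
+	.pre_handler = example_pre_handler,
+};
+
+static int
+example_install(void)
+{
+	return register_kprobe(&example_kp); /* 0 on success */
+}
+
+static void
+example_uninstall(void)
+{
+	unregister_kprobe(&example_kp);
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!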
+ * @fn int LINUXOS_Install_GuestVM_Transition_Callback(VOID)
+ * @brief registers kprobe for kvm vmenter/vmexit syscall
+ *
+ * @param none
+ *
+ * @return 0 success else error number
+ *
+ * Special Notes:
+ *
+ * None
+ */
+extern int
+LINUXOS_Install_GuestVM_Transition_Callback(VOID)
+{
+	int err = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+	SEP_DRV_LOG_INIT("Installing KVM guest vmenter/vmexit Hook.");
+
+	kp_guest_enter = CONTROL_Allocate_Memory(sizeof(struct kprobe));
+	if (!kp_guest_enter) {
+		SEP_DRV_LOG_ERROR(
+			"Failed to allocate memory for kp_guest_enter.");
+		err = -1;
+	} else {
+		kp_guest_enter->symbol_name = "kvm_load_guest_xsave_state";
+		kp_guest_enter->pre_handler = linuxos_Guest_Enter_Notify;
+		err = register_kprobe(kp_guest_enter);
+	}
+
+	if (err) {
+#if defined(CONFIG_KPROBES)
+		SEP_DRV_LOG_WARNING(
+			"Failed to register the profiling hook for guest enter.");
+#else
+		SEP_DRV_LOG_WARNING(
+			"The KPROBES profiling hook for guest enter is not supported.");
+#endif
+		SEP_DRV_LOG_WARNING(
+			"VM enter will not be tracked during sampling session!");
+	}
+
+	kp_guest_exit = CONTROL_Allocate_Memory(sizeof(struct kprobe));
+	if (!kp_guest_exit) {
+		SEP_DRV_LOG_ERROR(
+			"Failed to allocate memory for kp_guest_exit.");
+		err = -1;
+	} else {
+		kp_guest_exit->symbol_name = "kvm_load_host_xsave_state";
+		kp_guest_exit->pre_handler = linuxos_Guest_Exit_Notify;
+		err = register_kprobe(kp_guest_exit);
+	}
+
+	if (err) {
+#if defined(CONFIG_KPROBES)
+		SEP_DRV_LOG_WARNING(
+			"Failed to register the profiling hook for guest exit.");
+#else
+		SEP_DRV_LOG_WARNING(
+			"The KPROBES profiling hook for guest exit is not supported.");
+#endif
+		SEP_DRV_LOG_WARNING(
+			"VM exit will not be tracked during sampling session!");
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %d.", err);
+	return err;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID LINUXOS_Install_Hooks(VOID)
+ * @brief registers the profiling callbacks
+ *
+ * @param none
+ *
+ * @return none
+ *
+ * Special Notes:
+ *
+ * None
+ */
+extern VOID
+LINUXOS_Install_Hooks(VOID)
+{
+	int err = 0;
+
+	SEP_DRV_LOG_INIT_IN("Installing Linux OS Hooks.");
+
+	if (hooks_installed == 1) {
+		SEP_DRV_LOG_INIT_OUT("The OS Hooks are already installed.");
+		return;
+	}
+
+	linuxos_Map_Kernel_Modules();
+
+	err = install_sched_process_exit_callback();
+	if (err) {
+		SEP_DRV_LOG_ERROR(
+			"Failed to install sched_process_exit callback");
+	}
+
+	err = install_sys_enter_munmap_callback();
+	if (err) {
+		SEP_DRV_LOG_ERROR(
+			"Failed to install sys_enter_munmap callback");
+	}
+
+	if (collect_sideband) {
+		err = install_sched_switch_callback();
+		if (err) {
+			SEP_DRV_LOG_WARNING(
+				"Failed to install sched_switch callback for multiple pebs.");
+		}
+	}
+
+	// in case of host kvm system
+	if (DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) &&
+	    DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) == DRV_VMM_KVM &&
+	    !DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info)) {
+		err = LINUXOS_Install_GuestVM_Transition_Callback();
+		if (err) {
+			SEP_DRV_LOG_WARNING(
+				"Failed to install guestvm transition callback");
+		}
+	}
+
+	hooks_installed = 1;
+	atomic_set(&hook_state, HOOK_FREE);
+
+	SEP_DRV_LOG_INIT_OUT("");
+	return;
+}
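+
+/*
+ * Sketch of the hook_state protocol shared by the notification callbacks
+ * (illustration only, compiled out). A callback takes a "reference" with
+ * atomic_add_negative(1, ...): while hook_state is at or above HOOK_FREE
+ * (0) the add yields a non-negative result and the callback body runs;
+ * once LINUXOS_Uninstall_Hooks() biases the counter by HOOK_UNINSTALL
+ * (-10000), every new add stays negative and the body is skipped. The
+ * uninstaller then waits for the counter to drain back to exactly
+ * HOOK_UNINSTALL, meaning no callback is still inside its body.
+ */
+#if 0
+static void
+example_notification_body(void)
+{
+	if (!atomic_add_negative(1, &hook_state)) {
+		/* safe region: uninstall cannot complete while we are here */
+	}
+	atomic_dec(&hook_state); /* drop the reference either way */
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!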
+ * @fn int uninstall_sched_switch_callback(VOID) + * @brief unregisters sched_switch callbacks for PEBS sideband + * + * @param none + * + * @return 0 success else error number + * + * Special Notes: + * + * None + */ +static int +uninstall_sched_switch_callback(VOID) +{ + int err = 0; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_INIT("Uninstalling PEBS Linux OS Hooks."); + +#if defined(CONFIG_TRACEPOINTS) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) + if (!tp_sched_switch) { + err = -EIO; + SEP_DRV_LOG_INIT( + "Please check Linux is built w/ CONFIG_CONTEXT_SWITCH_TRACER."); + } else { + err = tracepoint_probe_unregister( + tp_sched_switch, (void *)record_pebs_process_info, + NULL); + } +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + err = unregister_trace_sched_switch(record_pebs_process_info, NULL); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) + err = unregister_trace_sched_switch(record_pebs_process_info); +#else + SEP_DRV_LOG_INIT( + "Please use Linux kernel version >= 2.6.28 to use multiple pebs."); + err = -1; +#endif + CONTROL_Invoke_Parallel(capture_sched_switch, NULL); +#endif + + SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); + return err; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn int uninstall_sched_process_exit_callback(VOID) + * @brief unregisters sched_process_exit callbacks for process exit notification + * + * @param none + * + * @return 0 success else error number + * + * Special Notes: + * + * None + */ +static int +uninstall_sched_process_exit_callback(VOID) +{ + int err = 0; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_INIT("Uninstalling sched_process_exit Hook."); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) + err = profile_event_unregister(MY_TASK, &linuxos_exit_task_nb); +#else +#if defined(CONFIG_TRACEPOINTS) + if (tp_sched_process_exit) { + err = tracepoint_probe_unregister( + tp_sched_process_exit, (void *)linuxos_Exit_Task_Notify, + NULL); + } +#endif +#endif + + SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); + return err; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn int uninstall_sys_enter_munmap_callback(VOID) + * @brief unregisters kprobe for munmap syscall + * + * @param none + * + * @return 0 success else error number + * + * Special Notes: + * + * None + */ +static int +uninstall_sys_enter_munmap_callback(VOID) +{ + int err = 0; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_INIT("Uninstalling sys_enter_munmap Hook."); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) + err = profile_event_unregister(MY_UNMAP, &linuxos_exec_unmap_nb); +#else +#if defined(CONFIG_KPROBES) + if (kp_munmap) { + unregister_kprobe(kp_munmap); + CONTROL_Free_Memory(kp_munmap); + } +#endif +#endif + + SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); + return err; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int LINUXOS_Uninstall_GuestVM_Transition_Callback(VOID) + * @brief unregisters kprobe for KVM guest vmenter/vmexit + * + * @param none + * + * @return 0 success else error number + * + * Special Notes: + * + * None + */ +extern int +LINUXOS_Uninstall_GuestVM_Transition_Callback(VOID) +{ + int err = 0; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_INIT("Uninstalling guest Hook."); + + if (kp_guest_enter) { + unregister_kprobe(kp_guest_enter); + CONTROL_Free_Memory(kp_guest_enter); + } + + if (kp_guest_exit) { + unregister_kprobe(kp_guest_exit); + CONTROL_Free_Memory(kp_guest_exit); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %d.", err); + return err; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID LINUXOS_Uninstall_Hooks(VOID) + * @brief unregisters the profiling callbacks + * + * @param none + * + * @return + * + * Special Notes: + * + * None + */ +extern VOID +LINUXOS_Uninstall_Hooks(VOID) +{ + int err = 0; + int value = 0; + int tries = 10; + + SEP_DRV_LOG_INIT_IN("Uninstalling Linux OS Hooks."); + + if (hooks_installed == 0) { + SEP_DRV_LOG_INIT_OUT("Hooks are not installed!"); + return; + } + + hooks_installed = 0; + + LINUXOS_Uninstall_GuestVM_Transition_Callback(); + + err = uninstall_sched_process_exit_callback(); + if (err) { + SEP_DRV_LOG_ERROR( + "Failed to uninstall sched_process_exit callback."); + } + + err = uninstall_sys_enter_munmap_callback(); + if (err) { + SEP_DRV_LOG_ERROR( + "Failed to uninstall sys_enter_munmap callback."); + } + + if (collect_sideband) { + err = uninstall_sched_switch_callback(); + if (err) { + SEP_DRV_LOG_WARNING( + "Failed to uninstall sched_switch callback for multiple pebs."); + } + } + + value = atomic_cmpxchg(&hook_state, HOOK_FREE, HOOK_UNINSTALL); + if ((value == HOOK_FREE) || + (value == HOOK_UNINSTALL)) { // already in free or uninstall state + SEP_DRV_LOG_INIT_OUT( + "Uninstall hook done (already in state %d).", value); + return; + } + atomic_add(HOOK_UNINSTALL, &hook_state); + while (tries) { + SYS_IO_Delay(); + SYS_IO_Delay(); + value = atomic_read(&hook_state); + if (value == HOOK_UNINSTALL) { + break; + } + tries--; + } + + SEP_DRV_LOG_INIT_OUT("Done -- state %d, tries %d.", value, tries); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn DRV_BOOL LINUXOS_Check_KVM_Guest_Process() + * + * @brief check the presence of kvm guest process + * + * @param none + * + * @return TRUE if the kvm guest process is running, FALSE if not + */ +extern DRV_BOOL +LINUXOS_Check_KVM_Guest_Process(VOID) +{ + struct task_struct *p; + + SEP_DRV_LOG_TRACE_IN(""); + + FOR_EACH_TASK(p) { + if (p == NULL) { + continue; + } + // making sure there is a trailing 0 + p->comm[TASK_COMM_LEN - 1] = 0; + if (!strncmp(p->comm, "qemu-kvm", 8) || + !strncmp(p->comm, "qemu-system", 11) || + !strncmp(p->comm, "crosvm", 6)) { + SEP_DRV_LOG_TRACE_OUT( + "TRUE (found guest VM process(es)!)."); + return TRUE; + } + } + + SEP_DRV_LOG_TRACE_OUT("FALSE"); + return FALSE; +} + diff --git a/drivers/platform/x86/sepdk/sep/lwpmudrv.c b/drivers/platform/x86/sepdk/sep/lwpmudrv.c new file mode 100644 index 00000000000000..4a17e3ef410420 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/lwpmudrv.c @@ -0,0 +1,8053 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. 
+ + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_version.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#else +#include +#endif +#include +#include +#include +#include +#include +#if defined(CONFIG_HYPERVISOR_GUEST) +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34) +#include +#endif +#endif +#if defined(CONFIG_XEN) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) +#include +#endif + +#if defined(CONFIG_XEN_HAVE_VPMU) +#include +#include +#include +#endif + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_ioctl.h" +#include "lwpmudrv_struct.h" +#include "inc/ecb_iterators.h" +#include "pci.h" +#include "inc/unc_common.h" + +#include "apic.h" +#include "cpumon.h" +#include "lwpmudrv.h" +#include "utility.h" +#include "control.h" +#include "core2.h" +#include "pmi.h" + +#include "output.h" +#include "linuxos.h" +#include "sys_info.h" +#include "eventmux.h" +#include "pebs.h" +#include "pmu_info_struct.h" +#include "pmu_list.h" +#include "ipt.h" + +#if defined(DRV_PMT_ENABLE) +#include +#endif + +MODULE_AUTHOR("Copyright (C) 2007-2020 Intel Corporation"); +MODULE_VERSION(SEP_NAME"_"SEP_VERSION_STR); +MODULE_LICENSE("Dual BSD/GPL"); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +char *sym_lookup_func_addr = NULL; +module_param(sym_lookup_func_addr, charp, 0); +MODULE_PARM_DESC(sym_lookup_func_addr, + "kallsyms_lookup_name function address in hex"); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0) +#define MAX_ADDR_STR_LEN 16 +#define MAX_CHARS_IN_LINE 512 +char sym_lookup_func_addr[MAX_ADDR_STR_LEN + 1]; +#endif + +struct task_struct *abnormal_handler = NULL; + +typedef struct LWPMU_DEV_NODE_S LWPMU_DEV_NODE; +typedef LWPMU_DEV_NODE *LWPMU_DEV; + +struct LWPMU_DEV_NODE_S { + long buffer; + struct semaphore sem; + struct cdev cdev; +}; + +#define LWPMU_DEV_buffer(dev) ((dev)->buffer) +#define LWPMU_DEV_sem(dev) ((dev)->sem) +#define LWPMU_DEV_cdev(dev) ((dev)->cdev) + +/* Global variables of the driver */ +SEP_VERSION_NODE drv_version; +U64 *read_counter_info; +U64 *prev_counter_data; +U32 emon_buffer_size; +U32 emon_read_threshold; +VOID **desc_data; +U64 total_ram; +U32 output_buffer_size = OUTPUT_LARGE_BUFFER; +U32 saved_buffer_size; +static S32 desc_count; +uid_t uid; +DRV_CONFIG drv_cfg; +DEV_CONFIG cur_pcfg; +volatile pid_t control_pid; +U64 *interrupt_counts; 
+LWPMU_DEV lwpmu_control; +LWPMU_DEV lwmod_control; +LWPMU_DEV lwemon_control; +LWPMU_DEV lwsamp_control; +LWPMU_DEV lwsampunc_control; +LWPMU_DEV lwsideband_control; +LWPMU_DEV lwipt_control; +LWPMU_DEV lwiptinfo_control; +EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; +EMON_DESC emon_desc; +unsigned long emon_timer_interval; + +/* needed for multiple devices (core/uncore) */ +U32 num_devices; +U32 num_core_devs; +U32 cur_device; +LWPMU_DEVICE devices; +static U32 uncore_em_factor; +unsigned long unc_timer_interval; +struct timer_list *unc_read_timer; +UNC_EM_DESC unc_em_desc; +S32 max_groups_unc; +DRV_BOOL multi_pebs_enabled = FALSE; +DRV_BOOL unc_buf_init = FALSE; +DRV_BOOL NMI_mode = TRUE; +DRV_BOOL KVM_guest_mode = FALSE; +DRV_SETUP_INFO_NODE req_drv_setup_info; +IPT_CONFIG ipt_config; +DRV_BOOL collect_sideband = FALSE; +DRV_BOOL unc_discovery_init = FALSE; +#if defined(DRV_PMT_ENABLE) +DRV_PMT_TELEM_DEV_NODE pmt_devices[MAX_PMT_DEVICES]; +U32 pmt_dev_index; +#endif + +#define UNCORE_EM_GROUP_SWAP_FACTOR 100 +#define PMU_DEVICES 2 // pmu, mod + +extern U32 *cpu_built_sysinfo; + +#define DRV_DEVICE_DELIMITER "!" + +#if defined(DRV_USE_UNLOCKED_IOCTL) +static struct mutex ioctl_lock; +#endif + +static S8 *cpu_mask_bits; + +/* + * Global data: Buffer control structure + */ +BUFFER_DESC cpu_buf; +BUFFER_DESC unc_buf; +BUFFER_DESC module_buf; +BUFFER_DESC emon_buf; +BUFFER_DESC cpu_sideband_buf; +BUFFER_DESC ipt_trace_buf; +BUFFER_DESC ipt_info_buf; + +static dev_t lwpmu_DevNum; /* the major and minor parts for SEP3 base */ +static dev_t lwsamp_DevNum; /* the major and minor parts for SEP3 percpu */ +/* the major and minor parts for SEP3 per package */ +static dev_t lwsampunc_DevNum; +static dev_t lwsideband_DevNum; +static dev_t lwemon_DevNum; +static dev_t lwipt_DevNum; +static dev_t lwiptinfo_DevNum; + +#if !defined(DRV_UDEV_UNAVAILABLE) +static struct class *pmu_class; +#endif + +extern volatile int config_done; + +CPU_STATE pcb; +size_t pcb_size; +U32 *core_to_package_map; +U32 *core_to_module_map; +U32 *core_to_phys_core_map; +U32 *core_to_thread_map; +U32 *core_to_dev_map; +U32 *cores_per_module; +U32 *threads_per_core; +U32 max_threads_per_core; +U32 max_cores_per_module; +U32 module_enum_supported; +U32 num_packages; +U32 num_pci_domains; +U64 *pmu_state; +U64 *cpu_tsc; +U64 *prev_cpu_tsc; +U64 *diff_cpu_tsc; +U64 max_rmid; + +UNCORE_TOPOLOGY_INFO_NODE uncore_topology; +PMT_TOPOLOGY_DISCOVERY_LIST_NODE pmt_topology; +PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; +static PLATFORM_TOPOLOGY_PROG_NODE req_platform_topology_prog_node; +UNCORE_DISCOVERY_TABLE_LIST_NODE uncore_discovery_tables; + +static U8 *prev_set_CR4; +wait_queue_head_t wait_exit; + +static S32 allowlist_index = -1; + +#if !defined(DISABLE_BUILD_SOCPERF) +extern OS_STATUS SOCPERF_Switch_Group3(void); +#endif + +#if !defined(DRV_USE_UNLOCKED_IOCTL) +#define MUTEX_INIT(lock) +#define MUTEX_LOCK(lock) +#define MUTEX_UNLOCK(lock) +#else +#define MUTEX_INIT(lock) mutex_init(&(lock)); +#define MUTEX_LOCK(lock) mutex_lock(&(lock)) +#define MUTEX_UNLOCK(lock) mutex_unlock(&(lock)) +#endif + +#if defined(CONFIG_XEN_HAVE_VPMU) +typedef struct xen_pmu_params xen_pmu_params_t; +typedef struct xen_pmu_data xen_pmu_data_t; + +DEFINE_PER_CPU(xen_pmu_data_t *, xenpmu_shared); +#endif + +/* + * @fn void lwpmudrv_Allocate_Uncore_Buffer + * + * @param + * + * @return OS_STATUS + * + * @brief allocate buffer space for writing/reading uncore data + */ +static OS_STATUS +lwpmudrv_Allocate_Uncore_Buffer(VOID) +{ + U32 i, 
j, k, l;
+	U32 max_entries = 0;
+	U32 num_entries;
+	DEV_UNC_CONFIG pcfg_unc;
+	ECB ecb;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	for (i = num_core_devs; i < num_devices; i++) {
+		pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]);
+		if (!pcfg_unc || !DEV_UNC_CONFIG_num_events(pcfg_unc)) {
+			SEP_DRV_LOG_TRACE(
+				"Skipping uncore buffer allocation for device %u with no events.",
+				i);
+			continue;
+		}
+		LWPMU_DEVICE_acc_value(&devices[i]) =
+			CONTROL_Allocate_Memory(num_packages * sizeof(U64 **));
+		LWPMU_DEVICE_prev_value(&devices[i]) =
+			CONTROL_Allocate_Memory(num_packages * sizeof(U64 *));
+		for (j = 0; j < num_packages; j++) {
+			// Allocate memory and zero out accumulator array (one per group)
+			LWPMU_DEVICE_acc_value(&devices[i])[j] =
+				CONTROL_Allocate_Memory(
+					LWPMU_DEVICE_em_groups_count(
+						&devices[i]) *
+					sizeof(U64 *));
+			for (k = 0;
+			     k < LWPMU_DEVICE_em_groups_count(&devices[i]);
+			     k++) {
+				ecb = LWPMU_DEVICE_PMU_register_data(
+					&devices[i])[k];
+				num_entries =
+					ECB_num_events(ecb) *
+					LWPMU_DEVICE_num_units(&devices[i]);
+				LWPMU_DEVICE_acc_value(&devices[i])[j][k] =
+					CONTROL_Allocate_Memory(num_entries *
+								sizeof(U64));
+				for (l = 0; l < num_entries; l++) {
+					LWPMU_DEVICE_acc_value(
+						&devices[i])[j][k][l] = 0LL;
+				}
+				if (max_entries < num_entries) {
+					max_entries = num_entries;
+				}
+			}
+			// Allocate memory and zero out prev_value array (one across groups)
+			LWPMU_DEVICE_prev_value(&devices[i])[j] =
+				CONTROL_Allocate_Memory(max_entries *
+							sizeof(U64));
+			for (k = 0; k < max_entries; k++) {
+				LWPMU_DEVICE_prev_value(&devices[i])[j][k] =
+					0LL;
+			}
+		}
+		max_entries = 0;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/*
+ * @fn static OS_STATUS lwpmudrv_Free_Uncore_Buffer(U32 i)
+ *
+ * @param i - device index
+ *
+ * @return OS_STATUS
+ *
+ * @brief Free uncore data buffers
+ */
+static OS_STATUS
+lwpmudrv_Free_Uncore_Buffer(U32 i)
+{
+	U32 j, k;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (LWPMU_DEVICE_prev_value(&devices[i])) {
+		for (j = 0; j < num_packages; j++) {
+			LWPMU_DEVICE_prev_value(&devices[i])[j] =
+				CONTROL_Free_Memory(LWPMU_DEVICE_prev_value(
+					&devices[i])[j]);
+		}
+		LWPMU_DEVICE_prev_value(&devices[i]) = CONTROL_Free_Memory(
+			LWPMU_DEVICE_prev_value(&devices[i]));
+	}
+	if (LWPMU_DEVICE_acc_value(&devices[i])) {
+		for (j = 0; j < num_packages; j++) {
+			if (LWPMU_DEVICE_acc_value(&devices[i])[j]) {
+				for (k = 0; k < LWPMU_DEVICE_em_groups_count(
+						    &devices[i]);
+				     k++) {
+					LWPMU_DEVICE_acc_value(
+						&devices[i])[j][k] =
+						CONTROL_Free_Memory(
+							LWPMU_DEVICE_acc_value(
+								&devices[i])[j]
+									    [k]);
+				}
+				LWPMU_DEVICE_acc_value(&devices[i])[j] =
+					CONTROL_Free_Memory(
+						LWPMU_DEVICE_acc_value(
+							&devices[i])[j]);
+			}
+		}
+		LWPMU_DEVICE_acc_value(&devices[i]) = CONTROL_Free_Memory(
+			LWPMU_DEVICE_acc_value(&devices[i]));
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Initialize_State(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief Allocates the memory needed at load time. Initializes all the
+ * @brief necessary state variables with the default values. 
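+ * @brief Sizing note: num_cpus becomes max(highest present CPU id + 1,
+ * @brief num_present_cpus()), so sparse CPU numbering (e.g. present ids
+ * @brief 0-3 and 8-11 giving num_cpus = 12) cannot under-allocate the
+ * @brief per-CPU arrays that are indexed by raw CPU id.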
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Initialize_State(VOID)
+{
+	S32 i, max_cpu_id = 0;
+
+	SEP_DRV_LOG_INIT_IN("");
+
+	for_each_possible_cpu (i) {
+		if (cpu_present(i)) {
+			if (i > max_cpu_id) {
+				max_cpu_id = i;
+			}
+		}
+	}
+	max_cpu_id++;
+
+	/*
+	 * Machine Initializations
+	 * Abstract this information away into a separate entry point
+	 *
+	 * Question: Should we allow for the use of Hot-cpu
+	 * add/subtract functionality while the driver is executing?
+	 */
+	if (max_cpu_id > num_present_cpus()) {
+		GLOBAL_STATE_num_cpus(driver_state) = max_cpu_id;
+	} else {
+		GLOBAL_STATE_num_cpus(driver_state) = num_present_cpus();
+	}
+	GLOBAL_STATE_active_cpus(driver_state) = num_online_cpus();
+	GLOBAL_STATE_cpu_count(driver_state) = 0;
+	GLOBAL_STATE_dpc_count(driver_state) = 0;
+	GLOBAL_STATE_num_em_groups(driver_state) = 0;
+	CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_UNINITIALIZED);
+
+	SEP_DRV_LOG_INIT_OUT("Success: num_cpus=%d, active_cpus=%d.",
+			     GLOBAL_STATE_num_cpus(driver_state),
+			     GLOBAL_STATE_active_cpus(driver_state));
+	return OS_SUCCESS;
+}
+
+#if !defined(CONFIG_PREEMPT_COUNT)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void lwpmudrv_Fill_TSC_Info (PVOID param)
+ *
+ * @param param - pointer to the buffer to fill in.
+ *
+ * @return none
+ *
+ * @brief Read the TSC and write into the correct array slot.
+ *
+ * Special Notes
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+atomic_t read_now;
+static wait_queue_head_t read_tsc_now;
+#endif
+static VOID
+lwpmudrv_Fill_TSC_Info(PVOID param)
+{
+	U32 this_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+	//
+	// Wait until all CPUs are ready to proceed
+	// This will serve as a synchronization point to compute tsc skews.
+	//
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+	if (atomic_read(&read_now) >= 1) {
+		if (atomic_dec_and_test(&read_now) == FALSE) {
+			wait_event_interruptible(read_tsc_now,
+						 (atomic_read(&read_now) >= 1));
+		}
+	} else {
+		wake_up_interruptible_all(&read_tsc_now);
+	}
+#endif
+
+	UTILITY_Read_TSC(&cpu_tsc[this_cpu]);
+	SEP_DRV_LOG_TRACE("This cpu %d --- tsc --- 0x%llx.", this_cpu,
+			  cpu_tsc[this_cpu]);
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+	return;
+}
+#endif
+
+/*********************************************************************
+ * Internal Driver functions
+ * Should be called only from the lwpmudrv_DeviceControl routine
+ *********************************************************************/
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void lwpmudrv_Dump_Tracer(char const *name, U64 tsc)
+ *
+ * @param name - name of the tracer
+ * @param tsc - TSC value to tag the marker with (read locally when 0)
+ *
+ * @return void
+ *
+ * @brief Function that handles the generation of markers into the ftrace stream
+ *
+ * Special Notes
+ */
+static void
+lwpmudrv_Dump_Tracer(char const *name, U64 tsc)
+{
+	SEP_DRV_LOG_TRACE_IN("");
+	if (tsc == 0) {
+		preempt_disable();
+		UTILITY_Read_TSC(&tsc);
+		tsc -= TSC_SKEW(CONTROL_THIS_CPU());
+		preempt_enable();
+	}
+	SEP_DRV_LOG_TRACE_OUT("Success");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_VERSION call.
+ * @brief Returns the version number of the kernel mode sampling. 
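+ * @brief The caller must pass exactly sizeof(U32) bytes in
+ * @brief buf_drv_to_usr; any other length is rejected with OS_FAULT
+ * @brief before the version word is copied out with put_user().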
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Version(IOCTL_ARGS arg)
+{
+	OS_STATUS status;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	// Check if enough space is provided for collecting the data
+	if ((arg->len_drv_to_usr != sizeof(U32)) ||
+	    (arg->buf_drv_to_usr == NULL)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+		return OS_FAULT;
+	}
+
+	status = put_user(SEP_VERSION_NODE_sep_version(&drv_version),
+			  (U32 *)arg->buf_drv_to_usr);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Reserve(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_RESERVE call.
+ * @brief Sets the state to RESERVED if possible. Returns BUSY if unable
+ * @brief to reserve the PMU.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Reserve(IOCTL_ARGS arg)
+{
+	OS_STATUS status = OS_SUCCESS;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	// Check if enough space is provided for collecting the data
+	if ((arg->len_drv_to_usr != sizeof(S32)) ||
+	    (arg->buf_drv_to_usr == NULL)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+		return OS_FAULT;
+	}
+
+	status = put_user(!CHANGE_DRIVER_STATE(STATE_BIT_UNINITIALIZED,
+					       DRV_STATE_RESERVED),
+			  (int *)arg->buf_drv_to_usr);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID lwpmudrv_Finish_Op(PVOID param)
+ *
+ * @param param - dummy (unused)
+ *
+ * @return VOID
+ *
+ * @brief Finalize PMU after collection
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Finish_Op(PVOID param)
+{
+	U32 this_cpu = CONTROL_THIS_CPU();
+	U32 dev_idx = core_to_dev_map[this_cpu];
+	DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (dispatch != NULL && dispatch->fini != NULL) {
+		dispatch->fini(&dev_idx);
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID lwpmudrv_Clean_Up(DRV_BOOL)
+ *
+ * @param DRV_BOOL finish - Flag to call finish
+ *
+ * @return VOID
+ *
+ * @brief Cleans up the memory allocation. 
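+ * @brief Frees per-device uncore buffers, PMU register data and event
+ * @brief configs before the devices array itself; pcb is only zeroed
+ * @brief (not freed) so it can be reused by a later collection.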
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Clean_Up(DRV_BOOL finish)
+{
+	U32 i;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+		drv_cfg = CONTROL_Free_Memory(drv_cfg);
+		goto signal_end;
+	}
+
+	if (devices) {
+		U32 id;
+		U32 num_groups = 0;
+		EVENT_CONFIG ec;
+		DEV_UNC_CONFIG pcfg_unc;
+		DISPATCH dispatch_unc = NULL;
+
+		for (id = 0; id < num_devices; id++) {
+			if (LWPMU_DEVICE_pcfg(&devices[id])) {
+				if (LWPMU_DEVICE_device_type(&devices[id]) ==
+				    DEVICE_INFO_UNCORE) {
+					pcfg_unc =
+						LWPMU_DEVICE_pcfg(&devices[id]);
+					dispatch_unc = LWPMU_DEVICE_dispatch(
+						&devices[id]);
+					if (DEV_UNC_CONFIG_num_events(
+						    pcfg_unc) &&
+					    dispatch_unc &&
+					    dispatch_unc->fini) {
+						SEP_DRV_LOG_TRACE(
+							"LWP: calling UNC fini.");
+						dispatch_unc->fini(
+							(PVOID *)&id);
+					}
+					lwpmudrv_Free_Uncore_Buffer(id);
+				} else if (finish) {
+					CONTROL_Invoke_Parallel(
+						lwpmudrv_Finish_Op, NULL);
+				}
+			}
+
+			if (LWPMU_DEVICE_PMU_register_data(&devices[id])) {
+				ec = LWPMU_DEVICE_ec(&devices[id]);
+				if (LWPMU_DEVICE_device_type(&devices[id]) ==
+				    DEVICE_INFO_CORE) {
+					num_groups =
+						EVENT_CONFIG_num_groups(ec);
+				} else {
+					num_groups =
+						EVENT_CONFIG_num_groups_unc(ec);
+				}
+				for (i = 0; i < num_groups; i++) {
+					LWPMU_DEVICE_PMU_register_data(
+						&devices[id])[i] =
+						CONTROL_Free_Memory(
+							LWPMU_DEVICE_PMU_register_data(
+								&devices[id])[i]);
+				}
+				LWPMU_DEVICE_PMU_register_data(&devices[id]) =
+					CONTROL_Free_Memory(
+						LWPMU_DEVICE_PMU_register_data(
+							&devices[id]));
+			}
+			LWPMU_DEVICE_pcfg(&devices[id]) = CONTROL_Free_Memory(
+				LWPMU_DEVICE_pcfg(&devices[id]));
+			LWPMU_DEVICE_ec(&devices[id]) = CONTROL_Free_Memory(
+				LWPMU_DEVICE_ec(&devices[id]));
+			if (LWPMU_DEVICE_lbr(&devices[id])) {
+				LWPMU_DEVICE_lbr(&devices[id]) =
+					CONTROL_Free_Memory(
+						LWPMU_DEVICE_lbr(&devices[id]));
+			}
+			if (LWPMU_DEVICE_cur_group(&devices[id])) {
+				LWPMU_DEVICE_cur_group(&devices[id]) =
+					CONTROL_Free_Memory(
+						LWPMU_DEVICE_cur_group(
+							&devices[id]));
+			}
+		}
+		devices = CONTROL_Free_Memory(devices);
+	}
+
+	if (desc_data) {
+		for (i = 0; i < GLOBAL_STATE_num_descriptors(driver_state);
+		     i++) {
+			desc_data[i] = CONTROL_Free_Memory(desc_data[i]);
+		}
+		desc_data = CONTROL_Free_Memory(desc_data);
+	}
+
+	drv_cfg = CONTROL_Free_Memory(drv_cfg);
+	pmu_state = CONTROL_Free_Memory(pmu_state);
+	cpu_mask_bits = CONTROL_Free_Memory(cpu_mask_bits);
+	core_to_dev_map = CONTROL_Free_Memory(core_to_dev_map);
+	ipt_config = CONTROL_Free_Memory(ipt_config);
+
+signal_end:
+	GLOBAL_STATE_num_em_groups(driver_state) = 0;
+	GLOBAL_STATE_num_descriptors(driver_state) = 0;
+	num_devices = 0;
+	num_core_devs = 0;
+	max_groups_unc = 0;
+	control_pid = 0;
+	unc_buf_init = FALSE;
+
+	OUTPUT_Cleanup();
+	memset(pcb, 0, pcb_size);
+
+	SEP_DRV_LOG_FLOW_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Initialize_Driver (PVOID buf_usr_to_drv, size_t len_usr_to_drv)
+ *
+ * @param buf_usr_to_drv - pointer to the input buffer
+ * @param len_usr_to_drv - size of the input buffer
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_INIT_DRIVER call.
+ * @brief Sets up the interrupt handler.
+ * @brief Sets up the output buffers/files needed to make the driver
+ * @brief operational. 
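+ * @brief On any allocation or copy failure, the partially built state
+ * @brief (drv_cfg, interrupt_counts, pmu_state) is freed and the driver
+ * @brief transitions to DRV_STATE_TERMINATING.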
+ * + * Special Notes + */ +static OS_STATUS +lwpmudrv_Initialize_Driver(PVOID buf_usr_to_drv, size_t len_usr_to_drv) +{ + S32 cpu_num; + int status = OS_SUCCESS; + + SEP_DRV_LOG_FLOW_IN(""); + + if (buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + if (!CHANGE_DRIVER_STATE(STATE_BIT_RESERVED, DRV_STATE_IDLE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state!"); + return OS_FAULT; + } + + interrupt_counts = NULL; + pmu_state = NULL; + + drv_cfg = CONTROL_Allocate_Memory(len_usr_to_drv); + if (!drv_cfg) { + status = OS_NO_MEM; + SEP_DRV_LOG_ERROR("Memory allocation failure for drv_cfg!"); + goto clean_return; + } + + if (copy_from_user(drv_cfg, buf_usr_to_drv, len_usr_to_drv)) { + SEP_DRV_LOG_ERROR("Memory copy failure for drv_cfg!"); + status = OS_FAULT; + goto clean_return; + } + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { +#if (defined(DRV_EM64T)) + if (output_buffer_size == OUTPUT_LARGE_BUFFER) { + output_buffer_size = OUTPUT_CP_BUFFER; + } +#endif + interrupt_counts = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + DRV_CONFIG_num_events(drv_cfg) * sizeof(U64)); + if (interrupt_counts == NULL) { + SEP_DRV_LOG_ERROR( + "Memory allocation failure for interrupt_counts!"); + status = OS_NO_MEM; + goto clean_return; + } + } else if (output_buffer_size == OUTPUT_CP_BUFFER) { + output_buffer_size = OUTPUT_LARGE_BUFFER; + } + + if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) { + SEP_DRV_LOG_FLOW_OUT("Success, using PCL."); + return OS_SUCCESS; + } + + pmu_state = CONTROL_Allocate_KMemory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64) * 3); + if (!pmu_state) { + SEP_DRV_LOG_ERROR("Memory allocation failure for pmu_state!"); + status = OS_NO_MEM; + goto clean_return; + } + uncore_em_factor = 0; + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + CPU_STATE_accept_interrupt(&pcb[cpu_num]) = 1; + CPU_STATE_initial_mask(&pcb[cpu_num]) = 1; + CPU_STATE_group_swap(&pcb[cpu_num]) = 1; + CPU_STATE_reset_mask(&pcb[cpu_num]) = 0; + CPU_STATE_num_samples(&pcb[cpu_num]) = 0; + CPU_STATE_last_p_state_valid(&pcb[cpu_num]) = FALSE; +#if defined(DRV_CPU_HOTPLUG) + CPU_STATE_offlined(&pcb[cpu_num]) = TRUE; +#else + CPU_STATE_offlined(&pcb[cpu_num]) = FALSE; +#endif + CPU_STATE_nmi_handled(&pcb[cpu_num]) = 0; + } + + DRV_CONFIG_seed_name(drv_cfg) = NULL; + DRV_CONFIG_seed_name_len(drv_cfg) = 0; + + SEP_DRV_LOG_TRACE("Config : size = %d.", DRV_CONFIG_size(drv_cfg)); + SEP_DRV_LOG_TRACE("Config : counting_mode = %d.", + DRV_CONFIG_counting_mode(drv_cfg)); + + control_pid = current->pid; + SEP_DRV_LOG_TRACE("Control PID = %d.", control_pid); + + if (core_to_dev_map == NULL) { + core_to_dev_map = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + if (!core_to_dev_map) { + SEP_DRV_LOG_ERROR( + "Memory allocation failure for core_to_dev_map!"); + status = OS_NO_MEM; + goto clean_return; + } + } + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + if (cpu_buf == NULL) { + cpu_buf = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(BUFFER_DESC_NODE)); + if (!cpu_buf) { + SEP_DRV_LOG_ERROR( + "Memory allocation failure for cpu_buf!"); + status = OS_NO_MEM; + goto clean_return; + } + } + + if (module_buf == NULL) { + module_buf = CONTROL_Allocate_Memory( + sizeof(BUFFER_DESC_NODE)); + if (!module_buf) { + status = OS_NO_MEM; + goto clean_return; + } + } + +#if defined(CONFIG_TRACEPOINTS) + multi_pebs_enabled = DRV_CONFIG_multi_pebs_enabled(drv_cfg); + 
SEP_DRV_LOG_TRACE("Multi PEBS collection request = %d.", + multi_pebs_enabled); + collect_sideband = DRV_CONFIG_enable_sideband(drv_cfg); + SEP_DRV_LOG_TRACE("Sideband collection request = %d.", + collect_sideband); +#endif + + if (collect_sideband) { + if (cpu_sideband_buf == NULL) { + cpu_sideband_buf = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(BUFFER_DESC_NODE)); + if (!cpu_sideband_buf) { + SEP_DRV_LOG_ERROR( + "Memory allocation failure for cpu_sideband_buf!"); + status = OS_NO_MEM; + goto clean_return; + } + } + } + + if (DRV_CONFIG_ipt_mode(drv_cfg)) { + if (ipt_trace_buf == NULL) { + ipt_trace_buf = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(BUFFER_DESC_NODE)); + if (!ipt_trace_buf) { + status = OS_NO_MEM; + goto clean_return; + } + } + if (ipt_info_buf == NULL) { + ipt_info_buf = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(BUFFER_DESC_NODE)); + if (!ipt_info_buf) { + status = OS_NO_MEM; + goto clean_return; + } + } + } + + /* + * Allocate the output and control buffers for each CPU in the system + * Allocate and set up the temp output files for each CPU in the system + * Allocate and set up the temp outout file for detailing the Modules in the system + */ + status = OUTPUT_Initialize(); + if (status != OS_SUCCESS) { + SEP_DRV_LOG_ERROR("OUTPUT_Initialize failed!"); + goto clean_return; + } + + /* + * Program the APIC and set up the interrupt handler + */ + CPUMON_Install_Cpuhooks(); + SEP_DRV_LOG_TRACE("Finished Installing cpu hooks."); +#if defined(DRV_CPU_HOTPLUG) + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + if (cpu_built_sysinfo && + cpu_built_sysinfo[cpu_num] == 0) { + cpu_tsc[cpu_num] = cpu_tsc[0]; + CONTROL_Invoke_Cpu(cpu_num, SYS_INFO_Build_Cpu, + NULL); + } + } +#endif + +#if defined(DRV_EM64T) + SYS_Get_GDT_Base((PVOID *)&gdt_desc); +#endif + SEP_DRV_LOG_TRACE("About to install module notification."); + LINUXOS_Install_Hooks(); + } else if (DRV_CONFIG_emon_timer_interval(drv_cfg)) { + output_buffer_size = OUTPUT_EMON_BUFFER; + if (emon_buf == NULL) { + emon_buf = CONTROL_Allocate_Memory( + sizeof(BUFFER_DESC_NODE)); + if (!emon_buf) { + status = OS_NO_MEM; + goto clean_return; + } + } + status = OUTPUT_Initialize_EMON(); + if (status != OS_SUCCESS) { + SEP_DRV_LOG_ERROR("OUTPUT_Initialize_EMON failed!"); + goto clean_return; + } + emon_desc = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * + sizeof(EMON_DESC_NODE)); + if (!emon_desc) { + status = OS_NO_MEM; + goto clean_return; + } + } + +clean_return: + if (status != OS_SUCCESS) { + drv_cfg = CONTROL_Free_Memory(drv_cfg); + interrupt_counts = CONTROL_Free_Memory(interrupt_counts); + pmu_state = CONTROL_Free_Memory(pmu_state); + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Initialize (PVOID buf_usr_to_drv, size_t len_usr_to_drv) + * + * @param buf_usr_to_drv - pointer to the input buffer + * @param len_usr_to_drv - size of the input buffer + * + * @return OS_STATUS + * + * @brief Local function that handles the LWPMU_IOCTL_INIT call. + * @brief Sets up the interrupt handler. + * @brief Set up the output buffers/files needed to make the driver + * @brief operational. 
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Initialize(PVOID buf_usr_to_drv, size_t len_usr_to_drv)
+{
+	int status = OS_SUCCESS;
+	S32 cpu_num;
+	DEV_CONFIG pcfg;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+		return OS_FAULT;
+	}
+
+	pcfg = CONTROL_Allocate_Memory(len_usr_to_drv);
+	if (!pcfg) {
+		status = OS_NO_MEM;
+		SEP_DRV_LOG_ERROR("Memory allocation failure for pcfg!");
+		goto clean_return;
+	}
+
+	if (copy_from_user(pcfg, buf_usr_to_drv, len_usr_to_drv)) {
+		SEP_DRV_LOG_ERROR("Memory copy failure for pcfg!");
+		status = OS_FAULT;
+		goto clean_return;
+	}
+
+	cur_device = DEV_CONFIG_device_index(pcfg);
+	if (cur_device >= num_devices) {
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"No more devices to allocate! Wrong lwpmudrv_Init_Num_Devices.");
+		return OS_FAULT;
+	}
+
+	/*
+	 * Program State Initializations
+	 */
+	LWPMU_DEVICE_pcfg(&devices[cur_device]) = pcfg;
+	cur_pcfg = (DEV_CONFIG)LWPMU_DEVICE_pcfg(&devices[cur_device]);
+
+	if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+		SEP_DRV_LOG_FLOW_OUT("Success, using PCL.");
+		return OS_SUCCESS;
+	}
+
+	if (DEV_CONFIG_num_events(cur_pcfg) > 0) {
+		LWPMU_DEVICE_dispatch(&devices[cur_device]) =
+			UTILITY_Configure_CPU(DEV_CONFIG_dispatch_id(cur_pcfg));
+		if (LWPMU_DEVICE_dispatch(&devices[cur_device]) == NULL) {
+			SEP_DRV_LOG_ERROR_FLOW_OUT("Dispatch pointer is NULL!");
+			status = OS_INVALID;
+			goto clean_return;
+		}
+	}
+
+#if defined(DRV_DISABLE_PEBS)
+	DEV_CONFIG_pebs_mode(cur_pcfg) = 0;
+#endif
+
+	if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) {
+		status = PEBS_Initialize(cur_device);
+		if (status != OS_SUCCESS) {
+			SEP_DRV_LOG_ERROR("PEBS_Initialize failed!");
+			goto clean_return;
+		}
+	}
+
+	if (DRV_CONFIG_ipt_mode(drv_cfg) && ipt_config) {
+		status = IPT_Initialize(drv_cfg);
+		if (status != OS_SUCCESS) {
+			SEP_DRV_LOG_ERROR("IPT_Initialize failed!");
+			goto clean_return;
+		}
+	}
+
+	for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state);
+	     cpu_num++) {
+		CPU_STATE_num_ebs_sampled(&pcb[cpu_num]) = 0;
+		CPU_STATE_num_dropped_ebs_samples(&pcb[cpu_num]) = 0;
+	}
+
+	/* Create core to device ID map */
+	for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state);
+	     cpu_num++) {
+		if (CPU_STATE_core_type(&pcb[cpu_num]) ==
+		    DEV_CONFIG_core_type(cur_pcfg)) {
+			core_to_dev_map[cpu_num] = cur_device;
+		}
+	}
+	num_core_devs++; // New core device
+	LWPMU_DEVICE_device_type(&devices[cur_device]) = DEVICE_INFO_CORE;
+
+clean_return:
+	if (status != OS_SUCCESS) {
+		// release all memory allocated in this function:
+		lwpmudrv_Clean_Up(FALSE);
+		PEBS_Destroy();
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Initialize_Num_Devices(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_INIT_NUM_DEV call.
+ * @brief Initializes the total number of devices (core and uncore). 
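+ * @brief The devices array is sized num_devices * sizeof(LWPMU_DEVICE_NODE);
+ * @brief subsequent INIT/INIT_UNC calls fill in the slot selected by each
+ * @brief configuration's device_index.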
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Initialize_Num_Devices(IOCTL_ARGS arg)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	// Check if enough space is provided for collecting the data
+	if ((arg->len_usr_to_drv != sizeof(U32)) ||
+	    (arg->buf_usr_to_drv == NULL)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+		return OS_FAULT;
+	}
+
+	if (copy_from_user(&num_devices, arg->buf_usr_to_drv,
+			   arg->len_usr_to_drv)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure");
+		return OS_FAULT;
+	}
+	/*
+	 * Allocate memory for number of devices
+	 */
+	if (num_devices != 0) {
+		devices = CONTROL_Allocate_Memory(num_devices *
+						  sizeof(LWPMU_DEVICE_NODE));
+		if (!devices) {
+			SEP_DRV_LOG_ERROR_FLOW_OUT(
+				"Unable to allocate memory for devices!");
+			return OS_NO_MEM;
+		}
+	}
+	cur_device = 0;
+
+	SEP_DRV_LOG_FLOW_OUT("Success: num_devices=%d, devices=0x%p.",
+			     num_devices, devices);
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Initialize_UNC(PVOID buf_usr_to_drv, U32 len_usr_to_drv)
+ *
+ * @param buf_usr_to_drv - pointer to the input buffer
+ * @param len_usr_to_drv - size of the input buffer
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the uncore-device INIT call.
+ * @brief Copies the uncore configuration from user space and sets up
+ * @brief the dispatch table and the uncore output buffers.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Initialize_UNC(PVOID buf_usr_to_drv, U32 len_usr_to_drv)
+{
+	DEV_UNC_CONFIG pcfg_unc;
+	U32 i;
+	int status = OS_SUCCESS;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: current state is not IDLE.");
+		return OS_IN_PROGRESS;
+	}
+
+	if (!devices) {
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+		SEP_DRV_LOG_ERROR_FLOW_OUT("No devices allocated!");
+		return OS_INVALID;
+	}
+	if (buf_usr_to_drv == NULL) {
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+		return OS_FAULT;
+	}
+	if (len_usr_to_drv != sizeof(DEV_UNC_CONFIG_NODE)) {
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Got len_usr_to_drv=%d, expecting size=%d",
+			len_usr_to_drv, (int)sizeof(DEV_UNC_CONFIG_NODE));
+		return OS_FAULT;
+	}
+
+	/*
+	 * Program State Initializations:
+	 * Foreach device, copy over pcfg and configure dispatch table
+	 */
+	// allocate memory
+	pcfg_unc = CONTROL_Allocate_Memory(sizeof(DEV_UNC_CONFIG_NODE));
+	if (pcfg_unc == NULL) {
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+		SEP_DRV_LOG_ERROR_FLOW_OUT("pcfg_unc allocation failed!");
+		return OS_NO_MEM;
+	}
+	// copy over pcfg
+	if (copy_from_user(pcfg_unc, buf_usr_to_drv, len_usr_to_drv)) {
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Failed to copy pcfg_unc from user!");
+		return OS_FAULT;
+	}
+	cur_device = DEV_UNC_CONFIG_device_index(pcfg_unc);
+	if (cur_device >= num_devices) {
+		CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"No more devices to allocate! 
Wrong lwpmudrv_Init_Num_Devices."); + return OS_FAULT; + } + LWPMU_DEVICE_pcfg(&devices[cur_device]) = pcfg_unc; + if (DEV_UNC_CONFIG_num_events(pcfg_unc) > 0) { + // configure dispatch from dispatch_id + LWPMU_DEVICE_dispatch(&devices[cur_device]) = + UTILITY_Configure_CPU( + DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); + if (LWPMU_DEVICE_dispatch(&devices[cur_device]) == NULL) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, + DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT("Unable to configure CPU!"); + return OS_FAULT; + } + } + + LWPMU_DEVICE_cur_group(&devices[cur_device]) = + CONTROL_Allocate_Memory(num_packages * sizeof(S32)); + if (LWPMU_DEVICE_cur_group(&devices[cur_device]) == NULL) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Cur_grp allocation failed for device %u!", cur_device); + return OS_NO_MEM; + } + for (i = 0; i < num_packages; i++) { + LWPMU_DEVICE_cur_group(&devices[cur_device])[i] = 0; + } + + LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = 0; + LWPMU_DEVICE_num_units(&devices[cur_device]) = + DEV_UNC_CONFIG_num_units(pcfg_unc); + LWPMU_DEVICE_device_type(&devices[cur_device]) = DEVICE_INFO_UNCORE; + + if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE && + DEV_UNC_CONFIG_num_events(pcfg_unc)) { + if (unc_buf == NULL) { + unc_buf = CONTROL_Allocate_Memory( + num_packages * sizeof(BUFFER_DESC_NODE)); + if (!unc_buf) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, + DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure."); + return OS_NO_MEM; + } + } + + if (!unc_buf_init) { + status = OUTPUT_Initialize_UNC(); + if (status != OS_SUCCESS) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, + DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "OUTPUT_Initialize failed!"); + return status; + } + unc_buf_init = TRUE; + unc_em_desc = CONTROL_Allocate_Memory( + num_packages * sizeof(UNC_EM_DESC_NODE)); + if (!unc_em_desc) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, + DRV_STATE_TERMINATING); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure."); + return OS_NO_MEM; + } + } + } + + SEP_DRV_LOG_FLOW_OUT("unc dispatch id = %d.", + DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Terminate(void) + * + * @param none + * + * @return OS_STATUS + * + * @brief Local function that handles the DRV_OPERATION_TERMINATE call. + * @brief Cleans up the interrupt handler and resets the PMU state. + * + * Special Notes + */ +static OS_STATUS +lwpmudrv_Terminate(VOID) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (GET_DRIVER_STATE() == DRV_STATE_UNINITIALIZED) { + SEP_DRV_LOG_FLOW_OUT("Success (already uninitialized)."); + return OS_SUCCESS; + } + + if (!CHANGE_DRIVER_STATE(STATE_BIT_STOPPED | STATE_BIT_TERMINATING | + STATE_BIT_IDLE, + DRV_STATE_UNINITIALIZED)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected state!"); + return OS_FAULT; + } + + if (drv_cfg && DRV_CONFIG_counting_mode(drv_cfg) == FALSE) { + LINUXOS_Uninstall_Hooks(); + } + + lwpmudrv_Clean_Up(TRUE); + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Switch_To_Next_Group(param) + * + * @param none + * + * @return none + * + * @brief Switch to the next event group for both core and uncore. + * @brief This function assumes an active collection is frozen + * @brief or no collection is active. 
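+ * @brief Core groups advance per CPU, modulo the group count; uncore
+ * @brief groups (EMON mode only) advance per package and reset to group
+ * @brief 0 whenever CPU 0 has wrapped around, keeping core and uncore
+ * @brief rotation in step.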
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Switch_To_Next_Group(VOID)
+{
+	S32 cpuid;
+	U32 i, j;
+	CPU_STATE pcpu;
+	EVENT_CONFIG ec;
+	DEV_UNC_CONFIG pcfg_unc;
+	DISPATCH dispatch_unc;
+	ECB pecb_unc = NULL;
+	U32 cur_grp = 0;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	for (cpuid = 0; cpuid < GLOBAL_STATE_num_cpus(driver_state); cpuid++) {
+		pcpu = &pcb[cpuid];
+		ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(
+			&devices[core_to_dev_map[cpuid]]);
+		if (!ec) {
+			continue;
+		}
+		CPU_STATE_current_group(pcpu)++;
+		// make the event group list circular
+		CPU_STATE_current_group(pcpu) %= EVENT_CONFIG_num_groups(ec);
+	}
+
+	if (num_devices) {
+		for (i = num_core_devs; i < num_devices; i++) {
+			pcfg_unc = LWPMU_DEVICE_pcfg(&devices[i]);
+			dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]);
+			if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) {
+				if (pcb && pcfg_unc &&
+				    DEV_UNC_CONFIG_num_events(pcfg_unc) &&
+				    dispatch_unc &&
+				    DRV_CONFIG_emon_mode(drv_cfg)) {
+					for (j = 0; j < num_packages; j++) {
+						cur_grp =
+							LWPMU_DEVICE_cur_group(
+								&devices[i])[j];
+						pecb_unc =
+							LWPMU_DEVICE_PMU_register_data(
+								&devices[i])
+								[cur_grp];
+						LWPMU_DEVICE_cur_group(
+							&devices[i])[j]++;
+						if (CPU_STATE_current_group(
+							    &pcb[0]) == 0) {
+							LWPMU_DEVICE_cur_group(
+								&devices[i])[j] =
+								0;
+						}
+						LWPMU_DEVICE_cur_group(
+							&devices[i])[j] %=
+							LWPMU_DEVICE_em_groups_count(
+								&devices[i]);
+					}
+					SEP_DRV_LOG_TRACE(
+						"Swap Group to %d for device %d.",
+						cur_grp, i);
+#if !defined(DISABLE_BUILD_SOCPERF)
+					if (pecb_unc &&
+					    ECB_device_type(pecb_unc) ==
+						    DEVICE_UNC_SOCPERF) {
+						SOCPERF_Switch_Group3();
+					}
+#endif
+				}
+			}
+		}
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_Driver_State(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_GET_Driver_State call.
+ * @brief Returns the current driver state.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_Driver_State(IOCTL_ARGS arg)
+{
+	OS_STATUS status = OS_SUCCESS;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	// Check if enough space is provided for collecting the data
+	if ((arg->len_drv_to_usr != sizeof(U32)) ||
+	    (arg->buf_drv_to_usr == NULL)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid arguments!");
+		return OS_FAULT;
+	}
+
+	status = put_user(GET_DRIVER_STATE(), (U32 *)arg->buf_drv_to_usr);
+
+	SEP_DRV_LOG_TRACE_OUT("Return value: %d.", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*! 
+ * @fn static OS_STATUS lwpmudrv_Pause_Uncore(void) + * + * @param - 1 if switching group, 0 otherwise + * + * @return OS_STATUS + * + * @brief Pause the uncore collection + * + * Special Notes + */ +static VOID +lwpmudrv_Pause_Uncore(U32 switch_grp) +{ + U32 i; + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + + SEP_DRV_LOG_TRACE_IN(""); + + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + + if (pcfg_unc && DEV_UNC_CONFIG_num_events(pcfg_unc) && + dispatch_unc && dispatch_unc->freeze) { + SEP_DRV_LOG_TRACE("LWP: calling UNC Pause."); + if (switch_grp) { + if (LWPMU_DEVICE_em_groups_count(&devices[i]) > + 1) { + dispatch_unc->freeze(&i); + } + } else { + dispatch_unc->freeze(&i); + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Pause_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Pause the core/uncore collection + * + * Special Notes + */ +static VOID +lwpmudrv_Pause_Op(PVOID param) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + U32 dev_idx = core_to_dev_map[this_cpu]; + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN(""); + + if (pcfg && DEV_CONFIG_num_events(pcfg) && dispatch != NULL && + dispatch->freeze != NULL && DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { + dispatch->freeze(param); + } + + lwpmudrv_Pause_Uncore(0); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Pause(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Pause the collection + * + * Special Notes + */ +static OS_STATUS +lwpmudrv_Pause(VOID) +{ + int i; + int done = FALSE; + + SEP_DRV_LOG_FLOW_IN(""); + + if (!pcb || !drv_cfg) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); + return OS_INVALID; + } + + if (CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, DRV_STATE_PAUSING)) { + if (DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); + i++) { + CPU_STATE_accept_interrupt(&pcb[i]) = 0; + } + while (!done) { + done = TRUE; + for (i = 0; + i < GLOBAL_STATE_num_cpus(driver_state); + i++) { + if (atomic_read(&CPU_STATE_in_interrupt( + &pcb[i]))) { + done = FALSE; + } + } + } + } + CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL); + /* + * This means that the PAUSE state has been reached. + */ + CHANGE_DRIVER_STATE(STATE_BIT_PAUSING, DRV_STATE_PAUSED); + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Resume_Uncore(void) + * + * @param - 1 if switching group, 0 otherwise + * + * @return OS_STATUS + * + * @brief Resume the uncore collection + * + * Special Notes + */ +static VOID +lwpmudrv_Resume_Uncore(U32 switch_grp) +{ + U32 i; + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + + SEP_DRV_LOG_TRACE_IN(""); + + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + + if (pcfg_unc && DEV_UNC_CONFIG_num_events(pcfg_unc) && + dispatch_unc && dispatch_unc->restart) { + SEP_DRV_LOG_TRACE("LWP: calling UNC Resume."); + if (switch_grp) { + if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) { + dispatch_unc->restart(&i); + } + } else { + dispatch_unc->restart(&i); + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Resume_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Resume the core/uncore collection + * + * Special Notes + */ +static VOID +lwpmudrv_Resume_Op(PVOID param) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + U32 dev_idx = core_to_dev_map[this_cpu]; + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN(""); + + if (pcfg && DEV_CONFIG_num_events(pcfg) && dispatch != NULL && + dispatch->restart != NULL && DRV_CONFIG_use_pcl(drv_cfg) == FALSE) { + dispatch->restart((VOID *)(size_t)0); + } + + lwpmudrv_Resume_Uncore(0); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Resume(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Resume the collection + * + * Special Notes + */ +static OS_STATUS +lwpmudrv_Resume(VOID) +{ + int i; + + SEP_DRV_LOG_FLOW_IN(""); + + if (!pcb || !drv_cfg) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); + return OS_INVALID; + } + + /* + * If we are in the process of pausing sampling, wait until the pause has been + * completed. Then start the Resume process. + */ + while (GET_DRIVER_STATE() == DRV_STATE_PAUSING) { + /* + * This delay probably needs to be expanded a little bit more for large systems. + * For now, it is probably sufficient. + */ + SYS_IO_Delay(); + SYS_IO_Delay(); + } + + if (CHANGE_DRIVER_STATE(STATE_BIT_PAUSED, DRV_STATE_RUNNING)) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (cpu_mask_bits) { + CPU_STATE_accept_interrupt(&pcb[i]) = + cpu_mask_bits[i] ? 1 : 0; + CPU_STATE_group_swap(&pcb[i]) = 1; + } else { + CPU_STATE_accept_interrupt(&pcb[i]) = 1; + CPU_STATE_group_swap(&pcb[i]) = 1; + } + } + CONTROL_Invoke_Parallel(lwpmudrv_Resume_Op, NULL); + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Write_Uncore(void) + * + * @param - 1 if switching group, 0 otherwise + * + * @return OS_STATUS + * + * @brief Program the uncore collection + * + * Special Notes + */ +static VOID +lwpmudrv_Write_Uncore(U32 switch_grp) +{ + U32 i; + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + + SEP_DRV_LOG_TRACE_IN(""); + + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + + if (pcfg_unc && DEV_UNC_CONFIG_num_events(pcfg_unc) && + dispatch_unc && dispatch_unc->write) { + SEP_DRV_LOG_TRACE("LWP: calling UNC Write."); + if (switch_grp) { + if (LWPMU_DEVICE_em_groups_count(&devices[i]) > + 1) { + dispatch_unc->write(&i); + } + } else { + dispatch_unc->write(&i); + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Write_Op(void) + * + * @param - Do operation for Core only + * + * @return OS_STATUS + * + * @brief Program the core/uncore collection + * + * Special Notes + */ +static VOID +lwpmudrv_Write_Op(PVOID param) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + U32 dev_idx = core_to_dev_map[this_cpu]; + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN(""); + + if (!DRV_CONFIG_ipt_mode(drv_cfg) || !ipt_config) { + if (pcfg && DEV_CONFIG_num_events(pcfg) && dispatch != NULL && + dispatch->write != NULL) { + dispatch->write((VOID *)(size_t)0); + } + + if (param == NULL) { + lwpmudrv_Write_Uncore(0); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Switch_Group(void) + * + * @param none + * + * @return OS_STATUS + * + * @brief Switch the current group that is being collected. + * + * Special Notes + * This routine is called from the user mode code to handle the multiple group + * situation. 4 distinct steps are taken: + * Step 1: Pause the sampling + * Step 2: Increment the current group count + * Step 3: Write the new group to the PMU + * Step 4: Resume sampling + */ +static OS_STATUS +lwpmudrv_Switch_Group(VOID) +{ + S32 idx; + CPU_STATE pcpu; + EVENT_CONFIG ec; + OS_STATUS status = OS_SUCCESS; + U32 current_state = GET_DRIVER_STATE(); + U32 this_cpu; + + SEP_DRV_LOG_FLOW_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + if (!pcb || !drv_cfg) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!"); + return OS_INVALID; + } + + if (current_state != DRV_STATE_RUNNING && + current_state != DRV_STATE_PAUSED) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Return value: %d (invalid driver state!).", status); + return status; + } + + status = lwpmudrv_Pause(); + + for (idx = 0; idx < GLOBAL_STATE_num_cpus(driver_state); idx++) { + pcpu = &pcb[idx]; + ec = (EVENT_CONFIG)LWPMU_DEVICE_ec( + &devices[core_to_dev_map[idx]]); + CPU_STATE_current_group(pcpu)++; + // make the event group list circular + CPU_STATE_current_group(pcpu) %= EVENT_CONFIG_num_groups(ec); + } + CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, (VOID *)(size_t)this_cpu); + if (drv_cfg && DRV_CONFIG_start_paused(drv_cfg) == FALSE) { + lwpmudrv_Resume(); + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static OS_STATUS lwpmudrv_Trigger_Read_Op(void) + * + * @param - none + * + * @return OS_STATUS + * + * @brief Read uncore data + * + * Special Notes + */ +static VOID +lwpmudrv_Trigger_Read_Op(VOID) +{ + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + U32 this_cpu; + CPU_STATE pcpu; + U32 package_num; + U64 tsc; + BUFFER_DESC bd; + EVENT_DESC evt_desc; + U32 cur_grp; + ECB pecb; + U32 sample_size = 0; + U32 offset = 0; + PVOID buf; + UncoreSampleRecordPC *psamp; + U32 i; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + package_num = core_to_package_map[this_cpu]; + + if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), + STATE_BIT_RUNNING | STATE_BIT_PAUSED)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("State is not RUNNING or PAUSED!"); + return; + } + + UTILITY_Read_TSC(&tsc); + bd = &unc_buf[package_num]; + + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + if (pcfg_unc && DEV_UNC_CONFIG_num_events(pcfg_unc) && + !DEV_UNC_CONFIG_device_with_intr_events(pcfg_unc)) { + cur_grp = LWPMU_DEVICE_cur_group( + &devices[i])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[i])[cur_grp]; + evt_desc = desc_data[ECB_descriptor_id(pecb)]; + sample_size += EVENT_DESC_sample_size(evt_desc); + } + } + + buf = OUTPUT_Reserve_Buffer_Space(bd, sample_size, FALSE, + !SEP_IN_NOTIFICATION); + + if (buf) { + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = + (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + if (pcfg_unc && DEV_UNC_CONFIG_num_events(pcfg_unc) && + !DEV_UNC_CONFIG_device_with_intr_events(pcfg_unc) && + dispatch_unc && dispatch_unc->trigger_read) { + cur_grp = LWPMU_DEVICE_cur_group( + &devices[i])[package_num]; + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[i])[cur_grp]; + evt_desc = desc_data[ECB_descriptor_id(pecb)]; + + psamp = (UncoreSampleRecordPC *)(((S8 *)buf) + + offset); + UNCORE_SAMPLE_RECORD_descriptor_id(psamp) = + ECB_descriptor_id(pecb); + UNCORE_SAMPLE_RECORD_tsc(psamp) = tsc; + UNCORE_SAMPLE_RECORD_uncore_valid(psamp) = 1; + UNCORE_SAMPLE_RECORD_cpu_num(psamp) = + (U16)this_cpu; + UNCORE_SAMPLE_RECORD_pkg_num(psamp) = + (U16)package_num; + + dispatch_unc->trigger_read(psamp, i, 0); + offset += EVENT_DESC_sample_size(evt_desc); + } + } + } else { + SEP_DRV_LOG_WARNING( + "Buffer space reservation failed; some samples will be dropped."); + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Uncore_Switch_Group(void) + * + * @param none + * + * @return OS_STATUS + * + * @brief Switch the current group that is being collected. + * + * Special Notes + * This routine is called from the user mode code to handle the multiple group + * situation. 
4 distinct steps are taken: + * Step 1: Pause the sampling + * Step 2: Increment the current group count + * Step 3: Write the new group to the PMU + * Step 4: Resume sampling + */ +static OS_STATUS +lwpmudrv_Uncore_Switch_Group(VOID) +{ + OS_STATUS status = OS_SUCCESS; + U32 current_state = GET_DRIVER_STATE(); + U32 this_cpu = CONTROL_THIS_CPU(); + U32 package_num = core_to_package_map[this_cpu]; + U32 i = 0; + U32 j; + DEV_UNC_CONFIG pcfg_unc; + DISPATCH dispatch_unc; + ECB ecb_unc; + U32 cur_grp; + U32 num_units; + + SEP_DRV_LOG_FLOW_IN(""); + + if (!devices || !drv_cfg) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Devices or drv_cfg pointer is NULL!"); + return OS_INVALID; + } + + if (current_state != DRV_STATE_RUNNING && + current_state != DRV_STATE_PAUSED) { + SEP_DRV_LOG_FLOW_OUT("Driver state is not RUNNING or PAUSED!"); + return OS_INVALID; + } + + if (max_groups_unc > 1) { + lwpmudrv_Pause_Uncore(1); + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + num_units = LWPMU_DEVICE_num_units(&devices[i]); + if (!pcfg_unc || !DEV_UNC_CONFIG_num_events(pcfg_unc) || + !dispatch_unc) { + continue; + } + if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) { + cur_grp = LWPMU_DEVICE_cur_group( + &devices[i])[package_num]; + ecb_unc = LWPMU_DEVICE_PMU_register_data( + &devices[i])[cur_grp]; + // Switch group + LWPMU_DEVICE_cur_group( + &devices[i])[package_num]++; + LWPMU_DEVICE_cur_group( + &devices[i])[package_num] %= + LWPMU_DEVICE_em_groups_count( + &devices[i]); +#if !defined(DISABLE_BUILD_SOCPERF) + if (ecb_unc && + (ECB_device_type(ecb_unc) == + DEVICE_UNC_SOCPERF) && + (package_num == 0)) { + SOCPERF_Switch_Group3(); + } +#endif + // Post group switch + cur_grp = LWPMU_DEVICE_cur_group( + &devices[i])[package_num]; + ecb_unc = LWPMU_DEVICE_PMU_register_data( + &devices[i])[cur_grp]; + for (j = 0; + j < (ECB_num_events(ecb_unc) * num_units); + j++) { + LWPMU_DEVICE_prev_value( + &devices[i])[package_num][j] = + 0LL; //zero out prev_value for new collection + } + } + } + lwpmudrv_Write_Uncore(1); + lwpmudrv_Resume_Uncore(1); + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static VOID lwpmudrv_Trigger_Read(void) + * + * @param - none + * + * @return - OS_STATUS + * + * @brief Read the Counter Data. 
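+ * @brief Runs from the per-package read timer: reads the uncore
+ * @brief counters, switches the uncore group once every unc_em_factor
+ * @brief expirations, and re-arms the timer for jiffies +
+ * @brief unc_timer_interval even while the driver is not RUNNING.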
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Trigger_Read(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+	struct timer_list *tl
+#else
+	unsigned long arg
+#endif
+)
+{
+	U32 this_cpu = CONTROL_THIS_CPU();
+	U32 pkg = core_to_package_map[this_cpu];
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (drv_cfg && DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+		SEP_DRV_LOG_TRACE_OUT("Success: Using PCL");
+		return;
+	}
+
+	if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) {
+		SEP_DRV_LOG_TRACE("Sampling driver state is not RUNNING");
+		goto reset_uncore_timer;
+	}
+
+	lwpmudrv_Trigger_Read_Op();
+
+	UNC_EM_em_factor(&unc_em_desc[pkg])++;
+	if (UNC_EM_em_factor(&unc_em_desc[pkg]) ==
+	    DRV_CONFIG_unc_em_factor(drv_cfg)) {
+		SEP_DRV_LOG_TRACE("Switching Uncore Group...");
+		lwpmudrv_Uncore_Switch_Group();
+		UNC_EM_em_factor(&unc_em_desc[pkg]) = 0;
+	}
+
+reset_uncore_timer:
+	UNC_EM_read_timer(&unc_em_desc[pkg])->expires =
+		jiffies + unc_timer_interval;
+	add_timer_on(UNC_EM_read_timer(&unc_em_desc[pkg]), this_cpu);
+
+	SEP_DRV_LOG_TRACE_OUT("Success.");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void lwpmudrv_Read_Specific_TSC (PVOID param)
+ *
+ * @param param - pointer to the result
+ *
+ * @return none
+ *
+ * @brief Read the tsc value in the current processor and
+ * @brief write the result into param.
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Read_Specific_TSC(PVOID param)
+{
+	U32 this_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	if (this_cpu == 0) {
+		UTILITY_Read_TSC((U64 *)param);
+	}
+	preempt_enable();
+
+	SEP_DRV_LOG_TRACE_OUT("");
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID lwpmudrv_Uncore_Stop_Timer (void)
+ *
+ * @brief Stop the uncore read timer
+ *
+ * @param none
+ *
+ * @return none
+ *
+ * Special Notes:
+ */
+static VOID
+lwpmudrv_Uncore_Stop_Timer(VOID)
+{
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 pkg;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	for (this_cpu = 0; this_cpu < GLOBAL_STATE_num_cpus(driver_state);
+	     this_cpu++) {
+		pcpu = &pcb[this_cpu];
+		pkg = core_to_package_map[this_cpu];
+
+		if (!CPU_STATE_socket_master(pcpu) ||
+		    (UNC_EM_read_timer(&unc_em_desc[pkg]) == NULL)) {
+			continue;
+		}
+
+		del_timer_sync(UNC_EM_read_timer(&unc_em_desc[pkg]));
+		UNC_EM_read_timer(&unc_em_desc[pkg]) = CONTROL_Free_Memory(
+			UNC_EM_read_timer(&unc_em_desc[pkg]));
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("");
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*! 
+ * @fn OS_STATUS lwpmudrv_Uncore_Start_Timer (void) + * + * @brief Start the uncore read timer + * + * @param none + * + * @return OS_STATUS + * + * Special Notes: + */ +static VOID +lwpmudrv_Uncore_Start_Timer(PVOID param) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + CPU_STATE pcpu = &pcb[this_cpu]; + U32 pkg = core_to_package_map[this_cpu]; + + SEP_DRV_LOG_FLOW_IN(""); + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Not socket master."); + return; + } + + UNC_EM_read_timer(&unc_em_desc[pkg]) = + CONTROL_Allocate_Memory(sizeof(struct timer_list)); + if (UNC_EM_read_timer(&unc_em_desc[pkg]) == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for unc_read_timer!"); + return; + } + UNC_EM_em_factor(&unc_em_desc[pkg]) = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + timer_setup(UNC_EM_read_timer(&unc_em_desc[pkg]), lwpmudrv_Trigger_Read, + 0); +#else + init_timer(UNC_EM_read_timer(&unc_em_desc[pkg])); + UNC_EM_read_timer(&unc_em_desc[pkg])->function = lwpmudrv_Trigger_Read; +#endif + UNC_EM_read_timer(&unc_em_desc[pkg])->expires = + jiffies + unc_timer_interval; + add_timer_on(UNC_EM_read_timer(&unc_em_desc[pkg]), this_cpu); + + SEP_DRV_LOG_FLOW_OUT(""); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Read_Data_Op(PVOID param) + * + * @param param - dummy + * + * @return void + * + * @brief Read all the core/uncore data counters at one shot + * + * Special Notes + */ +static void +lwpmudrv_Read_Data_Op(VOID *param) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + DISPATCH dispatch; + U32 dev_idx; + DEV_CONFIG pcfg; + DEV_UNC_CONFIG pcfg_unc; + + SEP_DRV_LOG_TRACE_IN(""); + + if (devices == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Devices is null!"); + return; + } + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + if (pcfg && DEV_CONFIG_num_events(pcfg) && dispatch != NULL && + dispatch->read_data != NULL) { + dispatch->read_data(param, dev_idx); + } + for (dev_idx = num_core_devs; dev_idx < num_devices; dev_idx++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!pcfg_unc || !DEV_UNC_CONFIG_num_events(pcfg_unc)) { + continue; + } + if (!(DRV_CONFIG_emon_mode(drv_cfg) || + DRV_CONFIG_counting_mode(drv_cfg))) { + continue; + } + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + if (dispatch == NULL) { + continue; + } + if (dispatch->read_data == NULL) { + continue; + } + dispatch->read_data(param, dev_idx); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Emon_Switch_Group(param) + * + * @param none + * + * @return none + * + * @brief Switch to the next event group for both core and uncore. + * @brief This function assumes an active collection is frozen + * @brief or no collection is active. 
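+ * @brief Every CPU advances its own core group, but only the socket
+ * @brief master advances the uncore group for its package, and the
+ * @brief SOCPERF group switch is issued from package 0 alone.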
+ * + * Special Notes + */ +static VOID +lwpmudrv_Emon_Switch_Group(VOID) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + CPU_STATE pcpu = &pcb[this_cpu]; + U32 dev_idx = core_to_dev_map[this_cpu]; + EVENT_CONFIG ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[dev_idx]); + U32 package_num = core_to_package_map[this_cpu]; + U32 i; + DEV_UNC_CONFIG pcfg_unc; + DISPATCH dispatch_unc; + ECB pecb_unc; + U32 cur_grp; + + SEP_DRV_LOG_TRACE_IN(""); + + CPU_STATE_current_group(pcpu)++; + // make the event group list circular + CPU_STATE_current_group(pcpu) %= EVENT_CONFIG_num_groups(ec); + + if (CPU_STATE_socket_master(pcpu)) { + for (i = num_core_devs; i < num_devices; i++) { + pcfg_unc = LWPMU_DEVICE_pcfg(&devices[i]); + dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]); + if (!pcfg_unc || !DEV_UNC_CONFIG_num_events(pcfg_unc) || + !dispatch_unc) { + continue; + } + if (LWPMU_DEVICE_em_groups_count(&devices[i]) > 1) { + cur_grp = LWPMU_DEVICE_cur_group( + &devices[i])[package_num]; + pecb_unc = LWPMU_DEVICE_PMU_register_data( + &devices[i])[cur_grp]; + LWPMU_DEVICE_cur_group( + &devices[i])[package_num]++; + if (CPU_STATE_current_group(&pcb[0]) == 0) { + LWPMU_DEVICE_cur_group( + &devices[i])[package_num] = 0; + } + LWPMU_DEVICE_cur_group( + &devices[i])[package_num] %= + LWPMU_DEVICE_em_groups_count( + &devices[i]); +#if !defined(DISABLE_BUILD_SOCPERF) + if (pecb_unc && + (ECB_device_type(pecb_unc) == + DEVICE_UNC_SOCPERF) && + (package_num == 0)) { + SOCPERF_Switch_Group3(); + } +#endif + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +static VOID +lwpmudrv_Emon_Read_Op(PVOID arg) +{ + U32 this_cpu = CONTROL_THIS_CPU(); + PVOID buf = arg; + U64 *tsc = NULL; + DRV_BOOL enter_in_pause_state = FALSE; + + if (GET_DRIVER_STATE() == DRV_STATE_PAUSED) { + SEP_DRV_LOG_TRACE("Entering in pause state."); + enter_in_pause_state = 1; + } + + prev_cpu_tsc[this_cpu] = cpu_tsc[this_cpu]; + if (DRV_CONFIG_per_cpu_tsc(drv_cfg) || (this_cpu == 0)) { + UTILITY_Read_TSC(&cpu_tsc[this_cpu]); + } + + // Counters should be frozen right after time stamped. + if (!enter_in_pause_state) { + lwpmudrv_Pause_Op(NULL); + } + + buf = (PVOID)(((U64 *)buf) + GLOBAL_STATE_num_cpus(driver_state)); + diff_cpu_tsc[this_cpu] = cpu_tsc[this_cpu] - prev_cpu_tsc[this_cpu]; + tsc = ((U64 *)buf) + this_cpu; + *tsc = diff_cpu_tsc[this_cpu]; + + buf = (PVOID)(((U64 *)buf) + GLOBAL_STATE_num_cpus(driver_state)); + lwpmudrv_Read_Data_Op(buf); + + lwpmudrv_Emon_Switch_Group(); + + lwpmudrv_Write_Op(NULL); + + if (DRV_CONFIG_per_cpu_tsc(drv_cfg) || (this_cpu == 0)) { + UTILITY_Read_TSC(&cpu_tsc[this_cpu]); + } + + if (!enter_in_pause_state) { + CPU_STATE_group_swap(&pcb[this_cpu]) = 1; + lwpmudrv_Resume_Op(NULL); + } + + EMON_call_count(&emon_desc[this_cpu])++; + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static VOID lwpmudrv_Emon_Read(void) + * + * @param - none + * + * @return - OS_STATUS + * + * @brief Read the EMON Counter Data. 
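+ * @brief Reserves one emon_buffer_size record, then fans out via
+ * @brief CONTROL_Invoke_Parallel: the first num_cpus U64 slots carry
+ * @brief timing (slots 0 and 1 hold seconds and microseconds), the next
+ * @brief num_cpus slots hold per-CPU TSC deltas, and counter data
+ * @brief follows.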
+ * + * Special Notes + */ +static VOID +lwpmudrv_Emon_Read( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + struct timer_list *tl +#else + unsigned long arg +#endif +) +{ + PVOID buf = NULL; + U64 *time_info; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) + struct timespec64 t; +#else + struct timeval t; +#endif + + if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), + STATE_BIT_RUNNING | STATE_BIT_PAUSED)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state!"); + return; + } + + buf = OUTPUT_Reserve_Buffer_Space(emon_buf, emon_buffer_size, FALSE, + !SEP_IN_NOTIFICATION); + + if (buf) { + time_info = (U64 *)buf; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) + ktime_get_real_ts64(&t); + time_info[0] = t.tv_sec; + time_info[1] = t.tv_nsec / NSEC_PER_USEC; +#else + do_gettimeofday(&t); + time_info[0] = t.tv_sec; + time_info[1] = t.tv_usec; +#endif + CONTROL_Invoke_Parallel(lwpmudrv_Emon_Read_Op, buf); + } else { + SEP_DRV_LOG_WARNING( + "Output buffers are full. Might be dropping some samples!"); + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + mod_timer(unc_read_timer, jiffies + unc_timer_interval); +#else + unc_read_timer->expires = jiffies + unc_timer_interval; + add_timer(unc_read_timer); +#endif + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID lwpmudrv_Emon_Stop_Timer (void) + * + * @brief Stop the EMON read timer + * + * @param none + * + * @return none + * + * Special Notes: + */ +static VOID +lwpmudrv_Emon_Stop_Timer(PVOID arg) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (unc_read_timer == NULL) { + return; + } + + del_timer_sync(unc_read_timer); + unc_read_timer = CONTROL_Free_Memory(unc_read_timer); + + SEP_DRV_LOG_FLOW_OUT(""); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS lwpmudrv_Emon_Start_Timer (void) + * + * @brief Start the EMON read timer + * + * @param none + * + * @return OS_STATUS + * + * Special Notes: + */ +static VOID +lwpmudrv_Emon_Start_Timer(PVOID arg) +{ + U32 i; + + SEP_DRV_LOG_FLOW_IN(""); + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + EMON_call_count(&emon_desc[i]) = 0; + EMON_buf_index(&emon_desc[i]) = 0; + } + unc_timer_interval = + msecs_to_jiffies(DRV_CONFIG_emon_timer_interval(drv_cfg)); + unc_read_timer = CONTROL_Allocate_Memory(sizeof(struct timer_list)); + if (unc_read_timer == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for unc_read_timer!"); + return; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) + timer_setup(unc_read_timer, lwpmudrv_Emon_Read, 0); + mod_timer(unc_read_timer, jiffies + unc_timer_interval); +#else + init_timer(unc_read_timer); + unc_read_timer->function = lwpmudrv_Emon_Read; + unc_read_timer->expires = jiffies + unc_timer_interval; + add_timer(unc_read_timer); +#endif + + SEP_DRV_LOG_FLOW_OUT(""); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static VOID lwpmudrv_Init_Op(PVOID param)
+ *
+ * @param - none
+ *
+ * @return none
+ *
+ * @brief Initialize PMU before collection
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Init_Op(PVOID param)
+{
+ U32 this_cpu = CONTROL_THIS_CPU();
+ U32 dev_idx = core_to_dev_map[this_cpu];
+ DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+ DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ if (pcfg && DEV_CONFIG_num_events(pcfg) && dispatch != NULL &&
+ dispatch->init != NULL) {
+ dispatch->init(&dev_idx);
+ }
+
+ SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Init_PMU(void)
+ *
+ * @param - none
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Initialize the PMU and the driver state in preparation for data collection.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Init_PMU(IOCTL_ARGS args)
+{
+ DEV_UNC_CONFIG pcfg_unc = NULL;
+ DISPATCH dispatch_unc = NULL;
+ EVENT_CONFIG ec;
+ U32 i;
+ OS_STATUS status = OS_SUCCESS;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+ return OS_INVALID;
+ }
+
+ if (copy_from_user(&emon_buffer_size, args->buf_usr_to_drv,
+ sizeof(U32))) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure");
+ return OS_FAULT;
+ }
+ if (emon_buffer_size == 0) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (emon buffer size is 0)!");
+ return OS_INVALID;
+ }
+ emon_read_threshold = OUTPUT_BUFFER_SIZE / emon_buffer_size;
+
+ if (!drv_cfg) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg not set!");
+ return OS_FAULT;
+ }
+ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+ SEP_DRV_LOG_FLOW_OUT("Success: using PCL.");
+ return OS_SUCCESS;
+ }
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Discarded: driver state is not IDLE!");
+ return OS_IN_PROGRESS;
+ }
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(
+ &devices[core_to_dev_map[i]]);
+ if (!ec) {
+ continue;
+ }
+ CPU_STATE_trigger_count(&pcb[i]) = EVENT_CONFIG_em_factor(ec);
+ CPU_STATE_trigger_event_num(&pcb[i]) =
+ EVENT_CONFIG_em_event_num(ec);
+ }
+
+ // set cur_device's total groups to max groups of all devices
+ max_groups_unc = 0;
+ for (i = num_core_devs; i < num_devices; i++) {
+ if (max_groups_unc <
+ LWPMU_DEVICE_em_groups_count(&devices[i])) {
+ max_groups_unc =
+ LWPMU_DEVICE_em_groups_count(&devices[i]);
+ }
+ }
+ // now go back and raise every device's total group count to that max
+ if (DRV_CONFIG_emon_mode(drv_cfg) == TRUE) {
+ for (i = num_core_devs; i < num_devices; i++) {
+ if (LWPMU_DEVICE_em_groups_count(&devices[i]) <
+ max_groups_unc) {
+ LWPMU_DEVICE_em_groups_count(&devices[i]) =
+ max_groups_unc;
+ }
+ }
+ }
+
+ // allocate uncore read buffers for SEP
+ if (unc_buf_init && !DRV_CONFIG_emon_mode(drv_cfg)) {
+ lwpmudrv_Allocate_Uncore_Buffer();
+ }
+
+ // must be done after pcb is created and before PMU is first written to
+ CONTROL_Invoke_Parallel(lwpmudrv_Init_Op, NULL);
+
+ for (i = num_core_devs; i < num_devices; i++) {
+ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[i]);
+ dispatch_unc = LWPMU_DEVICE_dispatch(&devices[i]);
+ if (pcfg_unc && DEV_UNC_CONFIG_num_events(pcfg_unc) &&
+ dispatch_unc && dispatch_unc->init) {
+ dispatch_unc->init((VOID *)&i);
+ }
+ }
+
+ // Allocate PEBS buffers
+ if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) {
+ PEBS_Allocate();
+ }
+
+ //
+ // Transfer the data into the PMU registers
+ //
+ CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL);
+
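+ /*
+ * Illustrative note (not additional driver logic): when emon_mode is
+ * set, the loop above pads every uncore device up to max_groups_unc
+ * groups; e.g. a device with 2 event groups and one with 5 both end
+ * up with em_groups_count == 5, so lwpmudrv_Emon_Switch_Group can
+ * rotate all devices' groups in lockstep.
+ */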
SEP_DRV_LOG_TRACE("IOCTL_Init_PMU - finished initial Write."); + + if (DRV_CONFIG_counting_mode(drv_cfg) == TRUE || + DRV_CONFIG_emon_mode(drv_cfg) == TRUE) { + if (!read_counter_info) { + read_counter_info = + CONTROL_Allocate_Memory(emon_buffer_size); + if (!read_counter_info) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure!"); + return OS_NO_MEM; + } + } + if (!prev_counter_data) { + prev_counter_data = + CONTROL_Allocate_Memory(emon_buffer_size); + if (!prev_counter_data) { + read_counter_info = + CONTROL_Free_Memory(read_counter_info); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure!"); + return OS_NO_MEM; + } + } + if (!emon_buffer_driver_helper) { + // allocate size = size of EMON_BUFFER_DRIVER_HELPER_NODE + the number of entries in core_index_to_thread_offset_map, which is num of cpu + emon_buffer_driver_helper = CONTROL_Allocate_Memory( + sizeof(EMON_BUFFER_DRIVER_HELPER_NODE) + + sizeof(U32) * + GLOBAL_STATE_num_cpus(driver_state)); + if (!emon_buffer_driver_helper) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure!"); + return OS_NO_MEM; + } + } + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Read_MSR(pvoid param) + * + * @param param - pointer to the buffer to store the MSR counts + * + * @return none + * + * @brief + * @brief Read the U64 value at address in buf_drv_to_usr and + * @brief write the result into buf_usr_to_drv. + * + * Special Notes + */ +static VOID +lwpmudrv_Read_MSR(PVOID param) +{ + U32 this_cpu; + MSR_DATA this_node; + U32 reg_num; + S32 status; + + SEP_DRV_LOG_TRACE_IN(""); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + this_node = &msr_data[this_cpu]; + reg_num = MSR_DATA_addr(this_node); + + if (reg_num == 0) { + preempt_enable(); + SEP_DRV_LOG_ERROR_TRACE_OUT("Error: tried to read MSR 0"); + return; + } + + MSR_DATA_value(this_node) = + (U64)SYS_Read_MSR_With_Status(reg_num, &status); + MSR_DATA_status(this_node) = status; + preempt_enable(); + + SEP_DRV_LOG_TRACE_OUT(""); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Get_Perf_Capab(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read the U64 value at IA32_PERF_CAPABILITIES and write + * @brief the result into buf_usr_to_drv. + * @brief Returns OS_SUCCESS if the read across all cores succeed, + * @brief otherwise OS_FAULT. 
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_Perf_Capab(IOCTL_ARGS arg)
+{
+ U64 *val;
+ S32 i;
+ MSR_DATA node;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (arg->buf_drv_to_usr == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments!");
+ return OS_FAULT;
+ }
+
+ val = (U64 *)arg->buf_drv_to_usr;
+
+ msr_data = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state) *
+ sizeof(MSR_DATA_NODE));
+ if (!msr_data) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!");
+ return OS_NO_MEM;
+ }
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ node = &msr_data[i];
+ MSR_DATA_addr(node) = IA32_PERF_CAPABILITIES;
+ }
+
+ CONTROL_Invoke_Parallel(lwpmudrv_Read_MSR, (VOID *)(size_t)0);
+
+ /* copy values to arg array */
+ if (arg->len_drv_to_usr <
+ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)) {
+ msr_data = CONTROL_Free_Memory(msr_data);
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Not enough memory allocated in output buffer!");
+ return OS_FAULT;
+ }
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ node = &msr_data[i];
+ if (MSR_DATA_status(node)) {
+ SEP_DRV_LOG_ERROR("Reading the MSR 0x%x failed",
+ IA32_PERF_CAPABILITIES);
+ }
+ if (copy_to_user(&val[i], (U64 *)&MSR_DATA_value(node),
+ sizeof(U64))) {
+ msr_data = CONTROL_Free_Memory(msr_data);
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+ return OS_FAULT;
+ }
+ }
+
+ msr_data = CONTROL_Free_Memory(msr_data);
+
+ SEP_DRV_LOG_FLOW_OUT("Success");
+ return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Read_Allowlist_MSR_All_Cores(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Read, on every core, the allow-listed MSR requested in
+ * @brief buf_usr_to_drv and return the values and per-core status
+ * @brief through buf_drv_to_usr.
+ * @brief Returns OS_SUCCESS if the read across all cores succeeds,
+ * @brief otherwise OS_FAULT.
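+ *
+ * Illustrative usage sketch (the ioctl plumbing is assumed and not
+ * shown here; field names match the code below): user space submits
+ * one DRV_MSR_OP_NODE per logical CPU, e.g. to read IA32_FIXED_CTR0
+ * (0x309) everywhere:
+ *
+ *   for (i = 0; i < num_cpus; i++)
+ *           ops[i].reg_id = 0x309;
+ *   // on return, ops[i].reg_read_val holds cpu i's value and
+ *   // ops[i].status the per-core read status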
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Read_Allowlist_MSR_All_Cores(IOCTL_ARGS arg)
+{
+ DRV_MSR_OP req_msr_ops;
+ S32 i;
+ MSR_DATA node;
+ OS_STATUS status = OS_SUCCESS;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (arg->buf_usr_to_drv == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (buf_usr_to_drv is NULL)!");
+ return OS_INVALID;
+ }
+ if (arg->len_usr_to_drv !=
+ sizeof(DRV_MSR_OP_NODE) * GLOBAL_STATE_num_cpus(driver_state)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (unexpected len_usr_to_drv value)!");
+ return OS_INVALID;
+ }
+ if (arg->buf_drv_to_usr == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (buf_drv_to_usr is NULL)!");
+ return OS_INVALID;
+ }
+ if (arg->len_drv_to_usr !=
+ sizeof(DRV_MSR_OP_NODE) * GLOBAL_STATE_num_cpus(driver_state)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (unexpected len_drv_to_usr value)!");
+ return OS_INVALID;
+ }
+
+ req_msr_ops = (DRV_MSR_OP)CONTROL_Allocate_Memory(
+ GLOBAL_STATE_num_cpus(driver_state) * sizeof(DRV_MSR_OP_NODE));
+ if (!req_msr_ops) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!");
+ return OS_NO_MEM;
+ }
+
+ if (copy_from_user(req_msr_ops, arg->buf_usr_to_drv,
+ sizeof(DRV_MSR_OP_NODE) *
+ GLOBAL_STATE_num_cpus(driver_state))) {
+ SEP_DRV_LOG_ERROR("Memory copy failure!");
+ status = OS_FAULT;
+ goto clean_return;
+ }
+
+ if (!msr_data) {
+ msr_data = CONTROL_Allocate_Memory(
+ GLOBAL_STATE_num_cpus(driver_state) *
+ sizeof(MSR_DATA_NODE));
+ if (!msr_data) {
+ SEP_DRV_LOG_ERROR("Memory allocation failure!");
+ status = OS_NO_MEM;
+ goto clean_return;
+ }
+ }
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ if (!PMU_LIST_Check_MSR(req_msr_ops[i].reg_id)) {
+ SEP_DRV_LOG_ERROR("Invalid MSR information! 0x%x",
+ req_msr_ops[i].reg_id);
+ status = OS_INVALID;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE("Verified the MSR 0x%x",
+ req_msr_ops[i].reg_id);
+ }
+
+ node = &msr_data[i];
+ MSR_DATA_addr(node) = req_msr_ops[i].reg_id;
+ }
+
+ CONTROL_Invoke_Parallel(lwpmudrv_Read_MSR, (VOID *)(size_t)0);
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ node = &msr_data[i];
+ req_msr_ops[i].reg_read_val = MSR_DATA_value(node);
+ req_msr_ops[i].status = MSR_DATA_status(node);
+ }
+
+ if (copy_to_user(arg->buf_drv_to_usr, req_msr_ops,
+ sizeof(DRV_MSR_OP_NODE) *
+ GLOBAL_STATE_num_cpus(driver_state))) {
+ SEP_DRV_LOG_ERROR("Memory copy failure!");
+ status = OS_FAULT;
+ goto clean_return;
+ }
+
+clean_return:
+ req_msr_ops = CONTROL_Free_Memory(req_msr_ops);
+ msr_data = CONTROL_Free_Memory(msr_data);
+
+ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+ return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void lwpmudrv_Write_MSR(PVOID param)
+ *
+ * @param param - pointer to array containing the MSR address and the value to be written
+ *
+ * @return none
+ *
+ * @brief On each core, write the value staged in that core's msr_data
+ * @brief entry to the staged MSR address, and record the write status
+ * @brief back into the entry.
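+ *
+ * Staging sketch (illustrative, mirrors how the callers below use
+ * msr_data; not a call sequence lifted from this driver):
+ *
+ *   MSR_DATA_addr(&msr_data[cpu])  = 0x38F; // IA32_PERF_GLOBAL_CTRL
+ *   MSR_DATA_value(&msr_data[cpu]) = 0;
+ *   CONTROL_Invoke_Parallel(lwpmudrv_Write_MSR, NULL);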
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Write_MSR(PVOID param)
+{
+ U32 this_cpu;
+ MSR_DATA this_node;
+ U32 reg_num;
+ U64 val;
+ S32 status;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ preempt_disable();
+ this_cpu = CONTROL_THIS_CPU();
+ this_node = &msr_data[this_cpu];
+ reg_num = (U32)MSR_DATA_addr(this_node);
+ val = (U64)MSR_DATA_value(this_node);
+ // don't attempt to write MSR 0
+ if (reg_num == 0) {
+ preempt_enable();
+ SEP_DRV_LOG_ERROR_TRACE_OUT("Error: tried to write MSR 0!");
+ return;
+ }
+
+ SYS_Write_MSR_With_Status(reg_num, val, &status);
+ MSR_DATA_status(this_node) = status;
+ preempt_enable();
+
+ SEP_DRV_LOG_TRACE_OUT("");
+
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Write_Allowlist_MSR_All_Cores(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Write, on every core, the allow-listed MSR value supplied in
+ * @brief buf_usr_to_drv, and return the per-core status through
+ * @brief buf_drv_to_usr.
+ * @brief Returns OS_SUCCESS if the write across all cores succeeds,
+ * @brief otherwise OS_FAULT.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Write_Allowlist_MSR_All_Cores(IOCTL_ARGS arg)
+{
+ DRV_MSR_OP req_msr_ops;
+ S32 i;
+ MSR_DATA node;
+ OS_STATUS status = OS_SUCCESS;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (arg->buf_usr_to_drv == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (buf_usr_to_drv is NULL)!");
+ return OS_INVALID;
+ }
+ if (arg->len_usr_to_drv !=
+ sizeof(DRV_MSR_OP_NODE) * GLOBAL_STATE_num_cpus(driver_state)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (unexpected len_usr_to_drv value)!");
+ return OS_INVALID;
+ }
+ if (arg->buf_drv_to_usr == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (buf_drv_to_usr is NULL)!");
+ return OS_INVALID;
+ }
+ if (arg->len_drv_to_usr !=
+ sizeof(DRV_MSR_OP_NODE) * GLOBAL_STATE_num_cpus(driver_state)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (unexpected len_drv_to_usr value)!");
+ return OS_INVALID;
+ }
+
+ req_msr_ops = (DRV_MSR_OP)CONTROL_Allocate_Memory(
+ GLOBAL_STATE_num_cpus(driver_state) * sizeof(DRV_MSR_OP_NODE));
+ if (!req_msr_ops) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure!");
+ return OS_NO_MEM;
+ }
+
+ if (copy_from_user(req_msr_ops, arg->buf_usr_to_drv,
+ sizeof(DRV_MSR_OP_NODE) *
+ GLOBAL_STATE_num_cpus(driver_state))) {
+ SEP_DRV_LOG_ERROR("Memory copy failure!");
+ status = OS_FAULT;
+ goto clean_return;
+ }
+
+ if (!msr_data) {
+ msr_data = CONTROL_Allocate_Memory(
+ GLOBAL_STATE_num_cpus(driver_state) *
+ sizeof(MSR_DATA_NODE));
+ if (!msr_data) {
+ SEP_DRV_LOG_ERROR("Memory allocation failure!");
+ status = OS_NO_MEM;
+ goto clean_return;
+ }
+ }
+
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+ if (!PMU_LIST_Check_MSR(req_msr_ops[i].reg_id)) {
+ SEP_DRV_LOG_ERROR("Invalid MSR information! 0x%x",
0x%x", + req_msr_ops[i].reg_id); + status = OS_INVALID; + goto clean_return; + } else { + SEP_DRV_LOG_TRACE("Verified the MSR 0x%x", + msr_ops[i].reg_id); + } + + node = &msr_data[i]; + MSR_DATA_addr(node) = req_msr_ops[i].reg_id; + MSR_DATA_value(node) = req_msr_ops[i].reg_write_val; + } + + CONTROL_Invoke_Parallel(lwpmudrv_Write_MSR, (VOID *)(size_t)0); + + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + node = &msr_data[i]; + req_msr_ops[i].status = MSR_DATA_status(node); + } + + if (copy_to_user(arg->buf_drv_to_usr, req_msr_ops, + sizeof(DRV_MSR_OP_NODE) * + GLOBAL_STATE_num_cpus(driver_state))) { + SEP_DRV_LOG_ERROR("Memory copy failure!"); + status = OS_FAULT; + goto clean_return; + } + +clean_return: + req_msr_ops = CONTROL_Free_Memory(req_msr_ops); + msr_data = CONTROL_Free_Memory(msr_data); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Read_Counters(IOCTL_ARG arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read all the programmed data counters and accumulate them + * @brief into a single buffer. + * + * Special Notes + */ +static OS_STATUS +lwpmudrv_Read_Counters(IOCTL_ARGS arg) +{ + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_SUCCESS; + } + // + // Transfer the data in the PMU registers to the output buffer + // + if (!read_counter_info || !prev_counter_data) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "read_counter_info or prev_counter_data is NULL!"); + return OS_NO_MEM; + } + memset(read_counter_info, 0, arg->len_drv_to_usr); + + CONTROL_Invoke_Parallel(lwpmudrv_Read_Data_Op, + (VOID *)read_counter_info); + + if (copy_to_user(arg->buf_drv_to_usr, read_counter_info, + arg->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read / Store the counters and switch to the next valid group. + * + * Special Notes + * This routine is called from the user mode code to handle the multiple group + * situation. 10 distinct steps are taken: + * Step 1: Save the previous cpu's tsc + * Step 2: Read the current cpu's tsc + * Step 3: Pause the counting PMUs + * Step 4: Calculate the difference between the current and previous cpu's tsc + * Step 5: Save original buffer ptr and copy cpu's tsc into the output buffer + * Increment the buffer position by number of CPU + * Step 6: Read the currently programmed data PMUs and copy the data into the output buffer + * Restore the original buffer ptr. 
+ * Step 7: Switch to the next group
+ * Step 8: Write the new group to the PMU
+ * Step 9: Read the current cpu's tsc for the next collection (so the MSR read time is not included in the report)
+ * Step 10: Resume the counting PMUs
+ */
+static OS_STATUS
+lwpmudrv_Read_Counters_And_Switch_Group(IOCTL_ARGS arg)
+{
+ U64 *p_buffer = NULL;
+ char *orig_r_buf_ptr = NULL;
+ U64 orig_r_buf_len = 0;
+ OS_STATUS status = OS_SUCCESS;
+ DRV_BOOL enter_in_pause_state = 0;
+ U32 i = 0;
+#if !defined(CONFIG_PREEMPT_COUNT)
+ U64 *tmp = NULL;
+#endif
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+ return OS_FAULT;
+ }
+
+ if (!DRIVER_STATE_IN(GET_DRIVER_STATE(),
+ STATE_BIT_RUNNING | STATE_BIT_PAUSED)) {
+ SEP_DRV_LOG_FLOW_OUT(
+ "'Success'/error: driver state is not RUNNING or PAUSED!");
+ return OS_SUCCESS;
+ }
+
+ if (GET_DRIVER_STATE() == DRV_STATE_PAUSED) {
+ enter_in_pause_state = 1;
+ }
+
+ // step 1
+#if !defined(CONFIG_PREEMPT_COUNT)
+ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
+ // swap cpu_tsc and prev_cpu_tsc, so that cpu_tsc is saved in prev_cpu_tsc.
+ tmp = prev_cpu_tsc;
+ prev_cpu_tsc = cpu_tsc;
+ cpu_tsc = tmp;
+ } else
+#endif
+ prev_cpu_tsc[0] = cpu_tsc[0];
+
+ // step 2
+ // if per_cpu_tsc is not set, read cpu0's tsc into cpu_tsc[0]
+ // if per_cpu_tsc is set, read every cpu's tsc into cpu_tsc via lwpmudrv_Fill_TSC_Info
+#if !defined(CONFIG_PREEMPT_COUNT)
+ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state));
+ init_waitqueue_head(&read_tsc_now);
+#endif
+ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info,
+ (PVOID)(size_t)0);
+ } else
+#endif
+ CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]);
+
+ // step 3
+ // Counters should be frozen right after time stamped.
+ if (!enter_in_pause_state) {
+ status = lwpmudrv_Pause();
+ }
+
+ // step 4
+ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
+ for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+#if !defined(CONFIG_PREEMPT_COUNT)
+ diff_cpu_tsc[i] = cpu_tsc[i] - prev_cpu_tsc[i];
+#else
+ // if CONFIG_PREEMPT_COUNT is defined, lwpmudrv_Fill_TSC_Info cannot be run;
+ // return all cpu's tsc difference with cpu0's tsc difference instead + diff_cpu_tsc[i] = cpu_tsc[0] - prev_cpu_tsc[0]; +#endif + } + } else { + diff_cpu_tsc[0] = cpu_tsc[0] - prev_cpu_tsc[0]; + } + + // step 5 + orig_r_buf_ptr = arg->buf_drv_to_usr; + orig_r_buf_len = arg->len_drv_to_usr; + + if (copy_to_user(arg->buf_drv_to_usr, diff_cpu_tsc, + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + p_buffer = (U64 *)(arg->buf_drv_to_usr); + p_buffer += GLOBAL_STATE_num_cpus(driver_state); + arg->buf_drv_to_usr = (char *)p_buffer; + arg->len_drv_to_usr -= + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64); + + // step 6 + status = lwpmudrv_Read_Counters(arg); + + arg->buf_drv_to_usr = orig_r_buf_ptr; + arg->len_drv_to_usr = orig_r_buf_len; + + // step 7 + // for each processor, increment its current group number + lwpmudrv_Switch_To_Next_Group(); + + // step 8 + CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL); + + // step 9 + // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run +#if !defined(CONFIG_PREEMPT_COUNT) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); +#endif + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, + (PVOID)(size_t)0); + } else +#endif + CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); + + // step 10 + if (!enter_in_pause_state) { + status = lwpmudrv_Resume(); + } + + SEP_DRV_LOG_FLOW_OUT("Return value: %d", status); + return status; +} + +/* + * @fn static OS_STATUS lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Read the current value of the counters, and reset them all to 0. + * + * Special Notes + * This routine is called from the user mode code to handle the multiple group + * situation. 9 distinct steps are taken: + * Step 1: Save the previous cpu's tsc + * Step 2: Read the current cpu's tsc + * Step 3: Pause the counting PMUs + * Step 4: Calculate the difference between the current and previous cpu's tsc + * Step 5: Save original buffer ptr and copy cpu's tsc into the output buffer + * Increment the buffer position by number of CPU + * Step 6: Read the currently programmed data PMUs and copy the data into the output buffer + * Restore the original buffer ptr. 
+ * Step 7: Write the new group to the PMU + * Step 8: Read the current cpu's tsc for next collection (so read MSRs time not included in report) + * Step 9: Resume the counting PMUs + */ +static OS_STATUS +lwpmudrv_Read_And_Reset_Counters(IOCTL_ARGS arg) +{ + U64 *p_buffer = NULL; + char *orig_r_buf_ptr = NULL; + U64 orig_r_buf_len = 0; + OS_STATUS status = OS_SUCCESS; + DRV_BOOL enter_in_pause_state = 0; + U32 i = 0; +#if !defined(CONFIG_PREEMPT_COUNT) + U64 *tmp = NULL; +#endif + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments."); + return OS_FAULT; + } + + if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), + STATE_BIT_RUNNING | STATE_BIT_PAUSED)) { + SEP_DRV_LOG_FLOW_OUT( + "'Success'/error: driver state is not RUNNING or PAUSED!"); + return OS_SUCCESS; + } + + if (GET_DRIVER_STATE() == DRV_STATE_PAUSED) { + enter_in_pause_state = 1; + } + + // step 1 +#if !defined(CONFIG_PREEMPT_COUNT) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + // swap cpu_tsc and prev_cpu_tsc, so that cpu_tsc is saved in prev_cpu_tsc. + tmp = prev_cpu_tsc; + prev_cpu_tsc = cpu_tsc; + cpu_tsc = tmp; + } else +#endif + prev_cpu_tsc[0] = cpu_tsc[0]; + + // step 2 + // if per_cpu_tsc is not defined, read cpu0's tsc into var cpu_tsc[0] + // if per_cpu_tsc is defined, read all cpu's tsc into var cpu_tsc by lwpmudrv_Fill_TSC_Info +#if !defined(CONFIG_PREEMPT_COUNT) + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); +#endif + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, + (PVOID)(size_t)0); + } else +#endif + CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]); + + // step 3 + // Counters should be frozen right after time stamped. + if (!enter_in_pause_state) { + status = lwpmudrv_Pause(); + } + + // step 4 + if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { +#if !defined(CONFIG_PREEMPT_COUNT) + diff_cpu_tsc[i] = cpu_tsc[i] - prev_cpu_tsc[i]; +#else + // if CONFIG_PREEMPT_COUNT is defined, means lwpmudrv_Fill_TSC_Info can not be run. 
+ // return all cpu's tsc difference with cpu0's tsc difference instead
+ diff_cpu_tsc[i] = cpu_tsc[0] - prev_cpu_tsc[0];
+#endif
+ }
+ } else {
+ diff_cpu_tsc[0] = cpu_tsc[0] - prev_cpu_tsc[0];
+ }
+
+ // step 5
+ orig_r_buf_ptr = arg->buf_drv_to_usr;
+ orig_r_buf_len = arg->len_drv_to_usr;
+
+ if (copy_to_user(arg->buf_drv_to_usr, diff_cpu_tsc,
+ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64))) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+ return OS_FAULT;
+ }
+
+ p_buffer = (U64 *)(arg->buf_drv_to_usr);
+ p_buffer += GLOBAL_STATE_num_cpus(driver_state);
+ arg->buf_drv_to_usr = (char *)p_buffer;
+ arg->len_drv_to_usr -=
+ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64);
+
+ // step 6
+ status = lwpmudrv_Read_Counters(arg);
+
+ arg->buf_drv_to_usr = orig_r_buf_ptr;
+ arg->len_drv_to_usr = orig_r_buf_len;
+
+ // step 7
+ CONTROL_Invoke_Parallel(lwpmudrv_Write_Op, NULL);
+
+ // step 8
+ // if per_cpu_tsc is defined, read all cpu's tsc and save in cpu_tsc for next run
+#if !defined(CONFIG_PREEMPT_COUNT)
+ if (DRV_CONFIG_per_cpu_tsc(drv_cfg)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state));
+ init_waitqueue_head(&read_tsc_now);
+#endif
+ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info,
+ (PVOID)(size_t)0);
+ } else
+#endif
+ CONTROL_Invoke_Cpu(0, lwpmudrv_Read_Specific_TSC, &cpu_tsc[0]);
+
+ // step 9
+ if (!enter_in_pause_state) {
+ status = lwpmudrv_Resume();
+ }
+
+ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+ return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_EM_Config(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Configure the event multiplexing group.
+ *
+ * Special Notes
+ * None
+ */
+static OS_STATUS
+lwpmudrv_Set_EM_Config(IOCTL_ARGS arg)
+{
+ EVENT_CONFIG ec;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Skipped: driver state is not IDLE!");
+ return OS_IN_PROGRESS;
+ }
+
+ if (arg->buf_usr_to_drv == NULL ||
+ arg->len_usr_to_drv != sizeof(EVENT_CONFIG_NODE)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+ return OS_INVALID;
+ }
+
+ LWPMU_DEVICE_ec(&devices[cur_device]) =
+ CONTROL_Allocate_Memory(sizeof(EVENT_CONFIG_NODE));
+ if (!LWPMU_DEVICE_ec(&devices[cur_device])) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for ec!");
+ return OS_NO_MEM;
+ }
+
+ if (copy_from_user(LWPMU_DEVICE_ec(&devices[cur_device]),
+ arg->buf_usr_to_drv, sizeof(EVENT_CONFIG_NODE))) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Memory copy failure (event config)!");
+ return OS_FAULT;
+ }
+
+ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]);
+ LWPMU_DEVICE_PMU_register_data(&devices[cur_device]) =
+ CONTROL_Allocate_Memory(EVENT_CONFIG_num_groups(ec) *
+ sizeof(VOID *));
+ if (!LWPMU_DEVICE_PMU_register_data(&devices[cur_device])) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Memory allocation failure for PMU_register_data!");
+ return OS_NO_MEM;
+ }
+
+ EVENTMUX_Initialize();
+
+ SEP_DRV_LOG_FLOW_OUT("OS_SUCCESS.");
+ return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Set the number of em groups in the global state node.
+ * @brief Also, copy the EVENT_CONFIG struct that has been passed in,
+ * @brief into a global location for now.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg)
+{
+ EVENT_CONFIG ec;
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Skipped: driver state is not IDLE!");
+ return OS_IN_PROGRESS;
+ }
+
+ if (arg->buf_usr_to_drv == NULL ||
+ arg->len_usr_to_drv != sizeof(EVENT_CONFIG_NODE)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arguments.");
+ return OS_INVALID;
+ }
+
+ // allocate memory
+ LWPMU_DEVICE_ec(&devices[cur_device]) =
+ CONTROL_Allocate_Memory(sizeof(EVENT_CONFIG_NODE));
+ if (!LWPMU_DEVICE_ec(&devices[cur_device])) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for ec!");
+ return OS_NO_MEM;
+ }
+ if (copy_from_user(LWPMU_DEVICE_ec(&devices[cur_device]),
+ arg->buf_usr_to_drv, arg->len_usr_to_drv)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Memory copy failure for LWPMU_device_ec!");
+ return OS_FAULT;
+ }
+ // configure num_groups from ec of the specific device
+ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]);
+ SEP_DRV_LOG_TRACE("Num Groups UNCORE: %d.",
+ EVENT_CONFIG_num_groups_unc(ec));
+ LWPMU_DEVICE_PMU_register_data(&devices[cur_device]) =
+ CONTROL_Allocate_Memory(EVENT_CONFIG_num_groups_unc(ec) *
+ sizeof(VOID *));
+ if (!LWPMU_DEVICE_PMU_register_data(&devices[cur_device])) {
+ LWPMU_DEVICE_ec(&devices[cur_device]) = CONTROL_Free_Memory(
+ LWPMU_DEVICE_ec(&devices[cur_device]));
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Memory allocation failure for LWPMU_DEVICE_PMU_register_data");
+ return OS_NO_MEM;
+ }
+ LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = 0;
+
+ SEP_DRV_LOG_FLOW_OUT("Success");
+ return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Configure_Events(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Copies one group of events into kernel space at
+ * @brief PMU_register_data[em_groups_count].
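+ *
+ * Example (illustrative): with EVENT_CONFIG_num_groups(ec) == 3, the
+ * user-mode collector issues this operation three times; each incoming
+ * ECB carries its own group_id, so the kernel ends up with:
+ *
+ *   PMU_register_data[0] = ECB of group 0
+ *   PMU_register_data[1] = ECB of group 1
+ *   PMU_register_data[2] = ECB of group 2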
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Configure_Events(IOCTL_ARGS arg)
+{
+ OS_STATUS status = OS_SUCCESS;
+ U32 group_id;
+ ECB ecb;
+ U32 em_groups_count;
+ EVENT_CONFIG ec;
+#if !defined(DISABLE_REGISTER_CHECK)
+ U32 idx, reg_id;
+#endif
+ DRV_IOCTL_STATUS reg_check_status = NULL;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR("Skipped: driver state is not IDLE!");
+ status = OS_IN_PROGRESS;
+ goto clean_return;
+ }
+
+ ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(&devices[cur_device]);
+ em_groups_count = LWPMU_DEVICE_em_groups_count(&devices[cur_device]);
+
+ if (em_groups_count >= EVENT_CONFIG_num_groups(ec)) {
+ SEP_DRV_LOG_ERROR(
+ "Error: EM groups number exceeded initial configuration!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+ if (arg->buf_usr_to_drv == NULL ||
+ arg->len_usr_to_drv < sizeof(ECB_NODE) ||
+ arg->buf_drv_to_usr == NULL ||
+ arg->len_drv_to_usr < sizeof(DRV_IOCTL_STATUS_NODE)) {
+ SEP_DRV_LOG_ERROR("Invalid arguments.");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+
+ ecb = CONTROL_Allocate_Memory(arg->len_usr_to_drv);
+ if (!ecb) {
+ SEP_DRV_LOG_ERROR("Memory allocation failure for ecb!");
+ status = OS_NO_MEM;
+ goto clean_return;
+ }
+ if (copy_from_user(ecb, arg->buf_usr_to_drv, arg->len_usr_to_drv)) {
+ SEP_DRV_LOG_ERROR("Memory copy failure for ecb data!");
+ CONTROL_Free_Memory(ecb);
+ status = OS_FAULT;
+ goto clean_return;
+ }
+#if !defined(DISABLE_REGISTER_CHECK)
+ reg_check_status =
+ CONTROL_Allocate_Memory(sizeof(DRV_IOCTL_STATUS_NODE));
+ if (!reg_check_status) {
+ SEP_DRV_LOG_ERROR(
+ "Memory allocation failure for reg_check_status");
+ CONTROL_Free_Memory(ecb);
+ status = OS_NO_MEM;
+ goto clean_return;
+ }
+
+ // Validation check from PMU list
+ for ((idx) = 0; (idx) < ECB_num_entries(ecb); (idx)++) {
+ reg_id = ECB_entries_reg_id((ecb), (idx));
+ if (reg_id == 0) {
+ continue;
+ }
+ if (!PMU_LIST_Check_MSR(reg_id)) {
+ SEP_DRV_LOG_ERROR("Invalid MSR information! 0x%x",
+ reg_id);
+ status = OS_INVALID;
+ DRV_IOCTL_STATUS_drv_status(reg_check_status) =
+ VT_INVALID_PROG_INFO;
+ DRV_IOCTL_STATUS_reg_prog_type(reg_check_status) =
+ PMU_REG_PROG_MSR;
+ DRV_IOCTL_STATUS_reg_key1(reg_check_status) = reg_id;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE("Verified the msr 0x%x, idx=%u",
+ reg_id, idx);
+ }
+ }
+#endif
+ group_id = ECB_group_id(ecb);
+
+ if (group_id >= EVENT_CONFIG_num_groups(ec)) {
+ CONTROL_Free_Memory(ecb);
+ SEP_DRV_LOG_ERROR(
+ "Group_id is larger than total number of groups!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+
+ LWPMU_DEVICE_PMU_register_data(&devices[cur_device])[group_id] = ecb;
+ LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = group_id + 1;
+
+clean_return:
+ if (reg_check_status) {
+ if (copy_to_user(arg->buf_drv_to_usr, reg_check_status,
+ sizeof(DRV_IOCTL_STATUS_NODE))) {
+ SEP_DRV_LOG_ERROR(
+ "Memory copy to user failure for reg_check_status data!");
+ }
+ CONTROL_Free_Memory(reg_check_status);
+ }
+
+ if (status != OS_SUCCESS) {
+ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+ }
+
+ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+ return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Configure_Events_UNC(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Make a copy of the uncore registers that need to be programmed
+ * @brief for the next event set used for event multiplexing
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Configure_Events_UNC(IOCTL_ARGS arg)
+{
+ OS_STATUS status = OS_SUCCESS;
+ VOID **PMU_register_data_unc;
+ S32 em_groups_count_unc;
+ ECB ecb;
+ EVENT_CONFIG ec_unc;
+ DEV_UNC_CONFIG pcfg_unc;
+ U32 group_id = 0;
+ ECB in_ecb = NULL;
+ DRV_BOOL check_status = TRUE;
+ U32 init_idx = 0;
+#if !defined(DISABLE_REGISTER_CHECK)
+ PMU_MMIO_BAR_INFO_NODE primary;
+ PMU_MMIO_BAR_INFO_NODE secondary;
+ U32 idx, reg_id;
+ U32 bar_idx;
+ U32 itr;
+ MMIO_BAR_INFO mmio_bar_list;
+#endif
+ DRV_IOCTL_STATUS reg_check_status = NULL;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR("Skipped: driver state is not IDLE!");
+ status = OS_IN_PROGRESS;
+ goto clean_return;
+ }
+
+ em_groups_count_unc =
+ LWPMU_DEVICE_em_groups_count(&devices[cur_device]);
+ PMU_register_data_unc =
+ LWPMU_DEVICE_PMU_register_data(&devices[cur_device]);
+ ec_unc = LWPMU_DEVICE_ec(&devices[cur_device]);
+ pcfg_unc = LWPMU_DEVICE_pcfg(&devices[cur_device]);
+
+ if (!pcfg_unc || !DEV_UNC_CONFIG_num_events(pcfg_unc) ||
+ ec_unc == NULL) {
+ SEP_DRV_LOG_ERROR("Pcfg_unc or ec_unc NULL!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+
+ if (em_groups_count_unc >= (S32)EVENT_CONFIG_num_groups_unc(ec_unc)) {
+ SEP_DRV_LOG_ERROR(
+ "Uncore EM groups number exceeded initial configuration!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+ if (arg->buf_usr_to_drv == NULL ||
+ arg->len_usr_to_drv < sizeof(ECB_NODE) ||
+ arg->buf_drv_to_usr == NULL ||
+ arg->len_drv_to_usr < sizeof(DRV_IOCTL_STATUS_NODE)) {
+ SEP_DRV_LOG_ERROR("Invalid arguments.");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+
+ in_ecb = CONTROL_Allocate_Memory(arg->len_usr_to_drv);
+ if (!in_ecb) {
+ SEP_DRV_LOG_ERROR("Memory allocation failure for uncore ecb!");
+ status = OS_NO_MEM;
+ goto clean_return;
+ }
+ if (copy_from_user(in_ecb, arg->buf_usr_to_drv, arg->len_usr_to_drv)) {
+ CONTROL_Free_Memory(in_ecb);
+ SEP_DRV_LOG_ERROR("Memory copy failure for uncore ecb data!");
+ status = OS_FAULT;
+ goto clean_return;
+ }
+#if !defined(DISABLE_REGISTER_CHECK)
+ reg_check_status =
+ CONTROL_Allocate_Memory(sizeof(DRV_IOCTL_STATUS_NODE));
+ if (!reg_check_status) {
+ SEP_DRV_LOG_ERROR(
+ "Memory allocation failure for reg_check_status");
+ CONTROL_Free_Memory(in_ecb);
+ status = OS_NO_MEM;
+ goto clean_return;
+ }
+
+ // Validation check from PMU list
+ for ((idx) = 0; (idx) < ECB_num_entries(in_ecb); (idx)++) {
+ reg_id = ECB_entries_reg_id((in_ecb), (idx));
+ if (reg_id == 0) {
+ continue;
+ }
+ switch (ECB_entries_reg_prog_type((in_ecb), (idx))) {
+ case PMU_REG_PROG_MSR:
+ if (!PMU_LIST_Check_MSR(reg_id)) {
+ SEP_DRV_LOG_ERROR(
+ "Invalid MSR information! 0x%x",
+ reg_id);
+ status = OS_INVALID;
+ DRV_IOCTL_STATUS_drv_status(reg_check_status) =
+ VT_INVALID_PROG_INFO;
+ DRV_IOCTL_STATUS_reg_prog_type(
+ reg_check_status) = PMU_REG_PROG_MSR;
+ DRV_IOCTL_STATUS_reg_key1(reg_check_status) =
+ (U64)reg_id;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE("Verified the msr 0x%x",
+ reg_id);
+ }
+ break;
+ case PMU_REG_PROG_PCI:
+ if (!PMU_LIST_Check_PCI(
+ (U8)ECB_entries_bus_no((in_ecb), (idx)),
+ (U8)ECB_entries_dev_no((in_ecb), (idx)),
+ (U8)ECB_entries_func_no((in_ecb), (idx)),
+ reg_id)) {
+ SEP_DRV_LOG_ERROR(
+ "Invalid PCI information! B%d.D%d.F%d.O0x%x",
+ ECB_entries_bus_no((in_ecb), (idx)),
+ ECB_entries_dev_no((in_ecb), (idx)),
+ ECB_entries_func_no((in_ecb), (idx)),
+ reg_id);
+ status = OS_INVALID;
+ DRV_IOCTL_STATUS_drv_status(reg_check_status) =
+ VT_INVALID_PROG_INFO;
+ DRV_IOCTL_STATUS_reg_prog_type(
+ reg_check_status) = PMU_REG_PROG_PCI;
+ DRV_IOCTL_STATUS_bus(reg_check_status) =
+ ECB_entries_bus_no((in_ecb), (idx));
+ DRV_IOCTL_STATUS_dev(reg_check_status) =
+ ECB_entries_dev_no((in_ecb), (idx));
+ DRV_IOCTL_STATUS_func(reg_check_status) =
+ ECB_entries_func_no((in_ecb), (idx));
+ DRV_IOCTL_STATUS_offset(reg_check_status) =
+ reg_id;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE(
+ "Verified the PCI B%d.D%d.F%d.O0x%x",
+ ECB_entries_bus_no((in_ecb), (idx)),
+ ECB_entries_dev_no((in_ecb), (idx)),
+ ECB_entries_func_no((in_ecb), (idx)),
+ reg_id);
+ }
+ break;
+ case PMU_REG_PROG_MMIO:
+ memset(&primary, 0, sizeof(PMU_MMIO_BAR_INFO_NODE));
+ memset(&secondary, 0, sizeof(PMU_MMIO_BAR_INFO_NODE));
+ if (ECB_device_type(in_ecb) == DEVICE_UNC_SOCPERF ||
+ ECB_device_type(in_ecb) == DEVICE_UNC_GFX_PMT) {
+ continue;
+ }
+
+ if (ECB_unc_discovery_mode_get(in_ecb)) {
+ if (ECB_entries_reg_type((in_ecb), (idx)) ==
+ PMU_OPERATION_INITIALIZE) {
+ if (!ECB_entries_phys_addr((in_ecb),
+ (idx))) {
+ continue;
+ }
+ primary.mask = ECB_entries_phys_addr(
+ (in_ecb), (idx));
+ primary.bar_prog_type =
+ MMIO_DIRECT_BAR_TYPE;
+ check_status = PMU_LIST_Check_MMIO(
+ primary, secondary, reg_id);
+ } else {
+ for (init_idx = ECB_operations_register_start(
+ in_ecb,
+ PMU_OPERATION_INITIALIZE);
+ init_idx <
+ ECB_operations_register_start(
+ in_ecb,
+ PMU_OPERATION_INITIALIZE) +
+ ECB_operations_register_len(
+ in_ecb,
+ PMU_OPERATION_INITIALIZE);
+ init_idx++) {
+ if (ECB_entries_scheduled(
+ in_ecb, init_idx) !=
+ TRUE ||
+ ECB_entries_reg_bar_index(
+ (in_ecb), (idx)) !=
+ ECB_entries_reg_bar_index(
+ (in_ecb),
+ (init_idx))) {
+ continue;
+ }
+ if (ECB_entries_phys_addr(
+ (in_ecb),
+ (init_idx))) {
+ primary.mask =
+ ECB_entries_phys_addr(
+ (in_ecb),
+ (init_idx));
+ primary.bar_prog_type =
+ MMIO_DIRECT_BAR_TYPE;
+ if (!ECB_entries_generic_status_get(
+ (in_ecb),
+ (init_idx))) {
+ check_status = PMU_LIST_Check_MMIO(
+ primary,
+ secondary,
+ 0x0);
+ ECB_entries_generic_status_set(
+ (in_ecb),
+ (init_idx));
+ }
+ if (check_status) {
+ check_status = PMU_LIST_Check_MMIO(
+ primary,
+ secondary,
+ reg_id);
+ }
+ if (!check_status) {
+ break;
+ }
+ }
+ }
+ }
+ if (!check_status) {
+ SEP_DRV_LOG_ERROR(
+ "Invalid discovery MMIO information! Offset:0x%x, Addr:0x%llx.",
+ reg_id, primary.mask);
+ status = OS_INVALID;
+ DRV_IOCTL_STATUS_drv_status(
+ reg_check_status) =
+ VT_INVALID_PROG_INFO;
+ DRV_IOCTL_STATUS_reg_prog_type(
+ reg_check_status) =
+ PMU_REG_PROG_MMIO;
+ DRV_IOCTL_STATUS_counter_mask(
+ reg_check_status) =
+ primary.mask;
+ DRV_IOCTL_STATUS_reg_key2(
+ reg_check_status) = reg_id;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE(
+ "Verified discovery MMIO Offset:0x%x, Addr:0x%llx.",
+ reg_id, primary.mask);
+ }
+ } else if (ECB_device_type(in_ecb) == DEVICE_PMEM_FC ||
+ ECB_device_type(in_ecb) == DEVICE_PMEM_MC) {
+ for (itr = 0; itr < num_packages; itr++) {
+ mmio_bar_list = &ECB_mmio_bar_list(
+ (in_ecb), (itr));
+ primary.u.s.bus = MMIO_BAR_INFO_bus_no(
+ mmio_bar_list);
+ primary.u.s.dev = MMIO_BAR_INFO_dev_no(
+ mmio_bar_list);
+ primary.u.s.func =
+ MMIO_BAR_INFO_func_no(
+ mmio_bar_list);
+ primary.u.s.offset =
+ MMIO_BAR_INFO_main_bar_offset(
+ mmio_bar_list);
+ primary.mask =
+ MMIO_BAR_INFO_main_bar_mask(
+ mmio_bar_list);
+ primary.shift =
+ MMIO_BAR_INFO_main_bar_shift(
+ mmio_bar_list);
+ if (!MMIO_BAR_INFO_secondary_bar_offset(
+ mmio_bar_list)) {
+ primary.bar_prog_type =
+ MMIO_SINGLE_BAR_TYPE;
+ } else {
+ primary.bar_prog_type =
+ MMIO_DUAL_BAR_TYPE;
+ secondary.bar_prog_type =
+ MMIO_DUAL_BAR_TYPE;
+ secondary.u.s.bus =
+ MMIO_BAR_INFO_bus_no(
+ mmio_bar_list);
+ secondary.u.s.dev =
+ MMIO_BAR_INFO_dev_no(
+ mmio_bar_list);
+ secondary.u.s.func =
+ MMIO_BAR_INFO_func_no(
+ mmio_bar_list);
+ secondary.u.s.offset =
+ MMIO_BAR_INFO_secondary_bar_offset(
+ mmio_bar_list);
+ secondary.mask =
+ MMIO_BAR_INFO_secondary_bar_mask(
+ mmio_bar_list);
+ secondary.shift =
+ MMIO_BAR_INFO_secondary_bar_shift(
+ mmio_bar_list);
+ }
+ if (!PMU_LIST_Check_MMIO(primary,
+ secondary,
+ reg_id)) {
+ SEP_DRV_LOG_ERROR(
+ "Invalid MMIO information! Offset:0x%x, B%d.D%d.F%d.O0x%x, M0x%llx.S%d, Sec O0x%x, M0x%llx.S%d",
+ reg_id, primary.u.s.bus,
+ primary.u.s.dev,
+ primary.u.s.func,
+ primary.u.s.offset,
+ primary.mask,
+ primary.shift,
+ secondary.u.s.offset,
+ secondary.mask,
+ secondary.shift);
+ status = OS_INVALID;
+ DRV_IOCTL_STATUS_drv_status(
+ reg_check_status) =
+ VT_INVALID_PROG_INFO;
+ DRV_IOCTL_STATUS_reg_prog_type(
+ reg_check_status) =
+ PMU_REG_PROG_MMIO;
+ DRV_IOCTL_STATUS_bus(
+ reg_check_status) =
+ primary.u.s.bus;
+ DRV_IOCTL_STATUS_dev(
+ reg_check_status) =
+ primary.u.s.dev;
+ DRV_IOCTL_STATUS_func(
+ reg_check_status) =
+ primary.u.s.func;
+ DRV_IOCTL_STATUS_offset(
+ reg_check_status) =
+ primary.u.s.offset;
+ DRV_IOCTL_STATUS_reg_key2(
+ reg_check_status) =
+ reg_id;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE(
+ "Verified the MMIO Offset:0x%x, B%d.D%d.F%d.O0x%x, M0x%llx.S%d, Sec O0x%x, M0x%llx.S%d",
+ reg_id, primary.u.s.bus,
+ primary.u.s.dev,
+ primary.u.s.func,
+ primary.u.s.offset,
+ primary.mask,
+ primary.shift,
+ secondary.u.s.offset,
+ secondary.mask,
+ secondary.shift);
+ }
+ }
+ } else {
+ bar_idx = ECB_entries_reg_bar_index((in_ecb),
+ (idx));
+ mmio_bar_list =
+ &ECB_mmio_bar_list((in_ecb), (bar_idx));
+ primary.u.s.bus =
+ MMIO_BAR_INFO_bus_no(mmio_bar_list);
+ primary.u.s.dev =
+ MMIO_BAR_INFO_dev_no(mmio_bar_list);
+ primary.u.s.func =
+ MMIO_BAR_INFO_func_no(mmio_bar_list);
+ primary.u.s.offset =
+ MMIO_BAR_INFO_main_bar_offset(
+ mmio_bar_list);
+ primary.mask = MMIO_BAR_INFO_main_bar_mask(
+ mmio_bar_list);
+ primary.shift = MMIO_BAR_INFO_main_bar_shift(
+ mmio_bar_list);
+ if (!MMIO_BAR_INFO_secondary_bar_offset(
+ mmio_bar_list)) {
+ primary.bar_prog_type =
+ MMIO_SINGLE_BAR_TYPE;
+ } else {
+ primary.bar_prog_type =
+ MMIO_DUAL_BAR_TYPE;
+ secondary.bar_prog_type =
+ MMIO_DUAL_BAR_TYPE;
+ secondary.u.s.bus =
+ MMIO_BAR_INFO_bus_no(
+ mmio_bar_list);
+ secondary.u.s.dev =
+ MMIO_BAR_INFO_dev_no(
+ mmio_bar_list);
+ secondary.u.s.func =
+ MMIO_BAR_INFO_func_no(
+ mmio_bar_list);
+ secondary.u.s.offset =
+ MMIO_BAR_INFO_secondary_bar_offset(
+ mmio_bar_list);
+ secondary.mask =
+ MMIO_BAR_INFO_secondary_bar_mask(
+ mmio_bar_list);
+ secondary.shift =
+ MMIO_BAR_INFO_secondary_bar_shift(
+ mmio_bar_list);
+ }
+ if (!PMU_LIST_Check_MMIO(primary, secondary,
+ reg_id)) {
+ SEP_DRV_LOG_ERROR(
+ "Invalid MMIO information! Offset:0x%x, B%d.D%d.F%d.O0x%x, M0x%llx.S%d, Sec O0x%x, M0x%llx.S%d",
+ reg_id, primary.u.s.bus,
+ primary.u.s.dev,
+ primary.u.s.func,
+ primary.u.s.offset,
+ primary.mask, primary.shift,
+ secondary.u.s.offset,
+ secondary.mask,
+ secondary.shift);
+ status = OS_INVALID;
+ DRV_IOCTL_STATUS_drv_status(
+ reg_check_status) =
+ VT_INVALID_PROG_INFO;
+ DRV_IOCTL_STATUS_reg_prog_type(
+ reg_check_status) =
+ PMU_REG_PROG_MMIO;
+ DRV_IOCTL_STATUS_bus(reg_check_status) =
+ primary.u.s.bus;
+ DRV_IOCTL_STATUS_dev(reg_check_status) =
+ primary.u.s.dev;
+ DRV_IOCTL_STATUS_func(
+ reg_check_status) =
+ primary.u.s.func;
+ DRV_IOCTL_STATUS_offset(
+ reg_check_status) =
+ primary.u.s.offset;
+ DRV_IOCTL_STATUS_reg_key2(
+ reg_check_status) = reg_id;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE(
+ "Verified the MMIO Offset:0x%x, B%d.D%d.F%d.O0x%x, M0x%llx.S%d, Sec O0x%x, M0x%llx.S%d",
+ reg_id, primary.u.s.bus,
+ primary.u.s.dev,
+ primary.u.s.func,
+ primary.u.s.offset,
+ primary.mask, primary.shift,
+ secondary.u.s.offset,
+ secondary.mask,
+ secondary.shift);
+ }
+ }
+ break;
+ default:
+ SEP_DRV_LOG_ERROR("Invalid reg_prog_type! %u, idx=%u",
+ ECB_entries_reg_prog_type((in_ecb),
+ (idx)),
+ idx);
+
+ status = OS_INVALID;
+ goto clean_return;
+ }
+ }
+#endif
+ group_id = ECB_group_id(in_ecb);
+
+ if (group_id >= EVENT_CONFIG_num_groups_unc(ec_unc)) {
+ CONTROL_Free_Memory(in_ecb);
+ SEP_DRV_LOG_ERROR(
+ "Group_id is larger than total number of groups!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+
+ PMU_register_data_unc[group_id] = in_ecb;
+ // at this point, we know the number of uncore events for this device,
+ // so allocate the results buffer per thread for uncore only for SEP event based uncore counting
+ ecb = PMU_register_data_unc[group_id];
+ if (ecb == NULL) {
+ SEP_DRV_LOG_ERROR("Encountered NULL ECB!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+ LWPMU_DEVICE_num_events(&devices[cur_device]) = ECB_num_events(ecb);
+ LWPMU_DEVICE_em_groups_count(&devices[cur_device]) = group_id + 1;
+
+clean_return:
+ if (reg_check_status) {
+ if (copy_to_user(arg->buf_drv_to_usr, reg_check_status,
+ sizeof(DRV_IOCTL_STATUS_NODE))) {
+ SEP_DRV_LOG_ERROR(
+ "Memory copy to user failure for reg_check_status data!");
+ }
+ CONTROL_Free_Memory(reg_check_status);
+ }
+
+ if (status != OS_SUCCESS) {
+ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+ }
+
+ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+ return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_Sample_Descriptors(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Set the number of descriptor groups in the global state node.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Set_Sample_Descriptors(IOCTL_ARGS arg)
+{
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Skipped: driver state is not IDLE!");
+ return OS_IN_PROGRESS;
+ }
+ if (arg->len_usr_to_drv != sizeof(U32) || arg->buf_usr_to_drv == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Invalid arguments (Unknown size of Sample Descriptors!).");
+ return OS_INVALID;
+ }
+
+ desc_count = 0;
+ if (copy_from_user(&GLOBAL_STATE_num_descriptors(driver_state),
+ arg->buf_usr_to_drv, sizeof(U32))) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure");
+ return OS_FAULT;
+ }
+
+ desc_data = CONTROL_Allocate_Memory(
+ GLOBAL_STATE_num_descriptors(driver_state) * sizeof(VOID *));
+ if (desc_data == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Memory allocation failure for desc_data!");
+ return OS_NO_MEM;
+ }
+
+ SEP_DRV_LOG_FLOW_OUT("Success");
+ return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Configure_Descriptors(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ * @return OS_STATUS
+ *
+ * @brief Make a copy of the descriptors that need to be read in order
+ * @brief to configure a sample record.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Configure_Descriptors(IOCTL_ARGS arg)
+{
+ U32 uncopied;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Skipped: driver state is not IDLE!");
+ return OS_IN_PROGRESS;
+ }
+
+ if (desc_count >= GLOBAL_STATE_num_descriptors(driver_state)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Descriptor groups number exceeded initial configuration!");
+ return OS_INVALID;
+ }
+
+ if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("Invalid arg value!");
+ return OS_INVALID;
+ }
+ if (desc_data == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("NULL desc_data!");
+ return OS_INVALID;
+ }
+ //
+ // First things first: Make a copy of the data for global use.
+ //
+ desc_data[desc_count] = CONTROL_Allocate_Memory(arg->len_usr_to_drv);
+ if (desc_data[desc_count] == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Memory allocation failure for desc_data entry!");
+ return OS_NO_MEM;
+ }
+ uncopied = copy_from_user(desc_data[desc_count], arg->buf_usr_to_drv,
+ arg->len_usr_to_drv);
+ if (uncopied > 0) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Unable to copy desc_data from user!");
+ return OS_FAULT;
+ }
+ SEP_DRV_LOG_TRACE("Added descriptor # %d.", desc_count);
+ desc_count++;
+
+ SEP_DRV_LOG_FLOW_OUT("Success");
+ return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_LBR_Info(IOCTL_ARGS arg)
+ *
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ * @return OS_STATUS
+ *
+ * @brief Make a copy of the LBR information that is passed in.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_LBR_Info(IOCTL_ARGS arg)
+{
+ OS_STATUS status = OS_SUCCESS;
+ LBR lbr_list;
+ U32 idx, reg_id;
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+ SEP_DRV_LOG_ERROR("Skipped: driver state is not IDLE!");
+ status = OS_IN_PROGRESS;
+ goto clean_return;
+ }
+
+ if (cur_pcfg == NULL || DEV_CONFIG_collect_lbrs(cur_pcfg) == FALSE) {
+ SEP_DRV_LOG_ERROR("LBR capture has not been configured!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+
+ if (arg->len_usr_to_drv == 0 || arg->buf_usr_to_drv == NULL) {
+ SEP_DRV_LOG_ERROR("Invalid arguments!");
+ status = OS_INVALID;
+ goto clean_return;
+ }
+
+ //
+ // First things first: Make a copy of the data for global use.
+ //
+
+ LWPMU_DEVICE_lbr(&devices[cur_device]) =
+ CONTROL_Allocate_Memory((int)arg->len_usr_to_drv);
+ if (!LWPMU_DEVICE_lbr(&devices[cur_device])) {
+ SEP_DRV_LOG_ERROR("Error: Memory allocation failure for lbr!");
+ status = OS_NO_MEM;
+ goto clean_return;
+ }
+
+ if (copy_from_user(LWPMU_DEVICE_lbr(&devices[cur_device]),
+ arg->buf_usr_to_drv, arg->len_usr_to_drv)) {
+ SEP_DRV_LOG_ERROR("Memory copy failure for lbr struct!");
+ status = OS_FAULT;
+ goto clean_return;
+ }
+
+ lbr_list = LWPMU_DEVICE_lbr(&devices[cur_device]);
+ if (lbr_list) {
+ for (idx = 0; idx < LBR_num_entries(lbr_list); idx++) {
+ reg_id = LBR_entries_reg_id(lbr_list, idx);
+ if (reg_id == 0) {
+ continue;
+ }
+ if (!PMU_LIST_Check_MSR(reg_id)) {
+ LBR_entries_reg_id(lbr_list, idx) = 0;
+ SEP_DRV_LOG_ERROR(
+ "Invalid MSR information! 0x%x",
+ reg_id);
+ status = OS_INVALID;
+ goto clean_return;
+ } else {
+ SEP_DRV_LOG_TRACE("Verified the msr 0x%x",
+ reg_id);
+ }
+ }
+ }
+
+clean_return:
+ if (status != OS_SUCCESS) {
+ CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+ }
+
+ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+ return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_IPT_Config(IOCTL_ARGS arg) + * + * + * @param arg - pointer to the IOCTL_ARGS structure + * @return OS_STATUS + * + * @brief Make a copy of the IPT configuration information that is passed in. + * + * Special Notes + */ +static OS_STATUS +lwpmudrv_Set_IPT_Config(IOCTL_ARGS arg) +{ + OS_STATUS status = OS_SUCCESS; + DRV_IOCTL_STATUS ioctl_status = NULL; + + SEP_DRV_LOG_FLOW_IN(""); + + if (arg->buf_usr_to_drv == NULL || + arg->len_usr_to_drv < sizeof(IPT_CONFIG_NODE) || + arg->buf_drv_to_usr == NULL || + arg->len_drv_to_usr < sizeof(DRV_IOCTL_STATUS_NODE)) { + SEP_DRV_LOG_ERROR("Invalid arguments!"); + status = OS_INVALID; + goto clean_return; + } + + ioctl_status = CONTROL_Allocate_Memory(sizeof(DRV_IOCTL_STATUS_NODE)); + if (!ioctl_status) { + SEP_DRV_LOG_ERROR("Memory allocation failure for ioctl_status"); + status = OS_NO_MEM; + goto clean_return; + } + + // + // First things first: Make a copy of the data for global use. + // + ipt_config = CONTROL_Allocate_Memory(sizeof(IPT_CONFIG_NODE)); + if (!ipt_config) { + SEP_DRV_LOG_ERROR( + "Error: Memory allocation failure for IPT config!"); + status = OS_NO_MEM; + goto clean_return; + } + + if (copy_from_user(ipt_config, arg->buf_usr_to_drv, + arg->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR("Memory copy failure for IPT config!"); + status = OS_FAULT; + goto clean_return; + } + +clean_return: + if (ioctl_status) { + if (copy_to_user(arg->buf_drv_to_usr, ioctl_status, + sizeof(DRV_IOCTL_STATUS_NODE))) { + SEP_DRV_LOG_ERROR( + "Memory copy to user failure for ioctl_status data!"); + } + CONTROL_Free_Memory(ioctl_status); + } + + if (status != OS_SUCCESS) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING); + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return status; +} + +#define CR4_PCE 0x00000100 //Performance-monitoring counter enable RDPMC +/* ------------------------------------------------------------------------- */ +/*! + * @fn static void lwpmudrv_Set_CR4_PCE_Bit(PVOID param) + * + * @param param - dummy parameter + * + * @return NONE + * + * @brief Set CR4's PCE bit on the logical processor + * + * Special Notes + */ +static VOID +lwpmudrv_Set_CR4_PCE_Bit(PVOID param) +{ + U32 this_cpu; +#if defined(DRV_IA32) + U32 prev_CR4_value = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + // remember if RDPMC bit previously set + // and then enabled it + __asm__("movl %%cr4,%%eax\n\t" + "movl %%eax,%0\n\t" + "orl %1,%%eax\n\t" + "movl %%eax,%%cr4\n\t" + : "=irg"(prev_CR4_value) + : "irg"(CR4_PCE) + : "eax"); +#endif +#if defined(DRV_EM64T) + U64 prev_CR4_value = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + // remember if RDPMC bit previously set + // and then enabled it + __asm__("movq %%cr4,%%rax\n\t" + "movq %%rax,%0\n\t" + "orq %1,%%rax\n\t" + "movq %%rax,%%cr4" + : "=irg"(prev_CR4_value) + : "irg"(CR4_PCE) + : "rax"); +#endif + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + + // if bit RDPMC bit was set before, + // set flag for when we clear it + if (prev_CR4_value & CR4_PCE) { + prev_set_CR4[this_cpu] = 1; + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static void lwpmudrv_Clear_CR4_PCE_Bit(PVOID param)
+ *
+ * @param param - dummy parameter
+ *
+ * @return NONE
+ *
+ * @brief Clear CR4's PCE bit on the logical processor
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Clear_CR4_PCE_Bit(PVOID param)
+{
+ U32 this_cpu;
+
+ SEP_DRV_LOG_TRACE_IN("");
+
+ preempt_disable();
+ this_cpu = CONTROL_THIS_CPU();
+ preempt_enable();
+
+ // only clear the CR4 bit if it wasn't set
+ // before we started
+ if (prev_set_CR4 && !prev_set_CR4[this_cpu]) {
+#if defined(DRV_IA32)
+ __asm__("movl %%cr4,%%eax\n\t"
+ "andl %0,%%eax\n\t"
+ "movl %%eax,%%cr4\n"
+ :
+ : "irg"(~CR4_PCE)
+ : "eax");
+#endif
+#if defined(DRV_EM64T)
+ __asm__("movq %%cr4,%%rax\n\t"
+ "andq %0,%%rax\n\t"
+ "movq %%rax,%%cr4\n"
+ :
+ : "irg"(~CR4_PCE)
+ : "rax");
+#endif
+ }
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Start(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_START call.
+ * @brief Set up the OS hooks for process/thread/load notifications.
+ * @brief Write the initial set of MSRs.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Start(VOID)
+{
+ OS_STATUS status = OS_SUCCESS;
+#if !defined(CONFIG_PREEMPT_COUNT)
+ U32 cpu_num;
+#endif
+
+ SEP_DRV_LOG_FLOW_IN("");
+
+ if (!CHANGE_DRIVER_STATE(STATE_BIT_IDLE, DRV_STATE_RUNNING)) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT(
+ "Skipped: driver state is not IDLE!");
+ return OS_IN_PROGRESS;
+ }
+
+ if (drv_cfg == NULL) {
+ SEP_DRV_LOG_ERROR_FLOW_OUT("NULL drv_cfg!");
+ return OS_INVALID;
+ }
+
+ if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+ if (DRV_CONFIG_start_paused(drv_cfg)) {
+ CHANGE_DRIVER_STATE(STATE_BIT_RUNNING,
+ DRV_STATE_PAUSED);
+ }
+ SEP_DRV_LOG_FLOW_OUT("[PCL enabled] Early return value: %d",
+ status);
+ return status;
+ }
+
+ prev_set_CR4 = CONTROL_Allocate_Memory(
+ GLOBAL_STATE_num_cpus(driver_state) * sizeof(U8));
+ if (prev_set_CR4) {
+ CONTROL_Invoke_Parallel(lwpmudrv_Set_CR4_PCE_Bit,
+ (PVOID)(size_t)0);
+ }
+
+#if !defined(CONFIG_PREEMPT_COUNT)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+ atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state));
+ init_waitqueue_head(&read_tsc_now);
+#endif
+ CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, (PVOID)(size_t)0);
+#endif
+
+#if !defined(CONFIG_PREEMPT_COUNT)
+ for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state);
+ cpu_num++) {
+ if (CPU_STATE_offlined(&pcb[cpu_num])) {
+ cpu_tsc[cpu_num] = cpu_tsc[0];
+ }
+ }
+#else
+ UTILITY_Read_TSC(&cpu_tsc[0]);
+#endif
+
+ if (DRV_CONFIG_start_paused(drv_cfg)) {
+ CHANGE_DRIVER_STATE(STATE_BIT_RUNNING, DRV_STATE_PAUSED);
+ } else {
+ CONTROL_Invoke_Parallel(lwpmudrv_Resume_Op, NULL);
+
+ EVENTMUX_Start();
+ lwpmudrv_Dump_Tracer("start", 0);
+ }
+ if (unc_buf_init) {
+ unc_timer_interval = msecs_to_jiffies(
+ DRV_CONFIG_unc_timer_interval(drv_cfg));
+ CONTROL_Invoke_Parallel(lwpmudrv_Uncore_Start_Timer, NULL);
+ } else if (DRV_CONFIG_emon_timer_interval(drv_cfg)) {
+ lwpmudrv_Emon_Start_Timer(NULL);
+ }
+
+ if (DRV_CONFIG_ipt_mode(drv_cfg) && ipt_config) {
+ CONTROL_Invoke_Parallel(IPT_Start, NULL);
+ }
+
+ SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+ return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID lwpmudrv_Cleanup_Op(PVOID param)
+ *
+ * @param param - dummy parameter
+ *
+ * @return NONE
+ *
+ * @brief Clean up registers after collection
+ *
+ * Special Notes
+ */
+static VOID
+lwpmudrv_Cleanup_Op(PVOID param)
+{
+	U32 this_cpu = CONTROL_THIS_CPU();
+	U32 dev_idx = core_to_dev_map[this_cpu];
+	DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+	DISPATCH dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (pcfg && DEV_CONFIG_num_events(pcfg) && dispatch != NULL &&
+	    dispatch->cleanup != NULL) {
+		dispatch->cleanup(&dev_idx);
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/*
+ * @fn lwpmudrv_Prepare_Stop();
+ *
+ * @param NONE
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the DRV_OPERATION_STOP call.
+ * @brief Cleans up the interrupt handler.
+ */
+static OS_STATUS
+lwpmudrv_Prepare_Stop(VOID)
+{
+	S32 i;
+	S32 done = FALSE;
+	S32 cpu_num;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) {
+		if (!CHANGE_DRIVER_STATE(STATE_BIT_RUNNING | STATE_BIT_PAUSED,
+					 DRV_STATE_PREPARE_STOP)) {
+			SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state.");
+			return OS_INVALID;
+		}
+	} else {
+		SEP_DRV_LOG_WARNING("Abnormal termination path.");
+	}
+
+	if (drv_cfg == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!");
+		return OS_INVALID;
+	}
+
+	if (DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+		SEP_DRV_LOG_FLOW_OUT("Success: using PCL");
+		return OS_SUCCESS;
+	}
+
+	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+		CPU_STATE_accept_interrupt(&pcb[i]) = 0;
+	}
+	while (!done) {
+		done = TRUE;
+		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+			if (atomic_read(&CPU_STATE_in_interrupt(&pcb[i]))) {
+				done = FALSE;
+			}
+		}
+	}
+	CONTROL_Invoke_Parallel(lwpmudrv_Pause_Op, NULL);
+
+	SEP_DRV_LOG_TRACE("Outside of all interrupts.");
+
+	if (unc_buf_init) {
+		lwpmudrv_Uncore_Stop_Timer();
+	} else if (DRV_CONFIG_emon_timer_interval(drv_cfg)) {
+		lwpmudrv_Emon_Stop_Timer(NULL);
+	}
+
+	/*
+	 * Clean up all the control registers
+	 * (drv_cfg was already validated above)
+	 */
+	CONTROL_Invoke_Parallel(lwpmudrv_Cleanup_Op, (VOID *)NULL);
+	SEP_DRV_LOG_TRACE("Cleanup finished.");
+
+	if (prev_set_CR4) {
+		CONTROL_Invoke_Parallel(lwpmudrv_Clear_CR4_PCE_Bit,
+					(VOID *)(size_t)0);
+		prev_set_CR4 = CONTROL_Free_Memory(prev_set_CR4);
+	}
+
+	for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state);
+	     cpu_num++) {
+		SEP_DRV_LOG_TRACE(
+			"# of PMU interrupts via NMI triggered on cpu%d: %u.",
+			cpu_num, CPU_STATE_nmi_handled(&pcb[cpu_num]));
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success.");
+	return OS_SUCCESS;
+}
+
+/*
+ * @fn lwpmudrv_Finish_Stop();
+ *
+ * @param NONE
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the DRV_OPERATION_STOP call.
+ * @brief Cleans up the interrupt handler.
+ */
+static OS_STATUS
+lwpmudrv_Finish_Stop(VOID)
+{
+	OS_STATUS status = OS_SUCCESS;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) {
+		if (!CHANGE_DRIVER_STATE(STATE_BIT_PREPARE_STOP,
+					 DRV_STATE_STOPPED)) {
+			SEP_DRV_LOG_ERROR_FLOW_OUT("Unexpected driver state!");
+			return OS_FAULT;
+		}
+	} else {
+		SEP_DRV_LOG_WARNING("Abnormal termination path.");
+	}
+
+	if (drv_cfg == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("drv_cfg is NULL!");
+		return OS_INVALID;
+	}
+
+	if (DRV_CONFIG_counting_mode(drv_cfg) == FALSE) {
+		if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) {
+			CONTROL_Invoke_Parallel(PEBS_Flush_Buffer, NULL);
+			if (DRV_CONFIG_ipt_mode(drv_cfg) && ipt_config) {
+				CONTROL_Invoke_Parallel(IPT_TOPA_Flush, NULL);
+			}
+			/*
+			 * Make sure that the module buffers are not deallocated
+			 * and that the module flush thread has not been terminated.
+			 */
+			if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) {
+				status = LINUXOS_Enum_Process_Modules(TRUE);
+			}
+			OUTPUT_Flush();
+		}
+		/*
+		 * Clean up the interrupt handler via the IDT
+		 */
+		CPUMON_Remove_Cpuhooks();
+		PEBS_Destroy();
+		if (DRV_CONFIG_ipt_mode(drv_cfg) && ipt_config) {
+			IPT_Destroy(drv_cfg);
+		}
+		EVENTMUX_Destroy();
+	} else if (DRV_CONFIG_emon_timer_interval(drv_cfg)) {
+		OUTPUT_Flush_EMON();
+		emon_desc = CONTROL_Free_Memory(emon_desc);
+	}
+
+	if (DRV_CONFIG_enable_cp_mode(drv_cfg)) {
+		if (interrupt_counts) {
+			S32 idx, cpu;
+			for (cpu = 0; cpu < GLOBAL_STATE_num_cpus(driver_state);
+			     cpu++) {
+				for (idx = 0;
+				     idx < DRV_CONFIG_num_events(drv_cfg);
+				     idx++) {
+					SEP_DRV_LOG_TRACE(
+						"Interrupt count: CPU %d, event %d = %lld.",
+						cpu, idx,
+						interrupt_counts
+							[cpu * DRV_CONFIG_num_events(
+								       drv_cfg) +
+							 idx]);
+				}
+			}
+		}
+	}
+
+	read_counter_info = CONTROL_Free_Memory(read_counter_info);
+	prev_counter_data = CONTROL_Free_Memory(prev_counter_data);
+	emon_buffer_driver_helper =
+		CONTROL_Free_Memory(emon_buffer_driver_helper);
+	lwpmudrv_Dump_Tracer("stop", 0);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_Normalized_TSC(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Return the current value of the normalized TSC.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_Normalized_TSC(IOCTL_ARGS arg)
+{
+	U64 tsc = 0;
+	U64 this_cpu = 0;
+	size_t size_to_copy = sizeof(U64);
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (arg->len_drv_to_usr != size_to_copy ||
+	    arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid arguments!");
+		return OS_INVALID;
+	}
+
+	preempt_disable();
+	UTILITY_Read_TSC(&tsc);
+	this_cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+	if (drv_cfg && DRV_CONFIG_use_pcl(drv_cfg) == TRUE) {
+		preempt_disable();
+		tsc = cpu_clock(this_cpu);
+		preempt_enable();
+	} else {
+#endif
+		// apply the per-cpu skew exactly once
+		tsc -= TSC_SKEW(this_cpu);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+	}
+#endif
+	if (copy_to_user(arg->buf_drv_to_usr, (VOID *)&tsc, size_to_copy)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+	lwpmudrv_Dump_Tracer("marker", tsc);
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
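+ * Usage sketch (illustrative, not part of this patch): a user-space caller
+ * would fetch the normalized TSC roughly as below. The request macro name
+ * EXAMPLE_IOCTL_GET_NORMALIZED_TSC is a placeholder for the encoding of
+ * DRV_OPERATION_GET_NORMALIZED_TSC from lwpmudrv_ioctl.h; the
+ * IOCTL_ARGS_NODE field names follow their use in this file.
+ */
+#if 0
+static U64 example_read_normalized_tsc(int sep_fd)
+{
+	U64 tsc = 0;
+	IOCTL_ARGS_NODE args;
+
+	memset(&args, 0, sizeof(args));
+	args.buf_drv_to_usr = (char *)&tsc; /* driver writes here */
+	args.len_drv_to_usr = sizeof(tsc);  /* must be exactly sizeof(U64) */
+	ioctl(sep_fd, EXAMPLE_IOCTL_GET_NORMALIZED_TSC, &args);
+	return tsc;
+}
+#endif
+/* ------------------------------------------------------------------------- */
+/*!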
+ * @fn static OS_STATUS lwpmudrv_Get_Num_Cores(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Quickly return the (total) number of cpus in the system.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_Num_Cores(IOCTL_ARGS arg)
+{
+	OS_STATUS status = OS_SUCCESS;
+	S32 num = GLOBAL_STATE_num_cpus(driver_state);
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (arg->len_drv_to_usr != sizeof(S32) || arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_TRACE("Num_Cores is %d, buf_drv_to_usr is 0x%p.", num,
+			  arg->buf_drv_to_usr);
+	status = put_user(num, (S32 *)arg->buf_drv_to_usr);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_CPU_Mask(PVOID buf_usr_to_drv, size_t len_usr_to_drv)
+ *
+ * @param buf_usr_to_drv - pointer to the CPU mask buffer
+ * @param len_usr_to_drv - size of the CPU mask buffer
+ *
+ * @return OS_STATUS
+ *
+ * @brief Process the CPU mask as requested by the user
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Set_CPU_Mask(PVOID buf_usr_to_drv, size_t len_usr_to_drv)
+{
+	U32 cpu_count = 0;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: driver state is not IDLE!");
+		return OS_IN_PROGRESS;
+	}
+
+	if (len_usr_to_drv == 0 || buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"len_usr_to_drv == 0 or buf_usr_to_drv is NULL!");
+		return OS_INVALID;
+	}
+
+	cpu_mask_bits = CONTROL_Allocate_Memory((int)len_usr_to_drv);
+	if (!cpu_mask_bits) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory allocation failure for cpu_mask_bits!");
+		return OS_NO_MEM;
+	}
+
+	if (copy_from_user(cpu_mask_bits, (S8 *)buf_usr_to_drv,
+			   (int)len_usr_to_drv)) {
+		cpu_mask_bits = CONTROL_Free_Memory(cpu_mask_bits);
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	for (cpu_count = 0;
+	     cpu_count < (U32)GLOBAL_STATE_num_cpus(driver_state);
+	     cpu_count++) {
+		CPU_STATE_accept_interrupt(&pcb[cpu_count]) =
+			cpu_mask_bits[cpu_count] ? 1 : 0;
+		CPU_STATE_initial_mask(&pcb[cpu_count]) =
+			cpu_mask_bits[cpu_count] ? 1 : 0;
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_KERNEL_CS(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Return the value of the Kernel symbol KERNEL_CS.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_KERNEL_CS(IOCTL_ARGS arg)
+{
+	OS_STATUS status = OS_SUCCESS;
+	S32 num = __KERNEL_CS;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (arg->len_drv_to_usr != sizeof(S32) || arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_TRACE("__KERNEL_CS is %d, buf_drv_to_usr is 0x%p.", num,
+			  arg->buf_drv_to_usr);
+	status = put_user(num, (S32 *)arg->buf_drv_to_usr);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status);
+	return status;
+}
+
+/*
+ * @fn lwpmudrv_Set_UID
+ *
+ * @param IN arg - pointer to the input buffer holding the UID
+ * @return OS_STATUS
+ *
+ * @brief Receive the value of the UID of the collector process.
+ */
+static OS_STATUS
+lwpmudrv_Set_UID(IOCTL_ARGS arg)
+{
+	OS_STATUS status = OS_SUCCESS;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (arg->len_usr_to_drv != sizeof(uid_t) ||
+	    arg->buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
+		return OS_INVALID;
+	}
+
+	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: driver state is not IDLE!");
+		return OS_IN_PROGRESS;
+	}
+
+	status = get_user(uid, (S32 *)arg->buf_usr_to_drv);
+	SEP_DRV_LOG_TRACE("Uid is %d.", uid);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_TSC_Skew_Info(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ * @brief Return the current value of the TSC skew data
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_TSC_Skew_Info(IOCTL_ARGS arg)
+{
+	S64 *skew_array;
+	size_t skew_array_len;
+	S32 i;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	skew_array_len = GLOBAL_STATE_num_cpus(driver_state) * sizeof(S64);
+	if (arg->len_drv_to_usr < skew_array_len ||
+	    arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Output buffer too small or NULL!");
+		return OS_INVALID;
+	}
+
+	if (!DRV_CONFIG_enable_cp_mode(drv_cfg) &&
+	    GET_DRIVER_STATE() != DRV_STATE_STOPPED) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: cp_mode not enabled and driver is not STOPPED!");
+		return OS_IN_PROGRESS;
+	}
+
+	SEP_DRV_LOG_TRACE("Dispatched with len_drv_to_usr=%lld.",
+			  arg->len_drv_to_usr);
+
+	skew_array = CONTROL_Allocate_Memory(skew_array_len);
+	if (skew_array == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory allocation failure for skew_array!");
+		return OS_NO_MEM;
+	}
+
+	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+		skew_array[i] = TSC_SKEW(i);
+	}
+
+	if (copy_to_user(arg->buf_drv_to_usr, skew_array, skew_array_len)) {
+		skew_array = CONTROL_Free_Memory(skew_array);
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory copy failure for skew_array!");
+		return OS_FAULT;
+	}
+
+	skew_array = CONTROL_Free_Memory(skew_array);
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Collect_Sys_Config(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the COLLECT_SYS_CONFIG call.
+ * @brief Builds and collects the SYS_INFO data needed.
+ * @brief Writes the result into the argument.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Collect_Sys_Config(IOCTL_ARGS arg)
+{
+	OS_STATUS status = OS_SUCCESS;
+	U32 num;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	num = SYS_INFO_Build();
+
+	if (arg->len_drv_to_usr < sizeof(S32) || arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_TRACE("Size of sys info is %d.", num);
+	status = put_user(num, (S32 *)arg->buf_drv_to_usr);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Sys_Config(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Transfers the VTSA_SYS_INFO data back to the abstraction layer.
+ * @brief The buf_drv_to_usr should have enough space to handle the transfer.
+ */
+static OS_STATUS
+lwpmudrv_Sys_Config(IOCTL_ARGS arg)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
+		return OS_INVALID;
+	}
+
+	SYS_INFO_Transfer(arg->buf_drv_to_usr, arg->len_drv_to_usr);
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Samp_Read_Num_Of_Core_Counters(IOCTL_ARGS arg)
+ *
+ * @param arg - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Read the number of core performance counters from CPUID leaf 0xA
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Samp_Read_Num_Of_Core_Counters(IOCTL_ARGS arg)
+{
+	U64 rax, rbx, rcx, rdx, num_basic_functions;
+	U32 val = 0;
+	OS_STATUS status = OS_SUCCESS;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (arg->len_drv_to_usr == 0 || arg->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
+		return OS_INVALID;
+	}
+
+	UTILITY_Read_Cpuid(0x0, &num_basic_functions, &rbx, &rcx, &rdx);
+
+	if (num_basic_functions >= 0xA) {
+		UTILITY_Read_Cpuid(0xA, &rax, &rbx, &rcx, &rdx);
+		// EAX[15:8] of leaf 0xA: GP counters per logical processor
+		val = ((U32)(rax >> 8)) & 0xFF;
+	}
+	status = put_user(val, (U32 *)arg->buf_drv_to_usr);
+	SEP_DRV_LOG_TRACE("Num of counter is %d.", val);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_Platform_Info(IOCTL_ARGS args)
+ *
+ * @param args - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Reads the MSR_PLATFORM_INFO register if present
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_Platform_Info(IOCTL_ARGS args)
+{
+	U32 size = sizeof(DRV_PLATFORM_INFO_NODE);
+	OS_STATUS status = OS_SUCCESS;
+	DRV_PLATFORM_INFO platform_data = NULL;
+	U32 *dispatch_ids = NULL;
+	DISPATCH dispatch_ptr = NULL;
+	U32 i = 0;
+	U32 num_entries; // # dispatch ids to process
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	num_entries = args->len_usr_to_drv / sizeof(U32);
+
+	platform_data = CONTROL_Allocate_Memory(sizeof(DRV_PLATFORM_INFO_NODE));
+	if (!platform_data) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory allocation failure for platform_data!");
+		return OS_NO_MEM;
+	}
+
+	memset(platform_data, 0, sizeof(DRV_PLATFORM_INFO_NODE));
+	if (args->len_usr_to_drv > 0 && args->buf_usr_to_drv != NULL) {
+		dispatch_ids = CONTROL_Allocate_Memory(args->len_usr_to_drv);
+		if (!dispatch_ids) {
+			platform_data = CONTROL_Free_Memory(platform_data);
+			SEP_DRV_LOG_ERROR_FLOW_OUT(
+				"Memory allocation failure for dispatch_ids!");
+			return OS_NO_MEM;
+		}
+
+		// map a copy failure to OS_FAULT rather than returning the
+		// raw copy_from_user() byte count as a status code
+		if (copy_from_user(dispatch_ids, args->buf_usr_to_drv,
+				   args->len_usr_to_drv)) {
+			platform_data = CONTROL_Free_Memory(platform_data);
+			dispatch_ids = CONTROL_Free_Memory(dispatch_ids);
+			SEP_DRV_LOG_ERROR_FLOW_OUT(
+				"Memory copy failure for dispatch_ids!");
+			return OS_FAULT;
+		}
+		for (i = 0; i < num_entries; i++) {
+			if (dispatch_ids[i] > 0) {
+				dispatch_ptr =
+					UTILITY_Configure_CPU(dispatch_ids[i]);
+				if (dispatch_ptr &&
+				    dispatch_ptr->platform_info) {
+					dispatch_ptr->platform_info(
+						(PVOID)platform_data);
+				}
+			}
+		}
+		dispatch_ids = CONTROL_Free_Memory(dispatch_ids);
+	} else if (devices) {
+		dispatch_ptr = LWPMU_DEVICE_dispatch(
+			&devices[0]); //placeholder, needs to be fixed
+		if (dispatch_ptr && dispatch_ptr->platform_info) {
+			dispatch_ptr->platform_info((PVOID)platform_data);
+		}
+	}
+
+	if (args->len_drv_to_usr < size || args->buf_drv_to_usr == NULL) {
+		platform_data = CONTROL_Free_Memory(platform_data);
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments!");
+		return OS_FAULT;
+	}
+
+	if (copy_to_user(args->buf_drv_to_usr, platform_data, size)) {
+		status = OS_FAULT;
+	}
+	platform_data = CONTROL_Free_Memory(platform_data);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+	return status;
+}
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Setup_Cpu_Topology(IOCTL_ARGS args)
+ *
+ * @brief Sets up the per CPU state structures
+ *
+ * @param args - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * Special Notes:
+ * This function was added to support abstract dll creation.
+ */
+static OS_STATUS
+lwpmudrv_Setup_Cpu_Topology(IOCTL_ARGS args)
+{
+	S32 cpu_num;
+	S32 iter;
+	DRV_TOPOLOGY_INFO drv_topology, dt;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (GET_DRIVER_STATE() != DRV_STATE_IDLE) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Skipped: driver state is not IDLE!");
+		return OS_IN_PROGRESS;
+	}
+	if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Topology information has been misconfigured!");
+		return OS_INVALID;
+	}
+
+	drv_topology = CONTROL_Allocate_Memory(args->len_usr_to_drv);
+	if (drv_topology == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory allocation failure for drv_topology!");
+		return OS_NO_MEM;
+	}
+
+	if (copy_from_user(drv_topology,
+			   (DRV_TOPOLOGY_INFO)(args->buf_usr_to_drv),
+			   args->len_usr_to_drv)) {
+		drv_topology = CONTROL_Free_Memory(drv_topology);
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory copy failure for drv_topology!");
+		return OS_FAULT;
+	}
+	/*
+	 * Topology Initializations
+	 */
+	num_packages = 0;
+	for (iter = 0; iter < GLOBAL_STATE_num_cpus(driver_state); iter++) {
+		dt = &drv_topology[iter];
+		cpu_num = DRV_TOPOLOGY_INFO_cpu_number(dt);
+		CPU_STATE_socket_master(&pcb[cpu_num]) =
+			DRV_TOPOLOGY_INFO_socket_master(dt);
+		num_packages += CPU_STATE_socket_master(&pcb[cpu_num]);
+		CPU_STATE_core_master(&pcb[cpu_num]) =
+			DRV_TOPOLOGY_INFO_core_master(dt);
+		CPU_STATE_thr_master(&pcb[cpu_num]) =
+			DRV_TOPOLOGY_INFO_thr_master(dt);
+		CPU_STATE_core_type(&pcb[cpu_num]) =
+			DRV_TOPOLOGY_INFO_cpu_core_type(dt);
+		CPU_STATE_cpu_module_num(&pcb[cpu_num]) =
+			(U16)DRV_TOPOLOGY_INFO_cpu_module_num(dt);
+		CPU_STATE_cpu_module_master(&pcb[cpu_num]) =
+			(U16)DRV_TOPOLOGY_INFO_cpu_module_master(dt);
+		CPU_STATE_system_master(&pcb[cpu_num]) = (iter) ? 0 : 1;
+		SEP_DRV_LOG_TRACE("Cpu %d sm = %d cm = %d tm = %d.", cpu_num,
+				  CPU_STATE_socket_master(&pcb[cpu_num]),
+				  CPU_STATE_core_master(&pcb[cpu_num]),
+				  CPU_STATE_thr_master(&pcb[cpu_num]));
+	}
+	drv_topology = CONTROL_Free_Memory(drv_topology);
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
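+ * Worked example (illustrative): num_packages falls out of the topology walk
+ * above because exactly one CPU per socket carries the socket_master flag:
+ *
+ *   cpu:            0  1  2  3  4  5  6  7
+ *   socket_master:  1  0  0  0  1  0  0  0   =>  num_packages = 1 + 1 = 2
+ */
+/* ------------------------------------------------------------------------- */
+/*!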
+ * @fn static OS_STATUS lwpmudrv_Get_Num_Samples(IOCTL_ARGS args)
+ *
+ * @param args - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Returns the number of samples collected during the current
+ * @brief sampling run
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_Num_Samples(IOCTL_ARGS args)
+{
+	S32 cpu_num;
+	U64 samples = 0;
+	OS_STATUS status;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (pcb == NULL) {
+		SEP_DRV_LOG_ERROR("PCB was not initialized.");
+		return OS_FAULT;
+	}
+
+	if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Output buffer has been misconfigured!");
+		return OS_INVALID;
+	}
+
+	for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state);
+	     cpu_num++) {
+		samples += CPU_STATE_num_samples(&pcb[cpu_num]);
+
+		SEP_DRV_LOG_TRACE("Samples for cpu %d = %lld.", cpu_num,
+				  CPU_STATE_num_samples(&pcb[cpu_num]));
+	}
+	SEP_DRV_LOG_TRACE("Total number of samples %lld.", samples);
+	status = put_user(samples, (U64 *)args->buf_drv_to_usr);
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d", status);
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_Device_Num_Units(IOCTL_ARGS args)
+ *
+ * @param args - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Dummy function to support backward compatibility with old driver
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Set_Device_Num_Units(IOCTL_ARGS args)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+	SEP_DRV_LOG_FLOW_OUT("Success [but did not do anything]");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_Interval_Counts(IOCTL_ARGS args)
+ *
+ * @param args - Pointer to the IOCTL structure
+ *
+ * @return OS_STATUS
+ *
+ * @brief Returns the per-cpu, per-event interrupt counts gathered while
+ * @brief running in continuous profiling (cp) mode
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Get_Interval_Counts(IOCTL_ARGS args)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Not in CP mode!");
+		return OS_INVALID;
+	}
+	if (args->len_drv_to_usr == 0 || args->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Interval Counts information has been misconfigured!");
+		return OS_INVALID;
+	}
+	if (!interrupt_counts) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Interrupt_counts is NULL!");
+		return OS_INVALID;
+	}
+
+	if (copy_to_user(args->buf_drv_to_usr, interrupt_counts,
+			 args->len_drv_to_usr)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Set_Uncore_Topology_Info_And_Scan(IOCTL_ARGS args)
+ *
+ * @brief Dummy function kept for backward compatibility; performs no work
+ *
+ * @param args Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS
+lwpmudrv_Set_Uncore_Topology_Info_And_Scan(IOCTL_ARGS args)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+	SEP_DRV_LOG_FLOW_OUT("Success [but did not do anything]");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
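+ * For orientation (illustrative, not part of this patch): interrupt_counts
+ * returned above is a flat [num_cpus x num_events] array, indexed the same
+ * way lwpmudrv_Finish_Stop() logs it:
+ */
+#if 0
+static U64 example_interval_count(U64 *interrupt_counts, U32 num_events,
+				  U32 cpu, U32 event)
+{
+	/* row = cpu, column = event */
+	return interrupt_counts[cpu * num_events + event];
+}
+#endif
+/* ------------------------------------------------------------------------- */
+/*!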
+ * @fn static OS_STATUS lwpmudrv_Get_Uncore_Topology(IOCTL_ARGS args)
+ *
+ * @brief Returns the uncore device topology, scanning the requested PCI
+ * @brief devices on first discovery
+ *
+ * @param args Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS
+lwpmudrv_Get_Uncore_Topology(IOCTL_ARGS args)
+{
+	U32 dev;
+	static UNCORE_TOPOLOGY_INFO_NODE req_uncore_topology;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (args->buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (buf_usr_to_drv is NULL)!");
+		return OS_INVALID;
+	}
+	if (args->len_usr_to_drv != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (unexpected len_usr_to_drv value)!");
+		return OS_INVALID;
+	}
+	if (args->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (buf_drv_to_usr is NULL)!");
+		return OS_INVALID;
+	}
+	if (args->len_drv_to_usr != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (unexpected len_drv_to_usr value)!");
+		return OS_INVALID;
+	}
+
+	memset((char *)&req_uncore_topology, 0,
+	       sizeof(UNCORE_TOPOLOGY_INFO_NODE));
+	if (copy_from_user(&req_uncore_topology, args->buf_usr_to_drv,
+			   args->len_usr_to_drv)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	for (dev = 0; dev < MAX_DEVICES; dev++) {
+		// skip if the user did not request a scan of this device
+		if (!UNCORE_TOPOLOGY_INFO_device_scan(&req_uncore_topology,
+						      dev)) {
+			continue;
+		}
+		// skip if this device has already been discovered
+		if (UNCORE_TOPOLOGY_INFO_device_scan(&uncore_topology, dev)) {
+			continue;
+		}
+		memcpy((U8 *)&(UNCORE_TOPOLOGY_INFO_device(&uncore_topology,
+							   dev)),
+		       (U8 *)&(UNCORE_TOPOLOGY_INFO_device(&req_uncore_topology,
+							   dev)),
+		       sizeof(UNCORE_PCIDEV_NODE));
+		UNC_COMMON_PCI_Scan_For_Uncore((VOID *)&dev, dev, NULL);
+	}
+
+	if (copy_to_user(args->buf_drv_to_usr, &uncore_topology,
+			 args->len_drv_to_usr)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+/* ------------------------------------------------------------------------- */
+/*!
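+ * For orientation (illustrative): the uncore topology exchange above is a
+ * request/response round trip:
+ *
+ *   user sets device_scan(dev) for the devices of interest
+ *     -> driver skips devices already discovered and PCI-scans the rest
+ *        into the global uncore_topology
+ *   driver copies the full uncore_topology back
+ *     -> user reads the discovered devices
+ */
+/* ------------------------------------------------------------------------- */
+/*!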
+ * @fn U64 lwpmudrv_Get_Uncore_Discovery_Tables + * + * @brief Reads and populates Uncore discovery tables if available + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS +lwpmudrv_Get_Uncore_Discovery_Tables(IOCTL_ARGS args) +{ + SEP_DRV_LOG_FLOW_IN(""); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + if (args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv != sizeof(UNCORE_DISCOVERY_TABLE_LIST_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + if (args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(UNCORE_DISCOVERY_TABLE_LIST_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + if (unc_discovery_init == FALSE) { + memset((char *)&uncore_discovery_tables, 0, + sizeof(UNCORE_DISCOVERY_TABLE_LIST_NODE)); + if (copy_from_user(&uncore_discovery_tables, + args->buf_usr_to_drv, + args->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + UNC_COMMON_Read_Uncore_Discovery_Tables(); + unc_discovery_init = TRUE; + } + + if (copy_to_user(args->buf_drv_to_usr, &uncore_discovery_tables, + args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } +#endif + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 lwpmudrv_Get_PMT_Topology + * + * @brief Reads the PMT topology from PMT driver + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS +lwpmudrv_Get_PMT_Topology(IOCTL_ARGS args) +{ + static PMT_TOPOLOGY_DISCOVERY_LIST_NODE req_pmt_topology; + + SEP_DRV_LOG_FLOW_IN(""); + + if (args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv != sizeof(PMT_TOPOLOGY_DISCOVERY_LIST_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + if (args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(PMT_TOPOLOGY_DISCOVERY_LIST_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + memset((char *)&req_pmt_topology, 0, + sizeof(PMT_TOPOLOGY_DISCOVERY_LIST_NODE)); + if (copy_from_user(&req_pmt_topology, args->buf_usr_to_drv, + args->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + if (!PMT_TOPOLOGY_DISCOVERY_LIST_topology_detected(&pmt_topology)) { + memcpy((U8 *)&pmt_topology, (U8 *)&req_pmt_topology, + sizeof(PMT_TOPOLOGY_DISCOVERY_LIST_NODE)); +#if defined(DRV_PMT_ENABLE) + UNC_COMMON_Scan_PMT_Device(); +#endif + } + + if (copy_to_user(args->buf_drv_to_usr, &pmt_topology, + args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn U64 lwpmudrv_Get_Platform_Topology + * + * @brief Reads the MSR or PCI PLATFORM_INFO register if present + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS +lwpmudrv_Get_Platform_Topology(IOCTL_ARGS args) +{ + U32 dev; + U32 num_topology_devices = 0; + + SEP_DRV_LOG_FLOW_IN(""); + + if (args->buf_usr_to_drv == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv != sizeof(PLATFORM_TOPOLOGY_PROG_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + if (args->buf_drv_to_usr == NULL) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(PLATFORM_TOPOLOGY_PROG_NODE)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + memset((char *)&req_platform_topology_prog_node, 0, + sizeof(PLATFORM_TOPOLOGY_PROG_NODE)); + if (copy_from_user(&req_platform_topology_prog_node, + args->buf_usr_to_drv, args->len_usr_to_drv)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for req_platform_topology_prog_node!"); + return OS_FAULT; + } + + num_topology_devices = PLATFORM_TOPOLOGY_PROG_num_devices( + &req_platform_topology_prog_node); + for (dev = 0; dev < num_topology_devices; dev++) { + //skip if we have populated the register values already + if (PLATFORM_TOPOLOGY_PROG_EXT_topology_device_prog_valid( + &platform_topology_prog_node, dev)) { + continue; + } + memcpy((U8 *)&(PLATFORM_TOPOLOGY_PROG_EXT_topology_device( + &platform_topology_prog_node, dev)), + (U8 *)&(PLATFORM_TOPOLOGY_PROG_EXT_topology_device( + &req_platform_topology_prog_node, dev)), + sizeof(PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT)); + UNC_COMMON_Get_Platform_Topology(dev); + } + + if (copy_to_user(args->buf_drv_to_usr, &platform_topology_prog_node, + args->len_drv_to_usr)) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory copy failure for platform_topology_prog_node!"); + return OS_FAULT; + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS lwpmudrv_Flush(void) + * + * @brief Flushes the current contents of sampling buffers + * + * @param - none + * + * @return status + * + * Special Notes: + */ +static OS_STATUS +lwpmudrv_Flush(VOID) +{ + OS_STATUS status = OS_FAULT; + SEP_DRV_LOG_FLOW_IN(""); + + if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) { + SEP_DRV_LOG_ERROR( + "The flush failed. Continuous profiling, -cp, is not enabled!"); + goto clean_return; + } + + if (!DRIVER_STATE_IN(GET_DRIVER_STATE(), STATE_BIT_PAUSED)) { + SEP_DRV_LOG_ERROR( + "The flush failed. The driver should be paused!"); + goto clean_return; + } + + if (multi_pebs_enabled) { + CONTROL_Invoke_Parallel(PEBS_Flush_Buffer, NULL); + } + + LINUXOS_Uninstall_Hooks(); + LINUXOS_Enum_Process_Modules(TRUE); + status = OUTPUT_Flush(); + +clean_return: + SEP_DRV_LOG_FLOW_OUT("Status: %d.", status); + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
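+ * Usage sketch (illustrative, not part of this patch): lwpmudrv_Flush()
+ * requires cp mode and a PAUSED driver, so a collector would order the calls
+ * roughly as below (the EXAMPLE_IOCTL_* macros are placeholders for the
+ * DRV_OPERATION_PAUSE/FLUSH/RESUME encodings in lwpmudrv_ioctl.h):
+ */
+#if 0
+static void example_flush(int sep_fd)
+{
+	ioctl(sep_fd, EXAMPLE_IOCTL_PAUSE, 0);  /* enter STATE_BIT_PAUSED */
+	ioctl(sep_fd, EXAMPLE_IOCTL_FLUSH, 0);  /* drain sampling buffers */
+	ioctl(sep_fd, EXAMPLE_IOCTL_RESUME, 0); /* continue collection */
+}
+#endif
+/* ------------------------------------------------------------------------- */
+/*!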
+ * @fn static OS_STATUS lwpmudrv_Get_Driver_Log(IOCTL_ARGS args)
+ *
+ * @brief Dumps the driver log
+ *
+ * @param args Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS
+lwpmudrv_Get_Driver_Log(IOCTL_ARGS args)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (args->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (buf_drv_to_usr is NULL)!");
+		return OS_INVALID;
+	}
+	if (args->len_drv_to_usr < sizeof(*DRV_LOG())) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (unexpected len_drv_to_usr value)!");
+		return OS_INVALID;
+	}
+
+	if (copy_to_user(args->buf_drv_to_usr, DRV_LOG(), sizeof(*DRV_LOG()))) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	SEP_DRV_LOG_DISAMBIGUATE(); // keeps the driver log's footprint unique (has the highest disambiguator field)
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Control_Driver_Log(IOCTL_ARGS args)
+ *
+ * @brief Sets and/or gets the driver log's configuration
+ *
+ * @param args Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS
+lwpmudrv_Control_Driver_Log(IOCTL_ARGS args)
+{
+	DRV_LOG_CONTROL_NODE log_control;
+	U32 i;
+
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (args->buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (buf_usr_to_drv is NULL)!");
+		return OS_INVALID;
+	}
+	if (args->len_usr_to_drv < sizeof(log_control)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (unexpected len_usr_to_drv value)!");
+		return OS_INVALID;
+	}
+
+	if (copy_from_user(&log_control, args->buf_usr_to_drv,
+			   sizeof(log_control))) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	if (DRV_LOG_CONTROL_command(&log_control) ==
+	    DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY) {
+		for (i = 0; i < DRV_NB_LOG_CATEGORIES; i++) {
+			if (DRV_LOG_CONTROL_verbosities(&log_control)[i] ==
+			    LOG_VERBOSITY_UNSET) {
+				SEP_DRV_LOG_TRACE(
+					"Current verbosity mask for '%s' is 0x%x",
+					(UTILITY_Log_Category_Strings()[i]),
+					((U32)DRV_LOG_VERBOSITY(i)));
+				DRV_LOG_CONTROL_verbosities(&log_control)[i] =
+					DRV_LOG_VERBOSITY(i);
+			} else if (DRV_LOG_CONTROL_verbosities(
+					   &log_control)[i] ==
+				   LOG_VERBOSITY_DEFAULT) {
+				U32 verbosity;
+				switch (i) {
+				case DRV_LOG_CATEGORY_LOAD:
+					verbosity =
+						DRV_LOG_DEFAULT_LOAD_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_INIT:
+					verbosity =
+						DRV_LOG_DEFAULT_INIT_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_DETECTION:
+					verbosity =
+						DRV_LOG_DEFAULT_DETECTION_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_ERROR:
+					verbosity =
+						DRV_LOG_DEFAULT_ERROR_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_STATE_CHANGE:
+					verbosity =
+						DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_MARK:
+					verbosity =
+						DRV_LOG_DEFAULT_MARK_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_DEBUG:
+					verbosity =
+						DRV_LOG_DEFAULT_DEBUG_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_FLOW:
+					verbosity =
+						DRV_LOG_DEFAULT_FLOW_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_ALLOC:
+					verbosity =
+						DRV_LOG_DEFAULT_ALLOC_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_INTERRUPT:
+					verbosity =
+						DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_TRACE:
+					verbosity =
+						DRV_LOG_DEFAULT_TRACE_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_REGISTER:
+					verbosity =
+						DRV_LOG_DEFAULT_REGISTER_VERBOSITY;
+					break;
+				case DRV_LOG_CATEGORY_NOTIFICATION:
+					verbosity =
DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY; + break; + case DRV_LOG_CATEGORY_WARNING: + verbosity = + DRV_LOG_DEFAULT_WARNING_VERBOSITY; + break; + + default: + SEP_DRV_LOG_ERROR( + "Unspecified category '%s' when resetting to default!", + UTILITY_Log_Category_Strings() + [i]); + verbosity = LOG_VERBOSITY_NONE; + break; + } + SEP_DRV_LOG_INIT( + "Resetting verbosity mask for '%s' from 0x%x to 0x%x.", + UTILITY_Log_Category_Strings()[i], + (U32)DRV_LOG_VERBOSITY(i), verbosity); + DRV_LOG_VERBOSITY(i) = verbosity; + DRV_LOG_CONTROL_verbosities(&log_control)[i] = + verbosity; + } else { + SEP_DRV_LOG_INIT( + "Changing verbosity mask for '%s' from 0x%x to 0x%x.", + UTILITY_Log_Category_Strings()[i], + (U32)DRV_LOG_VERBOSITY(i), + (U32)DRV_LOG_CONTROL_verbosities( + &log_control)[i]); + DRV_LOG_VERBOSITY(i) = + DRV_LOG_CONTROL_verbosities( + &log_control)[i]; + } + } + + for (; i < DRV_MAX_NB_LOG_CATEGORIES; i++) { + DRV_LOG_CONTROL_verbosities(&log_control)[i] = + LOG_VERBOSITY_UNSET; + } + + if (copy_to_user(args->buf_drv_to_usr, &log_control, + sizeof(log_control))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + } else if (DRV_LOG_CONTROL_command(&log_control) == + DRV_LOG_CONTROL_COMMAND_MARK) { + DRV_LOG_CONTROL_message( + &log_control)[DRV_LOG_CONTROL_MAX_DATA_SIZE - 1] = 0; + SEP_DRV_LOG_MARK("Mark: '%s'.", + DRV_LOG_CONTROL_message(&log_control)); + } else if (DRV_LOG_CONTROL_command(&log_control) == + DRV_LOG_CONTROL_COMMAND_QUERY_SIZE) { + DRV_LOG_CONTROL_log_size(&log_control) = sizeof(*DRV_LOG()); + SEP_DRV_LOG_TRACE("Driver log size is %u bytes.", + DRV_LOG_CONTROL_log_size(&log_control)); + if (copy_to_user(args->buf_drv_to_usr, &log_control, + sizeof(log_control))) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!"); + return OS_FAULT; + } + } else if (DRV_LOG_CONTROL_command(&log_control) == + DRV_LOG_CONTROL_COMMAND_BENCHMARK) { + U32 nb_iterations = + *(U32 *)&DRV_LOG_CONTROL_message(&log_control); + + SEP_DRV_LOG_INIT_IN("Starting benchmark (%u iterations)...", + nb_iterations); + for (i = 0; i < nb_iterations; i++) { + (void)i; + } + SEP_DRV_LOG_INIT_OUT("Benchmark complete (%u/%u iterations).", + i, nb_iterations); + } + + SEP_DRV_LOG_FLOW_OUT("Success"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
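+ * Usage sketch (illustrative, not part of this patch): to adjust a single
+ * category's verbosity while leaving the rest untouched, a caller fills the
+ * control node as below; LOG_VERBOSITY_UNSET entries are echoed back with
+ * the current settings (accessor names as used in this file):
+ */
+#if 0
+static void example_set_trace_verbosity(DRV_LOG_CONTROL_NODE *log_control,
+					U8 mask)
+{
+	U32 i;
+
+	DRV_LOG_CONTROL_command(log_control) =
+		DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY;
+	for (i = 0; i < DRV_NB_LOG_CATEGORIES; i++) {
+		DRV_LOG_CONTROL_verbosities(log_control)[i] =
+			LOG_VERBOSITY_UNSET;
+	}
+	DRV_LOG_CONTROL_verbosities(log_control)[DRV_LOG_CATEGORY_TRACE] = mask;
+}
+#endif
+/* ------------------------------------------------------------------------- */
+/*!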
+ * @fn static OS_STATUS lwpmudrv_Get_Drv_Setup_Info(IOCTL_ARGS args)
+ *
+ * @brief Gather assorted information about the driver setup
+ *
+ * @param args Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS
+lwpmudrv_Get_Drv_Setup_Info(IOCTL_ARGS args)
+{
+#define VMM_VENDOR_STR_LEN 12
+	U32 pebs_unavailable = 0;
+	U64 rbx, rcx, rdx, num_basic_functions;
+	S8 vmm_vendor_name[VMM_VENDOR_STR_LEN + 1];
+	S8 *VMM_VMWARE_STR = "VMwareVMware";
+	S8 *VMM_KVM_STR = "KVMKVMKVM\0\0\0";
+	S8 *VMM_MSHYPERV_STR = "Microsoft Hv";
+#if defined(DRV_USE_KAISER)
+	int *kaiser_enabled_ptr;
+	int *kaiser_pti_option;
+#endif
+	int err = 0;
+
+	SEP_DRV_LOG_FLOW_IN("Args: %p.", args);
+
+	if (args->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (buf_drv_to_usr is NULL)!");
+		return OS_INVALID;
+	}
+	if (args->len_drv_to_usr != sizeof(DRV_SETUP_INFO_NODE)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (unexpected len_drv_to_usr value)!");
+		return OS_INVALID;
+	}
+
+	memset((char *)&req_drv_setup_info, 0, sizeof(DRV_SETUP_INFO_NODE));
+
+	DRV_SETUP_INFO_nmi_mode(&req_drv_setup_info) = 1;
+
+	pebs_unavailable = (SYS_Read_MSR(IA32_MISC_ENABLE) >> 12) & 0x1;
+	if (!pebs_unavailable) {
+		if (!wrmsr_safe(IA32_PEBS_ENABLE, 0, 0)) {
+			DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) = 1;
+		}
+	}
+
+	UTILITY_Read_Cpuid(0x1, &num_basic_functions, &rbx, &rcx, &rdx);
+	if ((rcx >> 31) & 0x1) {
+		DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 1;
+	}
+
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) || ((rcx >> 31) & 0x1)) {
+		UTILITY_Read_Cpuid(0x40000000, &num_basic_functions, &rbx,
+				   &rcx, &rdx);
+		memcpy(vmm_vendor_name, &rbx, 4);
+		memcpy(vmm_vendor_name + 4, &rcx, 4);
+		memcpy(vmm_vendor_name + 8, &rdx, 4);
+		vmm_vendor_name[VMM_VENDOR_STR_LEN] = 0;
+
+		DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1;
+		DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) =
+			DRV_VMM_UNKNOWN;
+
+		if (!strncmp(vmm_vendor_name, VMM_VMWARE_STR,
+			     VMM_VENDOR_STR_LEN)) {
+			DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) =
+				DRV_VMM_VMWARE;
+			DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 1;
+		} else if (!strncmp(vmm_vendor_name, VMM_KVM_STR,
+				    VMM_VENDOR_STR_LEN)) {
+			DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) =
+				DRV_VMM_KVM;
+		} else if (!strncmp(vmm_vendor_name, VMM_MSHYPERV_STR,
+				    VMM_VENDOR_STR_LEN)) {
+			DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) =
+				DRV_VMM_HYPERV;
+			DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 1;
+		}
+	}
+#if defined(CONFIG_XEN) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+	else if (xen_domain()) {
+		DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1;
+		DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) = DRV_VMM_XEN;
+
+		if (xen_initial_domain()) {
+			DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 0;
+		} else {
+			DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 1;
+		}
+	}
+#endif
+	else {
+		if (LINUXOS_Check_KVM_Guest_Process()) {
+			DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info) = 1;
+			DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info) =
+				DRV_VMM_KVM;
+			DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info) = 0;
+
+			// Check whether the guest VM enter/exit callbacks are
+			// available by attempting to register callback
+			// handlers. If registration fails, the PEBS
+			// capability is disabled.
+			err = LINUXOS_Install_GuestVM_Transition_Callback();
+			if (err) {
+				SEP_DRV_LOG_INIT(
+					"Disabling PEBS because the profiling hook for guest vmenter/vmexit cannot be registered");
+				DRV_SETUP_INFO_pebs_accessible(
+					&req_drv_setup_info) = 0;
+			}
+			LINUXOS_Uninstall_GuestVM_Transition_Callback();
+		}
+	}
+#if defined(DRV_DISABLE_PEBS)
+	DRV_SETUP_INFO_pebs_ignored_by_pti(&req_drv_setup_info) = 1;
+#endif
+
+#if defined(DRV_USE_KAISER)
+	kaiser_enabled_ptr = (int *)UTILITY_Find_Symbol("kaiser_enabled");
+	if (kaiser_enabled_ptr && *kaiser_enabled_ptr) {
+		SEP_DRV_LOG_INIT(
+			"KAISER is enabled! (&kaiser_enable=%p, val: %d).",
+			kaiser_enabled_ptr, *kaiser_enabled_ptr);
+		DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) =
+			DRV_SETUP_INFO_PTI_KAISER;
+	} else {
+		kaiser_pti_option = (int *)UTILITY_Find_Symbol("pti_option");
+		if (kaiser_pti_option) {
+			SEP_DRV_LOG_INIT(
+				"KAISER pti_option=%p pti_option val=%d",
+				kaiser_pti_option, *kaiser_pti_option);
+#if defined(X86_FEATURE_PTI)
+			if (static_cpu_has(X86_FEATURE_PTI)) {
+				SEP_DRV_LOG_INIT(
+					"KAISER is enabled or in auto-enable!");
+				DRV_SETUP_INFO_page_table_isolation(
+					&req_drv_setup_info) =
+					DRV_SETUP_INFO_PTI_KAISER;
+			} else {
+				SEP_DRV_LOG_INIT(
+					"KAISER is present but disabled!");
+			}
+#endif
+		}
+	}
+	if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == 0) {
+		if (!kaiser_enabled_ptr) {
+			SEP_DRV_LOG_INIT("KAISER Auto Enable!");
+			DRV_SETUP_INFO_page_table_isolation(
+				&req_drv_setup_info) =
+				DRV_SETUP_INFO_PTI_KAISER;
+		} else {
+			SEP_DRV_LOG_WARNING(
+				"KAISER is neither present nor enabled!");
+		}
+	}
+#elif defined(DRV_USE_PTI)
+	if (static_cpu_has(X86_FEATURE_PTI)) {
+		SEP_DRV_LOG_INIT("Kernel Page Table Isolation is enabled!");
+		DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) =
+			DRV_SETUP_INFO_PTI_KPTI;
+	}
+#endif
+
+#if defined(CONFIG_TRACEPOINTS)
+	DRV_SETUP_INFO_tracepoints_available(&req_drv_setup_info) = 1;
+#else
+	DRV_SETUP_INFO_sched_switch_hook_unavailable(&req_drv_setup_info) = 1;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)
+	DRV_SETUP_INFO_process_exit_hook_unavailable(&req_drv_setup_info) = 1;
+#endif
+#endif
+#if !defined(CONFIG_KPROBES) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)
+	DRV_SETUP_INFO_munmap_hook_unavailable(&req_drv_setup_info) = 1;
+#endif
+
+	if (!GLOBAL_STATE_symbol_lookup_available(driver_state)) {
+		if (!UTILITY_Find_Symbol("kallsyms_lookup_name")) {
+			DRV_SETUP_INFO_symbol_lookup_unavailable(
+				&req_drv_setup_info) = 1;
+		}
+	} else {
+		GLOBAL_STATE_symbol_lookup_available(driver_state) = 1;
+	}
+
+	if (allowlist_index != -1) {
+		DRV_SETUP_INFO_register_allowlist_detected(
+			&req_drv_setup_info) = 1;
+	}
+
+	DRV_SETUP_INFO_drv_type(&req_drv_setup_info) = drv_type;
+
+	SEP_DRV_LOG_TRACE("DRV_SETUP_INFO nmi_mode %d.",
+			  DRV_SETUP_INFO_nmi_mode(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_mode %d.",
+			  DRV_SETUP_INFO_vmm_mode(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_vendor %d.",
+			  DRV_SETUP_INFO_vmm_vendor(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE("DRV_SETUP_INFO vmm_guest_vm %d.",
+			  DRV_SETUP_INFO_vmm_guest_vm(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE("DRV_SETUP_INFO pebs_accessible %d.",
+			  DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE(
+		"DRV_SETUP_INFO page_table_isolation %d.",
+		DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE(
+		"DRV_SETUP_INFO tracepoints_available %d.",
+		DRV_SETUP_INFO_tracepoints_available(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE(
+		"DRV_SETUP_INFO symbol_lookup_unavailable %d.",
+		DRV_SETUP_INFO_symbol_lookup_unavailable(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE("DRV_SETUP_INFO process_exit_hook_unavailable %d.",
+			  DRV_SETUP_INFO_process_exit_hook_unavailable(
+				  &req_drv_setup_info));
+	SEP_DRV_LOG_TRACE(
+		"DRV_SETUP_INFO munmap_hook_unavailable %d.",
+		DRV_SETUP_INFO_munmap_hook_unavailable(&req_drv_setup_info));
+	SEP_DRV_LOG_TRACE("DRV_SETUP_INFO sched_switch_hook_unavailable %d.",
+			  DRV_SETUP_INFO_sched_switch_hook_unavailable(
+				  &req_drv_setup_info));
+
+#if defined(DRV_CPU_HOTPLUG)
+	DRV_SETUP_INFO_cpu_hotplug_mode(&req_drv_setup_info) = 1;
+#endif
+
+	if (copy_to_user(args->buf_drv_to_usr, &req_drv_setup_info,
+			 args->len_drv_to_usr)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success.");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Get_Sample_Drop_Info(IOCTL_ARGS args)
+ *
+ * @brief Get the information of dropped samples
+ *
+ * @param args Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS
+lwpmudrv_Get_Sample_Drop_Info(IOCTL_ARGS args)
+{
+	S32 i, size = 0;
+	SAMPLE_DROP_INFO_NODE req_sample_drop_info;
+
+	SEP_DRV_LOG_FLOW_IN("Args: %p.", args);
+
+	if (!pcb || !drv_cfg) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Pcb or drv_cfg pointer is NULL!");
+		return OS_INVALID;
+	}
+
+	if (args->buf_drv_to_usr == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (buf_drv_to_usr is NULL)!");
+		return OS_INVALID;
+	}
+	if (args->len_drv_to_usr != sizeof(SAMPLE_DROP_INFO_NODE)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Invalid arguments (unexpected len_drv_to_usr value)!");
+		return OS_INVALID;
+	}
+
+	memset((char *)&req_sample_drop_info, 0, sizeof(SAMPLE_DROP_INFO_NODE));
+
+	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state) &&
+		    size < MAX_SAMPLE_DROP_NODES;
+	     i++) {
+		if (DRV_CONFIG_ipt_mode(drv_cfg)) {
+			if (!CPU_STATE_ipt_dropped_count(&pcb[i])) {
+				continue;
+			}
+			SAMPLE_DROP_INFO_drop_info(&req_sample_drop_info, size)
+				.sampled =
+				CPU_STATE_ipt_collected_count(&pcb[i]);
+			SAMPLE_DROP_INFO_drop_info(&req_sample_drop_info, size)
+				.dropped = CPU_STATE_ipt_dropped_count(&pcb[i]);
+		} else {
+			if (!CPU_STATE_num_dropped_ebs_samples(&pcb[i])) {
+				continue;
+			}
+			SAMPLE_DROP_INFO_drop_info(&req_sample_drop_info, size)
+				.sampled = CPU_STATE_num_ebs_sampled(&pcb[i]);
+			SAMPLE_DROP_INFO_drop_info(&req_sample_drop_info, size)
+				.dropped =
+				CPU_STATE_num_dropped_ebs_samples(&pcb[i]);
+		}
+		SAMPLE_DROP_INFO_drop_info(&req_sample_drop_info, size).os_id =
+			OS_ID_NATIVE;
+		SAMPLE_DROP_INFO_drop_info(&req_sample_drop_info, size).cpu_id =
+			(U32)i;
+		size++;
+	}
+	SAMPLE_DROP_INFO_size(&req_sample_drop_info) = size;
+
+	if (copy_to_user(args->buf_drv_to_usr, &req_sample_drop_info,
+			 args->len_drv_to_usr)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Memory copy failure!");
+		return OS_FAULT;
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success.");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
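+ * For orientation (illustrative, not part of this patch): a consumer of the
+ * SAMPLE_DROP_INFO_NODE filled in above might derive a drop percentage per
+ * entry as below (accessor names as used in this file):
+ */
+#if 0
+static U32 example_drop_percent(SAMPLE_DROP_INFO_NODE *info, S32 i)
+{
+	U64 sampled = SAMPLE_DROP_INFO_drop_info(info, i).sampled;
+	U64 dropped = SAMPLE_DROP_INFO_drop_info(info, i).dropped;
+	U64 total = sampled + dropped;
+
+	return total ? (U32)((dropped * 100) / total) : 0;
+}
+#endif
+/* ------------------------------------------------------------------------- */
+/*!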
+ * @fn static OS_STATUS lwpmudrv_Set_Emon_Buffer_Driver_Helper(IOCTL_ARGS args)
+ *
+ * @brief Setup EMON buffer driver helper
+ *
+ * @param args Pointer to the IOCTL structure
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static OS_STATUS
+lwpmudrv_Set_Emon_Buffer_Driver_Helper(IOCTL_ARGS args)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (args->len_usr_to_drv == 0 || args->buf_usr_to_drv == NULL) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT("Error: Invalid arguments.");
+		return OS_INVALID;
+	}
+
+	if (!emon_buffer_driver_helper) {
+		emon_buffer_driver_helper =
+			CONTROL_Allocate_Memory(args->len_usr_to_drv);
+		if (emon_buffer_driver_helper == NULL) {
+			SEP_DRV_LOG_ERROR_FLOW_OUT(
+				"Memory allocation failure for emon_buffer_driver_helper!");
+			return OS_NO_MEM;
+		}
+	}
+
+	if (copy_from_user(emon_buffer_driver_helper, args->buf_usr_to_drv,
+			   args->len_usr_to_drv)) {
+		SEP_DRV_LOG_ERROR_FLOW_OUT(
+			"Memory copy failure for emon_buffer_driver_helper!");
+		return OS_FAULT;
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return OS_SUCCESS;
+}
+
+#if defined(DRV_PMT_ENABLE)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID lwpmu_PMT_Add_Telemetry_Info(ep, ep_info)
+ *
+ * @brief Record the PMT telemetry devices detected
+ *
+ * @param ep - telemetry endpoint detected
+ * @param ep_info - device information about the telemetry endpoint detected
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *
+ */
+VOID
+lwpmu_PMT_Add_Telemetry_Info(struct telem_endpoint *ep,
+			     struct telem_endpoint_info *ep_info)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	if (!ep || !ep_info || !ep_info->pdev) {
+		SEP_DRV_LOG_WARNING("Invalid Telemetry Endpoint\n");
+		return;
+	}
+
+	if (pmt_dev_index < MAX_PMT_DEVICES) {
+		pmt_devices[pmt_dev_index].device_id = ep_info->pdev->device;
+		pmt_devices[pmt_dev_index].vendor_id = ep_info->pdev->vendor;
+		pmt_devices[pmt_dev_index].domain = 0; // TODO: fix the domain
+		pmt_devices[pmt_dev_index].bus = ep_info->pdev->bus->number;
+		pmt_devices[pmt_dev_index].device =
+			PCI_SLOT(ep_info->pdev->devfn);
+		pmt_devices[pmt_dev_index].function =
+			PCI_FUNC(ep_info->pdev->devfn);
+		pmt_devices[pmt_dev_index].guid = ep_info->header.guid;
+		pmt_devices[pmt_dev_index].telem_ep = ep;
+		SEP_DRV_LOG_DETECTION(
+			"Detected PMT device %p devid=%x bus=%x dev=%x func=%x\n",
+			ep, pmt_devices[pmt_dev_index].device_id,
+			pmt_devices[pmt_dev_index].bus,
+			pmt_devices[pmt_dev_index].device,
+			pmt_devices[pmt_dev_index].function);
+	} else {
+		SEP_DRV_LOG_WARNING(
+			"Reached max pmt device limit pmt_dev_index=%d devid=%x bus=%x dev=%x func=%x\n",
+			pmt_dev_index, ep_info->pdev->device,
+			ep_info->pdev->bus->number,
+			PCI_SLOT(ep_info->pdev->devfn),
+			PCI_FUNC(ep_info->pdev->devfn));
+	}
+	pmt_dev_index++;
+
+	SEP_DRV_LOG_FLOW_OUT("Success");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
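+ * For orientation (illustrative): the bus/device/function fields recorded
+ * above come straight from the kernel's devfn encoding in <linux/pci.h>,
+ * which packs device:function as (slot << 3) | func, so:
+ *
+ *   PCI_SLOT(devfn) == (devfn >> 3) & 0x1f
+ *   PCI_FUNC(devfn) ==  devfn       & 0x07
+ */
+/* ------------------------------------------------------------------------- */
+/*!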
+ * @fn static void lwpmudrv_Detect_PMT_Endpoints(void)
+ *
+ * @brief Detect the PMT endpoints
+ *
+ * @param None
+ *
+ * @return status
+ *
+ * Special Notes:
+ *
+ */
+static void
+lwpmudrv_Detect_PMT_Endpoints(void)
+{
+	int err;
+	unsigned long index = 0;
+	struct telem_endpoint *ep = NULL;
+	struct telem_endpoint_info info;
+
+	if (pmt_telem_get_next_endpoint == NULL ||
+	    pmt_telem_get_endpoint_info == NULL ||
+	    pmt_telem_register_endpoint == NULL) {
+		SEP_DRV_LOG_WARNING("Address of PMT function is invalid\n");
+		return;
+	}
+
+	while ((index = pmt_telem_get_next_endpoint(index))) {
+		err = pmt_telem_get_endpoint_info(index, &info);
+		if (err) {
+			SEP_DRV_LOG_WARNING(
+				"Could not get endpoint info for index %ld, error %d\n",
+				index, err);
+			continue;
+		}
+
+		SEP_DRV_LOG_TRACE("size=%d guid=0x%x offset=0x%x\n",
+				  info.header.size, info.header.guid,
+				  info.header.base_offset);
+
+		ep = pmt_telem_register_endpoint(index);
+		if (IS_ERR(ep)) {
+			SEP_DRV_LOG_WARNING(
+				"Could not get endpoint for index %ld, error %ld\n",
+				index, PTR_ERR(ep));
+			continue;
+		}
+		lwpmu_PMT_Add_Telemetry_Info(ep, &info);
+	}
+
+	return;
+}
+#endif
+
+/*******************************************************************************
+ * External Driver functions - Open
+ * This function is common to all drivers
+ *******************************************************************************/
+
+static int
+lwpmu_Open(struct inode *inode, struct file *filp)
+{
+	SEP_DRV_LOG_TRACE_IN("Maj:%d, min:%d", imajor(inode), iminor(inode));
+
+	filp->private_data = container_of(inode->i_cdev, LWPMU_DEV_NODE, cdev);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return 0;
+}
+
+/*******************************************************************************
+ * External Driver functions
+ * These functions are registered into the file operations table that
+ * controls this device.
+ * Open, Close, Read, Write, Release
+ *******************************************************************************/
+
+static ssize_t
+lwpmu_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
+{
+	unsigned long retval;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	/* Transferring data to user space */
+	SEP_DRV_LOG_TRACE("Dispatched with count=%d.", (S32)count);
+	if (copy_to_user(buf, &LWPMU_DEV_buffer(lwpmu_control), 1)) {
+		retval = OS_FAULT;
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!");
+		return retval;
+	}
+	/* Advance the read position so a subsequent read returns EOF */
+	if (*f_pos == 0) {
+		*f_pos += 1;
+		SEP_DRV_LOG_TRACE_OUT("Return value: 1.");
+		return 1;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Return value: 0.");
+	return 0;
+}
+
+static ssize_t
+lwpmu_Write(struct file *filp, char const *buf, size_t count, loff_t *f_pos)
+{
+	unsigned long retval;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	SEP_DRV_LOG_TRACE("Dispatched with count=%d.", (S32)count);
+	if (copy_from_user(&LWPMU_DEV_buffer(lwpmu_control), buf + count - 1,
+			   1)) {
+		retval = OS_FAULT;
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!");
+		return retval;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Return value: 1.");
+	return 1;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern IOCTL_OP_TYPE lwpmu_Service_IOCTL(IOCTL_USE_INODE, filp, cmd, local_args)
+ *
+ * @param IOCTL_USE_INODE - Used for pre 2.6.32 kernels
+ * @param struct file *filp - file pointer
+ * @param unsigned int cmd - IOCTL command
+ * @param IOCTL_ARGS_NODE local_args - args to the IOCTL command
+ *
+ * @return OS_STATUS
+ *
+ * @brief SEP Worker function that handles IOCTL requests from the user mode.
+ * + * Special Notes + */ +extern IOCTL_OP_TYPE +lwpmu_Service_IOCTL(IOCTL_USE_INODE struct file *filp, + unsigned int cmd, + IOCTL_ARGS_NODE local_args) +{ + int status = OS_SUCCESS; + + SEP_DRV_LOG_TRACE_IN("Command: %d.", cmd); + + if (cmd == DRV_OPERATION_GET_DRIVER_STATE) { + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRIVER_STATE."); + status = lwpmudrv_Get_Driver_State(&local_args); + SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, + status); + return status; + } + if (cmd == DRV_OPERATION_GET_DRIVER_LOG) { + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRIVER_LOG."); + status = lwpmudrv_Get_Driver_Log(&local_args); + SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, + status); + return status; + } + if (cmd == DRV_OPERATION_CONTROL_DRIVER_LOG) { + SEP_DRV_LOG_TRACE("DRV_OPERATION_CONTROL_DRIVER_LOG."); + status = lwpmudrv_Control_Driver_Log(&local_args); + SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, + status); + return status; + } + if (GET_DRIVER_STATE() == DRV_STATE_PREPARE_STOP) { + SEP_DRV_LOG_TRACE("skipping ioctl -- processing stop."); + SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d", cmd, + status); + return status; + } + + MUTEX_LOCK(ioctl_lock); + UTILITY_Driver_Set_Active_Ioctl(cmd); + + switch (cmd) { + /* + * Common IOCTL commands + */ + + case DRV_OPERATION_VERSION: + SEP_DRV_LOG_TRACE("DRV_OPERATION_VERSION."); + status = lwpmudrv_Version(&local_args); + break; + + case DRV_OPERATION_RESERVE: + SEP_DRV_LOG_TRACE("DRV_OPERATION_RESERVE."); + status = lwpmudrv_Reserve(&local_args); + break; + + case DRV_OPERATION_INIT_DRIVER: + SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_DRIVER."); + status = lwpmudrv_Initialize_Driver(local_args.buf_usr_to_drv, + local_args.len_usr_to_drv); + break; + + case DRV_OPERATION_INIT: + SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT."); + status = lwpmudrv_Initialize(local_args.buf_usr_to_drv, + local_args.len_usr_to_drv); + break; + + case DRV_OPERATION_INIT_PMU: + SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_PMU."); + status = lwpmudrv_Init_PMU(&local_args); + break; + + case DRV_OPERATION_SET_CPU_MASK: + SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_CPU_MASK."); + status = lwpmudrv_Set_CPU_Mask(local_args.buf_usr_to_drv, + local_args.len_usr_to_drv); + break; + + case DRV_OPERATION_START: + SEP_DRV_LOG_TRACE("DRV_OPERATION_START."); + status = lwpmudrv_Start(); + break; + + case DRV_OPERATION_STOP: + SEP_DRV_LOG_TRACE("DRV_OPERATION_STOP."); + status = lwpmudrv_Prepare_Stop(); + UTILITY_Driver_Set_Active_Ioctl(0); + MUTEX_UNLOCK(ioctl_lock); + + MUTEX_LOCK(ioctl_lock); + UTILITY_Driver_Set_Active_Ioctl(cmd); + if (GET_DRIVER_STATE() == DRV_STATE_PREPARE_STOP) { + status = lwpmudrv_Finish_Stop(); + if (status == OS_SUCCESS) { + // if stop was successful, relevant memory should have been freed, + // so try to compact the memory tracker + CONTROL_Memory_Tracker_Compaction(); + } + } + break; + + case DRV_OPERATION_PAUSE: + SEP_DRV_LOG_TRACE("DRV_OPERATION_PAUSE."); + status = lwpmudrv_Pause(); + break; + + case DRV_OPERATION_RESUME: + SEP_DRV_LOG_TRACE("DRV_OPERATION_RESUME."); + status = lwpmudrv_Resume(); + break; + + case DRV_OPERATION_EM_GROUPS: + SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_GROUPS."); + status = lwpmudrv_Set_EM_Config(&local_args); + break; + + case DRV_OPERATION_EM_CONFIG_NEXT: + SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_CONFIG_NEXT."); + status = lwpmudrv_Configure_Events(&local_args); + break; + + case DRV_OPERATION_NUM_DESCRIPTOR: + SEP_DRV_LOG_TRACE("DRV_OPERATION_NUM_DESCRIPTOR."); + status = 
lwpmudrv_Set_Sample_Descriptors(&local_args); + break; + + case DRV_OPERATION_DESC_NEXT: + SEP_DRV_LOG_TRACE("DRV_OPERATION_DESC_NEXT."); + status = lwpmudrv_Configure_Descriptors(&local_args); + break; + + case DRV_OPERATION_GET_NORMALIZED_TSC: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NORMALIZED_TSC."); + status = lwpmudrv_Get_Normalized_TSC(&local_args); + break; + + case DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE: + SEP_DRV_LOG_TRACE( + "DRV_OPERATION_GET_NORMALIZED_TSC_STANDALONE."); + status = lwpmudrv_Get_Normalized_TSC(&local_args); + break; + + case DRV_OPERATION_NUM_CORES: + SEP_DRV_LOG_TRACE("DRV_OPERATION_NUM_CORES."); + status = lwpmudrv_Get_Num_Cores(&local_args); + break; + + case DRV_OPERATION_KERNEL_CS: + SEP_DRV_LOG_TRACE("DRV_OPERATION_KERNEL_CS."); + status = lwpmudrv_Get_KERNEL_CS(&local_args); + break; + + case DRV_OPERATION_SET_UID: + SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_UID."); + status = lwpmudrv_Set_UID(&local_args); + break; + + case DRV_OPERATION_TSC_SKEW_INFO: + SEP_DRV_LOG_TRACE("DRV_OPERATION_TSC_SKEW_INFO."); + status = lwpmudrv_Get_TSC_Skew_Info(&local_args); + break; + + case DRV_OPERATION_COLLECT_SYS_CONFIG: + SEP_DRV_LOG_TRACE("DRV_OPERATION_COLLECT_SYS_CONFIG."); + status = lwpmudrv_Collect_Sys_Config(&local_args); + break; + + case DRV_OPERATION_GET_SYS_CONFIG: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_SYS_CONFIG."); + status = lwpmudrv_Sys_Config(&local_args); + break; + + case DRV_OPERATION_TERMINATE: + SEP_DRV_LOG_TRACE("DRV_OPERATION_TERMINATE."); + status = lwpmudrv_Terminate(); + break; + + case DRV_OPERATION_SET_CPU_TOPOLOGY: + SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_CPU_TOPOLOGY."); + status = lwpmudrv_Setup_Cpu_Topology(&local_args); + break; + + case DRV_OPERATION_GET_NUM_CORE_CTRS: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_CORE_CTRS."); + status = lwpmudrv_Samp_Read_Num_Of_Core_Counters(&local_args); + break; + + case DRV_OPERATION_GET_PLATFORM_INFO: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PLATFORM_INFO."); + status = lwpmudrv_Get_Platform_Info(&local_args); + break; + + case DRV_OPERATION_SWITCH_GROUP: + SEP_DRV_LOG_TRACE("DRV_OPERATION_SWITCH_GROUP."); + status = lwpmudrv_Switch_Group(); + break; + + case DRV_OPERATION_GET_PERF_CAPAB: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PERF_CAPAB."); + status = lwpmudrv_Get_Perf_Capab(&local_args); + break; + + /* + * EMON-specific IOCTL commands + */ + case DRV_OPERATION_READ_MSR: + SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_MSR."); + status = lwpmudrv_Read_Allowlist_MSR_All_Cores(&local_args); + break; + + case DRV_OPERATION_WRITE_MSR: + SEP_DRV_LOG_TRACE("DRV_OPERATION_WRITE_MSR."); + status = lwpmudrv_Write_Allowlist_MSR_All_Cores(&local_args); + break; + + case DRV_OPERATION_READ_SWITCH_GROUP: + SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_SWITCH_GROUP."); + status = lwpmudrv_Read_Counters_And_Switch_Group(&local_args); + break; + + case DRV_OPERATION_READ_AND_RESET: + SEP_DRV_LOG_TRACE("DRV_OPERATION_READ_AND_RESET."); + status = lwpmudrv_Read_And_Reset_Counters(&local_args); + break; + + /* + * Platform-specific IOCTL commands (IA32 and Intel64) + */ + + case DRV_OPERATION_INIT_UNC: + SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_UNC."); + status = lwpmudrv_Initialize_UNC(local_args.buf_usr_to_drv, + local_args.len_usr_to_drv); + break; + + case DRV_OPERATION_EM_GROUPS_UNC: + SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_GROUPS_UNC."); + status = lwpmudrv_Set_EM_Config_UNC(&local_args); + break; + + case DRV_OPERATION_EM_CONFIG_NEXT_UNC: + SEP_DRV_LOG_TRACE("DRV_OPERATION_EM_CONFIG_NEXT_UNC."); + status = 
lwpmudrv_Configure_Events_UNC(&local_args); + break; + + case DRV_OPERATION_LBR_INFO: + SEP_DRV_LOG_TRACE("DRV_OPERATION_LBR_INFO."); + status = lwpmudrv_LBR_Info(&local_args); + break; + + case DRV_OPERATION_SET_IPT_CONFIG: + SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_IPT_CONFIG."); + status = lwpmudrv_Set_IPT_Config(&local_args); + break; + + case DRV_OPERATION_INIT_NUM_DEV: + SEP_DRV_LOG_TRACE("DRV_OPERATION_INIT_NUM_DEV."); + status = lwpmudrv_Initialize_Num_Devices(&local_args); + break; + case DRV_OPERATION_GET_NUM_SAMPLES: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_NUM_SAMPLES."); + status = lwpmudrv_Get_Num_Samples(&local_args); + break; + + case DRV_OPERATION_SET_DEVICE_NUM_UNITS: + SEP_DRV_LOG_TRACE("DRV_OPERATION_SET_DEVICE_NUM_UNITS."); + status = lwpmudrv_Set_Device_Num_Units(&local_args); + break; + + case DRV_OPERATION_GET_INTERVAL_COUNTS: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_INTERVAL_COUNTS."); + lwpmudrv_Get_Interval_Counts(&local_args); + break; + + case DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO: + SEP_DRV_LOG_TRACE( + "DRV_OPERATION_SET_SCAN_UNCORE_TOPOLOGY_INFO."); + status = + lwpmudrv_Set_Uncore_Topology_Info_And_Scan(&local_args); + break; + + case DRV_OPERATION_GET_UNCORE_TOPOLOGY: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_UNCORE_TOPOLOGY."); + status = lwpmudrv_Get_Uncore_Topology(&local_args); + break; + + case DRV_OPERATION_GET_PMT_TOPOLOGY: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PMT_TOPOLOGY."); + status = lwpmudrv_Get_PMT_Topology(&local_args); + break; + + case DRV_OPERATION_GET_PLATFORM_TOPOLOGY: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_PLATFORM_TOPOLOGY."); + status = lwpmudrv_Get_Platform_Topology(&local_args); + break; + + case DRV_OPERATION_FLUSH: + SEP_DRV_LOG_TRACE("DRV_OPERATION_FLUSH."); + status = lwpmudrv_Flush(); + break; + + case DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER: + SEP_DRV_LOG_TRACE( + "DRV_OPERATION_SET_EMON_BUFFER_DRIVER_HELPER."); + status = lwpmudrv_Set_Emon_Buffer_Driver_Helper(&local_args); + break; + + case DRV_OPERATION_GET_DRV_SETUP_INFO: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_DRV_SETUP_INFO."); + status = lwpmudrv_Get_Drv_Setup_Info(&local_args); + break; + + case DRV_OPERATION_GET_SAMPLE_DROP_INFO: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_SAMPLE_DROP_INFO\n"); + status = lwpmudrv_Get_Sample_Drop_Info(&local_args); + break; + + case DRV_OPERATION_GET_UNCORE_DISCOVERY_TABLES: + SEP_DRV_LOG_TRACE("DRV_OPERATION_GET_UNCORE_DISCOVERY_TABLES."); + status = lwpmudrv_Get_Uncore_Discovery_Tables(&local_args); + break; + + /* + * if none of the above, treat as unknown/illegal IOCTL command + */ + + default: + SEP_DRV_LOG_ERROR("Unknown IOCTL number: %d!", cmd); + status = OS_ILLEGAL_IOCTL; + break; + } + + UTILITY_Driver_Set_Active_Ioctl(0); + MUTEX_UNLOCK(ioctl_lock); + + SEP_DRV_LOG_TRACE_OUT("Return value for command %d: %d.", cmd, status); + return status; +} + +extern long +lwpmu_Device_Control(IOCTL_USE_INODE struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + int status = OS_SUCCESS; + IOCTL_ARGS_NODE local_args; + + SEP_DRV_LOG_TRACE_IN("Cmd type: %d, subcommand: %d.", _IOC_TYPE(cmd), + _IOC_NR(cmd)); + +#if !defined(DRV_USE_UNLOCKED_IOCTL) + SEP_DRV_LOG_TRACE("Cmd: 0x%x, called on inode maj:%d, min:%d.", cmd, + imajor(inode), iminor(inode)); +#endif + SEP_DRV_LOG_TRACE("Type: %d, subcommand: %d.", _IOC_TYPE(cmd), + _IOC_NR(cmd)); + + if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Unknown IOCTL magic: %d!", + _IOC_TYPE(cmd)); + return OS_ILLEGAL_IOCTL; + } + + if (arg) { + status = 
copy_from_user(&local_args, (IOCTL_ARGS)arg,
+					sizeof(IOCTL_ARGS_NODE));
+		/* Do not dispatch if the argument block could not be copied */
+		if (status) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!");
+			return OS_FAULT;
+		}
+	}
+
+	status = lwpmu_Service_IOCTL(IOCTL_USE_INODE filp, _IOC_NR(cmd),
+				     local_args);
+
+	SEP_DRV_LOG_TRACE_OUT("Return value: %d.", status);
+	return status;
+}
+
+#if defined(CONFIG_COMPAT) && defined(DRV_EM64T)
+extern long
+lwpmu_Device_Control_Compat(struct file *filp,
+			    unsigned int cmd,
+			    unsigned long arg)
+{
+	int status = OS_SUCCESS;
+	IOCTL_COMPAT_ARGS_NODE local_args_compat;
+	IOCTL_ARGS_NODE local_args;
+
+	SEP_DRV_LOG_TRACE_IN("Compat: type: %d, subcommand: %d.",
+			     _IOC_TYPE(cmd), _IOC_NR(cmd));
+
+	memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE));
+	SEP_DRV_LOG_TRACE("Compat: type: %d, subcommand: %d.", _IOC_TYPE(cmd),
+			  _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Unknown IOCTL magic: %d!",
+					    _IOC_TYPE(cmd));
+		return OS_ILLEGAL_IOCTL;
+	}
+
+	if (arg) {
+		status = copy_from_user(&local_args_compat,
+					(IOCTL_COMPAT_ARGS)arg,
+					sizeof(IOCTL_COMPAT_ARGS_NODE));
+		/* Do not dispatch if the argument block could not be copied */
+		if (status) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("Memory copy failure!");
+			return OS_FAULT;
+		}
+	}
+	local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr;
+	local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv;
+	local_args.buf_drv_to_usr =
+		(char *)compat_ptr(local_args_compat.buf_drv_to_usr);
+	local_args.buf_usr_to_drv =
+		(char *)compat_ptr(local_args_compat.buf_usr_to_drv);
+
+	status = lwpmu_Service_IOCTL(filp, _IOC_NR(cmd), local_args);
+
+	SEP_DRV_LOG_TRACE_OUT("Return value: %d", status);
+	return status;
+}
+#endif
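+
+/*
+ * For illustration only (not driver code): user space is expected to
+ * marshal an IOCTL_ARGS_NODE and issue requests against the control
+ * device. A minimal sketch, assuming a hypothetical device path and a
+ * request encoding consistent with the LWPMU_IOC_MAGIC/_IOC_NR checks
+ * above:
+ *
+ *     IOCTL_ARGS_NODE args;
+ *     int fd = open("/dev/sep5/c", O_RDWR);
+ *
+ *     memset(&args, 0, sizeof(args));
+ *     args.buf_drv_to_usr = out_buf;
+ *     args.len_drv_to_usr = sizeof(out_buf);
+ *     ioctl(fd, _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_VERSION, IOCTL_ARGS_NODE), &args);
+ *
+ * The 32-bit compat path above performs the same marshaling after widening
+ * the packed 32-bit buffer pointers with compat_ptr().
+ */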
+
+/*
+ * @fn LWPMUDRV_Abnormal_Terminate(void)
+ *
+ * @brief This routine is called from linuxos_Exit_Task_Notify if the user
+ *        process has been killed by an uncatchable signal (e.g., kill -9).
+ *        The state variable abnormal_terminate is set to 1 and the clean-up
+ *        routines are called. In this code path the OS notifier hooks
+ *        should not be unloaded.
+ *
+ * @param None
+ *
+ * @return OS_STATUS
+ *
+ * Special Notes:
+ *
+ */
+extern int
+LWPMUDRV_Abnormal_Terminate(void)
+{
+	int status = OS_SUCCESS;
+	SEP_DRV_LOG_FLOW_IN("");
+
+	SEP_DRV_LOG_TRACE("Calling lwpmudrv_Prepare_Stop.");
+	status = lwpmudrv_Prepare_Stop();
+	SEP_DRV_LOG_TRACE("Calling lwpmudrv_Finish_Stop.");
+	status = lwpmudrv_Finish_Stop();
+	SEP_DRV_LOG_TRACE("Calling lwpmudrv_Terminate.");
+	status = lwpmudrv_Terminate();
+
+	SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status);
+	return status;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0) && \
+	LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+static void
+lwpmudrv_Get_Symbol_Lookup_Func_Addr(void)
+{
+	struct file *fd;
+	S8 buf;
+	char strbuf[MAX_CHARS_IN_LINE], *linestr, *line_head, *addr, *sym,
+		*name;
+	S32 ret, buf_idx;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	memset(sym_lookup_func_addr, 0, MAX_ADDR_STR_LEN + 1);
+
+	/*
+	 * kallsyms_lookup_name is not exported since the 5.7 kernel;
+	 * the function's address is obtained by reading /proc/kallsyms
+	 */
+	fd = filp_open("/proc/kallsyms", O_RDONLY, 0);
+	if (!IS_ERR_OR_NULL(fd)) {
+		ret = fd->f_op->read(fd, &buf, 1, &fd->f_pos);
+		SEP_DRV_LOG_TRACE("ret = %d", ret);
+		buf_idx = 0;
+		while (ret > 0) {
+			if (buf == '\n') {
+				strbuf[buf_idx] = '\0';
+				linestr = kstrdup(strbuf, GFP_KERNEL);
+				if (linestr == NULL) {
+					break;
+				}
+				/* strsep() advances linestr; keep the original
+				 * pointer so kfree() frees the allocation start
+				 */
+				line_head = linestr;
+				addr = strsep(&linestr, " ");
+				if (addr == NULL) {
+					kfree(line_head);
+					break;
+				}
+				sym = strsep(&linestr, " ");
+				if (sym == NULL) {
+					kfree(line_head);
+					break;
+				}
+				name = strsep(&linestr, "\0");
+				if (name == NULL) {
+					kfree(line_head);
+					break;
+				}
+				if (strcmp(name, "kallsyms_lookup_name") == 0) {
+					SEP_DRV_LOG_TRACE("%s, %s\n", addr,
+							  name);
+					memcpy(sym_lookup_func_addr, addr,
+					       MAX_ADDR_STR_LEN + 1);
+					kfree(line_head);
+					GLOBAL_STATE_symbol_lookup_available(
+						driver_state) = 1;
+					break;
+				}
+				buf_idx = 0;
+				kfree(line_head);
+			} else {
+				strbuf[buf_idx++] = buf;
+			}
+			ret = fd->f_op->read(fd, &buf, 1, &fd->f_pos);
+		}
+		filp_close(fd, NULL);
+	} else {
+		SEP_DRV_LOG_WARNING("Could not open /proc/kallsyms!");
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("sym_lookup_func_addr = %s",
+			      sym_lookup_func_addr);
+}
+#endif
+
+static int
+lwpmudrv_Abnormal_Handler(void *data)
+{
+	SEP_DRV_LOG_FLOW_IN("");
+
+	// Reading kallsyms should be done in a separate thread
+	// because of a blocking operation on the read call
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0) && \
+	LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+	lwpmudrv_Get_Symbol_Lookup_Func_Addr();
+#endif
+
+	while (!kthread_should_stop()) {
+		if (wait_event_interruptible_timeout(
+			    wait_exit,
+			    GET_DRIVER_STATE() == DRV_STATE_TERMINATING,
+			    msecs_to_jiffies(350))) {
+			SEP_DRV_LOG_WARNING(
+				"Processing abnormal termination...");
+			MUTEX_LOCK(ioctl_lock);
+			SEP_DRV_LOG_TRACE("Locked ioctl_lock...");
+			LWPMUDRV_Abnormal_Terminate();
+			SEP_DRV_LOG_TRACE("Unlocking ioctl_lock...");
+			MUTEX_UNLOCK(ioctl_lock);
+		}
+	}
+
+	SEP_DRV_LOG_FLOW_OUT("End of thread.");
+	return 0;
+}
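+
+/*
+ * For illustration: the watchdog thread above pairs kthread_should_stop()
+ * with a timed wait on wait_exit. The signaling side (as used during
+ * unload later in this file) is simply:
+ *
+ *     CHANGE_DRIVER_STATE(STATE_BIT_ANY, DRV_STATE_TERMINATING);
+ *     wake_up_interruptible_all(&wait_exit);
+ *
+ * The 350 ms timeout bounds how long an abnormal termination can go
+ * unnoticed if a wake-up is missed.
+ */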
+
+/*****************************************************************************************
+ *
+ *   Driver Entry / Exit functions that will be called on when the driver is loaded and
+ *   unloaded
+ *
+ ****************************************************************************************/
+
+/*
+ * Structure that declares the usual file access functions
+ * First one is for lwpmu_c, the control functions
+ */
+static struct file_operations lwpmu_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = lwpmu_Device_Control,
+#if defined(CONFIG_COMPAT) && defined(DRV_EM64T)
+	.compat_ioctl = lwpmu_Device_Control_Compat,
+#endif
+	.read = lwpmu_Read,
+	.write = lwpmu_Write,
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*
+ * Second one is for lwpmu_m, the module notification functions
+ */
+static struct file_operations lwmod_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = NULL, //None needed
+	.read = OUTPUT_Module_Read,
+	.write = NULL, //No writing accepted
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*
+ * Third one is for lwsamp_nn, the sampling functions
+ */
+static struct file_operations lwsamp_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = NULL, //None needed
+	.read = OUTPUT_Sample_Read,
+	.write = NULL, //No writing accepted
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*
+ * Fourth one is for lwsamp_sideband, the pebs process info functions
+ */
+static struct file_operations lwsideband_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = NULL, //None needed
+	.read = OUTPUT_SidebandInfo_Read,
+	.write = NULL, //No writing accepted
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*
+ * Fifth one is for lwsampunc_nn, the uncore sampling functions
+ */
+static struct file_operations lwsampunc_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = NULL, //None needed
+	.read = OUTPUT_UncSample_Read,
+	.write = NULL, //No writing accepted
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*
+ * Sixth one is for lwpmu_e, the EMON notification functions
+ */
+static struct file_operations lwemon_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = NULL, //None needed
+	.read = OUTPUT_Emon_Read,
+	.write = NULL, //No writing accepted
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*
+ * Seventh one is for lwpmu_pt, the IPT functions
+ */
+static struct file_operations lwipt_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = NULL, //None needed
+	.read = OUTPUT_IPT_Trace_Read,
+	.write = NULL, //No writing accepted
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*
+ * Eighth one is for lwpmu_ptinfo, the IPT info functions
+ */
+static struct file_operations lwiptinfo_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = NULL, //None needed
+	.read = OUTPUT_IPT_Info_Read,
+	.write = NULL, //No writing accepted
+	.open = lwpmu_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn  static int lwpmu_setup_cdev(dev, fops, dev_number)
+ *
+ * @param LWPMU_DEV               dev        - pointer to the device object
+ * @param struct file_operations *fops       - pointer to the file operations struct
+ * @param dev_t                   dev_number - major/minor device number
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Set up the device object.
+ *
+ * Special Notes
+ */
+static int
+lwpmu_setup_cdev(LWPMU_DEV dev, struct file_operations *fops, dev_t dev_number)
+{
+	int res;
+	SEP_DRV_LOG_TRACE_IN("");
+
+	cdev_init(&LWPMU_DEV_cdev(dev), fops);
+	LWPMU_DEV_cdev(dev).owner = THIS_MODULE;
+	LWPMU_DEV_cdev(dev).ops = fops;
+
+	res = cdev_add(&LWPMU_DEV_cdev(dev), dev_number, 1);
+
+	SEP_DRV_LOG_TRACE_OUT("Return value: %d", res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static int lwpmu_Load(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief  Load the driver module into the kernel. Set up the driver object.
+ *         Set up the initial state of the driver and allocate the memory
+ *         needed to keep basic state information.
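+ *
+ * Special Notes
+ *         Device registration below repeats one pattern per device class;
+ *         a sketch with placeholder names (devnum, n, name, fops, ctl):
+ *
+ *             alloc_chrdev_region(&devnum, 0, n, name);
+ *             device_create(pmu_class, NULL, devnum + i, NULL, dev_name);
+ *             lwpmu_setup_cdev(ctl + i, &fops, devnum + i);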
+ */ +static int +lwpmu_Load(VOID) +{ + int i, num_cpus; + dev_t lwmod_DevNum; + OS_STATUS status = OS_INVALID; +#if !defined(DRV_UDEV_UNAVAILABLE) + char dev_name[MAXNAMELEN]; +#endif +#if defined(CONFIG_XEN_HAVE_VPMU) + xen_pmu_params_t xenpmu_param; + xen_pmu_data_t *xenpmu_data; + unsigned long pfn; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + SEP_DRV_LOG_LOAD("Driver loading... sym_lookup_func_addr=%s", + sym_lookup_func_addr); +#else + SEP_DRV_LOG_LOAD("Driver loading..."); +#endif + + // Do not use SEP_DRV_LOG_X (where X != LOAD) before this, or if this fails + if (UTILITY_Driver_Log_Init() != OS_SUCCESS) { + SEP_DRV_LOG_LOAD("Error: could not allocate log buffer."); + return OS_NO_MEM; + } + SEP_DRV_LOG_FLOW_IN("Starting internal log monitoring."); + + CONTROL_Memory_Tracker_Init(); + +#if !defined(CONFIG_XEN_HAVE_VPMU) +#if defined(CONFIG_XEN) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) + if (xen_initial_domain()) { + SEP_DRV_LOG_LOAD( + "PMU virtualization is not enabled on XEN dom0!"); + } +#endif +#endif + + /* Get one major device number and two minor numbers. */ + /* The result is formatted as major+minor(0) */ + /* One minor number is for control (lwpmu_c), */ + /* the other (lwpmu_m) is for modules */ + SEP_DRV_LOG_INIT("About to register chrdev..."); + + lwpmu_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwpmu_DevNum, 0, PMU_DEVICES, + SEP_DRIVER_NAME); + SEP_DRV_LOG_INIT("Result of alloc_chrdev_region is %d.", status); + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region (return = %d).", + status); + return status; + } + SEP_DRV_LOG_LOAD("Major number is %d", MAJOR(lwpmu_DevNum)); + status = lwpmudrv_Initialize_State(); + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Failed to initialize state (return = %d)!", status); + return status; + } + num_cpus = GLOBAL_STATE_num_cpus(driver_state); + SEP_DRV_LOG_LOAD("Detected %d total CPUs and %d active CPUs.", num_cpus, + GLOBAL_STATE_active_cpus(driver_state)); + +#if defined(CONFIG_XEN_HAVE_VPMU) + if (xen_initial_domain()) { + xenpmu_param.version.maj = XENPMU_VER_MAJ; + xenpmu_param.version.min = XENPMU_VER_MIN; + + for (i = 0; i < num_cpus; i++) { + xenpmu_data = + (xen_pmu_data_t *)get_zeroed_page(GFP_KERNEL); + if (!xenpmu_data) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for xenpmu_data!"); + return OS_NO_MEM; + } + pfn = vmalloc_to_pfn((char *)xenpmu_data); + + xenpmu_param.val = pfn_to_mfn(pfn); + xenpmu_param.vcpu = i; + status = HYPERVISOR_xenpmu_op(XENPMU_init, + (PVOID)&xenpmu_param); + + per_cpu(xenpmu_shared, i) = xenpmu_data; + } + SEP_DRV_LOG_LOAD("VPMU is initialized on XEN Dom0."); + } +#endif + + /* Allocate memory for the control structures */ + lwpmu_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); + lwmod_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); + lwemon_control = CONTROL_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); + lwsamp_control = + CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); + lwsideband_control = + CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); + lwipt_control = + CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); + lwiptinfo_control = + CONTROL_Allocate_Memory(num_cpus * sizeof(LWPMU_DEV_NODE)); + + if (!lwsideband_control || !lwsamp_control || !lwpmu_control || + !lwmod_control || !lwipt_control || !lwiptinfo_control) { + CONTROL_Free_Memory(lwpmu_control); + CONTROL_Free_Memory(lwmod_control); + CONTROL_Free_Memory(lwemon_control); + 
CONTROL_Free_Memory(lwsamp_control); + CONTROL_Free_Memory(lwsideband_control); + CONTROL_Free_Memory(lwipt_control); + CONTROL_Free_Memory(lwiptinfo_control); + + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for control structures!"); + return OS_NO_MEM; + } + + /* Register the file operations with the OS */ + +#if !defined(DRV_UDEV_UNAVAILABLE) + pmu_class = class_create(THIS_MODULE, SEP_DRIVER_NAME); + if (IS_ERR(pmu_class)) { + SEP_DRV_LOG_ERROR("Error registering SEP control class!"); + } + device_create(pmu_class, NULL, lwpmu_DevNum, NULL, + SEP_DRIVER_NAME DRV_DEVICE_DELIMITER "c"); +#endif + + status = lwpmu_setup_cdev(lwpmu_control, &lwpmu_Fops, lwpmu_DevNum); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwpmu as char device!", status); + return status; + } + /* _c init was fine, now try _m */ + lwmod_DevNum = MKDEV(MAJOR(lwpmu_DevNum), MINOR(lwpmu_DevNum) + 1); + +#if !defined(DRV_UDEV_UNAVAILABLE) + device_create(pmu_class, NULL, lwmod_DevNum, NULL, + SEP_DRIVER_NAME DRV_DEVICE_DELIMITER "m"); +#endif + + status = lwpmu_setup_cdev(lwmod_control, &lwmod_Fops, lwmod_DevNum); + if (status) { + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwmod as char device!", status); + return status; + } + + lwemon_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwemon_DevNum, 0, 1, SEP_EMON_NAME); + + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region (return = %d).", + status); + return status; + } + + /* Register the file operations with the OS */ + for (i = 0; i < 1; i++) { +#if !defined(DRV_UDEV_UNAVAILABLE) + snprintf(dev_name, MAXNAMELEN, "%s%se", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER); + device_create(pmu_class, NULL, lwemon_DevNum + i, NULL, + dev_name); +#endif + status = lwpmu_setup_cdev(lwemon_control + i, &lwemon_Fops, + lwemon_DevNum + i); + if (status) { + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwemon as char device!", + status); + return status; + } else { + SEP_DRV_LOG_INIT("Added emon device %d.", i); + } + } + + /* allocate one sampling device per cpu */ + lwsamp_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwsamp_DevNum, 0, num_cpus, + SEP_SAMPLES_NAME); + + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region (return = %d).", + status); + return status; + } + + /* Register the file operations with the OS */ + for (i = 0; i < num_cpus; i++) { +#if !defined(DRV_UDEV_UNAVAILABLE) + snprintf(dev_name, MAXNAMELEN, "%s%ss%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + device_create(pmu_class, NULL, lwsamp_DevNum + i, NULL, + dev_name); +#endif + status = lwpmu_setup_cdev(lwsamp_control + i, &lwsamp_Fops, + lwsamp_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwpmu as char device!", + status); + return status; + } else { + SEP_DRV_LOG_INIT("Added sampling device %d.", i); + } + } + + lwsideband_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwsideband_DevNum, 0, num_cpus, + SEP_SIDEBAND_NAME); + + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for chrdev_region for sideband!"); + return status; + } + + for (i = 0; i < num_cpus; i++) { +#if !defined(DRV_UDEV_UNAVAILABLE) + snprintf(dev_name, MAXNAMELEN, "%s%sb%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + device_create(pmu_class, NULL, lwsideband_DevNum + i, NULL, + dev_name); 
+#endif + status = lwpmu_setup_cdev(lwsideband_control + i, + &lwsideband_Fops, + lwsideband_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwsideband as char device!", + status); + return status; + } else { + SEP_DRV_LOG_INIT("Added sampling sideband device %d.", + i); + } + } + + /* allocate one ipt device per cpu */ + lwipt_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwipt_DevNum, 0, num_cpus, SEP_IPT_NAME); + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region for ipt (return = %d).", + status); + return status; + } + + lwiptinfo_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwiptinfo_DevNum, 0, num_cpus, + SEP_IPT_INFO_NAME); + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region for ipt info(return = %d).", + status); + return status; + } + + /* Register the file operations with the OS */ + for (i = 0; i < num_cpus; i++) { +#if !defined(DRV_UDEV_UNAVAILABLE) + snprintf(dev_name, MAXNAMELEN, "%s%spt%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + device_create(pmu_class, NULL, lwipt_DevNum + i, NULL, + dev_name); +#endif + status = lwpmu_setup_cdev(lwipt_control + i, &lwipt_Fops, + lwipt_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwipt %d as char device!", + status, i); + return status; + } else { + SEP_DRV_LOG_INIT("Added ipt device %d.", i); + } + +#if !defined(DRV_UDEV_UNAVAILABLE) + snprintf(dev_name, MAXNAMELEN, "%s%sptinfo%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + device_create(pmu_class, NULL, lwiptinfo_DevNum + i, NULL, + dev_name); +#endif + status = + lwpmu_setup_cdev(lwiptinfo_control + i, &lwiptinfo_Fops, + lwiptinfo_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwiptinfo %d as char device!", + status, i); + return status; + } else { + SEP_DRV_LOG_INIT("Added ipt info device %d.", i); + } + } + + cpu_tsc = (U64 *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); + prev_cpu_tsc = (U64 *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); + diff_cpu_tsc = (U64 *)CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64)); + +#if !defined(CONFIG_PREEMPT_COUNT) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) + atomic_set(&read_now, GLOBAL_STATE_num_cpus(driver_state)); + init_waitqueue_head(&read_tsc_now); +#endif + CONTROL_Invoke_Parallel(lwpmudrv_Fill_TSC_Info, (PVOID)(size_t)0); +#endif + + + pcb_size = GLOBAL_STATE_num_cpus(driver_state)*sizeof(CPU_STATE_NODE); + pcb = CONTROL_Allocate_Memory(pcb_size); + if (!pcb) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for PCB!"); + return OS_NO_MEM; + } + + core_to_package_map = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state)*sizeof(U32)); + if (!core_to_package_map) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for core_to_package_map!"); + return OS_NO_MEM; + } + + core_to_module_map = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state)*sizeof(U32)); + if (!core_to_module_map) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for core_to_module_map!"); + return OS_NO_MEM; + } + + core_to_phys_core_map = CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state)*sizeof(U32)); + if (!core_to_phys_core_map) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for core_to_phys_core_map!"); + return OS_NO_MEM; + } + + core_to_thread_map = 
CONTROL_Allocate_Memory(GLOBAL_STATE_num_cpus(driver_state)*sizeof(U32)); + if (!core_to_thread_map) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Memory allocation failure for core_to_thread_map!"); + return OS_NO_MEM; + } + + +#if defined(DRV_INIT_CORE_THREAD_MAP) + memset(core_to_thread_map, -1, + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); +#endif + + threads_per_core = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)); + if (!threads_per_core) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Memory allocation failure for threads_per_core!"); + return OS_NO_MEM; + } + + SYS_INFO_Build(); + PCI_Initialize(); + memset(pcb, 0, pcb_size); + + if (total_ram <= OUTPUT_MEMORY_THRESHOLD) { + output_buffer_size = OUTPUT_SMALL_BUFFER; + } + + MUTEX_INIT(ioctl_lock); + + status = UNC_COMMON_Init(); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT("Error %d when init uncore struct!", + status); + return status; + } + + /* allocate one sampling device per package (for uncore)*/ + lwsampunc_control = + CONTROL_Allocate_Memory(num_packages * sizeof(LWPMU_DEV_NODE)); + if (!lwsampunc_control) { + CONTROL_Free_Memory(lwsampunc_control); + SEP_DRV_LOG_ERROR_FLOW_OUT( + "lwpmu driver failed to alloc space!\n"); + return OS_NO_MEM; + } + + lwsampunc_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwsampunc_DevNum, 0, num_packages, + SEP_UNCORE_NAME); + + if (status < 0) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error: Failed to alloc chrdev_region (return = %d).", + status); + return status; + } + + /* Register the file operations with the OS */ + for (i = 0; i < num_packages; i++) { +#if !defined(DRV_UDEV_UNAVAILABLE) + snprintf(dev_name, MAXNAMELEN, "%s%su%d", SEP_DRIVER_NAME, + DRV_DEVICE_DELIMITER, i); + device_create(pmu_class, NULL, lwsampunc_DevNum + i, NULL, + dev_name); +#endif + status = + lwpmu_setup_cdev(lwsampunc_control + i, &lwsampunc_Fops, + lwsampunc_DevNum + i); + if (status) { + SEP_DRV_LOG_ERROR_FLOW_OUT( + "Error %d when adding lwpmu as char device!", + status); + return status; + } else { + SEP_DRV_LOG_INIT("Added sampling device %d.", i); + } + } + + init_waitqueue_head(&wait_exit); + abnormal_handler = kthread_create(lwpmudrv_Abnormal_Handler, NULL, + "SEPDRV_ABNORMAL_HANDLER"); + if (abnormal_handler) { + wake_up_process(abnormal_handler); + } + +#if defined(DRV_PMT_ENABLE) + //Telemetry registration + memset(&pmt_devices, 0, + sizeof(DRV_PMT_TELEM_DEV_NODE) * MAX_PMT_DEVICES); + lwpmudrv_Detect_PMT_Endpoints(); +#endif + +#if defined(DRV_CPU_HOTPLUG) + /* Register CPU hotplug notifier */ + LINUXOS_Register_Hotplug(); +#endif + /* + * Initialize the SEP driver version (done once at driver load time) + */ + SEP_VERSION_NODE_major(&drv_version) = SEP_MAJOR_VERSION; + SEP_VERSION_NODE_minor(&drv_version) = SEP_MINOR_VERSION; + SEP_VERSION_NODE_api(&drv_version) = SEP_API_VERSION; + + // + // Display driver version information + // + SEP_DRV_LOG_LOAD("PMU collection driver v%d.%d.%d %s has been loaded.", + SEP_VERSION_NODE_major(&drv_version), + SEP_VERSION_NODE_minor(&drv_version), + SEP_VERSION_NODE_api(&drv_version), + SEP_RELEASE_STRING); + +#if defined(DRV_UDEV_UNAVAILABLE) + SEP_DRV_LOG_LOAD("Device files are created separately."); +#endif + + SEP_DRV_LOG_LOAD("NMI will be used for handling PMU interrupts."); + + PMU_LIST_Initialize(&allowlist_index); + PMU_LIST_Build_MSR_List(); + PMU_LIST_Build_PCI_List(); + PMU_LIST_Build_MMIO_List(); + + SEP_DRV_LOG_FLOW_OUT("Return value: %d.", status); + return status; +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn static int lwpmu_Unload(void) + * + * @param none + * + * @return none + * + * @brief Remove the driver module from the kernel. + */ +static VOID +lwpmu_Unload(VOID) +{ + int i = 0; + int num_cpus; +#if defined(CONFIG_XEN_HAVE_VPMU) + xen_pmu_params_t xenpmu_param; +#endif + PVOID tmp_pcb; + + SEP_DRV_LOG_FLOW_IN(""); + + SEP_DRV_LOG_LOAD("Driver unloading."); + + PMU_LIST_Clean_Up(); + + num_cpus = GLOBAL_STATE_num_cpus(driver_state); + + if (abnormal_handler) { + if (GET_DRIVER_STATE() != DRV_STATE_UNINITIALIZED) { + CHANGE_DRIVER_STATE(STATE_BIT_ANY, + DRV_STATE_TERMINATING); + } + wake_up_interruptible_all(&wait_exit); + kthread_stop(abnormal_handler); + abnormal_handler = NULL; + } + +#if defined(CONFIG_XEN_HAVE_VPMU) + if (xen_initial_domain()) { + xenpmu_param.version.maj = XENPMU_VER_MAJ; + xenpmu_param.version.min = XENPMU_VER_MIN; + + for (i = 0; i < num_cpus; i++) { + xenpmu_param.vcpu = i; + HYPERVISOR_xenpmu_op(XENPMU_finish, &xenpmu_param); + + vfree(per_cpu(xenpmu_shared, i)); + per_cpu(xenpmu_shared, i) = NULL; + } + SEP_DRV_LOG_LOAD("VPMU was disabled on XEN Dom0."); + } +#endif + + PCI_Cleanup(); + + LINUXOS_Uninstall_Hooks(); + SYS_INFO_Destroy(); + OUTPUT_Destroy(); + cpu_buf = CONTROL_Free_Memory(cpu_buf); + unc_buf = CONTROL_Free_Memory(unc_buf); + cpu_sideband_buf = CONTROL_Free_Memory(cpu_sideband_buf); + module_buf = CONTROL_Free_Memory(module_buf); + emon_buf = CONTROL_Free_Memory(emon_buf); + cpu_tsc = CONTROL_Free_Memory(cpu_tsc); + prev_cpu_tsc = CONTROL_Free_Memory(prev_cpu_tsc); + diff_cpu_tsc = CONTROL_Free_Memory(diff_cpu_tsc); + core_to_package_map = CONTROL_Free_Memory(core_to_package_map); + core_to_module_map = CONTROL_Free_Memory(core_to_module_map); + core_to_phys_core_map = CONTROL_Free_Memory(core_to_phys_core_map); + core_to_thread_map = CONTROL_Free_Memory(core_to_thread_map); + cores_per_module = CONTROL_Free_Memory(cores_per_module); + threads_per_core = CONTROL_Free_Memory(threads_per_core); + ipt_trace_buf = CONTROL_Free_Memory(ipt_trace_buf); + ipt_info_buf = CONTROL_Free_Memory(ipt_info_buf); + + tmp_pcb = pcb; // Ensures there is no log message written (ERROR, ALLOC, ...) + pcb = NULL; // between pcb being freed and pcb being NULL. 
+ tmp_pcb = CONTROL_Free_Memory(tmp_pcb); + pcb_size = 0; + + UNC_COMMON_Clean_Up(); + + +#if !defined(DRV_UDEV_UNAVAILABLE) + unregister_chrdev(MAJOR(lwpmu_DevNum), SEP_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); + device_destroy(pmu_class, lwpmu_DevNum + 1); +#endif + + cdev_del(&LWPMU_DEV_cdev(lwpmu_control)); + cdev_del(&LWPMU_DEV_cdev(lwmod_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + +#if !defined(DRV_UDEV_UNAVAILABLE) + unregister_chrdev(MAJOR(lwsamp_DevNum), SEP_SAMPLES_NAME); + unregister_chrdev(MAJOR(lwsampunc_DevNum), SEP_UNCORE_NAME); + unregister_chrdev(MAJOR(lwsideband_DevNum), SEP_SIDEBAND_NAME); + unregister_chrdev(MAJOR(lwemon_DevNum), SEP_EMON_NAME); + unregister_chrdev(MAJOR(lwipt_DevNum), SEP_IPT_NAME); + unregister_chrdev(MAJOR(lwiptinfo_DevNum), SEP_IPT_INFO_NAME); +#endif + + for (i = 0; i < num_cpus; i++) { +#if !defined(DRV_UDEV_UNAVAILABLE) + device_destroy(pmu_class, lwsamp_DevNum + i); + device_destroy(pmu_class, lwsideband_DevNum + i); + device_destroy(pmu_class, lwipt_DevNum + i); + device_destroy(pmu_class, lwiptinfo_DevNum + i); +#endif + cdev_del(&LWPMU_DEV_cdev(&lwsamp_control[i])); + cdev_del(&LWPMU_DEV_cdev(&lwsideband_control[i])); + cdev_del(&LWPMU_DEV_cdev(&lwipt_control[i])); + cdev_del(&LWPMU_DEV_cdev(&lwiptinfo_control[i])); + } + + for (i = 0; i < num_packages; i++) { +#if !defined(DRV_UDEV_UNAVAILABLE) + device_destroy(pmu_class, lwsampunc_DevNum + i); +#endif + cdev_del(&LWPMU_DEV_cdev(&lwsampunc_control[i])); + } + +#if !defined(DRV_UDEV_UNAVAILABLE) + device_destroy(pmu_class, lwemon_DevNum + 0); +#endif + cdev_del(&LWPMU_DEV_cdev(lwemon_control)); + +#if !defined(DRV_UDEV_UNAVAILABLE) + class_destroy(pmu_class); +#endif + + unregister_chrdev_region(lwsamp_DevNum, num_cpus); + unregister_chrdev_region(lwsampunc_DevNum, num_packages); + unregister_chrdev_region(lwsideband_DevNum, num_cpus); + unregister_chrdev_region(lwemon_DevNum, 1); + unregister_chrdev_region(lwipt_DevNum, num_cpus); + unregister_chrdev_region(lwiptinfo_DevNum, num_cpus); + lwpmu_control = CONTROL_Free_Memory(lwpmu_control); + lwmod_control = CONTROL_Free_Memory(lwmod_control); + lwsamp_control = CONTROL_Free_Memory(lwsamp_control); + lwsampunc_control = CONTROL_Free_Memory(lwsampunc_control); + lwsideband_control = CONTROL_Free_Memory(lwsideband_control); + lwemon_control = CONTROL_Free_Memory(lwemon_control); + lwipt_control = CONTROL_Free_Memory(lwipt_control); + lwiptinfo_control = CONTROL_Free_Memory(lwiptinfo_control); + + CONTROL_Memory_Tracker_Free(); + +#if defined(DRV_CPU_HOTPLUG) + /* Unregister CPU hotplug notifier */ + LINUXOS_Unregister_Hotplug(); +#endif + + SEP_DRV_LOG_FLOW_OUT( + "Log deallocation. Cannot track further in internal log."); + UTILITY_Driver_Log_Free(); // Do not use SEP_DRV_LOG_X (where X != LOAD) after this + + SEP_DRV_LOG_LOAD( + "PMU collection driver v%d.%d.%d %s has been unloaded.", + SEP_VERSION_NODE_major(&drv_version), + SEP_VERSION_NODE_minor(&drv_version), + SEP_VERSION_NODE_api(&drv_version), SEP_RELEASE_STRING); + + return; +} + +/* Declaration of the init and exit functions */ +module_init(lwpmu_Load); +module_exit(lwpmu_Unload); + diff --git a/drivers/platform/x86/sepdk/sep/output.c b/drivers/platform/x86/sepdk/sep/output.c new file mode 100644 index 00000000000000..249e4bfa863a64 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/output.c @@ -0,0 +1,1602 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. 
+ + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv.h" +#include "lwpmudrv_ioctl.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "control.h" +#include "output.h" +#include "utility.h" +#include "inc/linuxos.h" +#define OTHER_C_DEVICES 1 // one for module + +/* + * Global data: Buffer control structure + */ +static wait_queue_head_t flush_queue; +static atomic_t flush_writers; +static int volatile flush; +extern DRV_CONFIG drv_cfg; +extern DRV_BOOL collect_sideband; +extern DRV_BOOL unc_buf_init; +extern EMON_DESC emon_desc; +extern U64 emon_buffer_size; +extern U32 emon_read_threshold; + +static void output_NMI_Sample_Buffer(unsigned long data); +static unsigned long flags; + +/* + * @fn output_Free_Buffers(output, size) + * + * @param IN outbuf - The output buffer to manipulate + * + * @brief Deallocate the memory associated with the buffer descriptor + * + */ +static VOID +output_Free_Buffers(BUFFER_DESC buffer, size_t size) +{ + int j; + OUTPUT outbuf; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, size: %u.", buffer, size); + + if (buffer == NULL) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!buffer)."); + return; + } + outbuf = &BUFFER_DESC_outbuf(buffer); + for (j = 0; j < OUTPUT_NUM_BUFFERS; j++) { + CONTROL_Free_Memory(OUTPUT_buffer(outbuf, j)); + OUTPUT_buffer(outbuf, j) = NULL; + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn int OUTPUT_Reserve_Buffer_Space (OUTPUT outbuf, + * U32 size, + * U8 in_notification) + * + * @param outbuf IN output buffer to manipulate + * @param size IN The size of data to reserve + * @param defer IN wake up directly if FALSE. + * Otherwise, see below. + * @param in_notification IN 1 if in notification, 0 if not + * + * @result outloc - to the location where data is to be written + * + * Reserve space in the output buffers for data. The behavior of this function + * when a buffer is full will vary depending on the 'defer' and 'in_notification' + * parameters, as described in the special notes section. 
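+ *
+ * A typical caller sketch (illustrative only; rec and rec_size are
+ * assumed):
+ *
+ *     void *p = OUTPUT_Reserve_Buffer_Space(bd, rec_size, FALSE, 0);
+ *     if (p) {
+ *             memcpy(p, rec, rec_size);  // reserved space is pre-zeroed
+ *     }                                  // NULL: no space, record dropped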
+ * + * Special Notes: + * ----------------------------------------------------------------------------------------------------------------------- + * defer | in_notification | description + * ----------------------------------------------------------------------------------------------------------------------- + * FALSE | FALSE/TRUE | directly signals the buffer's consumer with wake_up_interruptible_sync + * ----------------------------------------------------------------------------------------------------------------------- + * TRUE | FALSE | defers the call to wake_up_interruptible_sync using tasklet_schedule [needed because calling + * | | it directly is not safe from an NMI] + * ----------------------------------------------------------------------------------------------------------------------- + * | | do not signal -or explicitly schedule the signaling of- the buffer's consumer [needed because + * TRUE | TRUE | neither operation is safe from the sched_switch tracepoint callback in kernel version 4.13]. + * | | Instead relies on the interrupt handler to do it next time there is an interrupt. + * ----------------------------------------------------------------------------------------------------------------------- + */ +extern void * +OUTPUT_Reserve_Buffer_Space(BUFFER_DESC bd, + U32 size, + DRV_BOOL defer, + U8 in_notification) +{ + char *outloc = NULL; + OUTPUT outbuf = &BUFFER_DESC_outbuf(bd); + U32 this_cpu; + + SEP_DRV_LOG_NOTIFICATION_TRACE_IN( + in_notification, "Bd: %p, size: %u, defer: %u, notif: %u.", bd, + size, defer, in_notification); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg) && flush) { + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT( + in_notification, "Res: NULL (cp_mode && flush)."); + return NULL; + } + + if (OUTPUT_remaining_buffer_size(outbuf) >= size) { + outloc = (OUTPUT_buffer(outbuf, OUTPUT_current_buffer(outbuf)) + + (OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf))); + } else { + U32 i, j, start; + OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + + // + // Massive Naive assumption: Must find a way to fix it. + // In spite of the loop. + // The next buffer to fill are monotonically increasing + // indicies. + // + if (!DRV_CONFIG_enable_cp_mode(drv_cfg)) { + OUTPUT_signal_full(outbuf) = TRUE; + } + + start = OUTPUT_current_buffer(outbuf); + for (i = start + 1; i < start + OUTPUT_NUM_BUFFERS; i++) { + j = i % OUTPUT_NUM_BUFFERS; + + //don't check if buffer has data when doing CP + if (!OUTPUT_buffer_full(outbuf, j) || + (DRV_CONFIG_enable_cp_mode(drv_cfg))) { + OUTPUT_current_buffer(outbuf) = j; + OUTPUT_remaining_buffer_size(outbuf) = + OUTPUT_total_buffer_size(outbuf); + outloc = OUTPUT_buffer(outbuf, j); + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + // discarding all the information in the new buffer in CP mode + OUTPUT_buffer_full(outbuf, j) = 0; + break; + } + } +#if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)) + else { + if (!defer) { + OUTPUT_signal_full(outbuf) = FALSE; + SEP_DRV_LOG_NOTIFICATION_WARNING( + in_notification, + "Output buffers are full. 
Might be dropping some samples!"); + break; + } + } +#endif + } + } + + if (outloc) { + OUTPUT_remaining_buffer_size(outbuf) -= size; + memset(outloc, 0, size); + } + + if (OUTPUT_signal_full(outbuf)) { + if (!defer) { +#if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)) + SEP_DRV_LOG_NOTIFICATION_TRACE( + in_notification, + "Choosing direct wakeup approach."); + wake_up_interruptible_sync(&BUFFER_DESC_queue(bd)); + OUTPUT_signal_full(outbuf) = FALSE; +#endif + } else { + if (!OUTPUT_tasklet_queued(outbuf)) { + this_cpu = CONTROL_THIS_CPU(); + if (!in_notification) { + SEP_DRV_LOG_NOTIFICATION_TRACE( + in_notification, + "Scheduling the tasklet on cpu %u.", + this_cpu); + OUTPUT_tasklet_queued(outbuf) = TRUE; + tasklet_schedule(&CPU_STATE_nmi_tasklet( + &pcb[this_cpu])); + } else { + static U32 cpt; + if (!cpt) { + SEP_DRV_LOG_WARNING( + "Using interrupt-driven sideband buffer flushes for extra safety."); + SEP_DRV_LOG_WARNING( + "This may result in fewer context switches being recorded."); + } + SEP_DRV_LOG_TRACE( + "Lost context switch information (for the %uth time).", + ++cpt); + } + } + } + } + + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, "Res: %p.", outloc); + return outloc; +} + +/* ------------------------------------------------------------------------- */ +/*! + * + * @fn int OUTPUT_Buffer_Fill (BUFFER_DESC buf, + * PVOID data, + * U16 size, + * U8 in_notification) + * + * @brief Place a record (can be module, marker, etc) in a buffer + * + * @param data - pointer to a buffer to copy + * @param size - size of the buffer to cpu + * @param in_notification - 1 if in notification, 0 if not + * + * @return number of bytes copied into buffer + * + * Start by ensuring that output buffer space is available. + * If so, then copy the input data to the output buffer and make the necessary + * adjustments to manage the output buffers. + * If not, signal the read event for this buffer and get another buffer. + * + * Special Notes: + * + */ +static int +output_Buffer_Fill(BUFFER_DESC bd, PVOID data, U16 size, U8 in_notification) +{ + char *outloc; + + SEP_DRV_LOG_NOTIFICATION_TRACE_IN( + in_notification, "Bd: %p, data: %p, size: %u.", bd, data, size); + + outloc = OUTPUT_Reserve_Buffer_Space(bd, size, FALSE, in_notification); + if (outloc) { + memcpy(outloc, data, size); + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, + "Res: %d (outloc).", size); + return size; + } + + SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, "Res: 0 (!outloc)."); + return 0; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn int OUTPUT_Module_Fill (PVOID   data,
+ *                             U16     size,
+ *                             U8      in_notification)
+ *
+ * @brief  Place a module record in a buffer
+ *
+ * @param  data - pointer to a buffer to copy
+ * @param  size - size of the buffer to copy
+ * @param  in_notification - 1 if in notification, 0 if not
+ *
+ * @return number of bytes copied into buffer
+ *
+ *
+ */
+extern int
+OUTPUT_Module_Fill(PVOID data, U16 size, U8 in_notification)
+{
+	int ret_size;
+	OUTPUT outbuf = &BUFFER_DESC_outbuf(module_buf);
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_IN(in_notification,
+					  "Data: %p, size: %u.", data, size);
+
+	spin_lock_irqsave(&OUTPUT_buffer_lock(outbuf), flags);
+	ret_size = output_Buffer_Fill(module_buf, data, size, in_notification);
+	spin_unlock_irqrestore(&OUTPUT_buffer_lock(outbuf), flags);
+
+	SEP_DRV_LOG_NOTIFICATION_TRACE_OUT(in_notification, "Res: %d.",
+					   ret_size);
+	return ret_size;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn ssize_t output_Read(struct file *filp,
+ *                         char        *buf,
+ *                         size_t       count,
+ *                         loff_t      *f_pos,
+ *                         BUFFER_DESC  kernel_buf)
+ *
+ * @brief  Return a sample buffer to user-mode. If not full or flush, wait
+ *
+ * @param  *filp        a file pointer
+ * @param  *buf         a sampling buffer
+ * @param   count       size of the user's buffer
+ * @param   f_pos       file pointer (current offset in bytes)
+ * @param   kernel_buf  the kernel output buffer structure
+ *
+ * @return number of bytes read. zero indicates end of file. Neg means error
+ *
+ * Place no more than count bytes into the user's buffer.
+ * Block if unavailable on "BUFFER_DESC_queue(buf)"
+ *
+ * Special Notes:
+ *
+ */
+static ssize_t
+output_Read(struct file *filp,
+	    char *buf,
+	    size_t count,
+	    loff_t *f_pos,
+	    BUFFER_DESC kernel_buf)
+{
+	ssize_t to_copy = 0;
+	ssize_t uncopied;
+	OUTPUT outbuf = &BUFFER_DESC_outbuf(kernel_buf);
+	U32 cur_buf, i;
+	/* Buffer is filled by output_fill_modules.
*/ + + SEP_DRV_LOG_TRACE_IN( + "Filp: %p, buf: %p, count: %u, f_pos: %p, kernel_buf: %p.", + filp, buf, (U32)count, f_pos, kernel_buf); + + cur_buf = OUTPUT_current_buffer(outbuf); + if (!DRV_CONFIG_enable_cp_mode(drv_cfg) || flush) { + for (i = 0; i < OUTPUT_NUM_BUFFERS; + i++) { //iterate through all buffers + cur_buf++; + if (cur_buf >= OUTPUT_NUM_BUFFERS) { + cur_buf = 0; + } //circularly + to_copy = OUTPUT_buffer_full(outbuf, cur_buf); + if (to_copy != 0) { + if (flush && + DRV_CONFIG_enable_cp_mode(drv_cfg) && + cur_buf == OUTPUT_current_buffer(outbuf)) { + OUTPUT_current_buffer(outbuf)++; + if (OUTPUT_current_buffer(outbuf) >= + OUTPUT_NUM_BUFFERS) { + OUTPUT_current_buffer(outbuf) = + 0; + } + OUTPUT_remaining_buffer_size(outbuf) = + OUTPUT_total_buffer_size( + outbuf); + } + break; + } + } + } + + SEP_DRV_LOG_TRACE("buffer %d has %d bytes ready.", (S32)cur_buf, + (S32)to_copy); + if (!flush && to_copy == 0) { + unsigned long delay = msecs_to_jiffies(1000); + + while (1) { + U32 res = wait_event_interruptible_timeout( + BUFFER_DESC_queue(kernel_buf), + flush || (OUTPUT_buffer_full(outbuf, cur_buf) && + !DRV_CONFIG_enable_cp_mode(drv_cfg)), + delay); + + if (GET_DRIVER_STATE() == DRV_STATE_TERMINATING) { + SEP_DRV_LOG_INIT( + "Switched to TERMINATING while waiting for BUFFER_DESC_queue!"); + break; + } + + if (res == ERESTARTSYS || res == 0) { + SEP_DRV_LOG_TRACE( + "Wait_event_interruptible_timeout(BUFFER_DESC_queue): %u.", + res); + continue; + } + + break; + } + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + // reset the current buffer index if in CP mode + cur_buf = OUTPUT_current_buffer(outbuf); + //iterate through all buffers + for (i = 0; i < OUTPUT_NUM_BUFFERS; i++) { + cur_buf++; + if (cur_buf >= OUTPUT_NUM_BUFFERS) { + cur_buf = 0; + } //circularly + to_copy = OUTPUT_buffer_full(outbuf, cur_buf); + if (to_copy != 0) { + if (flush && + DRV_CONFIG_enable_cp_mode( + drv_cfg) && + cur_buf == OUTPUT_current_buffer( + outbuf)) { + OUTPUT_current_buffer(outbuf)++; + if (OUTPUT_current_buffer( + outbuf) >= + OUTPUT_NUM_BUFFERS) { + OUTPUT_current_buffer( + outbuf) = 0; + } + OUTPUT_remaining_buffer_size( + outbuf) = + OUTPUT_total_buffer_size( + outbuf); + } + break; + } + } + } + SEP_DRV_LOG_TRACE("Get to copy %d.", (S32)cur_buf); + to_copy = OUTPUT_buffer_full(outbuf, cur_buf); + SEP_DRV_LOG_TRACE( + "output_Read awakened, buffer %d has %d bytes.", + cur_buf, (int)to_copy); + } + + /* Ensure that the user's buffer is large enough */ + if (to_copy > count) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "OS_NO_MEM (user buffer is too small!)."); + return OS_NO_MEM; + } + + /* Copy data to user space. Note that we use cur_buf as the source */ + if (GET_DRIVER_STATE() != DRV_STATE_TERMINATING) { + uncopied = copy_to_user(buf, OUTPUT_buffer(outbuf, cur_buf), + to_copy); + /* Mark the buffer empty */ + OUTPUT_buffer_full(outbuf, cur_buf) = 0; + *f_pos += to_copy - uncopied; + if (uncopied) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Res: %u (only copied %u of %u bytes!).", + (U32)(to_copy - uncopied), (U32)to_copy, + (U32)uncopied); + return (to_copy - uncopied); + } + } else { + to_copy = 0; + SEP_DRV_LOG_TRACE("To copy set to 0."); + } + + // At end-of-file, decrement the count of active buffer writers + + if (to_copy == 0) { + DRV_BOOL flush_val = atomic_dec_and_test(&flush_writers); + SEP_DRV_LOG_TRACE("Decremented flush_writers. 
flush_writer=%u", + atomic_read(&flush_writers)); + if (flush_val == TRUE) { + SEP_DRV_LOG_TRACE("Wake up flush_queue"); + wake_up_interruptible_sync(&flush_queue); + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)to_copy); + return to_copy; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn ssize_t OUTPUT_Module_Read(struct file *filp, + * char *buf, + * size_t count, + * loff_t *f_pos) + * + * @brief Return a module buffer to user-mode. If not full or flush, wait + * + * @param *filp a file pointer + * @param *buf a sampling buffer + * @param count size of the user's buffer + * @param f_pos file pointer (current offset in bytes) + * @param buf the kernel output buffer structure + * + * @return number of bytes read. zero indicates end of file. Neg means error + * + * Place no more than count bytes into the user's buffer. + * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full. + * + * Special Notes: + * + */ +extern ssize_t +OUTPUT_Module_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos) +{ + ssize_t res; + + SEP_DRV_LOG_TRACE_IN(""); + SEP_DRV_LOG_TRACE("Read request for modules on minor."); + + res = output_Read(filp, buf, count, f_pos, module_buf); + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res); + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn ssize_t OUTPUT_IPT_Trace_Read(struct file *filp, + * char *buf, + * size_t count, + * loff_t *f_pos) + * + * @brief Return a processor trace buffer to user-mode. If not full or flush, wait + * + * @param *filp a file pointer + * @param *buf a sampling buffer + * @param count size of the user's buffer + * @param f_pos file pointer (current offset in bytes) + * + * @return number of bytes read. zero indicates end of file. Neg means error + * + * Place no more than count bytes into the user's buffer. + * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full. + * + * Special Notes: + * + */ +extern ssize_t +OUTPUT_IPT_Trace_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos) +{ + int i; + ssize_t size = 0; + SEP_DRV_LOG_TRACE_IN(""); + + i = iminor(filp->DRV_F_DENTRY->d_inode); // kernel pointer - not user pointer + SEP_DRV_LOG_TRACE("Read request for ipt trace on minor %d.", i); + size = output_Read(filp, buf, count, f_pos, &(ipt_trace_buf[i])); + + SEP_DRV_LOG_TRACE_OUT("size: %u.", (U32)size); + return size; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn ssize_t OUTPUT_IPT_Info_Read(struct file *filp, + * char *buf, + * size_t count, + * loff_t *f_pos) + * + * @brief Return a ipt info buffer to user-mode. If not full or flush, wait + * + * @param *filp a file pointer + * @param *buf a sampling buffer + * @param count size of the user's buffer + * @param f_pos file pointer (current offset in bytes) + * + * @return number of bytes read. zero indicates end of file. Neg means error + * + * Place no more than count bytes into the user's buffer. + * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full. 
+ *
+ * Special Notes:
+ *
+ */
+extern ssize_t
+OUTPUT_IPT_Info_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
+{
+	int i;
+	ssize_t size = 0;
+	SEP_DRV_LOG_TRACE_IN("");
+
+	i = iminor(filp->DRV_F_DENTRY->d_inode); // kernel pointer - not user pointer
+	SEP_DRV_LOG_TRACE("Read request for ipt info on minor %d.", i);
+	size = output_Read(filp, buf, count, f_pos, &(ipt_info_buf[i]));
+
+	SEP_DRV_LOG_TRACE_OUT("size: %u.", (U32)size);
+	return size;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn ssize_t OUTPUT_Sample_Read(struct file *filp,
+ *                                char        *buf,
+ *                                size_t       count,
+ *                                loff_t      *f_pos)
+ *
+ * @brief  Return a sample buffer to user-mode. If not full or flush, wait
+ *
+ * @param  *filp   a file pointer
+ * @param  *buf    a sampling buffer
+ * @param   count  size of the user's buffer
+ * @param   f_pos  file pointer (current offset in bytes)
+ * @param   buf    the kernel output buffer structure
+ *
+ * @return number of bytes read. zero indicates end of file. Neg means error
+ *
+ * Place no more than count bytes into the user's buffer.
+ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
+ *
+ * Special Notes:
+ *
+ */
+extern ssize_t
+OUTPUT_Sample_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
+{
+	int i;
+	ssize_t res;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	i = iminor(filp->DRV_F_DENTRY->d_inode); // kernel pointer - not user pointer
+	SEP_DRV_LOG_TRACE("Read request for samples on minor %d.", i);
+	res = output_Read(filp, buf, count, f_pos, &(cpu_buf[i]));
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn ssize_t OUTPUT_UncSample_Read(struct file *filp,
+ *                                   char        *buf,
+ *                                   size_t       count,
+ *                                   loff_t      *f_pos)
+ *
+ * @brief  Return an uncore sample buffer to user-mode. If not full or flush, wait
+ *
+ * @param  *filp   a file pointer
+ * @param  *buf    a sampling buffer
+ * @param   count  size of the user's buffer
+ * @param   f_pos  file pointer (current offset in bytes)
+ * @param   buf    the kernel output buffer structure
+ *
+ * @return number of bytes read. zero indicates end of file. Neg means error
+ *
+ * Place no more than count bytes into the user's buffer.
+ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
+ *
+ * Special Notes:
+ *
+ */
+extern ssize_t
+OUTPUT_UncSample_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
+{
+	int i;
+	ssize_t res = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	i = iminor(filp->DRV_F_DENTRY->d_inode); // kernel pointer - not user pointer
+	SEP_DRV_LOG_TRACE("Read request for uncore samples on minor %d.", i);
+	if (unc_buf_init) {
+		res = output_Read(filp, buf, count, f_pos, &(unc_buf[i]));
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn ssize_t OUTPUT_SidebandInfo_Read(struct file *filp,
+ *                                      char        *buf,
+ *                                      size_t       count,
+ *                                      loff_t      *f_pos)
+ *
+ * @brief  Return a sideband info buffer to user-mode. If not full or flush, wait
+ *
+ * @param  *filp   a file pointer
+ * @param  *buf    a sideband info buffer
+ * @param   count  size of the user's buffer
+ * @param   f_pos  file pointer (current offset in bytes)
+ * @param   buf    the kernel output buffer structure
+ *
+ * @return number of bytes read. zero indicates end of file. Neg means error
+ *
+ * Place no more than count bytes into the user's buffer.
+ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
+ *
+ * Special Notes:
+ *
+ */
+extern ssize_t
+OUTPUT_SidebandInfo_Read(struct file *filp,
+			 char *buf,
+			 size_t count,
+			 loff_t *f_pos)
+{
+	int i;
+	ssize_t res = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	i = iminor(filp->DRV_F_DENTRY
+			   ->d_inode); // kernel pointer - not user pointer
+	SEP_DRV_LOG_TRACE("Read request for pebs process info on minor %d.", i);
+	if (collect_sideband) {
+		res = output_Read(filp, buf, count, f_pos,
+				  &(cpu_sideband_buf[i]));
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn ssize_t OUTPUT_Emon_Read(struct file *filp,
+ *                              char *buf,
+ *                              size_t count,
+ *                              loff_t *f_pos)
+ *
+ * @brief  Return EMON buffer to user-mode. If not full or flush, wait
+ *
+ * @param *filp  a file pointer
+ * @param *buf   a sampling buffer
+ * @param count  size of the user's buffer
+ * @param f_pos  file pointer (current offset in bytes)
+ * @param buf    the kernel output buffer structure
+ *
+ * @return number of bytes read. zero indicates end of file. Neg means error
+ *
+ * Place no more than count bytes into the user's buffer.
+ * Block on "BUFFER_DESC_queue(kernel_buf)" if buffer isn't full.
+ *
+ * Special Notes:
+ *
+ */
+extern ssize_t
+OUTPUT_Emon_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
+{
+	ssize_t res;
+
+	SEP_DRV_LOG_TRACE_IN("");
+	SEP_DRV_LOG_TRACE("Read request for EMON buffer on minor.");
+
+	res = output_Read(filp, buf, count, f_pos, emon_buf);
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)res);
+	return res;
+}
+
+/*
+ * @fn output_Initialized_Buffers()
+ *
+ * @returns BUFFER_DESC
+ * @param   BUFFER_DESC desc - descriptor for the buffer being initialized
+ * @param   U32 factor - multiplier for OUTPUT_BUFFER_SIZE.
+ *          1 for cpu buffers, 2 for module buffers.
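+ *          (In this driver, OUTPUT_Initialize() passes factor 1 for the
+ *          per-cpu buffers and MODULE_BUFF_SIZE for the module buffer;
+ *          see below.)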
+ * + * @brief Allocate, initialize, and return an output data structure + * + * Special Notes: + * Multiple (OUTPUT_NUM_BUFFERS) buffers will be allocated + * Each buffer is of size (OUTPUT_BUFFER_SIZE) + * Each field in the buffer is initialized + * The event queue for the OUTPUT is initialized + * + */ +static BUFFER_DESC +output_Initialized_Buffers(BUFFER_DESC desc, U32 factor) +{ + OUTPUT outbuf; + int j; + + SEP_DRV_LOG_TRACE_IN("Desc: %p, factor: %u.", desc, factor); + + /* + * Allocate the BUFFER_DESC, then allocate its buffers + */ + if (desc == NULL) { + desc = (BUFFER_DESC)CONTROL_Allocate_Memory( + sizeof(BUFFER_DESC_NODE)); + if (desc == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Res: NULL (failed allocation for desc!)."); + return NULL; + } + } + outbuf = &(BUFFER_DESC_outbuf(desc)); + spin_lock_init(&OUTPUT_buffer_lock(outbuf)); + for (j = 0; j < OUTPUT_NUM_BUFFERS; j++) { + if (OUTPUT_buffer(outbuf, j) == NULL) { + OUTPUT_buffer(outbuf, j) = CONTROL_Allocate_Memory( + OUTPUT_BUFFER_SIZE * factor); + } + OUTPUT_buffer_full(outbuf, j) = 0; + if (!OUTPUT_buffer(outbuf, j)) { + /*return NULL to tell the caller that allocation failed*/ + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Res: NULL (failed alloc for OUTPUT_buffer(output, %d)!).", + j); + return NULL; + } + } + /* + * Initialize the remaining fields in the BUFFER_DESC + */ + OUTPUT_current_buffer(outbuf) = 0; + OUTPUT_signal_full(outbuf) = FALSE; + OUTPUT_remaining_buffer_size(outbuf) = OUTPUT_BUFFER_SIZE * factor; + OUTPUT_total_buffer_size(outbuf) = OUTPUT_BUFFER_SIZE * factor; + OUTPUT_tasklet_queued(outbuf) = FALSE; + init_waitqueue_head(&BUFFER_DESC_queue(desc)); + + SEP_DRV_LOG_TRACE_OUT("Res: %p.", desc); + return desc; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID output_NMI_Sample_Buffer ( + * ) + * + * @brief Callback from NMI tasklet. The function checks if any buffers + * are full, and if full, signals the reader threads. 
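+ *         Waking the readers from tasklet (softirq) context rather than
+ *         from the NMI handler itself is necessary because
+ *         wake_up_interruptible_sync() is not safe to call in NMI context.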
+ * + * @param none + * + * @return NONE + * + * Special Notes: + * This callback was added to handle out-of-band event delivery + * when running in NMI mode + */ +static void +output_NMI_Sample_Buffer(unsigned long data) +{ + U32 cpu_id; + OUTPUT outbuf; + + SEP_DRV_LOG_NOTIFICATION_IN("Data: %u.", (U32)data); + + preempt_disable(); + cpu_id = CONTROL_THIS_CPU(); + preempt_enable(); + + if (cpu_buf) { + outbuf = &BUFFER_DESC_outbuf(&cpu_buf[cpu_id]); + if (outbuf && OUTPUT_signal_full(outbuf)) { + wake_up_interruptible_sync( + &BUFFER_DESC_queue(&cpu_buf[cpu_id])); + OUTPUT_signal_full(outbuf) = FALSE; + OUTPUT_tasklet_queued(outbuf) = FALSE; + } + } + + if (cpu_sideband_buf) { + outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[cpu_id]); + if (outbuf && OUTPUT_signal_full(outbuf)) { + wake_up_interruptible_sync( + &BUFFER_DESC_queue(&cpu_sideband_buf[cpu_id])); + OUTPUT_signal_full(outbuf) = FALSE; + OUTPUT_tasklet_queued(outbuf) = FALSE; + } + } + + if (ipt_trace_buf) { + outbuf = &BUFFER_DESC_outbuf(&ipt_trace_buf[cpu_id]); + if (outbuf && OUTPUT_signal_full(outbuf)) { + wake_up_interruptible_sync( + &BUFFER_DESC_queue(&ipt_trace_buf[cpu_id])); + OUTPUT_signal_full(outbuf) = FALSE; + OUTPUT_tasklet_queued(outbuf) = FALSE; + } + } + + if (ipt_info_buf) { + outbuf = &BUFFER_DESC_outbuf(&ipt_info_buf[cpu_id]); + if (outbuf && OUTPUT_signal_full(outbuf)) { + wake_up_interruptible_sync( + &BUFFER_DESC_queue(&ipt_info_buf[cpu_id])); + OUTPUT_signal_full(outbuf) = FALSE; + OUTPUT_tasklet_queued(outbuf) = FALSE; + } + } + + SEP_DRV_LOG_NOTIFICATION_OUT(""); +} + +/* + * @fn extern void OUTPUT_Initialize(void) + * + * @returns OS_STATUS + * @brief Allocate, initialize, and return all output data structure + * + * Special Notes: + * Initialize the output structures. + * For each CPU in the system, allocate the output buffers. 
+ * Initialize a module buffer and temp file to hold module information + * Initialize the read queues for each sample buffer + * + */ +extern OS_STATUS +OUTPUT_Initialize(void) +{ + BUFFER_DESC unused; + int i; + OS_STATUS status = OS_SUCCESS; + + SEP_DRV_LOG_TRACE_IN(""); + + flush = 0; + if (saved_buffer_size != OUTPUT_BUFFER_SIZE) { + if (saved_buffer_size > 0) { + OUTPUT_Destroy(); + } + saved_buffer_size = OUTPUT_BUFFER_SIZE; + } + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + unused = output_Initialized_Buffers(&cpu_buf[i], 1); + if (!unused) { + OUTPUT_Destroy(); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "OS_NO_MEM (failed to allocate cpu output buffers!)."); + return OS_NO_MEM; + } + } + + if (collect_sideband) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + unused = output_Initialized_Buffers( + &cpu_sideband_buf[i], 1); + if (!unused) { + OUTPUT_Destroy(); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "OS_NO_MEM (failed to allocate pebs process info output buffers!)."); + return OS_NO_MEM; + } + } + } + + /* + * Just need one module buffer + */ + unused = output_Initialized_Buffers(module_buf, MODULE_BUFF_SIZE); + if (!unused) { + OUTPUT_Destroy(); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "OS_NO_MEM (failed to create module output buffers!)."); + return OS_NO_MEM; + } + + /* + * ipt trace buffer + */ + if (DRV_CONFIG_ipt_mode(drv_cfg)) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + unused = output_Initialized_Buffers( + &ipt_trace_buf[i], IPT_TRACE_BUFF_SIZE); + if (!unused) { + OUTPUT_Destroy(); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "OS_NO_MEM (failed to create ipt trace output buffer %d!).", + i); + return OS_NO_MEM; + } + unused = output_Initialized_Buffers(&ipt_info_buf[i], + IPT_INFO_BUFF_SIZE); + if (!unused) { + OUTPUT_Destroy(); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "OS_NO_MEM (failed to create ipt info output buffer %d!).", + i); + return OS_NO_MEM; + } + } + } + + SEP_DRV_LOG_TRACE("Set up the tasklet for NMI."); + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + tasklet_init(&CPU_STATE_nmi_tasklet(&pcb[i]), + output_NMI_Sample_Buffer, (unsigned long)NULL); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)status); + return status; +} + +#if defined(DRV_USE_TASKLET_WORKAROUND) +static struct tasklet_struct dummy_tasklet; + +/* + * @fn extern void output_tasklet_waker (PVOID ptr) + * + * @returns None + * @brief Schedules a dummy tasklet to wake up the tasklet handler on the current core + * + * Special Notes: + * Workaround for a rare situation where some tasklets are scheduled, but the core's TASKLET softirq bit was reset. + * [NB: this may be caused by a kernel bug; so far, this issue was only observed on kernel version 3.10.0-123.el7] + * Scheduling a (new) tasklet raises a new softirq, and gives 'forgotten' tasklets another chance to be processed. + * This workaround is not fool-proof: if this new tasklet gets 'forgotten' too, the driver will get stuck in the + * Clean Up routine until it gets processed (thanks to an external event raising the TASKLET softirq on this core), + * which might never happen. + * + */ +static void +output_tasklet_waker(PVOID ptr) +{ + SEP_DRV_LOG_TRACE_IN(""); + tasklet_schedule(&dummy_tasklet); + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* + * @fn extern void output_dummy_tasklet_handler (unsigned long dummy) + * + * @returns None + * @brief Dummy tasklet handler. + * + * Special Notes: + * If this gets executed, the aforementioned workaround was successful. 
+ *
+ */
+static void
+output_dummy_tasklet_handler(unsigned long dummy)
+{
+	SEP_DRV_LOG_NOTIFICATION_IN("Workaround was successful!");
+	SEP_DRV_LOG_NOTIFICATION_OUT("");
+}
+#endif
+
+/*
+ * @fn extern void OUTPUT_Cleanup (VOID)
+ *
+ * @returns None
+ * @brief   Cleans up NMI tasklets if needed
+ *
+ * Special Notes:
+ *          Waits until all NMI tasklets are complete.
+ *
+ */
+extern void
+OUTPUT_Cleanup(VOID)
+{
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (!pcb) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb).");
+		return;
+	} else {
+		int i;
+		SEP_DRV_LOG_TRACE("Killing all NMI tasklets...");
+
+		for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+			SEP_DRV_LOG_TRACE("Killing NMI tasklet %d...", i);
+
+			if (CPU_STATE_nmi_tasklet(&pcb[i]).state) {
+#if defined(DRV_USE_TASKLET_WORKAROUND)
+				SEP_DRV_LOG_ERROR(
+					"Tasklet %d is probably stuck! Trying workaround...",
+					i);
+				tasklet_init(&dummy_tasklet,
+					     output_dummy_tasklet_handler, 0);
+				CONTROL_Invoke_Cpu(i, output_tasklet_waker,
+						   NULL);
+				tasklet_kill(&dummy_tasklet);
+				SEP_DRV_LOG_ERROR(
+					"Workaround was successful for tasklet %d.",
+					i);
+#else
+				SEP_DRV_LOG_ERROR(
+					"Tasklet %d may be stuck. Try to set USE_TASKLET_WORKAROUND=YES in the Makefile if you observe unexpected behavior (e.g. cannot terminate a collection or initiate a new one).",
+					i);
+#endif
+			}
+
+			tasklet_kill(&CPU_STATE_nmi_tasklet(&pcb[i]));
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/*
+ * @fn extern OS_STATUS OUTPUT_Initialize_UNC(void)
+ *
+ * @returns OS_STATUS
+ * @brief   Allocate and initialize the uncore output data structures
+ *
+ * Special Notes:
+ *          Initialize the output structures.
+ *          For each package in the system, allocate the uncore output
+ *          buffers and initialize their read queues.
+ *
+ */
+extern OS_STATUS
+OUTPUT_Initialize_UNC(void)
+{
+	BUFFER_DESC unused;
+	int i;
+	OS_STATUS status = OS_SUCCESS;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	for (i = 0; i < num_packages; i++) {
+		unused = output_Initialized_Buffers(&unc_buf[i], 1);
+		if (!unused) {
+			OUTPUT_Destroy();
+			SEP_DRV_LOG_ERROR_TRACE_OUT(
+				"Failed to allocate package output buffers!");
+			return OS_NO_MEM;
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)status);
+	return status;
+}
+
+/*
+ * @fn extern OS_STATUS OUTPUT_Initialize_EMON(void)
+ *
+ * @returns OS_STATUS
+ * @brief   Allocate and initialize the EMON output data structure
+ *
+ * Special Notes:
+ *          Allocate the EMON output buffer and initialize its read queue.
+ *
+ */
+extern OS_STATUS
+OUTPUT_Initialize_EMON(void)
+{
+	BUFFER_DESC unused;
+	OS_STATUS status = OS_SUCCESS;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	flush = 0;
+	unused = output_Initialized_Buffers(emon_buf, 1);
+	if (!unused) {
+		OUTPUT_Destroy();
+		SEP_DRV_LOG_ERROR_TRACE_OUT(
+			"OS_NO_MEM (failed to create EMON output buffers!).");
+		return OS_NO_MEM;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)status);
+	return status;
+}
+
+/*
+ * @fn OS_STATUS OUTPUT_Flush()
+ *
+ * @brief Flush the module buffers and sample buffers
+ *
+ * @return OS_STATUS
+ *
+ * For each CPU in the system, set buffer full to the byte count to flush.
+ * Flush the module buffer, as well.
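+ *
+ * Illustrative sequence (a sketch assuming a single CPU buffer plus the
+ * module buffer, i.e. writers == 1 + OTHER_C_DEVICES):
+ *   1. init_waitqueue_head(&flush_queue);
+ *   2. mark the in-use bytes of each buffer as "full" so readers drain them;
+ *   3. atomic_set(&flush_writers, writers + OTHER_C_DEVICES); flush = 1;
+ *   4. wake_up_interruptible_sync() on every reader queue;
+ *   5. wait on flush_queue until flush_writers reaches 0 (each reader
+ *      decrements it as its buffer empties).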
+ * + */ +extern int +OUTPUT_Flush(VOID) +{ + int i; + int writers = 0; + OUTPUT outbuf; + + SEP_DRV_LOG_TRACE_IN(""); + + /* + * Flush all remaining data to files + * set up a flush event + */ + init_waitqueue_head(&flush_queue); + SEP_DRV_LOG_TRACE( + "Waiting for %d writers.", + (GLOBAL_STATE_num_cpus(driver_state) + OTHER_C_DEVICES)); + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (CPU_STATE_initial_mask(&pcb[i]) == 0) { + continue; + } + outbuf = &(cpu_buf[i].outbuf); + writers += 1; + + OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + } + + if (unc_buf_init) { + for (i = 0; i < num_packages; i++) { + outbuf = &(unc_buf[i].outbuf); + writers += 1; + + OUTPUT_buffer_full(outbuf, + OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + } + } + + if (collect_sideband) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (CPU_STATE_initial_mask(&pcb[i]) == 0) { + continue; + } + outbuf = &(cpu_sideband_buf[i].outbuf); + writers += 1; + + OUTPUT_buffer_full(outbuf, + OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + } + } + + SEP_DRV_LOG_TRACE("before atomic_set to %d writers.", + writers + OTHER_C_DEVICES); + + if (DRV_CONFIG_ipt_mode(drv_cfg)) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (CPU_STATE_initial_mask(&pcb[i]) == 0) { + continue; + } + outbuf = &(ipt_trace_buf[i].outbuf); + writers += 1; + + OUTPUT_buffer_full(outbuf, + OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + + outbuf = &(ipt_info_buf[i].outbuf); + writers += 1; + + OUTPUT_buffer_full(outbuf, + OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + } + } + + SEP_DRV_LOG_TRACE("after atomic_set to %d writers.", + writers + OTHER_C_DEVICES); + + atomic_set(&flush_writers, writers + OTHER_C_DEVICES); + + // Flip the switch to terminate the output threads + // Do not do this earlier, as threads may terminate before all the data is flushed + flush = 1; + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (CPU_STATE_initial_mask(&pcb[i]) == 0) { + continue; + } + outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]); + OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + wake_up_interruptible_sync(&BUFFER_DESC_queue(&cpu_buf[i])); + } + + if (unc_buf_init) { + for (i = 0; i < num_packages; i++) { + outbuf = &BUFFER_DESC_outbuf(&unc_buf[i]); + OUTPUT_buffer_full(outbuf, + OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + wake_up_interruptible_sync( + &BUFFER_DESC_queue(&unc_buf[i])); + } + } + + if (collect_sideband) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if (CPU_STATE_initial_mask(&pcb[i]) == 0) { + continue; + } + outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[i]); + OUTPUT_buffer_full(outbuf, + OUTPUT_current_buffer(outbuf)) = + OUTPUT_total_buffer_size(outbuf) - + OUTPUT_remaining_buffer_size(outbuf); + wake_up_interruptible_sync( + &BUFFER_DESC_queue(&cpu_sideband_buf[i])); + } + } + + // Flush all data from the processor trace buffers + if (DRV_CONFIG_ipt_mode(drv_cfg)) { + for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) { + if 
(CPU_STATE_initial_mask(&pcb[i]) == 0) {
+				continue;
+			}
+
+			outbuf = &BUFFER_DESC_outbuf(&ipt_trace_buf[i]);
+			OUTPUT_buffer_full(outbuf,
+					   OUTPUT_current_buffer(outbuf)) =
+				OUTPUT_total_buffer_size(outbuf) -
+				OUTPUT_remaining_buffer_size(outbuf);
+			wake_up_interruptible_sync(
+				&BUFFER_DESC_queue(&ipt_trace_buf[i]));
+
+			outbuf = &BUFFER_DESC_outbuf(&ipt_info_buf[i]);
+			OUTPUT_buffer_full(outbuf,
+					   OUTPUT_current_buffer(outbuf)) =
+				OUTPUT_total_buffer_size(outbuf) -
+				OUTPUT_remaining_buffer_size(outbuf);
+			wake_up_interruptible_sync(
+				&BUFFER_DESC_queue(&ipt_info_buf[i]));
+		}
+	}
+
+	// Flush all data from the module buffers
+	outbuf = &BUFFER_DESC_outbuf(module_buf);
+
+	OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
+		OUTPUT_total_buffer_size(outbuf) -
+		OUTPUT_remaining_buffer_size(outbuf);
+
+	SEP_DRV_LOG_TRACE("Waking up module_queue.");
+	wake_up_interruptible_sync(&BUFFER_DESC_queue(module_buf));
+
+	SEP_DRV_LOG_TRACE("Wait for buffers to empty");
+
+	//Wait for buffers to empty
+	while (atomic_read(&flush_writers) != 0) {
+		unsigned long delay;
+		long res;
+		delay = msecs_to_jiffies(1000);
+		res = wait_event_interruptible_timeout(
+			flush_queue, atomic_read(&flush_writers) == 0, delay);
+
+		if (res == -ERESTARTSYS || res == 0) {
+			SEP_DRV_LOG_TRACE(
+				"Wait_event_interruptible_timeout(flush_queue): %ld, %u writers.",
+				res, atomic_read(&flush_writers));
+			continue;
+		}
+	}
+
+	SEP_DRV_LOG_TRACE("Awakened from flush_queue.");
+	flush = 0;
+
+	SEP_DRV_LOG_TRACE_OUT("Res: 0.");
+	return 0;
+}
+
+/*
+ * @fn OS_STATUS OUTPUT_Flush_EMON()
+ *
+ * @brief Flush the EMON buffers
+ *
+ * @return OS_STATUS
+ *
+ * Set buffer full to the byte count to flush.
+ *
+ */
+extern int
+OUTPUT_Flush_EMON(VOID)
+{
+	int writers = 1;
+	OUTPUT outbuf;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	/*
+	 * Flush all remaining data to files
+	 * set up a flush event
+	 */
+	init_waitqueue_head(&flush_queue);
+	atomic_set(&flush_writers, writers);
+
+	// Flip the switch to terminate the output threads
+	// Do not do this earlier, as threads may terminate before all the data is flushed
+	flush = 1;
+
+	// Flush all data from the EMON buffers
+	outbuf = &BUFFER_DESC_outbuf(emon_buf);
+	OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
+		OUTPUT_total_buffer_size(outbuf) -
+		OUTPUT_remaining_buffer_size(outbuf);
+	SEP_DRV_LOG_TRACE("Waking up emon_queue.");
+	wake_up_interruptible_sync(&BUFFER_DESC_queue(emon_buf));
+
+	//Wait for buffers to empty
+	while (atomic_read(&flush_writers) != 0) {
+		unsigned long delay;
+		long res;
+		delay = msecs_to_jiffies(1000);
+		res = wait_event_interruptible_timeout(
+			flush_queue, atomic_read(&flush_writers) == 0, delay);
+
+		if (res == -ERESTARTSYS || res == 0) {
+			SEP_DRV_LOG_TRACE(
+				"Wait_event_interruptible_timeout(flush_queue): %ld, %u writers.",
+				res, atomic_read(&flush_writers));
+			continue;
+		}
+	}
+
+	SEP_DRV_LOG_TRACE("Awakened from flush_queue.");
+	flush = 0;
+
+	SEP_DRV_LOG_TRACE_OUT("Res: 0.");
+	return 0;
+}
+
+/*
+ * @fn extern int OUTPUT_Destroy(VOID)
+ *
+ * @param   none
+ * @returns 0
+ * @brief   Deallocate output structures
+ *
+ * Special Notes:
+ *          Free the module buffers
+ *          For each CPU in the system, free the sampling buffers
+ */
+extern int
+OUTPUT_Destroy(VOID)
+{
+	int i, n;
+	OUTPUT outbuf;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (module_buf) {
+		outbuf = &BUFFER_DESC_outbuf(module_buf);
+		output_Free_Buffers(module_buf,
+				    OUTPUT_total_buffer_size(outbuf));
+	}
+
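+	/*
+	 * Each buffer group below is freed with the size recorded in its own
+	 * descriptor, since the module, IPT, and per-cpu buffers are created
+	 * with different size factors.
+	 */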
+	if (ipt_trace_buf) {
+		n = GLOBAL_STATE_num_cpus(driver_state);
+		for (i = 0; i < n; i++) {
+			outbuf = &BUFFER_DESC_outbuf(&ipt_trace_buf[i]);
+			output_Free_Buffers(&ipt_trace_buf[i],
+					    OUTPUT_total_buffer_size(outbuf));
+		}
+	}
+
+	if (ipt_info_buf) {
+		n = GLOBAL_STATE_num_cpus(driver_state);
+		for (i = 0; i < n; i++) {
+			outbuf = &BUFFER_DESC_outbuf(&ipt_info_buf[i]);
+			output_Free_Buffers(&ipt_info_buf[i],
+					    OUTPUT_total_buffer_size(outbuf));
+		}
+	}
+
+	if (cpu_buf != NULL) {
+		n = GLOBAL_STATE_num_cpus(driver_state);
+		for (i = 0; i < n; i++) {
+			outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]);
+			output_Free_Buffers(&cpu_buf[i],
+					    OUTPUT_total_buffer_size(outbuf));
+		}
+	}
+
+	if (unc_buf != NULL) {
+		n = num_packages;
+		for (i = 0; i < n; i++) {
+			outbuf = &BUFFER_DESC_outbuf(&unc_buf[i]);
+			output_Free_Buffers(&unc_buf[i],
+					    OUTPUT_total_buffer_size(outbuf));
+		}
+	}
+
+	if (cpu_sideband_buf != NULL) {
+		n = GLOBAL_STATE_num_cpus(driver_state);
+		for (i = 0; i < n; i++) {
+			outbuf = &BUFFER_DESC_outbuf(&cpu_sideband_buf[i]);
+			output_Free_Buffers(&cpu_sideband_buf[i],
+					    OUTPUT_total_buffer_size(outbuf));
+		}
+	}
+
+	if (emon_buf) {
+		outbuf = &BUFFER_DESC_outbuf(emon_buf);
+		output_Free_Buffers(emon_buf, OUTPUT_total_buffer_size(outbuf));
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: 0.");
+	return 0;
+}
+
+/*
+ * @fn extern int OUTPUT_IPT_Fill (data, size)
+ *
+ * @param data - pointer to a buffer to copy
+ * @param size - size of the buffer to copy
+ * @returns size of data copied
+ *
+ * @brief Start by ensuring that output buffer space is available.
+ * @brief If so, then copy the input data to the output buffer and make the necessary
+ * @brief adjustments to manage the output buffers.
+ */
+extern int
+OUTPUT_IPT_Fill(char *data, U32 size)
+{
+	BUFFER_DESC bd;
+	char *outloc = NULL;
+	U32 this_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("Data: %p, size: %u.", data, size);
+
+	this_cpu = CONTROL_THIS_CPU();
+
+	bd = &ipt_trace_buf[this_cpu];
+
+	outloc = OUTPUT_Reserve_Buffer_Space(
+		bd, size, (NMI_mode) ? TRUE : FALSE, !SEP_IN_NOTIFICATION);
+	SEP_DRV_LOG_TRACE("outloc: %p", outloc);
+	if (!outloc) {
+		SEP_DRV_LOG_TRACE_OUT(
+			"OUTPUT_Reserve_Buffer_Space returns NULL!");
+		return 0;
+	}
+
+	memcpy(outloc, data, size);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return size;
+}
+
+/*
+ * @fn extern int OUTPUT_IPT_Info_Fill (data, size)
+ *
+ * @param data - pointer to a buffer to copy
+ * @param size - size of the buffer to copy
+ * @returns size of data copied
+ *
+ * @brief Start by ensuring that output buffer space is available.
+ * @brief If so, then copy the input data to the output buffer and make the necessary
+ * @brief adjustments to manage the output buffers.
+ */
+extern int
+OUTPUT_IPT_Info_Fill(char *data, U32 size)
+{
+	BUFFER_DESC bd;
+	char *outloc = NULL;
+	U32 this_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("Data: %p, size: %u.", data, size);
+
+	preempt_disable();
+	this_cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	bd = &ipt_info_buf[this_cpu];
+
+	outloc = OUTPUT_Reserve_Buffer_Space(
+		bd, size, (NMI_mode) ?
TRUE : FALSE, !SEP_IN_NOTIFICATION);
+	SEP_DRV_LOG_TRACE("outloc: %p", outloc);
+	if (!outloc) {
+		SEP_DRV_LOG_TRACE_OUT(
+			"OUTPUT_Reserve_Buffer_Space returns NULL!");
+		return 0;
+	}
+
+	memcpy(outloc, data, size);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return size;
+}
+
diff --git a/drivers/platform/x86/sepdk/sep/pci.c b/drivers/platform/x86/sepdk/sep/pci.c
new file mode 100644
index 00000000000000..4a7abd8ad92862
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/pci.c
@@ -0,0 +1,1027 @@
+/****
+    Copyright (C) 2005 Intel Corporation.  All Rights Reserved.
+
+    This file is part of SEP Development Kit.
+
+    SEP Development Kit is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    SEP Development Kit is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include
+#include
+#include
+#include
+#include
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+
+#include "inc/lwpmudrv.h"
+#include "inc/pci.h"
+#include "inc/utility.h"
+#include "inc/control.h"
+
+struct pci_bus ***pci_buses = NULL;
+#if defined(DRV_PMT_ENABLE)
+/**
+ * pmt_telem_read() - Read qwords from telemetry sram
+ * @ep:     Telemetry endpoint to be read
+ * @offset: Register offset in bytes
+ * @data:   Allocated qword buffer
+ * @count:  Number of qwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0           - Success
+ * * -ENODEV     - The device is not present.
+ * * -EINVAL     - The offset is out of bounds
+ * * -EPIPE      - The device was removed during the read. Data was written,
+ *                 but it should be considered invalid.
+ */
+extern int __weak
+pmt_telem_read32(struct telem_endpoint *ep, u32 offset, u32 *data, u32 count);
+
+extern int __weak
+pmt_telem_read(struct telem_endpoint *ep, u32 offset, u64 *data, u32 count);
+
+/**
+ * pmt_telem_read64() - Read qwords from telemetry sram
+ * @ep:     Telemetry endpoint to be read
+ * @offset: Register offset in bytes
+ * @data:   Allocated qword buffer
+ * @count:  Number of qwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0           - Success
+ * * -ENODEV     - The device is not present.
+ * * -EINVAL     - The offset is out of bounds
+ * * -EPIPE      - The device was removed during the read. Data was written,
+ *                 but it should be considered invalid.
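+ *
+ * Illustrative use (a sketch; 'ep' must be a valid endpoint handle obtained
+ * from the PMT telemetry core, and process() is a placeholder):
+ *
+ *   u64 qword;
+ *
+ *   if (!pmt_telem_read64(ep, 0, &qword, 1))
+ *           process(qword);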
+ */ +extern int __weak +pmt_telem_read64(struct telem_endpoint *ep, u32 offset, u64 *data, u32 count); +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern VOID PCI_Initialize(VOID) + * + * @param none + * + * @return none + * + * @brief Initializes the pci_buses array. + * + */ +extern VOID +PCI_Initialize(VOID) +{ + U32 i, j; + U32 num_found_buses = 0; + U32 prev_val = 0; + + SEP_DRV_LOG_INIT_IN("Initializing pci_buses..."); + + pci_buses = CONTROL_Allocate_Memory(num_packages * + sizeof(struct pci_bus **)); + if (pci_buses == NULL) { + SEP_DRV_LOG_INIT_OUT("Failed to allocate memory"); + return; + } + for (i = 0; i < num_packages; i++) { + pci_buses[i] = CONTROL_Allocate_Memory( + MAX_BUSNO * sizeof(struct pci_bus *)); + if (pci_buses[i] != NULL) { + memset(pci_buses[i], 0, + MAX_BUSNO * sizeof(struct pci_bus *)); + } + } + + for (i = 0; i < num_packages; i++) { + prev_val = num_found_buses; + for (j = 0; j < MAX_BUSNO; j++) { + if (pci_buses[i] != NULL) { + pci_buses[i][j] = pci_find_bus(i, j); + if (pci_buses[i][j]) { + SEP_DRV_LOG_DETECTION("Found PCI domain 0x%x, bus 0x%x at %p.", + i, j, + pci_buses[i][j]); + num_found_buses++; + } + SEP_DRV_LOG_TRACE("pci_buses[%u][%u]: %p.", i, + j, pci_buses[i][j]); + } + } + if (prev_val < num_found_buses) { + num_pci_domains++; + } + } + + if (!num_pci_domains) { + num_pci_domains = 1; + } + + SEP_DRV_LOG_INIT_OUT("Found %u buses across %u domains.", + num_found_buses, num_pci_domains); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern VOID PCI_Cleanup(VOID) + * + * @param none + * + * @return none + * + * @brief Clean up the pci_buses array. + * + */ +extern VOID +PCI_Cleanup(VOID) +{ + U32 i; + + SEP_DRV_LOG_TRACE_IN("Cleaning up pci_buses..."); + + if (pci_buses == NULL) { + return; + } + + for (i = 0; i < num_packages; i++) { + CONTROL_Free_Memory(pci_buses[i]); + } + CONTROL_Free_Memory(pci_buses); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/*! + * @fn extern VOID PCI_Find_DVSEC_Capability_BAR(UNCORE_DISCOVERY_DVSEC_CONFIG_NODE, DRV_PCI_DEVICE_ENTRY) + * + * @param none + * + * @return none + * + * @brief Traverses PCI space to identify discovery BAR using the given capability id + * + */ +extern VOID +PCI_Find_DVSEC_Capability_BAR(UNCORE_DISCOVERY_DVSEC_CONFIG config, + DRV_PCI_DEVICE_ENTRY bar_list) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + struct pci_dev *device = NULL; + U32 dvsec = 0; + U32 value = 0; + U32 bir = 0; + U32 bar_offset = 0; + U32 bar = 0; + U32 i = 0; + + if (!config || !UNCORE_DISCOVERY_DVSEC_pci_ext_cap_id(config)) { + return; + } + while ((device = pci_get_device(DRV_IS_PCI_VENDOR_ID_INTEL, PCI_ANY_ID, + device)) != NULL) { + while ((dvsec = pci_find_next_ext_capability( + device, dvsec, + UNCORE_DISCOVERY_DVSEC_pci_ext_cap_id( + config)))) { + pci_read_config_dword( + device, + dvsec + UNCORE_DISCOVERY_DVSEC_dvsec_offset( + config), + &value); + if ((value & + UNCORE_DISCOVERY_DVSEC_dvsec_id_mask(config)) == + UNCORE_DISCOVERY_DVSEC_dvsec_id_pmon(config)) { + pci_read_config_dword( + device, + dvsec + + UNCORE_DISCOVERY_DVSEC_dvsec_offset( + config) + + 4, + &bir); + bir = bir & + UNCORE_DISCOVERY_DVSEC_dvsec_bir_mask( + config); + bar_offset = 0x10 + (bir * 4); + pci_read_config_dword(device, bar_offset, &bar); + bar = bar & ~(PAGE_SIZE - 1); + SEP_DRV_LOG_TRACE("%u. 
BAR:0x%x", i, bar); + DRV_PCI_DEVICE_ENTRY_bar_address(&bar_list[i]) = + bar; + i++; + } + } + } +#endif +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern U32 PCI_Read_U32(domain, bus, device, function, offset) + * + * @param domain - target domain + * @param bus - target bus + * @param device - target device + * @param function - target function + * @param offset - target register offset + * + * @return Value at this location + * + * @brief Reads a U32 from PCI configuration space + * + */ +extern U32 +PCI_Read_U32(U32 domain, U32 bus, U32 device, U32 function, U32 offset) +{ + U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will read Domain%x BDF(%x:%x:%x)[0x%x](4B)...", + domain, bus, device, function, offset); + + if (bus < MAX_BUSNO && pci_buses && domain < num_pci_domains && + pci_buses[domain] && pci_buses[domain][bus]) { + pci_buses[domain][bus]->ops->read(pci_buses[domain][bus], devfn, + offset, 4, &res); + } else { + SEP_DRV_LOG_ERROR("Could not read Domain%x BDF(%x:%x:%x)[0x%x]: bus not found!", + domain, bus, device, function, offset); + } + + SEP_DRV_LOG_REGISTER_OUT("Has read Domain%x BDF(%x:%x:%x)[0x%x](4B): 0x%x.", + domain, bus, device, function, offset, res); + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern U32 PCI_Read_U32_Valid(domain, bus, device, function, offset, invalid_value) + * + * @param domain - target domain + * @param bus - target bus + * @param device - target device + * @param function - target function + * @param offset - target register offset + * @param invalid_value - value against which to compare the PCI-obtained value + * + * @return Value at this location (if value != invalid_value), 0 otherwise + * + * @brief Reads a U32 from PCI configuration space + * + */ +extern U32 +PCI_Read_U32_Valid(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U32 invalid_value) +{ + U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will read Domain%x BDF(%x:%x:%x)[0x%x](4B)...", + domain, bus, device, function, offset); + + if (bus < MAX_BUSNO && pci_buses && domain < num_pci_domains && + pci_buses[domain] && pci_buses[domain][bus]) { + pci_buses[domain][bus]->ops->read(pci_buses[domain][bus], devfn, + offset, 4, &res); + if (res == invalid_value) { + res = 0; + SEP_DRV_LOG_REGISTER_OUT("Has read Domain%x BDF(%x:%x:%x)[0x%x](4B): 0x%x (invalid value).", + domain, bus, device, function, + offset, res); + } else { + SEP_DRV_LOG_REGISTER_OUT("Has read Domain%x BDF(%x:%x:%x)[0x%x](4B): 0x%x.", + domain, bus, device, function, + offset, res); + } + } else { + SEP_DRV_LOG_REGISTER_OUT("Could not read Domain%x BDF(%x:%x:%x)[0x%x]: bus not found!", + domain, bus, device, function, offset); + } + + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern U64 PCI_Read_U64(domain, bus, device, function, offset)
+ *
+ * @param   domain   - target domain
+ * @param   bus      - target bus
+ * @param   device   - target device
+ * @param   function - target function
+ * @param   offset   - target register offset
+ *
+ * @return  Value at this location
+ *
+ * @brief   Reads a U64 from PCI configuration space
+ *
+ */
+extern U64
+PCI_Read_U64(U32 domain, U32 bus, U32 device, U32 function, U32 offset)
+{
+	U64 res = 0;
+	U32 devfn = (device << 3) | (function & 0x7);
+
+	SEP_DRV_LOG_REGISTER_IN("Will read Domain%x BDF(%x:%x:%x)[0x%x](8B)...",
+				domain, bus, device, function, offset);
+
+	if (bus < MAX_BUSNO && pci_buses && domain < num_pci_domains &&
+	    pci_buses[domain] && pci_buses[domain][bus]) {
+		pci_buses[domain][bus]->ops->read(pci_buses[domain][bus], devfn,
+						  offset, 4, (U32 *)&res);
+		pci_buses[domain][bus]->ops->read(pci_buses[domain][bus], devfn,
+						  offset + 4, 4,
+						  ((U32 *)&res) + 1);
+	} else {
+		SEP_DRV_LOG_ERROR("Could not read Domain%x BDF(%x:%x:%x)[0x%x]: bus not found!",
+				  domain, bus, device, function, offset);
+	}
+
+	SEP_DRV_LOG_REGISTER_OUT("Has read Domain%x BDF(%x:%x:%x)[0x%x](8B): 0x%llx.",
+				 domain, bus, device, function, offset, res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern U64 PCI_Read_U64_Valid(domain, bus, device, function, offset, invalid_value)
+ *
+ * @param   domain        - target domain
+ * @param   bus           - target bus
+ * @param   device        - target device
+ * @param   function      - target function
+ * @param   offset        - target register offset
+ * @param   invalid_value - value against which to compare the PCI-obtained value
+ *
+ * @return  Value at this location (if value != invalid_value), 0 otherwise
+ *
+ * @brief   Reads a U64 from PCI configuration space
+ *
+ */
+extern U64
+PCI_Read_U64_Valid(U32 domain,
+		   U32 bus,
+		   U32 device,
+		   U32 function,
+		   U32 offset,
+		   U64 invalid_value)
+{
+	U64 res = 0;
+	U32 devfn = (device << 3) | (function & 0x7);
+
+	SEP_DRV_LOG_REGISTER_IN("Will read Domain%x BDF(%x:%x:%x)[0x%x](8B)...",
+				domain, bus, device, function, offset);
+
+	if (bus < MAX_BUSNO && pci_buses && domain < num_pci_domains &&
+	    pci_buses[domain] && pci_buses[domain][bus]) {
+		pci_buses[domain][bus]->ops->read(pci_buses[domain][bus], devfn,
+						  offset, 4, (U32 *)&res);
+		pci_buses[domain][bus]->ops->read(pci_buses[domain][bus], devfn,
+						  offset + 4, 4,
+						  ((U32 *)&res) + 1);
+
+		if (res == invalid_value) {
+			res = 0;
+			SEP_DRV_LOG_REGISTER_OUT("Has read Domain%x BDF(%x:%x:%x)[0x%x](8B): 0x%llx (invalid value).",
+						 domain, bus, device, function,
+						 offset, res);
+		} else {
+			SEP_DRV_LOG_REGISTER_OUT("Has read Domain%x BDF(%x:%x:%x)[0x%x](8B): 0x%llx.",
+						 domain, bus, device, function,
+						 offset, res);
+		}
+	} else {
+		SEP_DRV_LOG_REGISTER_OUT("Could not read Domain%x BDF(%x:%x:%x)[0x%x]: bus not found!",
+					 domain, bus, device, function, offset);
+	}
+
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern U32 PCI_Write_U32(domain, bus, device, function, offset, value) + * + * @param domain - target domain + * @param bus - target bus + * @param device - target device + * @param function - target function + * @param offset - target register offset + * @param value - value to write + * + * @return 0 in case of success, 1 otherwise + * + * @brief Writes a U32 to PCI configuration space + * + */ +extern U32 +PCI_Write_U32(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U32 value) +{ + U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will write Domain%x BDF(%x:%x:%x)[0x%x](4B): 0x%x...", + domain, bus, device, function, offset, value); + + if (bus < MAX_BUSNO && pci_buses && domain < num_pci_domains && + pci_buses[domain] && pci_buses[domain][bus]) { + pci_buses[domain][bus]->ops->write(pci_buses[domain][bus], + devfn, offset, 4, value); + SEP_DRV_LOG_REGISTER_OUT("Has written Domain%x BDF(%x:%x:%x)[0x%x](4B): 0x%x.", + domain, bus, device, function, offset, + value); + } else { + SEP_DRV_LOG_ERROR("Could not write Domain%x BDF(%x:%x:%x)[0x%x]: bus not found!", + domain, bus, device, function, offset); + res = 1; + SEP_DRV_LOG_REGISTER_OUT("Failed to write Domain%x BDF(%x:%x:%x)[0x%x](4B): 0x%x.", + domain, bus, device, function, offset, + value); + } + + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern U32 PCI_Write_U64(domain, bus, device, function, offset, value) + * + * @param domain - target domain + * @param bus - target bus + * @param device - target device + * @param function - target function + * @param offset - target register offset + * @param value - value to write + * + * @return 0 in case of success, 1 otherwise + * + * @brief Writes a U64 to PCI configuration space + * + */ +extern U32 +PCI_Write_U64(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U64 value) +{ + U32 res = 0; + U32 devfn = (device << 3) | (function & 0x7); + + SEP_DRV_LOG_REGISTER_IN("Will write Domain%x BDF(%x:%x:%x)[0x%x](8B): 0x%llx...", + domain, bus, device, function, offset, value); + + if (bus < MAX_BUSNO && pci_buses && domain < num_pci_domains && + pci_buses[domain] && pci_buses[domain][bus]) { + pci_buses[domain][bus]->ops->write( + pci_buses[domain][bus], devfn, offset, 4, (U32)value); + pci_buses[domain][bus]->ops->write(pci_buses[domain][bus], + devfn, offset + 4, 4, + (U32)(value >> 32)); + SEP_DRV_LOG_REGISTER_OUT("Has written Domain%x BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", + domain, bus, device, function, offset, + value); + } else { + SEP_DRV_LOG_ERROR("Could not write Domain%x BDF(%x:%x:%x)[0x%x]: bus not found!", + domain, bus, device, function, offset); + res = 1; + SEP_DRV_LOG_REGISTER_OUT("Failed to write Domain%x BDF(%x:%x:%x)[0x%x](8B): 0x%llx.", + domain, bus, device, function, offset, + value); + } + + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn extern int PCI_Read_From_Memory_Address(addr, val)
+ *
+ * @param   addr   - physical address in mmio
+ * @param   *value - value at this address
+ *
+ * @return  status
+ *
+ * @brief   Read memory mapped i/o physical location
+ *
+ */
+extern int
+PCI_Read_From_Memory_Address(U32 addr, U32 *val)
+{
+	U32 aligned_addr, offset, value;
+	PVOID base;
+
+	SEP_DRV_LOG_TRACE_IN("Addr: %x, val_pointer: %p.", addr, val);
+
+	if (addr <= 0) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_TRACE("Preparing to read physical address: %x.", addr);
+	offset = addr & ~PAGE_MASK;
+	aligned_addr = addr & PAGE_MASK;
+	SEP_DRV_LOG_TRACE("Aligned physical address: %x, offset: %x.",
+			  aligned_addr, offset);
+
+	base = ioremap(aligned_addr, PAGE_SIZE);
+	if (base == NULL) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_REGISTER_IN("Will read PCI address %u (mapped at %p).",
+				addr, base + offset);
+	value = readl(base + offset);
+	SEP_DRV_LOG_REGISTER_OUT("Read PCI address %u (mapped at %p): %x.",
+				 addr, base + offset, value);
+
+	*val = value;
+
+	iounmap(base);
+
+	SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS.");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern int PCI_Write_To_Memory_Address(addr, val)
+ *
+ * @param   addr  - physical address in mmio
+ * @param   value - value to be written
+ *
+ * @return  status
+ *
+ * @brief   Write to memory mapped i/o physical location
+ *
+ */
+extern int
+PCI_Write_To_Memory_Address(U32 addr, U32 val)
+{
+	U32 aligned_addr, offset;
+	PVOID base;
+
+	SEP_DRV_LOG_TRACE_IN("Addr: %x, val: %x.", addr, val);
+
+	if (addr <= 0) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_TRACE("Preparing to write physical address: %x (val: %x).",
+			  addr, val);
+	offset = addr & ~PAGE_MASK;
+	aligned_addr = addr & PAGE_MASK;
+	SEP_DRV_LOG_TRACE("Aligned physical address: %x, offset: %x (val: %x).",
+			  aligned_addr, offset, val);
+
+	base = ioremap(aligned_addr, PAGE_SIZE);
+	if (base == NULL) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_REGISTER_IN("Will write PCI address %u (mapped at %p): %x.",
+				addr, base + offset, val);
+	writel(val, base + offset);
+	SEP_DRV_LOG_REGISTER_OUT("Wrote PCI address %u (mapped at %p): %x.",
+				 addr, base + offset, val);
+
+	iounmap(base);
+
+	SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS.");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern int PCI_Read_From_Memory_Address_U64(addr, val)
+ *
+ * @param   addr   - physical address in mmio
+ * @param   *value - value at this address
+ *
+ * @return  status
+ *
+ * @brief   Read memory mapped i/o physical location
+ *
+ */
+extern int
+PCI_Read_From_Memory_Address_U64(U64 addr, U64 *val)
+{
+	U64 aligned_addr, offset, value;
+	PVOID base;
+
+	SEP_DRV_LOG_TRACE_IN("Addr: %llx, val_pointer: %p.", addr, val);
+
+	if (addr <= 0) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_TRACE("Preparing to read physical address: %llx.", addr);
+	offset = addr & ~PAGE_MASK;
+	aligned_addr = addr & PAGE_MASK;
+	SEP_DRV_LOG_TRACE("Aligned physical address: %llx, offset: %llx.", aligned_addr, offset);
+
+	base = ioremap(aligned_addr, PAGE_SIZE);
+	if (base == NULL) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_REGISTER_IN("Will read PCI address 0x%llx (mapped at %p).", addr, base + offset);
+	value = readq(base + offset);
+	SEP_DRV_LOG_REGISTER_OUT("Read PCI address 0x%llx (mapped at %p): %llx.", addr, base + offset, value);
+
+	*val = value;
+
+	iounmap(base);
+
+	SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS.");
+	return OS_SUCCESS;
+}
+
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern int PCI_Write_To_Memory_Address_U64(addr, val)
+ *
+ * @param   addr  - physical address in mmio
+ * @param   value - value to be written
+ *
+ * @return  status
+ *
+ * @brief   Write to memory mapped i/o physical location
+ *
+ */
+extern int
+PCI_Write_To_Memory_Address_U64(U64 addr, U64 val)
+{
+	U64 aligned_addr, offset;
+	PVOID base;
+
+	SEP_DRV_LOG_TRACE_IN("Addr: %llx, val: %llx.", addr, val);
+
+	if (addr <= 0) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (addr <= 0!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_TRACE("Preparing to write physical address: %llx (val: %llx).", addr, val);
+	offset = addr & ~PAGE_MASK;
+	aligned_addr = addr & PAGE_MASK;
+	SEP_DRV_LOG_TRACE("Aligned physical address: %llx, offset: %llx (val: %llx).", aligned_addr, offset, val);
+
+	base = ioremap(aligned_addr, PAGE_SIZE);
+	if (base == NULL) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (mapping failed!).");
+		return OS_INVALID;
+	}
+
+	SEP_DRV_LOG_REGISTER_IN("Will write PCI address 0x%llx (mapped at %p): %llx.", addr, base + offset, val);
+	writeq(val, base + offset);
+	SEP_DRV_LOG_REGISTER_OUT("Wrote PCI address 0x%llx (mapped at %p): %llx.", addr, base + offset, val);
+
+	iounmap(base);
+
+	SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS.");
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern U32 PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, U64 map_size) + * + * @param node - MAP NODE to use + * @param phy_address - Address to be mapped + * @param map_size - Amount of memory to map (has to be a multiple of 4k) + * + * @return OS_SUCCESS or OS_INVALID + * + * @brief Maps a physical address to a virtual address + * + */ +extern OS_STATUS +PCI_Map_Memory(SEP_MMIO_NODE *node, U64 phy_address, U32 map_size) +{ + U8 *res; + + SEP_DRV_LOG_INIT_IN("Node: %p, phy_address: %llx, map_size: %u.", node, + phy_address, map_size); + + if (!node || !phy_address || !map_size) { + SEP_DRV_LOG_ERROR_INIT_OUT("Invalid parameters, aborting!"); + return OS_INVALID; + } + + res = ioremap(phy_address, map_size); + if (!res) { + SEP_DRV_LOG_ERROR_INIT_OUT("Map operation failed!"); + return OS_INVALID; + } + + SEP_MMIO_NODE_physical_address(node) = (UIOP)phy_address; + SEP_MMIO_NODE_virtual_address(node) = (UIOP)res; + SEP_MMIO_NODE_map_token(node) = (UIOP)res; + SEP_MMIO_NODE_size(node) = map_size; + + SEP_DRV_LOG_INIT_OUT("Addr:0x%llx->0x%llx,tok:0x%llx,sz:%u.", + SEP_MMIO_NODE_physical_address(node), + SEP_MMIO_NODE_virtual_address(node), + SEP_MMIO_NODE_map_token(node), + SEP_MMIO_NODE_size(node)); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void PCI_Unmap_Memory(SEP_MMIO_NODE *node) + * + * @param node - memory map node to clean up + * + * @return none + * + * @brief Unmaps previously mapped memory + * + */ +extern void +PCI_Unmap_Memory(SEP_MMIO_NODE *node) +{ + SEP_DRV_LOG_INIT_IN("Unmapping node %p.", node); + + if (node) { + if (SEP_MMIO_NODE_size(node)) { + SEP_DRV_LOG_TRACE("Unmapping token 0x%llx (0x%llx->0x%llx)[%uB].", + SEP_MMIO_NODE_map_token(node), + SEP_MMIO_NODE_physical_address(node), + SEP_MMIO_NODE_virtual_address(node), + SEP_MMIO_NODE_size(node)); + iounmap((void *)(UIOP)SEP_MMIO_NODE_map_token(node)); + SEP_MMIO_NODE_size(node) = 0; + SEP_MMIO_NODE_map_token(node) = 0; + SEP_MMIO_NODE_virtual_address(node) = 0; + SEP_MMIO_NODE_physical_address(node) = 0; + } else { + SEP_DRV_LOG_TRACE("Already unmapped."); + } + } + + SEP_DRV_LOG_INIT_OUT("Unmapped node %p.", node); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U32 PCI_MMIO_Read_U32(virtual_address_base, offset) + * + * @param virtual_address_base - Virtual address base + * @param offset - Register offset + * + * @return U32 read from an MMIO register + * + * @brief Reads U32 value from MMIO + * + */ +extern U32 +PCI_MMIO_Read_U32(U64 virtual_address_base, U32 offset) +{ + U32 temp_u32 = 0LL; + U32 *computed_address; + + computed_address = + (U32 *)(((char *)(UIOP)virtual_address_base) + offset); + + SEP_DRV_LOG_REGISTER_IN("Will read U32(0x%llx + 0x%x = 0x%p).", + virtual_address_base, offset, computed_address); + + if (!virtual_address_base) { + SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!", + virtual_address_base, offset, + computed_address); + temp_u32 = 0; + } else { + temp_u32 = *computed_address; + } + + SEP_DRV_LOG_REGISTER_OUT("Has read U32(0x%llx + 0x%x): 0x%x.", + virtual_address_base, offset, temp_u32); + return temp_u32; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 PCI_MMIO_Read_U64(virtual_address_base, offset)
+ *
+ * @param   virtual_address_base - Virtual address base
+ * @param   offset               - Register offset
+ *
+ * @return  U64 read from an MMIO register
+ *
+ * @brief   Reads U64 value from MMIO
+ *
+ */
+extern U64
+PCI_MMIO_Read_U64(U64 virtual_address_base, U32 offset)
+{
+	U64 temp_u64 = 0LL;
+	U64 *computed_address;
+
+	computed_address =
+		(U64 *)(((char *)(UIOP)virtual_address_base) + offset);
+
+	SEP_DRV_LOG_REGISTER_IN("Will read U64(0x%llx + 0x%x = 0x%p).",
+				virtual_address_base, offset, computed_address);
+
+	if (!virtual_address_base) {
+		SEP_DRV_LOG_ERROR("Invalid base for U64(0x%llx + 0x%x = 0x%p)!",
+				  virtual_address_base, offset,
+				  computed_address);
+		temp_u64 = 0;
+	} else {
+		temp_u64 = *computed_address;
+	}
+
+	SEP_DRV_LOG_REGISTER_OUT("Has read U64(0x%llx + 0x%x): 0x%llx.",
+				 virtual_address_base, offset, temp_u64);
+	return temp_u64;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void PCI_MMIO_Write_U32(virtual_address_base, offset, value)
+ *
+ * @param   virtual_address_base - Virtual address base
+ * @param   offset               - Register offset
+ * @param   value                - Value to write
+ *
+ * @return  NONE
+ *
+ * @brief   Writes U32 value to MMIO
+ *
+ */
+extern void
+PCI_MMIO_Write_U32(U64 virtual_address_base, U32 offset, U32 value)
+{
+	U32 *computed_address;
+
+	computed_address =
+		(U32 *)(((char *)(UIOP)virtual_address_base) + offset);
+
+	SEP_DRV_LOG_REGISTER_IN("Writing 0x%x to U32(0x%llx + 0x%x = 0x%p).",
+				value, virtual_address_base, offset,
+				computed_address);
+
+	if (!virtual_address_base) {
+		SEP_DRV_LOG_ERROR("Invalid base for U32(0x%llx + 0x%x = 0x%p)!",
+				  virtual_address_base, offset,
+				  computed_address);
+	} else {
+		*computed_address = value;
+	}
+
+	SEP_DRV_LOG_REGISTER_OUT("Has written 0x%x to U32(0x%llx + 0x%x).",
+				 value, virtual_address_base, offset);
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void PCI_MMIO_Write_U64(virtual_address_base, offset, value)
+ *
+ * @param   virtual_address_base - Virtual address base
+ * @param   offset               - Register offset
+ * @param   value                - Value to write
+ *
+ * @return  NONE
+ *
+ * @brief   Writes U64 value to MMIO
+ *
+ */
+extern void
+PCI_MMIO_Write_U64(U64 virtual_address_base, U32 offset, U64 value)
+{
+	U64 *computed_address;
+
+	computed_address =
+		(U64 *)(((char *)(UIOP)virtual_address_base) + offset);
+
+	SEP_DRV_LOG_REGISTER_IN("Writing 0x%llx to U64(0x%llx + 0x%x = 0x%p).",
+				value, virtual_address_base, offset,
+				computed_address);
+
+	if (!virtual_address_base) {
+		SEP_DRV_LOG_ERROR("Invalid base for U64(0x%llx + 0x%x = 0x%p)!",
+				  virtual_address_base, offset,
+				  computed_address);
+	} else {
+		*computed_address = value;
+	}
+
+	SEP_DRV_LOG_REGISTER_OUT("Has written 0x%llx to U64(0x%llx + 0x%x).",
+				 value, virtual_address_base, offset);
+	return;
+}
+
+#if defined(DRV_PMT_ENABLE)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U32 PCI_PMT_Read_U32(telem_ep, offset)
+ *
+ * @param   telem_ep - Telemetry end point
+ * @param   offset   - Register offset
+ *
+ * @return  U32 data read from an MMIO register
+ *
+ * @brief   Reads U32 value from telemetry MMIO space
+ *
+ */
+extern U32
+PCI_PMT_Read_U32(struct telem_endpoint *telem_ep, U32 offset)
+{
+	U64 temp_u64 = 0LL;
+
+	SEP_DRV_LOG_REGISTER_IN("Will read from U32(0x%x).", offset);
+	pmt_telem_read(telem_ep, offset, &temp_u64, 1);
+
+	SEP_DRV_LOG_REGISTER_OUT("Has read U32(0x%x): 0x%x.", offset,
+				 (U32)temp_u64);
+
+	return (U32)temp_u64;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 PCI_PMT_Read_U64(telem_ep, offset)
+ *
+ * @param   telem_ep - Telemetry end point
+ * @param   offset   - Register offset
+ *
+ * @return  U64 data read from an MMIO register
+ *
+ * @brief   Reads U64 value from telemetry MMIO space
+ *
+ */
+extern U64
+PCI_PMT_Read_U64(struct telem_endpoint *telem_ep, U32 offset)
+{
+	U64 temp_u64 = 0LL;
+
+	SEP_DRV_LOG_REGISTER_IN("Will read U64(0x%x).", offset);
+	pmt_telem_read(telem_ep, offset, &temp_u64, 1);
+	SEP_DRV_LOG_REGISTER_OUT("Has read U64(0x%x): 0x%llx.", offset,
+				 temp_u64);
+
+	return temp_u64;
+}
+#endif
+
diff --git a/drivers/platform/x86/sepdk/sep/pebs.c b/drivers/platform/x86/sepdk/sep/pebs.c
new file mode 100644
index 00000000000000..cbd748f1cf8cbd
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/pebs.c
@@ -0,0 +1,2023 @@
+/****
+    Copyright (C) 2005 Intel Corporation.  All Rights Reserved.
+
+    This file is part of SEP Development Kit.
+
+    SEP Development Kit is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    SEP Development Kit is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction.  Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License.  This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+#include "lwpmudrv.h"
+#include "control.h"
+#include "core2.h"
+#include "utility.h"
+#include "output.h"
+#include "ecb_iterators.h"
+#include "pebs.h"
+
+#if defined(DRV_USE_KAISER)
+#include
+#include
+int (*local_kaiser_add_mapping)(unsigned long, unsigned long, unsigned long) = NULL;
+void (*local_kaiser_remove_mapping)(unsigned long, unsigned long) = NULL;
+#elif defined(DRV_USE_PTI)
+#include
+#include
+#include
+#include
+#include
+void (*local_cea_set_pte)(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) = NULL;
+void (*local_do_kernel_range_flush)(void *info) = NULL;
+DEFINE_PER_CPU(PVOID, dts_buffer_cea);
+#endif
+
+/*
+ * This structure has been hidden from kernel modules
+ * since the 5.8 kernel.
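+ * The local definition below mirrors the kernel's layout; this workaround
+ * assumes the two stay in sync.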
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
+struct flush_tlb_info {
+	struct mm_struct *mm;
+	unsigned long start;
+	unsigned long end;
+	U64 new_tlb_gen;
+	unsigned int stride_shift;
+	DRV_BOOL freed_tables;
+};
+#endif
+
+static PVOID pebs_global_memory;
+static size_t pebs_global_memory_size;
+
+extern DRV_CONFIG drv_cfg;
+extern DRV_SETUP_INFO_NODE req_drv_setup_info;
+extern DRV_BOOL multi_pebs_enabled;
+
+#if defined(DRV_USE_PTI)
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pebs_Update_CEA (S32)
+ *
+ * @brief Flush the TLB entries related to PEBS buffer in cpu entry area
+ *
+ * @param this_cpu current cpu
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ */
+static VOID
+pebs_Update_CEA(S32 this_cpu)
+{
+	unsigned long cea_start_addr;
+	unsigned long cea_end_addr;
+
+	SEP_DRV_LOG_TRACE_IN("This_cpu: %d.", this_cpu);
+
+	if (per_cpu(dts_buffer_cea, this_cpu)) {
+		cea_start_addr =
+			(unsigned long)per_cpu(dts_buffer_cea, this_cpu);
+		cea_end_addr = cea_start_addr +
+			       (unsigned long)CPU_STATE_dts_buffer_size(
+				       &pcb[this_cpu]);
+		if (local_do_kernel_range_flush) {
+			struct flush_tlb_info info;
+			info.start = cea_start_addr;
+			info.end = cea_end_addr;
+			local_do_kernel_range_flush(&info);
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pebs_Corei7_Initialize_Threshold (DTS_BUFFER_EXT dts)
+ *
+ * @brief The Nehalem-specific initialization
+ *
+ * @param dts - dts description
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ */
+static VOID
+pebs_Corei7_Initialize_Threshold(DTS_BUFFER_EXT dts)
+{
+	U32 this_cpu;
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+
+	SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts);
+
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	DTS_BUFFER_EXT_pebs_threshold(dts) =
+		DTS_BUFFER_EXT_pebs_base(dts) +
+		(LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) *
+		 DEV_CONFIG_pebs_record_num(pcfg));
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 pebs_Corei7_Overflow (this_cpu, overflow_status, rec_index)
+ *
+ * @brief The Nehalem specific overflow check
+ *
+ * @param this_cpu        - cpu id
+ *        overflow_status - overflow status
+ *        rec_index       - record index
+ *
+ * @return Updated overflow_status
+ *
+ * Special Notes:
+ *        Check the global overflow field of the buffer descriptor.
+ *        Precise events can be allocated on any of the 4 general purpose
+ *        registers.
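+ *
+ *        Example (illustrative): if PEBS_REC_EXT_glob_perf_overflow()
+ *        reads 0x3, general-purpose counters 0 and 1 overflowed, and both
+ *        bits are OR-ed into the returned overflow_status.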
+ */
+static U64
+pebs_Corei7_Overflow(S32 this_cpu, U64 overflow_status, U32 rec_index)
+{
+	DTS_BUFFER_EXT dtes;
+	S8 *pebs_base, *pebs_index, *pebs_ptr;
+	PEBS_REC_EXT pb;
+	U8 pebs_ptr_check = FALSE;
+	U32 dev_idx = core_to_dev_map[this_cpu];
+
+	SEP_DRV_LOG_TRACE_IN("This_cpu: %d, overflow_status: %llx, rec_index: %u.",
+			     this_cpu, overflow_status, rec_index);
+
+	dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+
+	SEP_DRV_LOG_TRACE("This_cpu: %d, dtes %p.", this_cpu, dtes);
+
+	if (!dtes) {
+		return overflow_status;
+	}
+	pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes);
+	SEP_DRV_LOG_TRACE("This_cpu: %d, pebs_base %p.", this_cpu, pebs_base);
+	pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes);
+	pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) +
+			  (rec_index *
+			   LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])));
+	pebs_ptr_check =
+		(pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index);
+	if (pebs_ptr_check) {
+		pb = (PEBS_REC_EXT)pebs_ptr;
+		overflow_status |= PEBS_REC_EXT_glob_perf_overflow(pb);
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status);
+	return overflow_status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 pebs_Corei7_Overflow_APEBS (this_cpu, overflow_status, rec_index)
+ *
+ * @brief Overflow check
+ *
+ * @param this_cpu        - cpu id
+ *        overflow_status - overflow status
+ *        rec_index       - record index
+ *
+ * @return Updated overflow_status
+ *
+ * Special Notes:
+ *        Check the global overflow field of the buffer descriptor.
+ *        Precise events can be allocated on any of the 8 general purpose
+ *        registers or 4 fixed registers.
+ */
+static U64
+pebs_Corei7_Overflow_APEBS(S32 this_cpu, U64 overflow_status, U32 rec_index)
+{
+	S8 *pebs_base, *pebs_index, *pebs_ptr;
+	ADAPTIVE_PEBS_BASIC_INFO pb;
+	DTS_BUFFER_EXT1 dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+	U8 pebs_ptr_check = FALSE;
+	U32 dev_idx = core_to_dev_map[this_cpu];
+	DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	if (!dtes) {
+		return overflow_status;
+	}
+	pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes);
+	pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes);
+	pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) +
+			  (rec_index *
+			   LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])));
+	pebs_ptr_check =
+		(pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index);
+
+	if (pebs_ptr_check && DEV_CONFIG_enable_adaptive_pebs(pcfg)) {
+		pb = (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr;
+		overflow_status |=
+			ADAPTIVE_PEBS_BASIC_INFO_applicable_counters(pb);
+	}
+
+	return overflow_status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pebs_Core2_Initialize_Threshold (DTS_BUFFER_EXT dts)
+ *
+ * @brief The Core2 specific initialization
+ *
+ * @param dts - dts description
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ */
+static VOID
+pebs_Core2_Initialize_Threshold(DTS_BUFFER_EXT dts)
+{
+	SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts);
+
+	DTS_BUFFER_EXT_pebs_threshold(dts) = DTS_BUFFER_EXT_pebs_base(dts);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 pebs_Core2_Overflow (this_cpu, overflow_status, rec_index)
+ *
+ * @brief The Core2-specific overflow check
+ *
+ * @param this_cpu - cpu id
+ *        overflow_status - overflow status
+ *        rec_index - record index
+ *
+ * @return updated overflow_status
+ *
+ * Special Notes:
+ *	Check the base and the index fields of the circular buffer; if they
+ *	are not the same, then a precise event has overflowed. Precise
+ *	events are allocated only on register#0.
+ */
+static U64
+pebs_Core2_Overflow(S32 this_cpu, U64 overflow_status, U32 rec_index)
+{
+	DTS_BUFFER_EXT dtes;
+	U8 status = FALSE;
+
+	SEP_DRV_LOG_TRACE_IN("This_cpu: %d, overflow_status: %llx, rec_index: %u.",
+			     this_cpu, overflow_status, rec_index);
+
+	dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+
+	if (!dtes) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Res: %llx (dtes is NULL!).",
+					    overflow_status);
+		return overflow_status;
+	}
+	status = (U8)((dtes) && (DTS_BUFFER_EXT_pebs_index(dtes) !=
+				 DTS_BUFFER_EXT_pebs_base(dtes)));
+	if (status) {
+		// Merom allows only general purpose register 0 to be precise capable
+		overflow_status |= 0x1;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status);
+	return overflow_status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pebs_Modify_IP (sample, is_64bit_addr, rec_index)
+ *
+ * @brief Change the IP field in the sample to that in the PEBS record
+ *
+ * @param sample - sample buffer
+ * @param is_64bit_addr - are we in a 64 bit module
+ * @param rec_index - record index
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *
+ */
+static VOID
+pebs_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index)
+{
+	SampleRecordPC *psamp = sample;
+	DTS_BUFFER_EXT dtes;
+	S8 *pebs_base, *pebs_index, *pebs_ptr;
+	PEBS_REC_EXT pb;
+	U8 pebs_ptr_check = FALSE;
+	U32 this_cpu;
+	U32 dev_idx;
+
+	SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.",
+			     sample, is_64bit_addr, rec_index);
+
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+
+	if (!dtes || !psamp) {
+		return;
+	}
+	SEP_DRV_LOG_TRACE("In PEBS Modify IP: cpu %d.", CONTROL_THIS_CPU());
+	pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes);
+	pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes);
+	pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) +
+			  (rec_index *
+			   LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])));
+	pebs_ptr_check =
+		(pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index);
+	if (pebs_ptr_check) {
+		pb = (PEBS_REC_EXT)pebs_ptr;
+		if (is_64bit_addr) {
+			SAMPLE_RECORD_iip(psamp) = PEBS_REC_EXT_linear_ip(pb);
+			SAMPLE_RECORD_ipsr(psamp) = PEBS_REC_EXT_r_flags(pb);
+		} else {
+			SAMPLE_RECORD_eip(psamp) =
+				PEBS_REC_EXT_linear_ip(pb) & 0xFFFFFFFF;
+			SAMPLE_RECORD_eflags(psamp) =
+				PEBS_REC_EXT_r_flags(pb) & 0xFFFFFFFF;
+		}
+	}
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
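+ * Background note (per the Intel SDM): legacy PEBS records capture the
+ * linear IP of the instruction following the one that triggered the
+ * event, while the EXT1 and adaptive record formats additionally carry
+ * an "eventing IP" pointing at the precise instruction; the routine
+ * below prefers the latter.
+ *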
+ * @fn VOID pebs_Modify_IP_With_Eventing_IP (sample, is_64bit_addr) + * + * @brief Change the IP field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * @param is_64bit_addr - are we in a 64 bit module + * + * @return NONE + * + * Special Notes: + * + */ +static VOID +pebs_Modify_IP_With_Eventing_IP(void *sample, + DRV_BOOL is_64bit_addr, + U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_ptr, *pebs_base, *pebs_index; + U64 ip = 0, flags = 0; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", + sample, is_64bit_addr, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + + if (!pebs_ptr_check) { + return; + } + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + ip = ADAPTIVE_PEBS_BASIC_INFO_eventing_ip( + (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + flags = ADAPTIVE_PEBS_GPR_INFO_rflags(( + ADAPTIVE_PEBS_GPR_INFO)(pebs_ptr + + LWPMU_DEVICE_apebs_gpr_offset( + &devices[dev_idx]))); + } + } else { + ip = PEBS_REC_EXT1_eventing_ip((PEBS_REC_EXT1)pebs_ptr); + flags = PEBS_REC_EXT1_r_flags((PEBS_REC_EXT1)pebs_ptr); + } + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = ip; + SAMPLE_RECORD_ipsr(psamp) = flags; + } else { + SAMPLE_RECORD_eip(psamp) = ip & 0xFFFFFFFF; + SAMPLE_RECORD_eflags(psamp) = flags & 0xFFFFFFFF; + } + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID pebs_Modify_TSC (sample) + * + * @brief Change the TSC field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * rec_index - record index + * @return NONE + * + * Special Notes: + * + */ +static VOID +pebs_Modify_TSC(void *sample, U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U64 tsc; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return; + } + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + tsc = ADAPTIVE_PEBS_BASIC_INFO_tsc( + (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); + } else { + tsc = PEBS_REC_EXT2_tsc((PEBS_REC_EXT2)pebs_ptr); + } + SAMPLE_RECORD_tsc(psamp) = tsc; + SEP_DRV_LOG_TRACE_OUT(""); + return; +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn U32 pebs_Get_Num_Records_Filled () + * + * @brief get number of PEBS records filled in PEBS buffer + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * + */ +static U32 +pebs_Get_Num_Records_Filled(VOID) +{ + U32 num = 0; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index; + U32 this_cpu; + U32 dev_idx; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes) { + return num; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + if (pebs_base != pebs_index) { + num = (U32)(pebs_index - pebs_base) / + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", num); + return num; +} + +/* + * Initialize the pebs micro dispatch tables + */ +PEBS_DISPATCH_NODE core2_pebs = +{ + .initialize_threshold = pebs_Core2_Initialize_Threshold, + .overflow = pebs_Core2_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE core2p_pebs = +{ + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Core2_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE corei7_pebs = +{ + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE haswell_pebs = +{ + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE perfver4_pebs = +{ + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = 
pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = pebs_Modify_TSC, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE perfver4_apebs = // adaptive PEBS +{ + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow_APEBS, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = pebs_Modify_TSC, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +#define PER_CORE_BUFFER_SIZE(dts_size, record_size, record_num) \ + (dts_size + (record_num + 1) * (record_size) + 64) +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID* pebs_Alloc_DTS_Buffer (VOID) + * + * @brief Allocate buffers used for latency and pebs sampling + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Allocate the memory needed to hold the DTS and PEBS records buffer. + * This routine is called by a thread that corresponds to a single core + */ +static VOID * +pebs_Alloc_DTS_Buffer(VOID) +{ + UIOP pebs_base; + U32 dts_size; + PVOID dts_buffer = NULL; + DTS_BUFFER_EXT dts; + int this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN(""); + + /* + * one PEBS record... need 2 records so that + * threshold can be less than absolute max + */ + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + dts_size = sizeof(DTS_BUFFER_EXT_NODE); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg) || + DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) { + if (DEV_CONFIG_pebs_mode(pcfg) == 6) { + dts_size = sizeof(DTS_BUFFER_EXT1_NODE); + } else if (DEV_CONFIG_pebs_mode(pcfg) == 7) { + dts_size = sizeof(DTS_BUFFER_EXT2_NODE); + } else { + SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid PEBS mode for Adaptive pebs(%d).", DEV_CONFIG_pebs_mode(pcfg)); + return NULL; + } + } + + /* + * account for extra bytes to align PEBS base to cache line boundary + */ + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == DRV_SETUP_INFO_PTI_KPTI) { +#if defined(DRV_USE_PTI) + struct page *page; + U32 buffer_size; + + SEP_DRV_LOG_INIT("Allocating PEBS buffer using KPTI approach."); + buffer_size = (PER_CORE_BUFFER_SIZE(dts_size, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]), DEV_CONFIG_pebs_record_num(pcfg)) / PAGE_SIZE + 1) * PAGE_SIZE; + if (buffer_size > PEBS_BUFFER_SIZE) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Can't allocate more buffer than CEA allows!"); + return NULL; + } + + page = __alloc_pages_node(cpu_to_node(this_cpu), GFP_ATOMIC | __GFP_ZERO, get_order(buffer_size)); + if (!page) { + SEP_DRV_LOG_ERROR_TRACE_OUT("NULL (failed to allocate space for DTS buffer!)."); + return NULL; + } + dts_buffer = page_address(page); + per_cpu(dts_buffer_cea, this_cpu) = &get_cpu_entry_area(this_cpu)->cpu_debug_buffers.pebs_buffer; + if (!per_cpu(dts_buffer_cea, this_cpu)) { + if (dts_buffer) { + free_pages((unsigned long)dts_buffer, get_order(buffer_size)); + } + SEP_DRV_LOG_ERROR_TRACE_OUT("CEA pebs_buffer ptr is NULL!"); + return NULL; + } + + CPU_STATE_dts_buffer(pcpu) = dts_buffer; + CPU_STATE_dts_buffer_size(pcpu) = buffer_size; + + if (local_cea_set_pte) { + size_t idx; + phys_addr_t phys_addr; + PVOID cea_ptr = per_cpu(dts_buffer_cea, this_cpu); + + phys_addr = virt_to_phys(dts_buffer); + + preempt_disable(); + for (idx = 0; idx < buffer_size; idx += PAGE_SIZE, phys_addr += 
PAGE_SIZE, cea_ptr += PAGE_SIZE) {
+				local_cea_set_pte(cea_ptr, phys_addr, PAGE_KERNEL);
+			}
+			pebs_Update_CEA(this_cpu);
+			preempt_enable();
+		}
+		pebs_base = (UIOP)(per_cpu(dts_buffer_cea, this_cpu)) + dts_size;
+		SEP_DRV_LOG_TRACE("This_cpu: %d, pebs_base %p.", this_cpu, pebs_base);
+
+		dts = (DTS_BUFFER_EXT)(per_cpu(dts_buffer_cea, this_cpu));
+#else
+		SEP_DRV_LOG_ERROR_TRACE_OUT("KPTI is enabled without PAGE_TABLE_ISOLATION kernel configuration!");
+		return NULL;
+#endif
+	} else {
+		dts_buffer = (char *)pebs_global_memory + CPU_STATE_dts_buffer_offset(pcpu);
+		if (!dts_buffer) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("NULL (failed to allocate space for DTS buffer!).");
+			return NULL;
+		}
+		pebs_base = (UIOP)(dts_buffer) + dts_size;
+
+		CPU_STATE_dts_buffer(pcpu) = dts_buffer;
+		CPU_STATE_dts_buffer_size(pcpu) = PER_CORE_BUFFER_SIZE(dts_size, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]), DEV_CONFIG_pebs_record_num(pcfg));
+
+		// Make the PEBS base 32-byte aligned
+		if ((pebs_base & 0x000001F) != 0x0) {
+			pebs_base = ALIGN_32(pebs_base);
+		}
+
+		dts = (DTS_BUFFER_EXT)dts_buffer;
+	}
+
+	/*
+	 * Program the DTES Buffer for Precise EBS.
+	 * Set PEBS buffer for one PEBS record
+	 */
+	DTS_BUFFER_EXT_base(dts) = 0;
+	DTS_BUFFER_EXT_index(dts) = 0;
+	DTS_BUFFER_EXT_max(dts) = 0;
+	DTS_BUFFER_EXT_threshold(dts) = 0;
+	DTS_BUFFER_EXT_pebs_base(dts) = pebs_base;
+	DTS_BUFFER_EXT_pebs_index(dts) = pebs_base;
+	DTS_BUFFER_EXT_pebs_max(dts) =
+		pebs_base +
+		(DEV_CONFIG_pebs_record_num(pcfg) + 1) *
+			LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]);
+
+	pebs_dispatch->initialize_threshold(dts);
+
+	SEP_DRV_LOG_TRACE("base --- %llx.", DTS_BUFFER_EXT_pebs_base(dts));
+	SEP_DRV_LOG_TRACE("index --- %llu.", DTS_BUFFER_EXT_pebs_index(dts));
+	SEP_DRV_LOG_TRACE("max --- %llu.", DTS_BUFFER_EXT_pebs_max(dts));
+	SEP_DRV_LOG_TRACE("threshold --- %llu.",
+			  DTS_BUFFER_EXT_pebs_threshold(dts));
+	SEP_DRV_LOG_TRACE("DTES buffer allocated for PEBS: %p.", dts_buffer);
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %p.", dts);
+	return dts;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pebs_Allocate_Buffers (VOID *params)
+ *
+ * @brief Allocate memory and set up MSRs in preparation for PEBS
+ *
+ * @param params - not used
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *	Set up the DS area and program the DS_AREA msrs in preparation
+ *	for a PEBS run. Save away the old value in the DS_AREA.
+ *	This routine is called via the parallel thread call.
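+ *
+ *	The DS area is only programmed when IA32_MISC_ENABLE reports it is
+ *	usable; per the Intel SDM, bit 7 (0x80) signals that performance
+ *	monitoring is available and bit 12 (0x1000) signals that PEBS is
+ *	unavailable, hence the (value & 0x80) && !(value & 0x1000) gate in
+ *	the body below.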
+ */
+static VOID
+pebs_Allocate_Buffers(VOID *params)
+{
+	U64 value;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+	PVOID dts_ptr = NULL;
+
+	SEP_DRV_LOG_TRACE_IN("Params: %p.", params);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	if (!DEV_CONFIG_num_events(pcfg) || !DEV_CONFIG_pebs_mode(pcfg)) {
+		SEP_DRV_LOG_TRACE_OUT("");
+		return;
+	}
+
+	SYS_Write_MSR(IA32_PEBS_ENABLE, 0LL);
+	value = SYS_Read_MSR(IA32_MISC_ENABLE);
+	if ((value & 0x80) && !(value & 0x1000)) {
+		CPU_STATE_old_dts_buffer(pcpu) =
+			(PVOID)(UIOP)SYS_Read_MSR(IA32_DS_AREA);
+		dts_ptr = pebs_Alloc_DTS_Buffer();
+		if (!dts_ptr) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("dts_ptr is NULL!");
+			return;
+		}
+		SEP_DRV_LOG_TRACE("Old dts buffer - %p.",
+				  CPU_STATE_old_dts_buffer(pcpu));
+		SEP_DRV_LOG_TRACE("New dts buffer - %p.", dts_ptr);
+		SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)dts_ptr);
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pebs_Deallocate_Buffers (VOID *params)
+ *
+ * @brief Clean up PEBS buffers and restore older values into the DS_AREA
+ *
+ * @param params - not used
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *	Clean up the DS area and restore all state saved prior to the
+ *	sampling run. This routine is called via the parallel thread call.
+ */
+static VOID
+pebs_Deallocate_Buffers(VOID *params)
+{
+	CPU_STATE pcpu;
+	U32 this_cpu;
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+
+	SEP_DRV_LOG_TRACE_IN("Params: %p.", params);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	if (!DEV_CONFIG_num_events(pcfg) || !DEV_CONFIG_pebs_mode(pcfg)) {
+		SEP_DRV_LOG_TRACE_OUT("");
+		return;
+	}
+
+	SEP_DRV_LOG_TRACE("Entered deallocate buffers.");
+	SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)CPU_STATE_old_dts_buffer(pcpu));
+
+	if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) ==
+	    DRV_SETUP_INFO_PTI_KPTI) {
+#if defined(DRV_USE_PTI)
+		SEP_DRV_LOG_INIT("Freeing PEBS buffer using KPTI approach.");
+
+		if (local_cea_set_pte) {
+			size_t idx;
+			PVOID cea_ptr = per_cpu(dts_buffer_cea, this_cpu);
+			preempt_disable();
+			for (idx = 0; idx < CPU_STATE_dts_buffer_size(pcpu);
+			     idx += PAGE_SIZE, cea_ptr += PAGE_SIZE) {
+				local_cea_set_pte(cea_ptr, 0, PAGE_KERNEL);
+			}
+			pebs_Update_CEA(this_cpu);
+			preempt_enable();
+		}
+
+		if (CPU_STATE_dts_buffer(pcpu)) {
+			free_pages((unsigned long)CPU_STATE_dts_buffer(pcpu),
+				   get_order(CPU_STATE_dts_buffer_size(pcpu)));
+			CPU_STATE_dts_buffer(pcpu) = NULL;
+		}
+#endif
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 PEBS_Overflowed (this_cpu, overflow_status, rec_index)
+ *
+ * @brief Figure out if the PEBS event caused an overflow
+ *
+ * @param this_cpu -- the current cpu
+ *        overflow_status -- current value of the global overflow status
+ *        rec_index -- index of the PEBS record to examine
+ *
+ * @return updated overflow_status
+ *
+ * Special Notes:
+ *	Figure out if the PEBS area has data that need to be transferred
+ *	to the output sample.
+ *	Update the overflow_status that is passed and return this value.
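+ *
+ *	Illustrative bit layout of the returned status (mirrors the
+ *	decoding in PEBS_Flush_Buffer below), for counter i:
+ *
+ *	    gp_fired    = overflow_status & ((U64)1 << i);        // IA32_PMCi
+ *	    fixed_fired = overflow_status & ((U64)1 << (32 + i)); // IA32_FIXED_CTRi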
+ * The overflow_status defines the events/status to be read + */ +extern U64 +PEBS_Overflowed(S32 this_cpu, U64 overflow_status, U32 rec_index) +{ + U64 res; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d, overflow_status: %llx, rec_index: %u.", + this_cpu, overflow_status, rec_index); + + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + res = pebs_dispatch->overflow(this_cpu, overflow_status, rec_index); + + SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); + return res; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Reset_Index (this_cpu) + * + * @brief Reset the PEBS index pointer + * + * @param this_cpu -- the current cpu + * + * @return NONE + * + * Special Notes: + * reset index to next PEBS record to base of buffer + */ +extern VOID +PEBS_Reset_Index(S32 this_cpu) +{ + DTS_BUFFER_EXT dtes; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d.", this_cpu); + + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes) { + return; + } + SEP_DRV_LOG_TRACE("PEBS Reset Index: %d.", this_cpu); + DTS_BUFFER_EXT_pebs_index(dtes) = DTS_BUFFER_EXT_pebs_base(dtes); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +extern U32 +pmi_Get_CSD(U32, U32 *, U32 *); +#define EFLAGS_V86_MASK 0x00020000L + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Flush_Buffer (VOID * param) + * + * @brief generate sampling records from PEBS records in PEBS buffer + * + * @param param -- not used + * + * @return NONE + * + * Special Notes: + */ +extern VOID +PEBS_Flush_Buffer(VOID *param) +{ + U32 i, this_cpu, index, desc_id; + U64 pebs_overflow_status = 0; + U64 lbr_tos_from_ip = 0ULL; + DRV_BOOL counter_overflowed = FALSE; + ECB pecb; + CPU_STATE pcpu; + EVENT_DESC evt_desc; + BUFFER_DESC bd; + SampleRecordPC *psamp_pebs; + U32 is_64bit_addr = FALSE; + U32 u32PebsRecordNumFilled; +#if defined(DRV_IA32) + U32 seg_cs; + U32 csdlo; + U32 csdhi; +#endif + U32 dev_idx; + DEV_CONFIG pcfg; + U32 cur_grp; + DISPATCH dispatch; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + bd = &cpu_buf[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + cur_grp = CPU_STATE_current_group(pcpu); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + if (!DEV_CONFIG_num_events(pcfg)) { + return; + } + + if (!DEV_CONFIG_pebs_mode(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("PEBS is not enabled"); + return; + } + + if (!multi_pebs_enabled) { + SEP_DRV_LOG_TRACE_OUT("PEBS_Flush_Buffer is not supported."); + return; + } + + u32PebsRecordNumFilled = PEBS_Get_Num_Records_Filled(); + for (i = 0; i < u32PebsRecordNumFilled; i++) { + pebs_overflow_status = PEBS_Overflowed(this_cpu, 0, i); + SEP_DRV_LOG_TRACE("Pebs_overflow_status = 0x%llx, i=%d.", + pebs_overflow_status, i); + + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[cur_grp]; + FOR_EACH_DATA_REG (pecb, j) { + counter_overflowed = FALSE; + if ((!DEV_CONFIG_enable_adaptive_pebs(pcfg) && + !ECB_entries_is_gp_reg_get(pecb, j)) || + !ECB_entries_precise_get(pecb, j)) { + continue; + } + if (ECB_entries_fixed_reg_get(pecb, j)) { + index = ECB_entries_reg_id(pecb, j) - + IA32_FIXED_CTR0; + if (pebs_overflow_status & + ((U64)1 << (32 + index))) { + counter_overflowed = TRUE; + } + } else { + index = ECB_entries_reg_id(pecb, j) - IA32_PMC0; + if (pebs_overflow_status & (U64)1 << index) { 
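+				/* GP path: bit 'index' of the PEBS overflow
+				 * status maps 1:1 to IA32_PMC(index); fixed
+				 * counters were tested above at bit 32+index.
+				 */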
+ counter_overflowed = TRUE; + } + } + if (counter_overflowed) { + desc_id = ECB_entries_desc_id(pecb, j); + evt_desc = desc_data[desc_id]; + SEP_DRV_LOG_TRACE("Event_id_index=%u, desc_id=%u.", + ECB_entries_event_id_index(pecb, j), + desc_id); + psamp_pebs = (SampleRecordPC *) + OUTPUT_Reserve_Buffer_Space( + bd, + EVENT_DESC_sample_size(evt_desc), + (NMI_mode) ? TRUE : FALSE, + !SEP_IN_NOTIFICATION); + if (!psamp_pebs) { + SEP_DRV_LOG_ERROR("Could not generate samples from PEBS records."); + continue; + } + + lbr_tos_from_ip = 0ULL; + CPU_STATE_num_samples(&pcb[this_cpu]) += 1; + SAMPLE_RECORD_descriptor_id(psamp_pebs) = + desc_id; + SAMPLE_RECORD_event_index(psamp_pebs) = + ECB_entries_event_id_index(pecb, j); + SAMPLE_RECORD_pid_rec_index(psamp_pebs) = + (U32)-1; + SAMPLE_RECORD_pid_rec_index_raw(psamp_pebs) = 1; + SAMPLE_RECORD_tid(psamp_pebs) = (U32)-1; + SAMPLE_RECORD_cpu_num(psamp_pebs) = + (U16)this_cpu; + SAMPLE_RECORD_osid(psamp_pebs) = 0; + +#if defined(DRV_IA32) + PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, i); + SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; + if (SAMPLE_RECORD_eflags(psamp_pebs) & + EFLAGS_V86_MASK) { + csdlo = 0; + csdhi = 0; + } else { + seg_cs = SAMPLE_RECORD_cs(psamp_pebs); + SYS_Get_CSD(seg_cs, &csdlo, &csdhi); + } + SAMPLE_RECORD_csd(psamp_pebs).u1.lowWord = + csdlo; + SAMPLE_RECORD_csd(psamp_pebs).u2.highWord = + csdhi; +#elif defined(DRV_EM64T) + SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; + pmi_Get_CSD(SAMPLE_RECORD_cs(psamp_pebs), + &SAMPLE_RECORD_csd(psamp_pebs).u1.lowWord, + &SAMPLE_RECORD_csd(psamp_pebs).u2.highWord); + is_64bit_addr = + (SAMPLE_RECORD_csd(psamp_pebs).u2.s2.reserved_0 == 1); + if (is_64bit_addr) { + SAMPLE_RECORD_ia64_pc(psamp_pebs) = TRUE; + } else { + SAMPLE_RECORD_ia64_pc(psamp_pebs) = FALSE; + + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp_pebs) 0x%x.", + SAMPLE_RECORD_eip(psamp_pebs)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp_pebs) %x.", + SAMPLE_RECORD_eflags(psamp_pebs)); + } +#endif + if (EVENT_DESC_pebs_offset(evt_desc) || + EVENT_DESC_latency_offset_in_sample(evt_desc)) { + lbr_tos_from_ip = PEBS_Fill_Buffer( + (S8 *)psamp_pebs, evt_desc, i); + } + PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, i); + PEBS_Modify_TSC((S8 *)psamp_pebs, i); + if (ECB_entries_branch_evt_get(pecb, j) && + DEV_CONFIG_precise_ip_lbrs(pcfg) && + lbr_tos_from_ip) { + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp_pebs) = + lbr_tos_from_ip; + SEP_DRV_LOG_TRACE("UPDATED SAMPLE_RECORD_iip(psamp) 0x%llx.", + SAMPLE_RECORD_iip(psamp_pebs)); + } else { + SAMPLE_RECORD_eip(psamp_pebs) = + (U32)lbr_tos_from_ip; + SEP_DRV_LOG_TRACE("UPDATED SAMPLE_RECORD_eip(psamp) 0x%x.", + SAMPLE_RECORD_eip(psamp_pebs)); + } + } + if (i == u32PebsRecordNumFilled - 1 && + DRV_CONFIG_event_based_counts(drv_cfg) && + ECB_entries_em_trigger_get(pecb, j)) { + dispatch->read_counts( + (S8 *)psamp_pebs, + ECB_entries_event_id_index(pecb, j)); + } + } + } + END_FOR_EACH_DATA_REG; + } + PEBS_Reset_Index(this_cpu); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
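+ * Illustrative counter_index layout assumed by the switch statements in
+ * this routine (offsets inferred from the code, not authoritative):
+ *     0..3  -> DTS_BUFFER_EXT  counter_reset0..3        (all PEBS modes)
+ *     4..7  -> DTS_BUFFER_EXT1 counter_reset4..7        (pebs_mode == 6)
+ *     8..11 -> DTS_BUFFER_EXT1 fixed_counter_reset0..3  (pebs_mode == 6)
+ * pebs_mode == 7 instead indexes the DTS_BUFFER_EXT2 reset arrays via
+ * gp_index/fixed_index.
+ *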
+ * @fn VOID PEBS_Reset_Counter (this_cpu, index, op_type) + * + * @brief set reset value for PMC after overflow + * + * @param this_cpu -- the current cpu + * index -- PMC register index + * op_type -- Type of Operation + * + * @return NONE + * + * Special Notes: + */ +extern VOID +PEBS_Reset_Counter(S32 this_cpu, U32 index, U32 op_type) +{ + DTS_BUFFER_EXT dts; + DTS_BUFFER_EXT1 dts_ext = NULL; + DTS_BUFFER_EXT2 dts_ext2 = NULL; + U32 dev_idx; + DEV_CONFIG pcfg; + CPU_STATE pcpu; + U32 counter_index = 0; + U32 gp_index = 0; + U32 fixed_index = 0; + U32 fixed_offset = 0; + U32 cur_grp; + ECB pecb; + U64 value; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d, index: %u.", this_cpu, index); + + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + dts = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!DEV_CONFIG_num_events(pcfg)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid number of events"); + return; + } + + if (!pecb) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid ECB data"); + return; + } + + if (!dts) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid PEBS Buffer"); + return; + } + + value = ECB_entries_reg_value(pecb, index); + + if (ECB_entries_fixed_reg_get(pecb, index)) { + if (DEV_CONFIG_pebs_mode(pcfg) == 7) { + fixed_offset = EXT2_FIXED_OFFSET; + } else if (DEV_CONFIG_pebs_mode(pcfg) <= 6) { + fixed_offset = EXT1_FIXED_OFFSET; + } else { + SEP_DRV_LOG_ERROR_TRACE_OUT("Not supported PEBS Mode"); + return; + } + + if (op_type == PMU_OPERATION_ALL_REG) { + counter_index = (ECB_entries_reg_id(pecb, index) - IA32_FIXED_CTR0 + fixed_offset); + } else { + counter_index = index - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_FIXED) + fixed_offset; + } + } else { + if (op_type == PMU_OPERATION_ALL_REG) { + counter_index = (ECB_entries_reg_id(pecb, index) - IA32_PMC0); + } else { + counter_index = index - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_GP); + } + } + + SEP_DRV_LOG_TRACE("PEBS Reset GP Counters[0:4]: cpu %d, index=%u, value=%llx.", this_cpu, counter_index, value); + + switch (counter_index) { + case 0: + DTS_BUFFER_EXT_counter_reset0(dts) = value; + break; + case 1: + DTS_BUFFER_EXT_counter_reset1(dts) = value; + break; + case 2: + DTS_BUFFER_EXT_counter_reset2(dts) = value; + break; + case 3: + DTS_BUFFER_EXT_counter_reset3(dts) = value; + break; + } + + if (counter_index < NUM_EXT_COUNTER_RESET) { + return; + } + + if (!DEV_CONFIG_enable_adaptive_pebs(pcfg) && !DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("PEBS is not enabled or Fixed Counter Collection"); + return; + } + + SEP_DRV_LOG_TRACE("PEBS Reset Fixed Counters and GP Counters[4:7]: cpu %d, index=%u, value=%llx.", this_cpu, counter_index, value); + + if (DEV_CONFIG_pebs_mode(pcfg) == 6) { + dts_ext = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + switch (counter_index) { + case 4: + DTS_BUFFER_EXT1_counter_reset4(dts_ext) = value; + break; + case 5: + DTS_BUFFER_EXT1_counter_reset5(dts_ext) = value; + break; + case 6: + DTS_BUFFER_EXT1_counter_reset6(dts_ext) = value; + break; + case 7: + DTS_BUFFER_EXT1_counter_reset7(dts_ext) = value; + break; + case 8: + DTS_BUFFER_EXT1_fixed_counter_reset0(dts_ext) = value; + break; + case 9: + DTS_BUFFER_EXT1_fixed_counter_reset1(dts_ext) = value; + break; + case 10: + DTS_BUFFER_EXT1_fixed_counter_reset2(dts_ext) = value; + break; + case 11: + DTS_BUFFER_EXT1_fixed_counter_reset3(dts_ext) = 
value; + break; + } + } else if (DEV_CONFIG_pebs_mode(pcfg) == 7) { + dts_ext2 = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (counter_index < (NUM_EXT_COUNTER_RESET + NUM_EXT2_COUNTER_RESET)) { + gp_index = counter_index - NUM_EXT_COUNTER_RESET; + DTS_BUFFER_EXT2_counter_reset(dts_ext2)[gp_index] = value; + } else { + fixed_index = counter_index - (NUM_EXT_COUNTER_RESET + NUM_EXT2_COUNTER_RESET); + if (fixed_index <= NUM_EXT2_FIXED_COUNTER_RESET) { + DTS_BUFFER_EXT2_fixed_counter_reset(dts_ext2)[fixed_index] = value; + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Modify_IP (sample, is_64bit_addr) + * + * @brief Change the IP field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * @param is_64bit_addr - are we in a 64 bit module + * + * @return NONE + * + * Special Notes: + * + */ +extern VOID +PEBS_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index) +{ + U32 this_cpu; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", + sample, is_64bit_addr, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + pebs_dispatch->modify_ip(sample, is_64bit_addr, rec_index); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Modify_TSC (sample) + * + * @brief Change the TSC field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * + * @return NONE + * + * Special Notes: + * + */ +extern VOID +PEBS_Modify_TSC(void *sample, U32 rec_index) +{ + U32 this_cpu; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + if (pebs_dispatch->modify_tsc != NULL) { + pebs_dispatch->modify_tsc(sample, rec_index); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +extern U32 +PEBS_Get_Num_Records_Filled(VOID) +{ + U32 this_cpu; + U32 dev_idx; + PEBS_DISPATCH pebs_dispatch; + U32 num = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + if (pebs_dispatch->get_num_records_filled != NULL) { + num = pebs_dispatch->get_num_records_filled(); + SEP_DRV_LOG_TRACE("Num=%u.", num); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", num); + return num; +} +/* ------------------------------------------------------------------------- */ +/*! 
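+ * Note on the translation below: linear addresses that are valid in the
+ * kernel direct map are converted with __pa(); user-space addresses
+ * (below __PAGE_OFFSET) are pinned briefly with a get_user_pages_fast()
+ * variant under pagefault_disable() so the backing page's physical
+ * address can be read safely from this context.
+ *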
+ * @fn VOID PEBS_Fill_Phy_Addr (LATENCY_INFO latency_info) + * + * @brief Fill latency node with phy addr when applicable + * + * @param latency_info - pointer to LATENCY_INFO struct + * + * @return NONE + * + * Special Notes: + * + */ + +extern VOID +PEBS_Fill_Phy_Addr(LATENCY_INFO latency_info) +{ +#if defined(DRV_EM64T) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + U64 lin_addr; + U64 offset; + struct page *page; + + if (!DRV_CONFIG_virt_phys_translation(drv_cfg)) { + return; + } + lin_addr = (U64)LATENCY_INFO_linear_address(latency_info); + if (lin_addr != 0) { + offset = (U64)(lin_addr & 0x0FFF); + if (__virt_addr_valid(lin_addr)) { + LATENCY_INFO_phys_addr(latency_info) = + (U64)__pa(lin_addr); + } else if (lin_addr < __PAGE_OFFSET) { + pagefault_disable(); +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) + if (__get_user_pages_fast(lin_addr, 1, 1, &page)) { +#else + if (get_user_pages_fast_only(lin_addr, 1, 1, &page)) { +#endif + LATENCY_INFO_phys_addr(latency_info) = + (U64)page_to_phys(page) + offset; + put_page(page); + } + pagefault_enable(); + } + } +#endif + return; +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 PEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) + * + * @brief Fill the buffer with the pebs data + * + * @param buffer - area to write the data into + * event_desc - event descriptor of the pebs event + rec_index - current pebs record index + * + * @return if APEBS return LBR_TOS_FROM_IP else return 0 + * + * Special Notes: + * + */ +extern U64 +PEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) +{ + DTS_BUFFER_EXT dtes; + LATENCY_INFO_NODE latency_info = { 0 }; + PEBS_REC_EXT1 pebs_base_ext1; + PEBS_REC_EXT2 pebs_base_ext2; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U8 pebs_ptr_check = FALSE; + U64 lbr_tos_from_ip = 0ULL; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, + evt_desc, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!DEV_CONFIG_num_events(pcfg)) { + return lbr_tos_from_ip; + } + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + lbr_tos_from_ip = + APEBS_Fill_Buffer(buffer, evt_desc, rec_index); + return lbr_tos_from_ip; + } + + SEP_DRV_LOG_TRACE("In PEBS Fill Buffer: cpu %d.", CONTROL_THIS_CPU()); + + if (!dtes) { + return lbr_tos_from_ip; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return lbr_tos_from_ip; + } + pebs_base = pebs_ptr; + if (EVENT_DESC_pebs_offset(evt_desc)) { + SEP_DRV_LOG_TRACE("PEBS buffer has data available."); + memcpy(buffer + EVENT_DESC_pebs_offset(evt_desc), pebs_base, + EVENT_DESC_pebs_size(evt_desc)); + } + if (EVENT_DESC_eventing_ip_offset(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + *(U64 *)(buffer + EVENT_DESC_eventing_ip_offset(evt_desc)) = + PEBS_REC_EXT1_eventing_ip(pebs_base_ext1); + } + if (EVENT_DESC_hle_offset(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = + PEBS_REC_EXT1_hle_info(pebs_base_ext1); + } + if 
(EVENT_DESC_latency_offset_in_sample(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + memcpy(&latency_info, + pebs_base + EVENT_DESC_latency_offset_in_pebs_record( + evt_desc), + EVENT_DESC_latency_size_from_pebs_record(evt_desc)); + memcpy(&LATENCY_INFO_stack_pointer(&latency_info), + &PEBS_REC_EXT1_rsp(pebs_base_ext1), sizeof(U64)); + + LATENCY_INFO_phys_addr(&latency_info) = 0; + PEBS_Fill_Phy_Addr(&latency_info); + + memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), + &latency_info, sizeof(LATENCY_INFO_NODE)); + } + if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { + pebs_base_ext2 = (PEBS_REC_EXT2)pebs_base; + *(U64 *)(buffer + EVENT_DESC_pebs_tsc_offset(evt_desc)) = + PEBS_REC_EXT2_tsc(pebs_base_ext2); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return lbr_tos_from_ip; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 APEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) + * + * @brief Fill the buffer with the pebs data + * + * @param buffer - area to write the data into + * event_desc - event descriptor of the pebs event + * rec_index - current pebs record index + * + * @return LBR_TOS_FROM_IP + * + * Special Notes: + * + */ +extern U64 +APEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) +{ + DTS_BUFFER_EXT1 dtes; + LATENCY_INFO_NODE latency_info = { 0 }; + U64 dtes_record_size = 0; + U64 dtes_record_format = 0; + ADAPTIVE_PEBS_MEM_INFO apebs_mem = NULL; + ADAPTIVE_PEBS_GPR_INFO apebs_gpr = NULL; + ADAPTIVE_PEBS_BASIC_INFO apebs_basic = NULL; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U8 pebs_ptr_check = FALSE; + U64 lbr_tos_from_ip = 0ULL; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, + evt_desc, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + SEP_DRV_LOG_TRACE("In APEBS Fill Buffer: cpu %d.", this_cpu); + + if (!DEV_CONFIG_num_events(pcfg)) { + return lbr_tos_from_ip; + } + + if (!dtes || !DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + return lbr_tos_from_ip; + } + + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return lbr_tos_from_ip; + } + + pebs_base = pebs_ptr; + apebs_basic = + (ADAPTIVE_PEBS_BASIC_INFO)(pebs_base + + LWPMU_DEVICE_apebs_basic_offset( + &devices[dev_idx])); + dtes_record_size = (ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & + APEBS_RECORD_SIZE_MASK) >> + 48; // [63:48] + dtes_record_format = + (ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & + APEBS_RECORD_FORMAT_MASK); // [47:0] + + if (dtes_record_size != + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) { + SEP_DRV_LOG_TRACE("PEBS record size does not match with ucode\n"); + } + if (EVENT_DESC_pebs_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_pebs_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic); + } + if (EVENT_DESC_eventing_ip_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_eventing_ip_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_eventing_ip(apebs_basic); + } + if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { + *(U64 *)(buffer + 
EVENT_DESC_pebs_tsc_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_tsc(apebs_basic); + } + if (EVENT_DESC_applicable_counters_offset(evt_desc)) { + *(U64 *)(buffer + + EVENT_DESC_applicable_counters_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_applicable_counters( + apebs_basic); + } + if (DEV_CONFIG_apebs_collect_gpr(pcfg) && + EVENT_DESC_gpr_info_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_GPR_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("GPR info not found in DS PEBS record."); + } + memcpy(buffer + EVENT_DESC_gpr_info_offset(evt_desc), + pebs_base + + LWPMU_DEVICE_apebs_gpr_offset(&devices[dev_idx]), + EVENT_DESC_gpr_info_size(evt_desc)); + } + if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && + EVENT_DESC_latency_offset_in_sample(evt_desc)) { + if (!(dtes_record_format & APEBS_MEM_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("MEM info not found in DS PEBS record."); + } + apebs_mem = + (ADAPTIVE_PEBS_MEM_INFO)(pebs_base + + LWPMU_DEVICE_apebs_mem_offset( + &devices[dev_idx])); + memcpy(&LATENCY_INFO_linear_address(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_data_linear_address(apebs_mem), + sizeof(U64)); + memcpy(&LATENCY_INFO_data_source(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_data_source(apebs_mem), + sizeof(U64)); + memcpy(&LATENCY_INFO_latency(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_latency(apebs_mem), sizeof(U64)); + LATENCY_INFO_stack_pointer(&latency_info) = 0; + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + apebs_gpr = + (ADAPTIVE_PEBS_GPR_INFO)(pebs_base + + LWPMU_DEVICE_apebs_gpr_offset( + &devices[dev_idx])); + memcpy(&LATENCY_INFO_stack_pointer(&latency_info), + &ADAPTIVE_PEBS_GPR_INFO_rsp(apebs_gpr), + sizeof(U64)); + } + + LATENCY_INFO_phys_addr(&latency_info) = 0; + PEBS_Fill_Phy_Addr(&latency_info); + memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), + &latency_info, sizeof(LATENCY_INFO_NODE)); + } + if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && + EVENT_DESC_hle_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = + ADAPTIVE_PEBS_MEM_INFO_hle_info(( + ADAPTIVE_PEBS_MEM_INFO)(pebs_base + + LWPMU_DEVICE_apebs_mem_offset( + &devices[dev_idx]))); + } + if (DEV_CONFIG_apebs_collect_xmm(pcfg) && + EVENT_DESC_xmm_info_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_XMM_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("XMM info not found in DS PEBS record."); + } + memcpy(buffer + EVENT_DESC_xmm_info_offset(evt_desc), + pebs_base + + LWPMU_DEVICE_apebs_xmm_offset(&devices[dev_idx]), + EVENT_DESC_xmm_info_size(evt_desc)); + } + if (DEV_CONFIG_apebs_collect_lbrs(pcfg) && + EVENT_DESC_lbr_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_LBR_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("LBR info not found in DS PEBS record\n"); + } + if ((dtes_record_format >> 24) != + (DEV_CONFIG_num_lbr_entries(pcfg) - 1)) { + SEP_DRV_LOG_WARNING("DRV_CONFIG_num_lbr_entries does not match with PEBS record\n"); + } + *(U64 *)(buffer + EVENT_DESC_lbr_offset(evt_desc)) = + DEV_CONFIG_num_lbr_entries(pcfg) - + 1; //Top-of-Stack(TOS) pointing to last entry + //Populating lbr callstack as SST_ENTRY_N to SST_ENTRY_0 in tb util, hence setting TOS to SST_ENTRY_N + memcpy(buffer + EVENT_DESC_lbr_offset(evt_desc) + sizeof(U64), + pebs_base + + LWPMU_DEVICE_apebs_lbr_offset(&devices[dev_idx]), + EVENT_DESC_lbr_info_size(evt_desc) - sizeof(U64)); + lbr_tos_from_ip = ADAPTIVE_PEBS_LBR_INFO_lbr_from( + (ADAPTIVE_PEBS_LBR_INFO)(pebs_base + + LWPMU_DEVICE_apebs_lbr_offset( + &devices[dev_idx]))); + } + return lbr_tos_from_ip; +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS PEBS_Initialize (DEV_CONFIG pcfg) + * + * @brief Initialize the pebs buffers + * + * @param dev_idx - Device index + * + * @return status + * + * Special Notes: + * If the user is asking for PEBS information. Allocate the DS area + */ +extern OS_STATUS +PEBS_Initialize(U32 dev_idx) +{ + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN("Pcfg: %p.", pcfg); + + if (!DEV_CONFIG_pebs_mode(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("PEBS is not enabled"); + return OS_SUCCESS; + } + + switch (DEV_CONFIG_pebs_mode(pcfg)) { + case 1: + SEP_DRV_LOG_INIT("Set up the Core2 dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &core2_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_NODE); + break; + case 2: + SEP_DRV_LOG_INIT("Set up the Nehalem dispatch."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &corei7_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT_NODE); + break; + case 3: + SEP_DRV_LOG_INIT("Set up the Core2 (PNR) dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &core2p_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_NODE); + break; + case 4: + SEP_DRV_LOG_INIT("Set up the Haswell dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &haswell_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT1_NODE); + break; + case 5: + SEP_DRV_LOG_INIT("Set up the Perf version4 dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &perfver4_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT2_NODE); + break; + case 6: + case 7: + if (!DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + SEP_DRV_LOG_TRACE("APEBS need to be enabled in perf version4 SNC dispatch mode."); + } + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &perfver4_apebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(ADAPTIVE_PEBS_BASIC_INFO_NODE); + if (DEV_CONFIG_apebs_collect_mem_info(pcfg)) { + LWPMU_DEVICE_apebs_mem_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_MEM_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + LWPMU_DEVICE_apebs_gpr_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_GPR_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_xmm(pcfg)) { + LWPMU_DEVICE_apebs_xmm_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_XMM_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_lbrs(pcfg)) { + LWPMU_DEVICE_apebs_lbr_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + (sizeof(ADAPTIVE_PEBS_LBR_INFO_NODE) * + DEV_CONFIG_num_lbr_entries(pcfg)); + } + SEP_DRV_LOG_TRACE("Size of adaptive pebs record - %d.", + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx])); + break; + default: + SEP_DRV_LOG_INIT("Unknown PEBS type. 
Will not collect PEBS information."); + break; + } + if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) && + !DEV_CONFIG_pebs_record_num(pcfg)) { + DEV_CONFIG_pebs_record_num(pcfg) = 1; + } + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS PEBS_Allocate (void) + * + * @brief Allocate the pebs related buffers + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Allocated the DS area used for PEBS capture + */ +extern OS_STATUS +PEBS_Allocate(VOID) +{ + S32 cpu_num; + CPU_STATE pcpu; + U32 dev_idx; + U32 dts_size; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_INIT_IN(""); + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); cpu_num++) { + pcpu = &pcb[cpu_num]; + dev_idx = core_to_dev_map[cpu_num]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!DEV_CONFIG_pebs_mode(pcfg)) { + continue; + } + if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx])) { + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + if (DEV_CONFIG_pebs_mode(pcfg) == 6) { + dts_size = sizeof(DTS_BUFFER_EXT1_NODE); + } else if (DEV_CONFIG_pebs_mode(pcfg) == 7) { + dts_size = sizeof(DTS_BUFFER_EXT2_NODE); + } else { + SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid PEBS mode for Adaptive pebs(%d).", DEV_CONFIG_pebs_mode(pcfg)); + return OS_INVALID; + } + } else { + dts_size = sizeof(DTS_BUFFER_EXT_NODE); + } + CPU_STATE_dts_buffer_offset(pcpu) = pebs_global_memory_size; + pebs_global_memory_size += PER_CORE_BUFFER_SIZE(dts_size, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]), DEV_CONFIG_pebs_record_num(pcfg)); + } + } + if (pebs_global_memory_size) { + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == DRV_SETUP_INFO_PTI_DISABLED) { + SEP_DRV_LOG_INIT("Allocating global PEBS buffer using regular control routine."); + pebs_global_memory = (PVOID)CONTROL_Allocate_KMemory(pebs_global_memory_size); + if (!pebs_global_memory) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Failed to allocate PEBS buffer!"); + return OS_NO_MEM; + } + memset(pebs_global_memory, 0, pebs_global_memory_size); + } else { +#if defined(DRV_USE_KAISER) + SEP_DRV_LOG_INIT("Allocating PEBS buffer using KAISER-compatible approach."); + + if (!local_kaiser_add_mapping) { + local_kaiser_add_mapping = (PVOID)UTILITY_Find_Symbol("kaiser_add_mapping"); + if (!local_kaiser_add_mapping) { + SEP_DRV_LOG_ERROR("Could not find 'kaiser_add_mapping'!"); + goto kaiser_error_handling; + } + } + + if (!local_kaiser_remove_mapping) { + local_kaiser_remove_mapping = (PVOID)UTILITY_Find_Symbol("kaiser_remove_mapping"); + if (!local_kaiser_remove_mapping) { + SEP_DRV_LOG_ERROR("Could not find 'kaiser_remove_mapping'!"); + goto kaiser_error_handling; + } + } + + pebs_global_memory = (PVOID)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(pebs_global_memory_size)); + + if (pebs_global_memory) { + SEP_DRV_LOG_TRACE("Successful memory allocation for pebs_global_memory."); + + if (local_kaiser_add_mapping((unsigned long)pebs_global_memory, pebs_global_memory_size, __PAGE_KERNEL) >= 0) { + SEP_DRV_LOG_TRACE("Successful kaiser_add_mapping."); + } else { + SEP_DRV_LOG_ERROR("KAISER mapping failed!"); + free_pages((unsigned long)pebs_global_memory, get_order(pebs_global_memory_size)); + pebs_global_memory = NULL; + goto kaiser_error_handling; + } + } else { + SEP_DRV_LOG_ERROR("Failed memory allocation for pebs_global_memory!"); + } + +kaiser_error_handling: + if (!pebs_global_memory) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Failed to setup PEBS buffer!"); + return 
OS_NO_MEM; + } +#elif defined(DRV_USE_PTI) + if (!local_cea_set_pte) { + local_cea_set_pte = (PVOID)UTILITY_Find_Symbol("cea_set_pte"); + if (!local_cea_set_pte) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Could not find 'cea_set_pte'!"); + return OS_FAULT; + } + } + if (!local_do_kernel_range_flush) { + local_do_kernel_range_flush = (PVOID)UTILITY_Find_Symbol("do_kernel_range_flush"); + if (!local_do_kernel_range_flush) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Could not find 'do_kernel_range_flush'!"); + return OS_FAULT; + } + } +#endif // DRV_USE_PTI + } + } + + CONTROL_Invoke_Parallel(pebs_Allocate_Buffers, (VOID *)NULL); + + SEP_DRV_LOG_INIT_OUT(""); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Destroy (void) + * + * @brief Clean up the pebs related buffers + * + * @param pcfg - Driver Configuration + * + * @return NONE + * + * Special Notes: + * Deallocated the DS area used for PEBS capture + */ +extern VOID +PEBS_Destroy(VOID) +{ + SEP_DRV_LOG_TRACE_IN(""); + + CONTROL_Invoke_Parallel(pebs_Deallocate_Buffers, (VOID *)(size_t)0); + if (pebs_global_memory) { + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED) { + SEP_DRV_LOG_INIT("Freeing PEBS buffer using regular control routine."); + pebs_global_memory = + CONTROL_Free_Memory(pebs_global_memory); + } +#if defined(DRV_USE_KAISER) + else if (DRV_SETUP_INFO_page_table_isolation( + &req_drv_setup_info) == + DRV_SETUP_INFO_PTI_KAISER) { + SEP_DRV_LOG_INIT("Freeing PEBS buffer using KAISER-compatible approach."); + if (local_kaiser_remove_mapping) { + local_kaiser_remove_mapping( + (unsigned long)pebs_global_memory, + pebs_global_memory_size); + } else { + SEP_DRV_LOG_ERROR("Could not call 'kaiser_remove_mapping'!"); + } + free_pages((unsigned long)pebs_global_memory, + get_order(pebs_global_memory_size)); + pebs_global_memory = NULL; + } +#endif // DRV_USE_KAISER + + pebs_global_memory_size = 0; + SEP_DRV_LOG_INIT("PEBS buffer successfully freed."); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + diff --git a/drivers/platform/x86/sepdk/sep/pebs_linref.c b/drivers/platform/x86/sepdk/sep/pebs_linref.c new file mode 100644 index 00000000000000..cee3140b5ad339 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/pebs_linref.c @@ -0,0 +1,1797 @@ +/**** + Copyright (C) 2012 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "lwpmudrv.h" +#include "control.h" +#include "core2.h" +#include "utility.h" +#include "output.h" +#include "ecb_iterators.h" +#include "pebs.h" + +static PVOID pebs_global_memory; +static size_t pebs_global_memory_size; + +extern DRV_CONFIG drv_cfg; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Corei7_Initialize_Threshold (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) + * + * @brief The nehalem specific initialization + * + * @param dts - dts description + * + * @return NONE + * + * Special Notes: + */ +static VOID +pebs_Corei7_Initialize_Threshold(DTS_BUFFER_EXT dts) +{ + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + DTS_BUFFER_EXT_pebs_threshold(dts) = + DTS_BUFFER_EXT_pebs_base(dts) + + (LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) * + DEV_CONFIG_pebs_record_num(pcfg)); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Corei7_Overflow () + * + * @brief The Nehalem specific overflow check + * + * @param this_cpu - cpu id + * overflow_status - overflow status + * rec_index - record index + * + * @return NONE + * + * Special Notes: + * Check the global overflow field of the buffer descriptor. + * Precise events can be allocated on any of the 4 general purpose + * registers. + */ +static U64 +pebs_Corei7_Overflow(S32 this_cpu, U64 overflow_status, U32 rec_index) +{ + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index, *pebs_ptr; + PEBS_REC_EXT pb; + U8 pebs_ptr_check = FALSE; + U32 dev_idx = core_to_dev_map[this_cpu]; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d, overflow_status: %llx, rec_index: %u.", + this_cpu, overflow_status, rec_index); + + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + SEP_DRV_LOG_TRACE("This_cpu: %d, dtes %p.", this_cpu, dtes); + + if (!dtes) { + return overflow_status; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + SEP_DRV_LOG_TRACE("This_cpu: %d, pebs_base %p.", this_cpu, pebs_base); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (pebs_ptr_check) { + pb = (PEBS_REC_EXT)pebs_ptr; + overflow_status |= PEBS_REC_EXT_glob_perf_overflow(pb); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Corei7_Overflow_APEBS () + * + * @brief Overflow check + * + * @param this_cpu - cpu id + * overflow_status - overflow status + * rec_index - record index + * + * @return NONE + * + * Special Notes: + * Check the global overflow field of the buffer descriptor. + * Precise events can be allocated on any of the 8 general purpose + * registers or 4 fixed registers. 
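+ *
+ *	Illustrative note: unlike the legacy record's glob_perf_overflow
+ *	field, an adaptive PEBS record reports its triggering counters in
+ *	the applicable_counters bitmask (GP counters in the low bits, fixed
+ *	counters from bit 32 up), which is OR-ed into overflow_status below.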
+ */ +static U64 +pebs_Corei7_Overflow_APEBS(S32 this_cpu, U64 overflow_status, U32 rec_index) +{ + S8 *pebs_base, *pebs_index, *pebs_ptr; + ADAPTIVE_PEBS_BASIC_INFO pb; + DTS_BUFFER_EXT1 dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + U8 pebs_ptr_check = FALSE; + U32 dev_idx = core_to_dev_map[this_cpu]; + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!dtes) { + return overflow_status; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + + if (pebs_ptr_check && DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + pb = (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr; + overflow_status |= + ADAPTIVE_PEBS_BASIC_INFO_applicable_counters(pb); + } + + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Core2_Initialize_Threshold (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) + * + * @brief The Core2 specific initialization + * + * @param dts - dts description + * + * @return NONE + * + * Special Notes: + */ +static VOID +pebs_Core2_Initialize_Threshold(DTS_BUFFER_EXT dts) +{ + SEP_DRV_LOG_TRACE_IN("Dts: %p.", dts); + + DTS_BUFFER_EXT_pebs_threshold(dts) = DTS_BUFFER_EXT_pebs_base(dts); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Core2_Overflow (dts, LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) + * + * @brief The Core2 specific overflow check + * + * @param this_cpu - cpu id + * overflow_status - overflow status + * rec_index - record index + * + * @return NONE + * + * Special Notes: + * Check the base and the index fields of the circular buffer, if they are + * not the same, then a precise event has overflowed. Precise events are + * allocated only on register#0. + */ +static U64 +pebs_Core2_Overflow(S32 this_cpu, U64 overflow_status, U32 rec_index) +{ + DTS_BUFFER_EXT dtes; + U8 status = FALSE; + + SEP_DRV_LOG_TRACE_IN("This_cpu: %d, overflow_status: %llx, rec_index: %u.", + this_cpu, overflow_status, rec_index); + + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Res: %llx (dtes is NULL!).", + overflow_status); + return overflow_status; + } + status = (U8)((dtes) && (DTS_BUFFER_EXT_pebs_index(dtes) != + DTS_BUFFER_EXT_pebs_base(dtes))); + if (status) { + // Merom allows only for general purpose register 0 to be precise capable + overflow_status |= 0x1; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llx.", overflow_status); + return overflow_status; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID pebs_Modify_IP (sample, is_64bit_addr) + * + * @brief Change the IP field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * @param is_64bit_addr - are we in a 64 bit module + * + * @return NONE + * + * Special Notes: + * + */ +static VOID +pebs_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index, *pebs_ptr; + PEBS_REC_EXT pb; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", + sample, is_64bit_addr, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + SEP_DRV_LOG_TRACE("In PEBS Fill Buffer: cpu %d.", CONTROL_THIS_CPU()); + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (pebs_ptr_check) { + pb = (PEBS_REC_EXT)pebs_ptr; + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = PEBS_REC_EXT_linear_ip(pb); + SAMPLE_RECORD_ipsr(psamp) = PEBS_REC_EXT_r_flags(pb); + } else { + SAMPLE_RECORD_eip(psamp) = + PEBS_REC_EXT_linear_ip(pb) & 0xFFFFFFFF; + SAMPLE_RECORD_eflags(psamp) = + PEBS_REC_EXT_r_flags(pb) & 0xFFFFFFFF; + } + } + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Modify_IP_With_Eventing_IP (sample, is_64bit_addr) + * + * @brief Change the IP field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * @param is_64bit_addr - are we in a 64 bit module + * + * @return NONE + * + * Special Notes: + * + */ +static VOID +pebs_Modify_IP_With_Eventing_IP(void *sample, + DRV_BOOL is_64bit_addr, + U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_ptr, *pebs_base, *pebs_index; + U64 ip = 0, flags = 0; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.", + sample, is_64bit_addr, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + + if (!pebs_ptr_check) { + return; + } + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + ip = ADAPTIVE_PEBS_BASIC_INFO_eventing_ip( + (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + flags = ADAPTIVE_PEBS_GPR_INFO_rflags(( + ADAPTIVE_PEBS_GPR_INFO)(pebs_ptr + + LWPMU_DEVICE_apebs_gpr_offset( + &devices[dev_idx]))); + } + } else { + ip = PEBS_REC_EXT1_eventing_ip((PEBS_REC_EXT1)pebs_ptr); + flags = PEBS_REC_EXT1_r_flags((PEBS_REC_EXT1)pebs_ptr); + } + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = ip; + SAMPLE_RECORD_ipsr(psamp) = flags; + } else { + 
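+		/*
+		 * 32-bit module: keep only the low dword of the eventing IP
+		 * and flags, mirroring the truncation in pebs_Modify_IP.
+		 */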
SAMPLE_RECORD_eip(psamp) = ip & 0xFFFFFFFF; + SAMPLE_RECORD_eflags(psamp) = flags & 0xFFFFFFFF; + } + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID pebs_Modify_TSC (sample) + * + * @brief Change the TSC field in the sample to that in the PEBS record + * + * @param sample - sample buffer + * rec_index - record index + * @return NONE + * + * Special Notes: + * + */ +static VOID +pebs_Modify_TSC(void *sample, U32 rec_index) +{ + SampleRecordPC *psamp = sample; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U64 tsc; + U8 pebs_ptr_check = FALSE; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes || !psamp) { + return; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return; + } + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + tsc = ADAPTIVE_PEBS_BASIC_INFO_tsc( + (ADAPTIVE_PEBS_BASIC_INFO)pebs_ptr); + } else { + tsc = PEBS_REC_EXT2_tsc((PEBS_REC_EXT2)pebs_ptr); + } + SAMPLE_RECORD_tsc(psamp) = tsc; + SEP_DRV_LOG_TRACE_OUT(""); + return; +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn U32 pebs_Get_Num_Records_Filled () + * + * @brief get number of PEBS records filled in PEBS buffer + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * + */ +static U32 +pebs_Get_Num_Records_Filled(VOID) +{ + U32 num = 0; + DTS_BUFFER_EXT dtes; + S8 *pebs_base, *pebs_index; + U32 this_cpu; + U32 dev_idx; + + SEP_DRV_LOG_TRACE_IN(""); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (!dtes) { + return num; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + if (pebs_base != pebs_index) { + num = (U32)(pebs_index - pebs_base) / + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]); + } + + SEP_DRV_LOG_TRACE_OUT("Res: %u.", num); + return num; +} + +/* + * Initialize the pebs micro dispatch tables + */ +PEBS_DISPATCH_NODE core2_pebs = { + .initialize_threshold = pebs_Core2_Initialize_Threshold, + .overflow = pebs_Core2_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE core2p_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Core2_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE corei7_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP, + .modify_tsc = NULL, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE haswell_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = NULL, + 
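+	/*
+	 * No modify_tsc here: the Haswell-era PEBS_REC_EXT1 record used by
+	 * this dispatch (see PEBS_Initialize, pebs_mode 4) predates the TSC
+	 * field that PEBS_REC_EXT2 added, so there is nothing to copy.
+	 */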
.get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE perfver4_pebs = { + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = pebs_Modify_TSC, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +PEBS_DISPATCH_NODE perfver4_apebs = { // adaptive ..PEBS + .initialize_threshold = pebs_Corei7_Initialize_Threshold, + .overflow = pebs_Corei7_Overflow_APEBS, + .modify_ip = pebs_Modify_IP_With_Eventing_IP, + .modify_tsc = pebs_Modify_TSC, + .get_num_records_filled = pebs_Get_Num_Records_Filled +}; + +#define PER_CORE_BUFFER_SIZE(dts_size, record_size, record_num) \ + (dts_size + (record_num + 1) * (record_size) + 64) +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID* pebs_Alloc_DTS_Buffer (VOID) + * + * @brief Allocate buffers used for latency and pebs sampling + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Allocate the memory needed to hold the DTS and PEBS records buffer. + * This routine is called by a thread that corresponds to a single core + */ +static VOID * +pebs_Alloc_DTS_Buffer(VOID) +{ + UIOP pebs_base; + U32 dts_size; + PVOID dts_buffer = NULL; + DTS_BUFFER_EXT dts; + int this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + PEBS_DISPATCH pebs_dispatch; + + SEP_DRV_LOG_TRACE_IN(""); + + /* + * one PEBS record... need 2 records so that + * threshold can be less than absolute max + */ + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + dts_size = sizeof(DTS_BUFFER_EXT_NODE); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]); + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg) || + DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) { + dts_size = sizeof(DTS_BUFFER_EXT1_NODE); + } + + /* + * account for extra bytes to align PEBS base to cache line boundary + */ + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_KPTI) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "KPTI is enabled without PAGE_TABLE_ISOLATION kernel configuration!"); + return NULL; + } else { + dts_buffer = (char *)pebs_global_memory + + CPU_STATE_dts_buffer_offset(pcpu); + if (!dts_buffer) { + SEP_DRV_LOG_ERROR_TRACE_OUT("NULL (failed to allocate space for DTS buffer!)."); + return NULL; + } + pebs_base = (UIOP)(dts_buffer) + dts_size; + + CPU_STATE_dts_buffer(pcpu) = dts_buffer; + CPU_STATE_dts_buffer_size(pcpu) = PER_CORE_BUFFER_SIZE( + dts_size, + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]), + DEV_CONFIG_pebs_record_num(pcfg)); + + // Make 32 byte aligned + if ((pebs_base & 0x000001F) != 0x0) { + pebs_base = ALIGN_32(pebs_base); + } + + dts = (DTS_BUFFER_EXT)dts_buffer; + } + + /* + * Program the DTES Buffer for Precise EBS. 
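+	 * Layout per core (see PER_CORE_BUFFER_SIZE above):
+	 * [management header | (record_num + 1) PEBS records | up to 64
+	 * slack bytes consumed by aligning pebs_base]. As a hypothetical
+	 * example, 64 records of 96 bytes after a 0x50-byte header give
+	 * 0x50 + 65 * 96 + 64 bytes per core.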
+	 * Set PEBS buffer for one PEBS record
+	 */
+	DTS_BUFFER_EXT_base(dts) = 0;
+	DTS_BUFFER_EXT_index(dts) = 0;
+	DTS_BUFFER_EXT_max(dts) = 0;
+	DTS_BUFFER_EXT_threshold(dts) = 0;
+	DTS_BUFFER_EXT_pebs_base(dts) = pebs_base;
+	DTS_BUFFER_EXT_pebs_index(dts) = pebs_base;
+	DTS_BUFFER_EXT_pebs_max(dts) =
+		pebs_base +
+		(DEV_CONFIG_pebs_record_num(pcfg) + 1) *
+			LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]);
+
+	pebs_dispatch->initialize_threshold(dts);
+
+	SEP_DRV_LOG_TRACE("base --- %llx.", DTS_BUFFER_EXT_pebs_base(dts));
+	SEP_DRV_LOG_TRACE("index --- %llu.", DTS_BUFFER_EXT_pebs_index(dts));
+	SEP_DRV_LOG_TRACE("max --- %llu.", DTS_BUFFER_EXT_pebs_max(dts));
+	SEP_DRV_LOG_TRACE("threshold --- %llu.",
+			  DTS_BUFFER_EXT_pebs_threshold(dts));
+	SEP_DRV_LOG_TRACE("DTES buffer allocated for PEBS: %p.", dts_buffer);
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %p.", dts_buffer);
+	return dts;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID* pebs_Allocate_Buffers (VOID *params)
+ *
+ * @brief Allocate memory and set up MSRs in preparation for PEBS
+ *
+ * @param params - dummy parameter (not used)
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *     Set up the DS area and program the DS_AREA msrs in preparation
+ *     for a PEBS run. Save away the old value in the DS_AREA.
+ *     This routine is called via the parallel thread call.
+ */
+static VOID
+pebs_Allocate_Buffers(VOID *params)
+{
+	U64 value;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+	PVOID dts_ptr = NULL;
+
+	SEP_DRV_LOG_TRACE_IN("Params: %p.", params);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	if (!DEV_CONFIG_pebs_mode(pcfg)) {
+		SEP_DRV_LOG_TRACE_OUT("");
+		return;
+	}
+
+	SYS_Write_MSR(IA32_PEBS_ENABLE, 0LL);
+	value = SYS_Read_MSR(IA32_MISC_ENABLE);
+	if ((value & 0x80) && !(value & 0x1000)) {
+		CPU_STATE_old_dts_buffer(pcpu) =
+			(PVOID)(UIOP)SYS_Read_MSR(IA32_DS_AREA);
+		dts_ptr = pebs_Alloc_DTS_Buffer();
+		if (!dts_ptr) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("dts_ptr is NULL!");
+			return;
+		}
+		SEP_DRV_LOG_TRACE("Old dts buffer - %p.",
+				  CPU_STATE_old_dts_buffer(pcpu));
+		SEP_DRV_LOG_TRACE("New dts buffer - %p.", dts_ptr);
+		SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)dts_ptr);
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID pebs_Deallocate_Buffers (VOID *params)
+ *
+ * @brief Clean up PEBS buffers and restore older values into the DS_AREA
+ *
+ * @param params - dummy parameter (not used)
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *     Clean up the DS area and restore all state saved prior to the
+ *     sampling run. This routine is called via the parallel thread call.
+ */
+static VOID
+pebs_Deallocate_Buffers(VOID *params)
+{
+	CPU_STATE pcpu;
+	U32 this_cpu;
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+
+	SEP_DRV_LOG_TRACE_IN("Params: %p.", params);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	if (!DEV_CONFIG_pebs_mode(pcfg)) {
+		SEP_DRV_LOG_TRACE_OUT("");
+		return;
+	}
+
+	SEP_DRV_LOG_TRACE("Entered deallocate buffers.");
+	SYS_Write_MSR(IA32_DS_AREA, (U64)(UIOP)CPU_STATE_old_dts_buffer(pcpu));
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn U64 PEBS_Overflowed (this_cpu, overflow_status, rec_index)
+ *
+ * @brief Figure out if the PEBS event caused an overflow
+ *
+ * @param this_cpu -- the current cpu
+ *        overflow_status -- current value of the global overflow status
+ *        rec_index -- index of the PEBS record to inspect
+ *
+ * @return updated overflow_status
+ *
+ * Special Notes:
+ *     Figure out if the PEBS area has data that need to be transferred
+ *     to the output sample.
+ *     Update the overflow_status that is passed and return this value.
+ *     The overflow_status defines the events/status to be read
+ */
+extern U64
+PEBS_Overflowed(S32 this_cpu, U64 overflow_status, U32 rec_index)
+{
+	U64 res;
+	U32 dev_idx;
+	PEBS_DISPATCH pebs_dispatch;
+
+	SEP_DRV_LOG_TRACE_IN("This_cpu: %d, overflow_status: %llx, rec_index: %u.",
+			     this_cpu, overflow_status, rec_index);
+
+	dev_idx = core_to_dev_map[this_cpu];
+	pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]);
+
+	res = pebs_dispatch->overflow(this_cpu, overflow_status, rec_index);
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %llx.", res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID PEBS_Reset_Index (this_cpu)
+ *
+ * @brief Reset the PEBS index pointer
+ *
+ * @param this_cpu -- the current cpu
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *     Reset the index of the next PEBS record to the base of the buffer
+ */
+extern VOID
+PEBS_Reset_Index(S32 this_cpu)
+{
+	DTS_BUFFER_EXT dtes;
+
+	SEP_DRV_LOG_TRACE_IN("This_cpu: %d.", this_cpu);
+
+	dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+
+	if (!dtes) {
+		return;
+	}
+	SEP_DRV_LOG_TRACE("PEBS Reset Index: %d.", this_cpu);
+	DTS_BUFFER_EXT_pebs_index(dtes) = DTS_BUFFER_EXT_pebs_base(dtes);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+extern U32
+pmi_Get_CSD(U32, U32 *, U32 *);
+#define EFLAGS_V86_MASK 0x00020000L
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID PEBS_Flush_Buffer (VOID * param)
+ *
+ * @brief Generate sampling records from the PEBS records in the PEBS buffer
+ *
+ * @param param -- not used
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ */
+extern VOID
+PEBS_Flush_Buffer(VOID *param)
+{
+	U32 i, this_cpu, index, desc_id;
+	U64 pebs_overflow_status = 0;
+	U64 lbr_tos_from_ip = 0ULL;
+	DRV_BOOL counter_overflowed = FALSE;
+	ECB pecb;
+	CPU_STATE pcpu;
+	EVENT_DESC evt_desc;
+	BUFFER_DESC bd;
+	SampleRecordPC *psamp_pebs;
+	U32 is_64bit_addr = FALSE;
+	U32 u32PebsRecordNumFilled;
+#if defined(DRV_IA32)
+	U32 seg_cs;
+	U32 csdlo;
+	U32 csdhi;
+#endif
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+	U32 cur_grp;
+	DRV_BOOL multi_pebs_enabled;
+	DISPATCH dispatch;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	bd = &cpu_buf[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+	cur_grp = CPU_STATE_current_group(pcpu);
+	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+	multi_pebs_enabled =
+		(DEV_CONFIG_pebs_mode(pcfg) &&
+		 (DEV_CONFIG_pebs_record_num(pcfg) > 1) &&
+		 (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) ==
+		  DRV_SETUP_INFO_PTI_DISABLED));
+
+	if (!DEV_CONFIG_pebs_mode(pcfg)) {
+		SEP_DRV_LOG_TRACE_OUT("PEBS is not enabled.");
+		return;
+	}
+
+	if (!multi_pebs_enabled) {
+		SEP_DRV_LOG_TRACE_OUT("PEBS_Flush_Buffer is not supported.");
+		return;
+	}
+
+	u32PebsRecordNumFilled = PEBS_Get_Num_Records_Filled();
+	for (i = 0; i < u32PebsRecordNumFilled; i++) {
+		pebs_overflow_status = PEBS_Overflowed(this_cpu, 0, i);
+		SEP_DRV_LOG_TRACE("Pebs_overflow_status = 0x%llx, i=%d.",
+				  pebs_overflow_status, i);
+
+		pecb = LWPMU_DEVICE_PMU_register_data(
+			&devices[dev_idx])[cur_grp];
+		FOR_EACH_DATA_REG (pecb, j) {
+			if ((!DEV_CONFIG_enable_adaptive_pebs(pcfg) &&
+			     !ECB_entries_is_gp_reg_get(pecb, j)) ||
+			    !ECB_entries_precise_get(pecb, j)) {
+				continue;
+			}
+			counter_overflowed = FALSE;
+			if (ECB_entries_fixed_reg_get(pecb, j)) {
+				index = ECB_entries_reg_id(pecb, j) -
+					IA32_FIXED_CTR0;
+				if (pebs_overflow_status &
+				    ((U64)1 << (32 + index))) {
+					counter_overflowed = TRUE;
+				}
+			} else {
+				index = ECB_entries_reg_id(pecb, j) - IA32_PMC0;
+				if (pebs_overflow_status & (U64)1 << index) {
+					counter_overflowed = TRUE;
+				}
+			}
+			if (counter_overflowed) {
+				desc_id = ECB_entries_desc_id(pecb, j);
+				evt_desc = desc_data[desc_id];
+				SEP_DRV_LOG_TRACE("Event_id_index=%u, desc_id=%u.",
+						  ECB_entries_event_id_index(
+							  pecb, j),
+						  desc_id);
+				psamp_pebs = (SampleRecordPC *)
+					OUTPUT_Reserve_Buffer_Space(
+						bd,
+						EVENT_DESC_sample_size(evt_desc),
+						(NMI_mode) ?
TRUE : FALSE, + !SEP_IN_NOTIFICATION); + if (!psamp_pebs) { + SEP_DRV_LOG_ERROR("Could not generate samples from PEBS records."); + continue; + } + + lbr_tos_from_ip = 0ULL; + CPU_STATE_num_samples(&pcb[this_cpu]) += 1; + SAMPLE_RECORD_descriptor_id(psamp_pebs) = + desc_id; + SAMPLE_RECORD_event_index(psamp_pebs) = + ECB_entries_event_id_index(pecb, j); + SAMPLE_RECORD_pid_rec_index(psamp_pebs) = + (U32)-1; + SAMPLE_RECORD_pid_rec_index_raw(psamp_pebs) = 1; + SAMPLE_RECORD_tid(psamp_pebs) = (U32)-1; + SAMPLE_RECORD_cpu_num(psamp_pebs) = + (U16)this_cpu; + SAMPLE_RECORD_osid(psamp_pebs) = 0; + +#if defined(DRV_IA32) + PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, + i); + SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; + if (SAMPLE_RECORD_eflags(psamp_pebs) & + EFLAGS_V86_MASK) { + csdlo = 0; + csdhi = 0; + } else { + seg_cs = SAMPLE_RECORD_cs(psamp_pebs); + SYS_Get_CSD(seg_cs, &csdlo, &csdhi); + } + SAMPLE_RECORD_csd(psamp_pebs).u1.lowWord = + csdlo; + SAMPLE_RECORD_csd(psamp_pebs).u2.highWord = + csdhi; +#elif defined(DRV_EM64T) + SAMPLE_RECORD_cs(psamp_pebs) = __KERNEL_CS; + pmi_Get_CSD(SAMPLE_RECORD_cs(psamp_pebs), + &SAMPLE_RECORD_csd(psamp_pebs).u1.lowWord, + &SAMPLE_RECORD_csd(psamp_pebs).u2.highWord); + is_64bit_addr = + (SAMPLE_RECORD_csd(psamp_pebs).u2.s2.reserved_0 == 1); + if (is_64bit_addr) { + SAMPLE_RECORD_ia64_pc(psamp_pebs) = + TRUE; + } else { + SAMPLE_RECORD_ia64_pc(psamp_pebs) = + FALSE; + + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp_pebs) 0x%x.", + SAMPLE_RECORD_eip(psamp_pebs)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp_pebs) %x.", + SAMPLE_RECORD_eflags(psamp_pebs)); + } +#endif + if (EVENT_DESC_pebs_offset(evt_desc) || + EVENT_DESC_latency_offset_in_sample(evt_desc)) { + lbr_tos_from_ip = PEBS_Fill_Buffer( + (S8 *)psamp_pebs, evt_desc, i); + } + PEBS_Modify_IP((S8 *)psamp_pebs, is_64bit_addr, i); + PEBS_Modify_TSC((S8 *)psamp_pebs, i); + if (ECB_entries_branch_evt_get(pecb, j) && + DEV_CONFIG_precise_ip_lbrs(pcfg) && + lbr_tos_from_ip) { + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp_pebs) = + lbr_tos_from_ip; + SEP_DRV_LOG_TRACE("UPDATED SAMPLE_RECORD_iip(psamp) 0x%llx.", + SAMPLE_RECORD_iip( + psamp_pebs)); + } else { + SAMPLE_RECORD_eip(psamp_pebs) = + (U32)lbr_tos_from_ip; + SEP_DRV_LOG_TRACE("UPDATED SAMPLE_RECORD_eip(psamp) 0x%x.", + SAMPLE_RECORD_eip( + psamp_pebs)); + } + } + if (i == u32PebsRecordNumFilled - 1 && + DRV_CONFIG_event_based_counts(drv_cfg) && + ECB_entries_em_trigger_get(pecb, j)) { + dispatch->read_counts( + (S8 *)psamp_pebs, + ECB_entries_event_id_index(pecb, + j)); + } + } + } + END_FOR_EACH_DATA_REG; + } + PEBS_Reset_Index(this_cpu); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID PEBS_Reset_Counter (this_cpu, index, op_type)
+ *
+ * @brief Set the reset value for a PMC after an overflow
+ *
+ * @param this_cpu -- the current cpu
+ *        index -- PMC register index
+ *        op_type -- type of operation
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ */
+extern VOID
+PEBS_Reset_Counter(S32 this_cpu, U32 index, U32 op_type)
+{
+	DTS_BUFFER_EXT dts;
+	DTS_BUFFER_EXT1 dts_ext = NULL;
+	DTS_BUFFER_EXT2 dts_ext2 = NULL;
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+	CPU_STATE pcpu;
+	U32 counter_index = 0;
+	U32 gp_index = 0;
+	U32 fixed_index = 0;
+	U32 fixed_offset = 0;
+	U32 cur_grp;
+	ECB pecb;
+	U64 value;
+
+	SEP_DRV_LOG_TRACE_IN("This_cpu: %d, index: %u, op_type: %u.",
+			     this_cpu, index, op_type);
+
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	cur_grp = CPU_STATE_current_group(pcpu);
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+	dts = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+
+	if (!DEV_CONFIG_num_events(pcfg)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid number of events");
+		return;
+	}
+
+	if (!pecb) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid ECB data");
+		return;
+	}
+
+	if (!dts) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Invalid PEBS Buffer");
+		return;
+	}
+
+	value = ECB_entries_reg_value(pecb, index);
+
+	if (ECB_entries_fixed_reg_get(pecb, index)) {
+		if (DEV_CONFIG_pebs_mode(pcfg) == 7) {
+			fixed_offset = EXT2_FIXED_OFFSET;
+		} else if (DEV_CONFIG_pebs_mode(pcfg) <= 6) {
+			fixed_offset = EXT1_FIXED_OFFSET;
+		} else {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("Unsupported PEBS mode");
+			return;
+		}
+
+		if (op_type == PMU_OPERATION_ALL_REG) {
+			counter_index = (ECB_entries_reg_id(pecb, index) -
+					 IA32_FIXED_CTR0 + fixed_offset);
+		} else {
+			counter_index = index -
+					ECB_operations_register_start(
+						pecb,
+						PMU_OPERATION_DATA_FIXED) +
+					fixed_offset;
+		}
+	} else {
+		if (op_type == PMU_OPERATION_ALL_REG) {
+			counter_index =
+				(ECB_entries_reg_id(pecb, index) - IA32_PMC0);
+		} else {
+			counter_index = index -
+					ECB_operations_register_start(
+						pecb, PMU_OPERATION_DATA_GP);
+		}
+	}
+
+	SEP_DRV_LOG_TRACE("PEBS Reset GP Counters[0:3]: cpu %d, index=%u, value=%llx.",
+			  this_cpu, counter_index, value);
+
+	switch (counter_index) {
+	case 0:
+		DTS_BUFFER_EXT_counter_reset0(dts) = value;
+		break;
+	case 1:
+		DTS_BUFFER_EXT_counter_reset1(dts) = value;
+		break;
+	case 2:
+		DTS_BUFFER_EXT_counter_reset2(dts) = value;
+		break;
+	case 3:
+		DTS_BUFFER_EXT_counter_reset3(dts) = value;
+		break;
+	}
+
+	if (counter_index < NUM_EXT_COUNTER_RESET) {
+		SEP_DRV_LOG_TRACE_OUT("");
+		return;
+	}
+
+	if (!DEV_CONFIG_enable_adaptive_pebs(pcfg) &&
+	    !DEV_CONFIG_collect_fixed_counter_pebs(pcfg)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Neither adaptive PEBS nor fixed counter PEBS collection is enabled");
+		return;
+	}
+
+	SEP_DRV_LOG_TRACE("PEBS Reset Fixed Counters and GP Counters[4:7]: cpu %d, index=%u, value=%llx.",
+			  this_cpu, counter_index, value);
+
+	if (DEV_CONFIG_pebs_mode(pcfg) == 6) {
+		dts_ext = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+
+		switch (counter_index) {
+		case 4:
+			DTS_BUFFER_EXT1_counter_reset4(dts_ext) = value;
+			break;
+		case 5:
+			DTS_BUFFER_EXT1_counter_reset5(dts_ext) = value;
+			break;
+		case 6:
+			DTS_BUFFER_EXT1_counter_reset6(dts_ext) = value;
+			break;
+		case 7:
+			DTS_BUFFER_EXT1_counter_reset7(dts_ext) = value;
+			break;
+		case 8:
+			DTS_BUFFER_EXT1_fixed_counter_reset0(dts_ext) = value;
+			break;
+		case 9:
+			DTS_BUFFER_EXT1_fixed_counter_reset1(dts_ext) = value;
+			break;
+		case 10:
+			DTS_BUFFER_EXT1_fixed_counter_reset2(dts_ext) = value;
+			break;
+		case 11:
+			DTS_BUFFER_EXT1_fixed_counter_reset3(dts_ext) = value;
+			break;
+		}
+	} else if (DEV_CONFIG_pebs_mode(pcfg) == 7) {
+		dts_ext2 = CPU_STATE_dts_buffer(&pcb[this_cpu]);
+
+		if (counter_index <
+		    (NUM_EXT_COUNTER_RESET + NUM_EXT2_COUNTER_RESET)) {
+			gp_index = counter_index - NUM_EXT_COUNTER_RESET;
+			DTS_BUFFER_EXT2_counter_reset(dts_ext2)[gp_index] =
+				value;
+		} else {
+			fixed_index = counter_index -
+				      (NUM_EXT_COUNTER_RESET +
+				       NUM_EXT2_COUNTER_RESET);
+			if (fixed_index < NUM_EXT2_FIXED_COUNTER_RESET) {
+				DTS_BUFFER_EXT2_fixed_counter_reset(
+					dts_ext2)[fixed_index] = value;
+			}
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID PEBS_Modify_IP (sample, is_64bit_addr, rec_index)
+ *
+ * @brief Change the IP field in the sample to that in the PEBS record
+ *
+ * @param sample - sample buffer
+ * @param is_64bit_addr - are we in a 64 bit module
+ * @param rec_index - record index
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *
+ */
+extern VOID
+PEBS_Modify_IP(void *sample, DRV_BOOL is_64bit_addr, U32 rec_index)
+{
+	U32 this_cpu;
+	U32 dev_idx;
+	PEBS_DISPATCH pebs_dispatch;
+
+	SEP_DRV_LOG_TRACE_IN("Sample: %p, is_64bit_addr: %u, rec_index: %u.",
+			     sample, is_64bit_addr, rec_index);
+
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]);
+
+	pebs_dispatch->modify_ip(sample, is_64bit_addr, rec_index);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID PEBS_Modify_TSC (sample, rec_index)
+ *
+ * @brief Change the TSC field in the sample to that in the PEBS record
+ *
+ * @param sample - sample buffer
+ * @param rec_index - record index
+ *
+ * @return NONE
+ *
+ * Special Notes:
+ *
+ */
+extern VOID
+PEBS_Modify_TSC(void *sample, U32 rec_index)
+{
+	U32 this_cpu;
+	U32 dev_idx;
+	PEBS_DISPATCH pebs_dispatch;
+
+	SEP_DRV_LOG_TRACE_IN("Sample: %p, rec_index: %u.", sample, rec_index);
+
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]);
+
+	if (pebs_dispatch->modify_tsc != NULL) {
+		pebs_dispatch->modify_tsc(sample, rec_index);
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+extern U32
+PEBS_Get_Num_Records_Filled(VOID)
+{
+	U32 this_cpu;
+	U32 dev_idx;
+	PEBS_DISPATCH pebs_dispatch;
+	U32 num = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	pebs_dispatch = LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]);
+
+	if (pebs_dispatch->get_num_records_filled != NULL) {
+		num = pebs_dispatch->get_num_records_filled();
+		SEP_DRV_LOG_TRACE("Num=%u.", num);
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", num);
+	return num;
+}
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID PEBS_Fill_Phy_Addr (LATENCY_INFO latency_info) + * + * @brief Fill latency node with phy addr when applicable + * + * @param latency_info - pointer to LATENCY_INFO struct + * + * @return NONE + * + * Special Notes: + * + */ + +extern VOID +PEBS_Fill_Phy_Addr(LATENCY_INFO latency_info) +{ +#if defined(DRV_EM64T) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) + U64 lin_addr; + U64 offset; + struct page *page; + + if (!DRV_CONFIG_virt_phys_translation(drv_cfg)) { + return; + } + lin_addr = (U64)LATENCY_INFO_linear_address(latency_info); + if (lin_addr != 0) { + offset = (U64)(lin_addr & 0x0FFF); + if (__virt_addr_valid(lin_addr)) { + LATENCY_INFO_phys_addr(latency_info) = + (U64)__pa(lin_addr); + } else if (lin_addr < __PAGE_OFFSET) { + pagefault_disable(); + if (__get_user_pages_fast(lin_addr, 1, 1, &page)) { + LATENCY_INFO_phys_addr(latency_info) = + (U64)page_to_phys(page) + offset; + put_page(page); + } + pagefault_enable(); + } + } +#endif + return; +} +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 PEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) + * + * @brief Fill the buffer with the pebs data + * + * @param buffer - area to write the data into + * event_desc - event descriptor of the pebs event + rec_index - current pebs record index + * + * @return if APEBS return LBR_TOS_FROM_IP else return 0 + * + * Special Notes: + * + */ +extern U64 +PEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) +{ + DTS_BUFFER_EXT dtes; + LATENCY_INFO_NODE latency_info = { 0 }; + PEBS_REC_EXT1 pebs_base_ext1; + PEBS_REC_EXT2 pebs_base_ext2; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U8 pebs_ptr_check = FALSE; + U64 lbr_tos_from_ip = 0ULL; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, + evt_desc, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + lbr_tos_from_ip = + APEBS_Fill_Buffer(buffer, evt_desc, rec_index); + return lbr_tos_from_ip; + } + + SEP_DRV_LOG_TRACE("In PEBS Fill Buffer: cpu %d.", CONTROL_THIS_CPU()); + + if (!dtes) { + return lbr_tos_from_ip; + } + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return lbr_tos_from_ip; + } + pebs_base = pebs_ptr; + if (EVENT_DESC_pebs_offset(evt_desc)) { + SEP_DRV_LOG_TRACE("PEBS buffer has data available."); + memcpy(buffer + EVENT_DESC_pebs_offset(evt_desc), pebs_base, + EVENT_DESC_pebs_size(evt_desc)); + } + if (EVENT_DESC_eventing_ip_offset(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + *(U64 *)(buffer + EVENT_DESC_eventing_ip_offset(evt_desc)) = + PEBS_REC_EXT1_eventing_ip(pebs_base_ext1); + } + if (EVENT_DESC_hle_offset(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = + PEBS_REC_EXT1_hle_info(pebs_base_ext1); + } + if (EVENT_DESC_latency_offset_in_sample(evt_desc)) { + pebs_base_ext1 = (PEBS_REC_EXT1)pebs_base; + memcpy(&latency_info, + pebs_base + EVENT_DESC_latency_offset_in_pebs_record( + evt_desc), + 
EVENT_DESC_latency_size_from_pebs_record(evt_desc)); + memcpy(&LATENCY_INFO_stack_pointer(&latency_info), + &PEBS_REC_EXT1_rsp(pebs_base_ext1), sizeof(U64)); + + LATENCY_INFO_phys_addr(&latency_info) = 0; + PEBS_Fill_Phy_Addr(&latency_info); + + memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), + &latency_info, sizeof(LATENCY_INFO_NODE)); + } + if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { + pebs_base_ext2 = (PEBS_REC_EXT2)pebs_base; + *(U64 *)(buffer + EVENT_DESC_pebs_tsc_offset(evt_desc)) = + PEBS_REC_EXT2_tsc(pebs_base_ext2); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return lbr_tos_from_ip; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 APEBS_Fill_Buffer (S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) + * + * @brief Fill the buffer with the pebs data + * + * @param buffer - area to write the data into + * event_desc - event descriptor of the pebs event + * rec_index - current pebs record index + * + * @return LBR_TOS_FROM_IP + * + * Special Notes: + * + */ +extern U64 +APEBS_Fill_Buffer(S8 *buffer, EVENT_DESC evt_desc, U32 rec_index) +{ + DTS_BUFFER_EXT1 dtes; + LATENCY_INFO_NODE latency_info = { 0 }; + U64 dtes_record_size = 0; + U64 dtes_record_format = 0; + ADAPTIVE_PEBS_MEM_INFO apebs_mem = NULL; + ADAPTIVE_PEBS_GPR_INFO apebs_gpr = NULL; + ADAPTIVE_PEBS_BASIC_INFO apebs_basic = NULL; + S8 *pebs_base, *pebs_index, *pebs_ptr; + U8 pebs_ptr_check = FALSE; + U64 lbr_tos_from_ip = 0ULL; + U32 this_cpu; + U32 dev_idx; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p, evt_desc: %p, rec_index: %u.", buffer, + evt_desc, rec_index); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dtes = CPU_STATE_dts_buffer(&pcb[this_cpu]); + + SEP_DRV_LOG_TRACE("In APEBS Fill Buffer: cpu %d.", this_cpu); + + if (!dtes || !DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + return lbr_tos_from_ip; + } + + pebs_base = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_base(dtes); + pebs_index = (S8 *)(UIOP)DTS_BUFFER_EXT1_pebs_index(dtes); + pebs_ptr = (S8 *)((UIOP)DTS_BUFFER_EXT1_pebs_base(dtes) + + (rec_index * + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]))); + pebs_ptr_check = + (pebs_ptr && pebs_base != pebs_index && pebs_ptr < pebs_index); + if (!pebs_ptr_check) { + return lbr_tos_from_ip; + } + + pebs_base = pebs_ptr; + apebs_basic = + (ADAPTIVE_PEBS_BASIC_INFO)(pebs_base + + LWPMU_DEVICE_apebs_basic_offset( + &devices[dev_idx])); + dtes_record_size = (ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & + APEBS_RECORD_SIZE_MASK) >> 48; // [63:48] + dtes_record_format = + (ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic) & + APEBS_RECORD_FORMAT_MASK); // [47:0] + + if (dtes_record_size != + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx])) { + SEP_DRV_LOG_TRACE("PEBS record size does not match with ucode\n"); + } + if (EVENT_DESC_pebs_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_pebs_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_record_info(apebs_basic); + } + if (EVENT_DESC_eventing_ip_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_eventing_ip_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_eventing_ip(apebs_basic); + } + if (EVENT_DESC_pebs_tsc_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_pebs_tsc_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_tsc(apebs_basic); + } + if (EVENT_DESC_applicable_counters_offset(evt_desc)) { + *(U64 *)(buffer + + EVENT_DESC_applicable_counters_offset(evt_desc)) = + ADAPTIVE_PEBS_BASIC_INFO_applicable_counters( + apebs_basic); + } + if 
(DEV_CONFIG_apebs_collect_gpr(pcfg) && + EVENT_DESC_gpr_info_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_GPR_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("GPR info not found in DS PEBS record."); + } + memcpy(buffer + EVENT_DESC_gpr_info_offset(evt_desc), + pebs_base + + LWPMU_DEVICE_apebs_gpr_offset(&devices[dev_idx]), + EVENT_DESC_gpr_info_size(evt_desc)); + } + if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && + EVENT_DESC_latency_offset_in_sample(evt_desc)) { + if (!(dtes_record_format & APEBS_MEM_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("MEM info not found in DS PEBS record."); + } + apebs_mem = + (ADAPTIVE_PEBS_MEM_INFO)(pebs_base + + LWPMU_DEVICE_apebs_mem_offset( + &devices[dev_idx])); + memcpy(&LATENCY_INFO_linear_address(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_data_linear_address(apebs_mem), + sizeof(U64)); + memcpy(&LATENCY_INFO_data_source(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_data_source(apebs_mem), + sizeof(U64)); + memcpy(&LATENCY_INFO_latency(&latency_info), + &ADAPTIVE_PEBS_MEM_INFO_latency(apebs_mem), sizeof(U64)); + LATENCY_INFO_stack_pointer(&latency_info) = 0; + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + apebs_gpr = + (ADAPTIVE_PEBS_GPR_INFO)(pebs_base + + LWPMU_DEVICE_apebs_gpr_offset( + &devices[dev_idx])); + memcpy(&LATENCY_INFO_stack_pointer(&latency_info), + &ADAPTIVE_PEBS_GPR_INFO_rsp(apebs_gpr), + sizeof(U64)); + } + + LATENCY_INFO_phys_addr(&latency_info) = 0; + PEBS_Fill_Phy_Addr(&latency_info); + memcpy(buffer + EVENT_DESC_latency_offset_in_sample(evt_desc), + &latency_info, sizeof(LATENCY_INFO_NODE)); + } + if (DEV_CONFIG_apebs_collect_mem_info(pcfg) && + EVENT_DESC_hle_offset(evt_desc)) { + *(U64 *)(buffer + EVENT_DESC_hle_offset(evt_desc)) = + ADAPTIVE_PEBS_MEM_INFO_hle_info(( + ADAPTIVE_PEBS_MEM_INFO)(pebs_base + + LWPMU_DEVICE_apebs_mem_offset( + &devices[dev_idx]))); + } + if (DEV_CONFIG_apebs_collect_xmm(pcfg) && + EVENT_DESC_xmm_info_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_XMM_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("XMM info not found in DS PEBS record."); + } + memcpy(buffer + EVENT_DESC_xmm_info_offset(evt_desc), + pebs_base + + LWPMU_DEVICE_apebs_xmm_offset(&devices[dev_idx]), + EVENT_DESC_xmm_info_size(evt_desc)); + } + if (DEV_CONFIG_apebs_collect_lbrs(pcfg) && + EVENT_DESC_lbr_offset(evt_desc)) { + if (!(dtes_record_format & APEBS_LBR_RECORD_FORMAT_MASK)) { + SEP_DRV_LOG_WARNING("LBR info not found in DS PEBS record\n"); + } + if ((dtes_record_format >> 24) != + (DEV_CONFIG_num_lbr_entries(pcfg) - 1)) { + SEP_DRV_LOG_WARNING("DRV_CONFIG_num_lbr_entries does not match with PEBS record\n"); + } + *(U64 *)(buffer + EVENT_DESC_lbr_offset(evt_desc)) = + DEV_CONFIG_num_lbr_entries(pcfg) - + 1; //Top-of-Stack(TOS) pointing to last entry + //Populating lbr callstack as SST_ENTRY_N to SST_ENTRY_0 in tb util, hence setting TOS to SST_ENTRY_N + memcpy(buffer + EVENT_DESC_lbr_offset(evt_desc) + sizeof(U64), + pebs_base + + LWPMU_DEVICE_apebs_lbr_offset(&devices[dev_idx]), + EVENT_DESC_lbr_info_size(evt_desc) - sizeof(U64)); + lbr_tos_from_ip = ADAPTIVE_PEBS_LBR_INFO_lbr_from( + (ADAPTIVE_PEBS_LBR_INFO)(pebs_base + + LWPMU_DEVICE_apebs_lbr_offset( + &devices[dev_idx]))); + } + return lbr_tos_from_ip; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn OS_STATUS PEBS_Initialize (DEV_CONFIG pcfg) + * + * @brief Initialize the pebs buffers + * + * @param dev_idx - Device index + * + * @return status + * + * Special Notes: + * If the user is asking for PEBS information. 
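+ *             pebs_mode selects the record layout and dispatch table in
+ *             the switch below: 1/3 Core2, 2 Nehalem, 4 Haswell,
+ *             5 perf version 4, 6 adaptive PEBS.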
Allocate the DS area + */ +extern OS_STATUS +PEBS_Initialize(U32 dev_idx) +{ + DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + SEP_DRV_LOG_TRACE_IN("Pcfg: %p.", pcfg); + + if (!DEV_CONFIG_pebs_mode(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("PEBS is not enabled"); + return OS_SUCCESS; + } + + switch (DEV_CONFIG_pebs_mode(pcfg)) { + case 1: + SEP_DRV_LOG_INIT("Set up the Core2 dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &core2_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_NODE); + break; + case 2: + SEP_DRV_LOG_INIT("Set up the Nehalem dispatch."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &corei7_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT_NODE); + break; + case 3: + SEP_DRV_LOG_INIT("Set up the Core2 (PNR) dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &core2p_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_NODE); + break; + case 4: + SEP_DRV_LOG_INIT("Set up the Haswell dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &haswell_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT1_NODE); + break; + case 5: + SEP_DRV_LOG_INIT("Set up the Perf version4 dispatch table."); + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &perfver4_pebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(PEBS_REC_EXT2_NODE); + break; + case 6: + if (!DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + SEP_DRV_LOG_TRACE("APEBS need to be enabled in perf version4 SNC dispatch mode."); + } + LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) = &perfver4_apebs; + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) = + sizeof(ADAPTIVE_PEBS_BASIC_INFO_NODE); + if (DEV_CONFIG_apebs_collect_mem_info(pcfg)) { + LWPMU_DEVICE_apebs_mem_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_MEM_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_gpr(pcfg)) { + LWPMU_DEVICE_apebs_gpr_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_GPR_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_xmm(pcfg)) { + LWPMU_DEVICE_apebs_xmm_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + sizeof(ADAPTIVE_PEBS_XMM_INFO_NODE); + } + if (DEV_CONFIG_apebs_collect_lbrs(pcfg)) { + LWPMU_DEVICE_apebs_lbr_offset(&devices[dev_idx]) = + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx]); + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]) += + (sizeof(ADAPTIVE_PEBS_LBR_INFO_NODE) * + DEV_CONFIG_num_lbr_entries(pcfg)); + } + SEP_DRV_LOG_TRACE("Size of adaptive pebs record - %d.", + LWPMU_DEVICE_pebs_record_size( + &devices[dev_idx])); + break; + default: + SEP_DRV_LOG_INIT("Unknown PEBS type. Will not collect PEBS information."); + break; + } + if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx]) && + !DEV_CONFIG_pebs_record_num(pcfg)) { + DEV_CONFIG_pebs_record_num(pcfg) = 1; + } + + SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS"); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn OS_STATUS PEBS_Allocate (void) + * + * @brief Allocate the pebs related buffers + * + * @param NONE + * + * @return NONE + * + * Special Notes: + * Allocated the DS area used for PEBS capture + */ +extern OS_STATUS +PEBS_Allocate(VOID) +{ + S32 cpu_num; + CPU_STATE pcpu; + U32 dev_idx; + U32 dts_size; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_INIT_IN(""); + + for (cpu_num = 0; cpu_num < GLOBAL_STATE_num_cpus(driver_state); + cpu_num++) { + pcpu = &pcb[cpu_num]; + dev_idx = core_to_dev_map[cpu_num]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!DEV_CONFIG_pebs_mode(pcfg)) { + continue; + } + if (LWPMU_DEVICE_pebs_dispatch(&devices[dev_idx])) { + dts_size = sizeof(DTS_BUFFER_EXT_NODE); + if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) { + dts_size = sizeof(DTS_BUFFER_EXT1_NODE); + } + CPU_STATE_dts_buffer_offset(pcpu) = + pebs_global_memory_size; + pebs_global_memory_size += PER_CORE_BUFFER_SIZE( + dts_size, + LWPMU_DEVICE_pebs_record_size(&devices[dev_idx]), + DEV_CONFIG_pebs_record_num(pcfg)); + } + } + if (pebs_global_memory_size) { + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED) { + SEP_DRV_LOG_INIT( + "Allocating global PEBS buffer using regular control routine."); + pebs_global_memory = (PVOID)CONTROL_Allocate_KMemory( + pebs_global_memory_size); + if (!pebs_global_memory) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Failed to allocate PEBS buffer!"); + return OS_NO_MEM; + } + memset(pebs_global_memory, 0, pebs_global_memory_size); + } else { + SEP_DRV_LOG_INIT( + "KAISER or PTI patch is enabled and PEBS feature can't be used.\n"); + return OS_SUCCESS; + } + } + + CONTROL_Invoke_Parallel(pebs_Allocate_Buffers, (VOID *)NULL); + + SEP_DRV_LOG_INIT_OUT(""); + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID PEBS_Destroy (void) + * + * @brief Clean up the pebs related buffers + * + * @param pcfg - Driver Configuration + * + * @return NONE + * + * Special Notes: + * Deallocated the DS area used for PEBS capture + */ +extern VOID +PEBS_Destroy(VOID) +{ + SEP_DRV_LOG_TRACE_IN(""); + + CONTROL_Invoke_Parallel(pebs_Deallocate_Buffers, (VOID *)(size_t)0); + if (pebs_global_memory) { + if (DRV_SETUP_INFO_page_table_isolation(&req_drv_setup_info) == + DRV_SETUP_INFO_PTI_DISABLED) { + SEP_DRV_LOG_INIT( + "Freeing PEBS buffer using regular control routine."); + pebs_global_memory = + CONTROL_Free_Memory(pebs_global_memory); + } + pebs_global_memory_size = 0; + SEP_DRV_LOG_INIT("PEBS buffer successfully freed."); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + diff --git a/drivers/platform/x86/sepdk/sep/perfver4.c b/drivers/platform/x86/sepdk/sep/perfver4.c new file mode 100644 index 00000000000000..d83ac5ae56758a --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/perfver4.c @@ -0,0 +1,1434 @@ +/**** + Copyright (C) 2013 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. 
Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "lwpmudrv.h" +#include "utility.h" +#include "control.h" +#include "output.h" +#include "perfver4.h" +#include "ecb_iterators.h" +#include "pebs.h" +#include "apic.h" +#include "ipt.h" + + +extern U64 *read_counter_info; +extern DRV_CONFIG drv_cfg; +extern U64 *interrupt_counts; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; +extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; +extern DRV_BOOL multi_pebs_enabled; + +typedef struct SADDR_S { + S64 addr:PERFVER4_LBR_DATA_BITS; +} SADDR; + +static U32 restore_reg_addr[3]; + +#define SADDR_addr(x) ((x).addr) +#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR + +#define IS_FIXED_CTR_ENABLED(ia32_perf_global_ctrl_reg_val) \ + ((ia32_perf_global_ctrl_reg_val) & 0x700000000ULL) +#define IS_FOUR_FIXED_CTR_ENABLED(ia32_perf_global_ctrl_reg_val) \ + ((ia32_perf_global_ctrl_reg_val) & 0xF00000000ULL) +#define IS_PMC_PEBS_ENABLED_GP(ia32_perf_global_ctrl_reg_val, \ + ia32_pebs_enable_reg_val) \ + (((ia32_perf_global_ctrl_reg_val) & 0xfULL) == \ + ((ia32_pebs_enable_reg_val) & 0xfULL)) +#define IS_PMC_PEBS_ENABLED_FP_AND_GP(ia32_perf_global_ctrl_reg_val, \ + ia32_pebs_enable_reg_val) \ + (((ia32_perf_global_ctrl_reg_val) & 0xf000000ffULL) == \ + ((ia32_pebs_enable_reg_val) & 0xf000000ffULL)) + +#define DISABLE_FRZ_ON_PMI(ia32_debug_ctrl_reg_val) \ + (0xefff & (ia32_debug_ctrl_reg_val)) +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Write_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Initial set up of the PMU registers + * + * Special Notes + * Initial write of PMU registers. + * Walk through the enties and write the value of the register accordingly. + * Assumption: For CCCR registers the enable bit is set to value 0. + * When current_group = 0, then this is the first time this routine is called, + * initialize the locks and set up EM tables. + */ +static VOID +perfver4_Write_PMU ( + VOID *param +) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + EVENT_CONFIG ec; + DISPATCH dispatch; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (CPU_STATE_current_group(pcpu) == 0) { + if (EVENT_CONFIG_mode(ec) != EM_DISABLED) { + U32 index; + U32 st_index; + U32 j; + + /* Save all the initialization values away into an array for Event Multiplexing. 
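+			 * The em_tables area holds one row of
+			 * EVENT_CONFIG_max_gp_events(ec) GP slots per group
+			 * (slot = group * max_gp_events + gp offset), which
+			 * is the st_index arithmetic below.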
*/ + for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) { + CPU_STATE_current_group(pcpu) = j; + st_index = CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) { + index = st_index + i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_GP); + CPU_STATE_em_tables(pcpu)[index] = ECB_entries_reg_value(pecb, i); + } END_FOR_EACH_REG_CORE_OPERATION; + } + /* Reset the current group to the very first one. */ + CPU_STATE_current_group(pcpu) = this_cpu % EVENT_CONFIG_num_groups(ec); + } + } + + if (dispatch->hw_errata) { + dispatch->hw_errata(); + } + + /* Clear outstanding frozen bits */ + SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL, PERFVER4_FROZEN_BIT_MASK); + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_ALL_REG) { + /* + * Writing the GLOBAL Control register enables the PMU to start counting. + * So write 0 into the register to prevent any counting from starting. + */ + if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + /* + * PEBS is enabled for this collection session + */ + if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, PMU_OPERATION_GLOBAL_REGS) && ECB_entries_reg_value(pecb, i)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + + if (DEV_CONFIG_pebs_mode(pcfg) && (ECB_entries_precise_get(pecb, i) == 1)) { + PEBS_Reset_Counter(this_cpu, i, PMU_OPERATION_ALL_REG); + } + + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ECB_entries_reg_value(pecb, i)); +#if defined(MYDEBUG) + { + U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Write reg 0x%x --- value 0x%llx -- read 0x%llx.", ECB_entries_reg_id(pecb, i), ECB_entries_reg_value(pecb, i), val); + } +#endif + } END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Disable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Zero out the global control register. This automatically disables the PMU counters. + * + */ +static VOID +perfver4_Disable_PMU ( + PVOID param +) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + // no programming for this device for this group + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { + SEP_DRV_LOG_TRACE("Driver state = %d.", GET_DRIVER_STATE()); + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), 0LL); + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), 0LL); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void perfver4_Enable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Set the enable bit for all the Control registers + * + */ +static VOID +perfver4_Enable_PMU ( + PVOID param +) +{ + /* + * Get the value from the event block + * 0 == location of the global control reg for this block. + * Generalize this location awareness when possible + */ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + U64 global_control_val; + U64 pebs_enable_val; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + // no programming for this device for this group + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (KVM_guest_mode) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX (pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), 0LL); + } + if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + APIC_Enable_Pmi(); + + if (DRV_CONFIG_ipt_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Exit for IPT collection"); + return; + } + + /* Clear outstanding frozen bits */ + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX (pecb, GLOBAL_OVF_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), PERFVER4_FROZEN_BIT_MASK); + + if (CPU_STATE_reset_mask(pcpu)) { + SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", CPU_STATE_reset_mask(pcpu)); + // Reinitialize the global overflow control register + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), ECB_entries_reg_value(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), ECB_entries_reg_value(pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS))); + CPU_STATE_reset_mask(pcpu) = 0LL; + } + if (CPU_STATE_group_swap(pcpu)) { + CPU_STATE_group_swap(pcpu) = 0; + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), ECB_entries_reg_value(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS))); + if (DEV_CONFIG_pebs_mode(pcfg) || DEV_CONFIG_latency_capture(pcfg)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), ECB_entries_reg_value(pecb, ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, PMU_OPERATION_GLOBAL_REGS))); + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), ECB_entries_reg_value(pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS))); +#if defined(MYDEBUG) + { + U64 val; + val = SYS_Read_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS))); + SEP_DRV_LOG_TRACE("Write reg 0x%x--- read 0x%llx.", ECB_entries_reg_id(pecb, 0), val); + } +#endif + } + + // FIXME: workaround for sampling both pebs event and non-pebs event + // with pebs buffer size > 1 + if (multi_pebs_enabled) { + global_control_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, 
PMU_OPERATION_GLOBAL_REGS))); + pebs_enable_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, PMU_OPERATION_GLOBAL_REGS))); + if (IS_FIXED_CTR_ENABLED(global_control_val) || !IS_PMC_PEBS_ENABLED_GP(global_control_val, pebs_enable_val)) { + SEP_DRV_LOG_TRACE("Global_control_val = 0x%llx pebs_enable_val = 0x%llx.", global_control_val, pebs_enable_val); + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), DISABLE_FRZ_ON_PMI(ECB_entries_reg_value(pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)))); + } + } + } + SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", ECB_entries_reg_value(pecb, 0)); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +#define FIXED_CTR3_BIT_INDEX 35 +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. + * + */ +static void +perfver4_Read_PMU_Data ( + PVOID param, + U32 dev_idx +) +{ + U32 j; + U64 *buffer = (U64 *)param; + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 cur_grp; + U32 index; + DEV_CONFIG pcfg; + DISPATCH dispatch; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + index = 0; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), buffer); + LFENCE_SERIALIZE(); + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) { + j = EMON_BUFFER_CORE_EVENT_OFFSET(EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(emon_buffer_driver_helper)[this_cpu], ECB_entries_core_event_id(pecb, i)); + buffer[j] = SYS_Read_PMC(ECB_entries_reg_id(pecb, i), ECB_entries_fixed_reg_get(pecb, i)); + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, buffer[j], this_cpu, ECB_entries_core_event_id(pecb, i)); + + /* + * Read hardware perf_metrics if appropriate fixed counter is specified + * for collection + */ + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = ECB_entries_reg_id(pecb, i) - IA32_FIXED_CTR0 + 0x20; + if (DEV_CONFIG_enable_perf_metrics(pcfg) && index == FIXED_CTR3_BIT_INDEX) { + j = EMON_BUFFER_CORE_EVENT_OFFSET(EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(emon_buffer_driver_helper)[this_cpu], EMON_BUFFER_DRIVER_HELPER_core_num_events(emon_buffer_driver_helper)); + dispatch->read_metrics((PVOID)(buffer+j)); + } + } + } END_FOR_EACH_REG_CORE_OPERATION; + LFENCE_SERIALIZE(); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Check_Overflow(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. 
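+ *        Overflow status is read from the global status register: the low
+ *        bits flag GP counters and bits 32 and up flag fixed counters
+ *        (hence the "+ 0x20" index arithmetic below). When IPT is active,
+ *        the ToPA PMI bit is handled separately and masked out of the bits
+ *        to clear.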
+ *
+ */
+static void
+perfver4_Check_Overflow (
+        DRV_MASKS masks
+)
+{
+        U32 index;
+        U64 overflow_status = 0;
+        U32 this_cpu;
+        BUFFER_DESC bd;
+        CPU_STATE pcpu;
+        ECB pecb;
+        U32 dev_idx;
+        U32 cur_grp;
+        DEV_CONFIG pcfg;
+        DISPATCH dispatch;
+        U64 overflow_status_clr = 0;
+        DRV_EVENT_MASK_NODE event_flag;
+
+        SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks);
+
+        this_cpu = CONTROL_THIS_CPU();
+        bd = &cpu_buf[this_cpu];
+        pcpu = &pcb[this_cpu];
+        dev_idx = core_to_dev_map[this_cpu];
+        cur_grp = CPU_STATE_current_group(pcpu);
+        pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+        pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+        dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+
+        if (!pecb) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+                return;
+        }
+
+        // initialize masks
+        DRV_MASKS_masks_num(masks) = 0;
+
+        overflow_status = SYS_Read_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, PMU_OPERATION_GLOBAL_STATUS)));
+
+        if (DEV_CONFIG_pebs_mode(pcfg) && (DEV_CONFIG_pebs_record_num(pcfg) == 1)) {
+                overflow_status = PEBS_Overflowed (this_cpu, overflow_status, 0);
+        }
+        overflow_status_clr = overflow_status;
+
+        if (DRV_CONFIG_ipt_mode(drv_cfg)) {
+                IPT_TOPA_Interrupted(this_cpu, overflow_status);
+                overflow_status_clr &= ~PERF_GLOBAL_STATUS_TOPA_PMI_MASK;
+        }
+
+        if (dispatch->check_overflow_gp_errata) {
+                overflow_status = dispatch->check_overflow_gp_errata(pecb, &overflow_status_clr);
+        }
+        SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, overflow_status);
+        index = 0;
+        BUFFER_DESC_sample_count(bd) = 0;
+        FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) {
+                if (ECB_entries_fixed_reg_get(pecb, i)) {
+                        index = i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_FIXED) + 0x20;
+                        if (dispatch->check_overflow_errata) {
+                                overflow_status = dispatch->check_overflow_errata(pecb, i, overflow_status);
+                        }
+                } else if (ECB_entries_is_gp_reg_get(pecb, i)) {
+                        index = i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_GP);
+                } else {
+                        continue;
+                }
+                if (overflow_status & ((U64)1 << index)) {
+                        SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", this_cpu, index);
+                        SEP_DRV_LOG_TRACE("Register 0x%x --- val 0x%llx.", ECB_entries_reg_id(pecb, i), SYS_Read_MSR(ECB_entries_reg_id(pecb, i)));
+                        SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ECB_entries_reg_value(pecb, i));
+
+                        if (DRV_CONFIG_enable_cp_mode(drv_cfg)) {
+                                /* Increment the interrupt count.
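+                                 * One count is kept per (cpu, event) pair, at
+                                 * interrupt_counts[cpu * num_events + event_id].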
*/ + if (interrupt_counts) { + interrupt_counts[this_cpu * DRV_CONFIG_num_events(drv_cfg) + ECB_entries_event_id_index(pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8) 0; + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_uncore_get(pecb, i)) { + DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + if (ECB_entries_em_trigger_get(pecb, i)) { + DRV_EVENT_MASK_trigger(&event_flag) = 1; + } + if (ECB_entries_collect_on_ctx_sw_get(pecb, i)) { + DRV_EVENT_MASK_collect_on_ctx_sw(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = ECB_entries_event_id_index(pecb, i); + DRV_EVENT_MASK_desc_id(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = ECB_entries_desc_id(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR("The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + /* Clear outstanding overflow bits */ + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_OVF_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_HT_ON); + + SEP_DRV_LOG_TRACE("Check overflow completed %d.", this_cpu); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Swap_Group(restart) + * + * @param restart dummy parameter which is not used + * + * @return None No return needed + * + * @brief Perform the mechanics of swapping the event groups for event mux operations + * + * Special Notes + * Swap function for event multiplexing. + * Freeze the counting. + * Swap the groups. + * Enable the counting. 
+ * Reset the event trigger count + * + */ +static VOID +perfver4_Swap_Group ( + DRV_BOOL restart +) +{ + U32 index; + U32 next_group; + U32 st_index; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DISPATCH dispatch; + DEV_CONFIG pcfg; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN("Dummy restart: %u.", restart); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + + if (!DEV_CONFIG_num_events(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!num_events)."); + return; + } + + st_index = CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); + next_group = (CPU_STATE_current_group(pcpu) + 1); + if (next_group >= EVENT_CONFIG_num_groups(ec)) { + next_group = 0; + } + + SEP_DRV_LOG_TRACE("Current group : 0x%x.", CPU_STATE_current_group(pcpu)); + SEP_DRV_LOG_TRACE("Next group : 0x%x.", next_group); + + // Save the counters for the current group + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) { + if (ECB_entries_ebc_sampling_evt_get(pecb, i) || !DRV_CONFIG_event_based_counts(drv_cfg)) { + index = st_index + i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_GP); + CPU_STATE_em_tables(pcpu)[index] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Saved value for cur grp %u, reg 0x%x : 0x%llx.", CPU_STATE_current_group(pcpu), ECB_entries_reg_id(pecb, i), CPU_STATE_em_tables(pcpu)[index]); + } + } END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_current_group(pcpu) = next_group; + + if (dispatch->hw_errata) { + dispatch->hw_errata(); + } + + // First write the GP control registers (eventsel) + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_CTRL_GP) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ECB_entries_reg_value(pecb, i)); + } END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_event_based_counts(drv_cfg)) { + // In EBC mode, reset the counts for all events except for trigger event + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) { + if (!ECB_entries_ebc_sampling_evt_get(pecb, i) && (ECB_entries_event_id_index(pecb, i) != CPU_STATE_trigger_event_num(pcpu))) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } END_FOR_EACH_REG_CORE_OPERATION; + } + // Then write the gp count registers + st_index = CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_GP) { + if (ECB_entries_ebc_sampling_evt_get(pecb, i) || !DRV_CONFIG_event_based_counts(drv_cfg)) { + index = st_index + i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_GP); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), CPU_STATE_em_tables(pcpu)[index]); + SEP_DRV_LOG_TRACE("Restore value for next grp %u, reg 0x%x : 0x%llx.", next_group, ECB_entries_reg_id(pecb, i), CPU_STATE_em_tables(pcpu)[index]); + } + } END_FOR_EACH_REG_CORE_OPERATION; + + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_OCR) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ECB_entries_reg_value(pecb, i)); + } END_FOR_EACH_REG_CORE_OPERATION; + + if (DEV_CONFIG_pebs_record_num(pcfg)) { + FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_precise_get(pecb, i) == 1) { + PEBS_Reset_Counter(this_cpu, i, PMU_OPERATION_DATA_ALL); + } + } END_FOR_EACH_REG_CORE_OPERATION; + } + + /* + * reset the em factor when a group is swapped + */ + CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); + + /* + * The enable 
routine needs to rewrite the control registers
+         */
+        CPU_STATE_reset_mask(pcpu) = 0LL;
+        CPU_STATE_group_swap(pcpu) = 1;
+
+        SEP_DRV_LOG_TRACE_OUT("");
+        return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn perfver4_Initialize(params)
+ *
+ * @param params   dummy parameter which is not used
+ *
+ * @return None     No return needed
+ *
+ * @brief Initialize the PMU setup for collection
+ *
+ * Special Notes
+ *         Saves the relevant PMU state (minimal set of MSRs required
+ *         to avoid conflicts with other Linux tools, such as Oprofile).
+ *         This function should be called in parallel across all CPUs
+ *         prior to the start of sampling, before PMU state is changed.
+ *
+ */
+static VOID
+perfver4_Initialize (
+        VOID *param
+)
+{
+        U32 this_cpu, j;
+        CPU_STATE pcpu;
+        U32 dev_idx;
+        DEV_CONFIG pcfg;
+        ECB pecb = NULL;
+
+        SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);
+
+        if (pcb == NULL) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb).");
+                return;
+        }
+
+        this_cpu = CONTROL_THIS_CPU();
+        pcpu = &pcb[this_cpu];
+        dev_idx = core_to_dev_map[this_cpu];
+        pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+        if (!DEV_CONFIG_num_events(pcfg)) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!num_events).");
+                return;
+        }
+
+        CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3);
+        if (CPU_STATE_pmu_state(pcpu) == NULL) {
+                SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to save PMU state on CPU %d.", this_cpu);
+                return;
+        }
+
+        // Loop through the groups to identify a non-NULL ECB, as the cur_grp
+        // may have a NULL ECB
+        for (j = 0; j < LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); j++) {
+                pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[j];
+                if (!pecb) {
+                        continue;
+                } else {
+                        break;
+                }
+        }
+
+        if (pecb == NULL) {
+                SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to save PMU state on CPU %d.", this_cpu);
+                return;
+        }
+
+        restore_reg_addr[0] = ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS));
+        restore_reg_addr[1] = ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS));
+        restore_reg_addr[2] = ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS));
+
+        // save the original PMU state on this CPU (NOTE: must only be called ONCE per collection)
+        CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]);
+        CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]);
+        CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]);
+
+        if (DRV_CONFIG_ds_area_available(drv_cfg) && DEV_CONFIG_pebs_mode(pcfg)) {
+                SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), 0LL);
+        }
+
+        SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu);
+        SEP_DRV_LOG_TRACE("    msr_val(IA32_DEBUG_CTRL)=0x%llx.", CPU_STATE_pmu_state(pcpu)[0]);
+        SEP_DRV_LOG_TRACE("    msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.", CPU_STATE_pmu_state(pcpu)[1]);
+        SEP_DRV_LOG_TRACE("    msr_val(IA32_FIXED_CTRL)=0x%llx.", CPU_STATE_pmu_state(pcpu)[2]);
+
+        SEP_DRV_LOG_TRACE_OUT("");
+        return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn perfver4_Destroy(params)
+ *
+ * @param params   dummy parameter which is not used
+ *
+ * @return None     No return needed
+ *
+ * @brief Reset the PMU setup after collection
+ *
+ * Special Notes
+ *         Restores the PMU state previously saved in perfver4_Initialize.
+ * This function should be called in parallel across all CPUs + * after sampling collection ends/terminates. + * + */ +static VOID +perfver4_Destroy ( + VOID *param +) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); + return; + } + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to restore PMU state on CPU %d.", this_cpu); + return; + } + + SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0x0."); + + SYS_Write_MSR(restore_reg_addr[0], 0); + SYS_Write_MSR(restore_reg_addr[1], 0); + SYS_Write_MSR(restore_reg_addr[2], 0); + + CPU_STATE_pmu_state(pcpu) = NULL; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * @fn perfver4_Read_LBRs(buffer) + * + * @param IN buffer - pointer to the buffer to write the data into + * @return Last branch source IP address + * + * @brief Read all the LBR registers into the buffer provided and return + * + */ +static U64 +perfver4_Read_LBRs ( + VOID *buffer +) +{ + U32 i, count = 0; + U64 *lbr_buf = NULL; + U64 value = 0; + U64 tos_ip_addr = 0; + U64 tos_ptr = 0; + SADDR saddr; + U32 pairs = 0; + U32 this_cpu; + U32 dev_idx; + LBR lbr; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); + + if (lbr == NULL) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!lbr)."); + return 0; + } + + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf = (U64 *)buffer; + } + + if (LBR_num_entries(lbr) > 0) { + pairs = (LBR_num_entries(lbr) - 1)/3; + } + for (i = 0; i < LBR_num_entries(lbr); i++) { + if (i == 0 && DEV_CONFIG_enable_arch_lbrs(pcfg)) { + value = DEV_CONFIG_num_lbr_entries(pcfg)-1; + } else if (LBR_entries_reg_id(lbr, i)) { + value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); + } else { + continue; + } + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = value; + } + SEP_DRV_LOG_TRACE("LBR %u, 0x%llx.", i, value); + if (i == 0) { + tos_ptr = value; + } else { + if (LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP) { // LBR from register + if (tos_ptr == count) { + SADDR_addr(saddr) = value & PERFVER4_LBR_BITMASK; + tos_ip_addr = (U64) SADDR_addr(saddr); // Add signed extension + SEP_DRV_LOG_TRACE("Tos_ip_addr %llu, 0x%llx.", tos_ptr, value); + } + count++; + } + } + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf++; + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); + return tos_ip_addr; +} + +/* + * @fn perfver4_Clean_Up(param) + * + * @param IN param - currently not used + * + * @brief Clean up registers in ECB + * + */ +static VOID +perfver4_Clean_Up ( + VOID *param +) +{ + U32 this_cpu, j; + U32 dev_idx; + ECB pecb_first_grp = NULL; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + + for (j = 0; j < LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); j++) { + (pecb_first_grp) = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[j]; + if (!(pecb_first_grp)) { + continue; + } else { + break; + } + } + FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS(pecb, i, PMU_OPERATION_ALL_REG) { + if 
(ECB_entries_clean_up_get(pecb, i)) {
+                        SEP_DRV_LOG_TRACE("Clean up set --- RegId --- %x.", ECB_entries_reg_id(pecb, i));
+                        SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL);
+                }
+        } END_FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS;
+
+        /* Clear outstanding frozen bits */
+        if (pecb_first_grp) {
+                SYS_Write_MSR(ECB_entries_reg_id(pecb_first_grp, ECB_SECTION_REG_INDEX(pecb_first_grp, GLOBAL_OVF_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), PERFVER4_FROZEN_BIT_MASK);
+        }
+
+        SEP_DRV_LOG_TRACE_OUT("");
+        return;
+}
+
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void perfver4_Check_Overflow_Htoff_Mode(masks)
+ *
+ * @param masks    the mask structure to populate
+ *
+ * @return None     No return needed
+ *
+ * @brief Called by the data processing method to figure out which registers have overflowed.
+ *
+ */
+static void
+perfver4_Check_Overflow_Htoff_Mode (
+        DRV_MASKS masks
+)
+{
+        U32 index;
+        U64 value = 0;
+        U64 overflow_status = 0;
+        U32 this_cpu;
+        BUFFER_DESC bd;
+        CPU_STATE pcpu;
+        ECB pecb;
+        U32 dev_idx;
+        U32 cur_grp;
+        DISPATCH dispatch;
+        DEV_CONFIG pcfg;
+        U64 overflow_status_clr = 0;
+        DRV_EVENT_MASK_NODE event_flag;
+
+        SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks);
+
+        this_cpu = CONTROL_THIS_CPU();
+        bd = &cpu_buf[this_cpu];
+        pcpu = &pcb[this_cpu];
+        dev_idx = core_to_dev_map[this_cpu];
+        cur_grp = CPU_STATE_current_group(pcpu);
+        dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+        pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+        pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+
+        if (!pecb) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+                return;
+        }
+
+        // initialize masks
+        DRV_MASKS_masks_num(masks) = 0;
+
+        overflow_status = SYS_Read_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, PMU_OPERATION_GLOBAL_STATUS)));
+
+        if (DEV_CONFIG_pebs_mode(pcfg) && (DEV_CONFIG_pebs_record_num(pcfg) == 1)) {
+                overflow_status = PEBS_Overflowed (this_cpu, overflow_status, 0);
+        }
+        overflow_status_clr = overflow_status;
+        SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, overflow_status);
+
+        if (DRV_CONFIG_ipt_mode(drv_cfg)) {
+                IPT_TOPA_Interrupted(this_cpu, overflow_status);
+                overflow_status_clr &= ~PERF_GLOBAL_STATUS_TOPA_PMI_MASK;
+        }
+
+        index = 0;
+        BUFFER_DESC_sample_count(bd) = 0;
+
+        if (dispatch->check_overflow_gp_errata) {
+                overflow_status = dispatch->check_overflow_gp_errata(pecb, &overflow_status_clr);
+        }
+
+        FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) {
+                if (ECB_entries_fixed_reg_get(pecb, i)) {
+                        index = i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_FIXED) + 0x20;
+                } else if (ECB_entries_is_gp_reg_get(pecb, i) && ECB_entries_reg_value(pecb, i) != 0) {
+                        index = i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_GP);
+                        if (index >= 4 && index <= 7) {
+                                value = SYS_Read_MSR(ECB_entries_reg_id(pecb, i));
+                                if (value > 0 && value <= 0x100000000LL) {
+                                        overflow_status |= ((U64)1 << index);
+                                }
+                        }
+                } else {
+                        continue;
+                }
+                if (overflow_status & ((U64)1 << index)) {
+                        SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", this_cpu, index);
+                        SEP_DRV_LOG_TRACE("Register 0x%x --- val 0x%llx.", ECB_entries_reg_id(pecb, i), SYS_Read_MSR(ECB_entries_reg_id(pecb, i)));
+                        SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ECB_entries_reg_value(pecb, i));
+
+                        if (DRV_CONFIG_enable_cp_mode(drv_cfg)) {
+                                /* Increment the interrupt count.
*/ + if (interrupt_counts) { + interrupt_counts[this_cpu * DRV_CONFIG_num_events(drv_cfg) + ECB_entries_event_id_index(pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8) 0; + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_em_trigger_get(pecb, i)) { + DRV_EVENT_MASK_trigger(&event_flag) = 1; + } + if (ECB_entries_collect_on_ctx_sw_get(pecb, i)) { + DRV_EVENT_MASK_collect_on_ctx_sw(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = ECB_entries_event_id_index(pecb, i); + DRV_EVENT_MASK_desc_id(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = ECB_entries_desc_id(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR("The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + /* Clear outstanding overflow bits */ + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_OVF_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_HT_OFF); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +#define MAX_COUNTER 0xFFFFFFFFFFFFLLU + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void perfver4_Check_Overflow_Nonht_Mode(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. 
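+ *
+ *        Also re-arms FIXED_CTR3 with a positive sample-after value when
+ *        perf-metrics collection is on. A hedged sketch of the arithmetic
+ *        used below (assuming the ECB stores the reload as -SAV in 48-bit
+ *        two's complement; illustrative only):
+ *
+ *            // SAV = 0x200000: stored reload = 0xFFFFFFE00000, and
+ *            // (~stored + 1) & MAX_COUNTER == 0x200000 again
+ *            SYS_Write_MSR(ctr, (~stored + 1) & MAX_COUNTER);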
+ *
+ */
+static VOID
+perfver4_Check_Overflow_Nonht_Mode (
+        DRV_MASKS masks
+)
+{
+        U32 index;
+        U64 overflow_status = 0;
+        U32 this_cpu = CONTROL_THIS_CPU();
+        BUFFER_DESC bd = &cpu_buf[this_cpu];
+        CPU_STATE pcpu = &pcb[this_cpu];
+        U32 dev_idx = core_to_dev_map[this_cpu];
+        U32 cur_grp = CPU_STATE_current_group(pcpu);
+        DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+        ECB pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+        U64 overflow_status_clr = 0;
+        DRV_EVENT_MASK_NODE event_flag;
+
+        SEP_DRV_LOG_TRACE_IN("");
+
+        if (!pecb) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+                return;
+        }
+
+        // initialize masks
+        DRV_MASKS_masks_num(masks) = 0;
+
+        overflow_status = SYS_Read_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, PMU_OPERATION_GLOBAL_STATUS)));
+
+        if (DEV_CONFIG_pebs_mode(pcfg) && (DEV_CONFIG_pebs_record_num(pcfg) == 1)) {
+                overflow_status = PEBS_Overflowed (this_cpu, overflow_status, 0);
+        }
+        overflow_status_clr = overflow_status;
+        SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, overflow_status);
+
+        if (DRV_CONFIG_ipt_mode(drv_cfg)) {
+                IPT_TOPA_Interrupted(this_cpu, overflow_status);
+                overflow_status_clr &= ~PERF_GLOBAL_STATUS_TOPA_PMI_MASK;
+        }
+
+        index = 0;
+        BUFFER_DESC_sample_count(bd) = 0;
+
+        FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) {
+                if (ECB_entries_fixed_reg_get(pecb, i)) {
+                        index = i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_FIXED) + 0x20;
+                } else if (ECB_entries_is_gp_reg_get(pecb, i) && ECB_entries_reg_value(pecb, i) != 0) {
+                        index = i - ECB_operations_register_start(pecb, PMU_OPERATION_DATA_GP);
+                } else {
+                        continue;
+                }
+                if (overflow_status & ((U64)1 << index)) {
+                        SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", this_cpu, index);
+                        SEP_DRV_LOG_TRACE("Register 0x%x --- val 0x%llx.", ECB_entries_reg_id(pecb, i), SYS_Read_MSR(ECB_entries_reg_id(pecb, i)));
+
+                        DRV_EVENT_MASK_bitFields1(&event_flag) = (U8) 0;
+                        if (DEV_CONFIG_enable_perf_metrics(pcfg) && index == FIXED_CTR3_BIT_INDEX) {
+                                // Writing positive SAV into data register before reading metrics
+                                SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ((~(ECB_entries_reg_value(pecb, i)) + 1) & MAX_COUNTER));
+                                DRV_EVENT_MASK_perf_metrics_capture(&event_flag) = 1;
+                        } else {
+                                SYS_Write_MSR(ECB_entries_reg_id(pecb, i), ECB_entries_reg_value(pecb, i));
+                        }
+                        if (DRV_CONFIG_enable_cp_mode(drv_cfg)) {
+                                /* Increment the interrupt count.
*/ + if (interrupt_counts) { + interrupt_counts[this_cpu * DRV_CONFIG_num_events(drv_cfg) + ECB_entries_event_id_index(pecb, i)] += 1; + } + } + + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_uncore_get(pecb, i)) { + DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; + } + if (ECB_entries_branch_evt_get(pecb, i)) { + DRV_EVENT_MASK_branch(&event_flag) = 1; + } + if (ECB_entries_em_trigger_get(pecb, i)) { + DRV_EVENT_MASK_trigger(&event_flag) = 1; + } + if (ECB_entries_collect_on_ctx_sw_get(pecb, i)) { + DRV_EVENT_MASK_collect_on_ctx_sw(&event_flag) = 1; + } + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = ECB_entries_event_id_index(pecb, i); + DRV_EVENT_MASK_desc_id(DRV_MASKS_eventmasks(masks) + DRV_MASKS_masks_num(masks)) = ECB_entries_desc_id(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR("The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + /* Clear outstanding overflow bits */ + SYS_Write_MSR(ECB_entries_reg_id(pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_OVF_CTRL_REG_INDEX, PMU_OPERATION_GLOBAL_REGS)), overflow_status_clr & PERFVER4_OVERFLOW_BIT_MASK_NON_HT); + + SEP_DRV_LOG_TRACE("Check Overflow completed %d.", this_cpu); +} + + +/* ------------------------------------------------------------------------- */ +/*! + * @fn perfver4_Read_Counts(param, id) + * + * @param param The read thread node to process + * @param id The event id for the which the sample is generated + * + * @return None No return needed + * + * @brief Read CPU event based counts data and store into the buffer param; + * For the case of the trigger event, store the SAV value. 
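+ *
+ *        Sketch of the SAV recovery below (assuming the reload value is
+ *        stored as -SAV, two's complement, masked to the counter width;
+ *        illustrative only):
+ *
+ *            // reg_value = (~SAV + 1) & max_bits, therefore
+ *            // ~(reg_value - 1) & max_bits == SAV; e.g. for a 48-bit
+ *            // counter and SAV = 0x186A0 (100000):
+ *            // reg_value = 0xFFFFFFFE7960, and
+ *            // ~(0xFFFFFFFE7960 - 1) & 0xFFFFFFFFFFFF == 0x186A0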
+ */
+static VOID
+perfver4_Read_Counts (
+        PVOID param,
+        U32 id
+)
+{
+        U64 *data;
+        U32 this_cpu;
+        CPU_STATE pcpu;
+        U32 dev_idx;
+        DEV_CONFIG pcfg;
+        U32 event_id = 0;
+
+        SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);
+
+        this_cpu = CONTROL_THIS_CPU();
+        pcpu = &pcb[this_cpu];
+        dev_idx = core_to_dev_map[this_cpu];
+        pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+        if (!DEV_CONFIG_num_events(pcfg)) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!num_events).");
+                return;
+        }
+
+        if (DEV_CONFIG_ebc_group_id_offset(pcfg)) {
+                // Write GroupID
+                data = (U64 *)((S8 *)param + DEV_CONFIG_ebc_group_id_offset(pcfg));
+                *data = CPU_STATE_current_group(pcpu) + 1;
+        }
+
+        LFENCE_SERIALIZE();
+        FOR_EACH_REG_CORE_OPERATION(pecb, i, PMU_OPERATION_DATA_ALL) {
+                if (ECB_entries_counter_event_offset(pecb, i) == 0) {
+                        continue;
+                }
+                data = (U64 *)((S8 *)param + ECB_entries_counter_event_offset(pecb, i));
+                event_id = ECB_entries_event_id_index(pecb, i);
+                if (event_id == id) {
+                        *data = ~(ECB_entries_reg_value(pecb, i) - 1) & ECB_entries_max_bits(pecb, i);
+                } else {
+                        *data = SYS_Read_PMC(ECB_entries_reg_id(pecb, i), ECB_entries_fixed_reg_get(pecb, i));
+                        SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL);
+                }
+        } END_FOR_EACH_REG_CORE_OPERATION;
+        LFENCE_SERIALIZE();
+
+        SEP_DRV_LOG_TRACE_OUT("");
+        return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn perfver4_Read_Metrics(buffer)
+ *
+ * @param buffer   buffer to write metrics into
+ *
+ * @return None     No return needed
+ *
+ * @brief Read hardware metrics from IA32_PERF_METRICS MSR
+ */
+static VOID
+perfver4_Read_Metrics (
+        PVOID buffer
+)
+{
+        U64 *data, metrics = 0;
+        U32 j;
+        U32 index = 0;
+        U32 this_cpu = CONTROL_THIS_CPU();
+        U32 dev_idx = core_to_dev_map[this_cpu];
+        DEV_CONFIG pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+        if (!DEV_CONFIG_num_events(pcfg)) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!num_events).");
+                return;
+        }
+
+        data = (U64 *)buffer;
+        FOR_EACH_NONEVENT_REG(pecb, i) {
+                metrics = SYS_Read_MSR(ECB_entries_reg_id(pecb, i));
+                for (j = 0; j < DEV_CONFIG_num_perf_metrics(pcfg); j++) {
+                        *data = (metrics & (0xFFULL << 8*j)) >> 8*j;
+                        data++;
+                }
+        } END_FOR_EACH_NONEVENT_REG;
+
+        if (DRV_CONFIG_emon_mode(drv_cfg)) {
+                SYS_Write_MSR(IA32_FIXED_CTR3, 0LL);
+        } else {
+                FOR_EACH_DATA_REG(pecb, i) {
+                        if (ECB_entries_fixed_reg_get(pecb, i)) {
+                                index = ECB_entries_reg_id(pecb, i) - IA32_FIXED_CTR0 + 0x20;
+                                if (index == FIXED_CTR3_BIT_INDEX) {
+                                        SYS_Write_MSR(IA32_FIXED_CTR3, ECB_entries_reg_value(pecb, i));
+                                        break;
+                                }
+                        }
+                } END_FOR_EACH_DATA_REG;
+        }
+
+        SYS_Write_MSR(IA32_PERF_METRICS, 0LL);
+
+        return;
+}
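+
+/*
+ * Illustrative note on the unpacking above: IA32_PERF_METRICS is treated as
+ * up to eight packed one-byte fields, so (metrics & (0xFFULL << 8*j)) >> 8*j
+ * is equivalent to (metrics >> (8*j)) & 0xFF; e.g. metrics = 0x40202020
+ * unpacks to 0x20, 0x20, 0x20, 0x40 for j = 0..3.
+ */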
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID perfver4_Platform_Info(data)
+ *
+ * @brief Reads the MSR_PLATFORM_INFO register if present
+ *
+ * @param data     pointer to the DRV_PLATFORM_INFO structure to populate
+ *
+ * @return None
+ *
+ * Special Notes:
+ *
+ */
+static VOID
+perfver4_Platform_Info (
+        PVOID data
+)
+{
+        DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data;
+        U64 value = 0;
+        U64 energy_multiplier;
+
+        SEP_DRV_LOG_TRACE_IN("Data: %p.", data);
+
+        if (!platform_data) {
+                SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data).");
+                return;
+        }
+
+#define IA32_MSR_PLATFORM_INFO 0xCE
+        value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO);
+
+        DRV_PLATFORM_INFO_info(platform_data) = value;
+        DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0;
+
+#define IA32_MSR_MISC_ENABLE 0x1A4
+        DRV_PLATFORM_INFO_misc_valid(platform_data) = 1;
+        value = SYS_Read_MSR(IA32_MSR_MISC_ENABLE);
+        DRV_PLATFORM_INFO_misc_info(platform_data) = value;
+#undef IA32_MSR_MISC_ENABLE
+
+        energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER);
+        SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier);
+        DRV_PLATFORM_INFO_energy_multiplier(platform_data) = (U32)((energy_multiplier & 0x00001F00) >> 8);
+
+        SEP_DRV_LOG_TRACE_OUT("");
+        return;
+}
+
+/*
+ * Initialize the dispatch table
+ */
+DISPATCH_NODE perfver4_dispatch = {
+        .init = perfver4_Initialize,
+        .fini = perfver4_Destroy,
+        .write = perfver4_Write_PMU,
+        .freeze = perfver4_Disable_PMU,
+        .restart = perfver4_Enable_PMU,
+        .read_data = perfver4_Read_PMU_Data,
+        .check_overflow = perfver4_Check_Overflow,
+        .swap_group = perfver4_Swap_Group,
+        .read_lbrs = perfver4_Read_LBRs,
+        .cleanup = perfver4_Clean_Up,
+        .hw_errata = NULL,
+        .read_power = NULL,
+        .check_overflow_errata = NULL,
+        .read_counts = perfver4_Read_Counts,
+        .check_overflow_gp_errata = NULL,
+        .read_ro = NULL,
+        .platform_info = perfver4_Platform_Info,
+        .trigger_read = NULL,
+        .scan_for_uncore = NULL,
+        .read_metrics = NULL
+};
+
+DISPATCH_NODE perfver4_dispatch_htoff_mode = {
+        .init = perfver4_Initialize,
+        .fini = perfver4_Destroy,
+        .write = perfver4_Write_PMU,
+        .freeze = perfver4_Disable_PMU,
+        .restart = perfver4_Enable_PMU,
+        .read_data = perfver4_Read_PMU_Data,
+        .check_overflow = perfver4_Check_Overflow_Htoff_Mode,
+        .swap_group = perfver4_Swap_Group,
+        .read_lbrs = perfver4_Read_LBRs,
+        .cleanup = perfver4_Clean_Up,
+        .hw_errata = NULL,
+        .read_power = NULL,
+        .check_overflow_errata = NULL,
+        .read_counts = perfver4_Read_Counts,
+        .check_overflow_gp_errata = NULL,
+        .read_ro = NULL,
+        .platform_info = perfver4_Platform_Info,
+        .trigger_read = NULL,
+        .scan_for_uncore = NULL,
+        .read_metrics = NULL
+};
+
+DISPATCH_NODE perfver4_dispatch_nonht_mode = {
+        .init = perfver4_Initialize,
+        .fini = perfver4_Destroy,
+        .write = perfver4_Write_PMU,
+        .freeze = perfver4_Disable_PMU,
+        .restart = perfver4_Enable_PMU,
+        .read_data = perfver4_Read_PMU_Data,
+        .check_overflow = perfver4_Check_Overflow_Nonht_Mode,
+        .swap_group = perfver4_Swap_Group,
+        .read_lbrs = perfver4_Read_LBRs,
+        .cleanup = perfver4_Clean_Up,
+        .hw_errata = NULL,
+        .read_power = NULL,
+        .check_overflow_errata = NULL,
+        .read_counts = perfver4_Read_Counts,
+        .check_overflow_gp_errata = NULL,
+        .read_ro = NULL,
+        .platform_info = perfver4_Platform_Info,
+        .trigger_read = NULL,
+        .scan_for_uncore = NULL,
+        .read_metrics = perfver4_Read_Metrics
+};
+
diff --git a/drivers/platform/x86/sepdk/sep/pmi.c b/drivers/platform/x86/sepdk/sep/pmi.c
new file mode 100644
index 00000000000000..49406f3fa3c531
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/pmi.c
@@ -0,0 +1,440 @@
+/****
Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include +#include +#include +#if defined(DRV_EM64T) +#include +#endif +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "apic.h" +#include "lwpmudrv.h" +#include "output.h" +#include "control.h" +#include "pmi.h" +#include "utility.h" +#include "pebs.h" + +#include "sepdrv_p_state.h" + +// Desc id #0 is used for module records +#define COMPUTE_DESC_ID(index) ((index)) + +extern DRV_CONFIG drv_cfg; +extern uid_t uid; +extern DRV_SETUP_INFO_NODE req_drv_setup_info; +#define EFLAGS_V86_MASK 0x00020000L +extern DRV_BOOL multi_pebs_enabled; +/********************************************************************* + * Global Variables / State + *********************************************************************/ + +/********************************************************************* + * Interrupt Handler + *********************************************************************/ + +/* + * PMI_Interrupt_Handler + * Arguments + * IntFrame - Pointer to the Interrupt Frame + * + * Returns + * None + * + * Description + * Grab the data that is needed to populate the sample records + */ +#if defined(DRV_EM64T) +#define IS_LDT_BIT 0x4 +#define SEGMENT_SHIFT 3 +IDTGDT_DESC gdt_desc; + +U32 +pmi_Get_CSD(U32 seg, U32 *low, U32 *high) +{ + PVOID gdt_max_addr; + struct desc_struct *gdt; + CodeDescriptor *csd; + + SEP_DRV_LOG_TRACE_IN("Seg: %u, low: %p, high: %p.", seg, low, high); + + gdt_max_addr = + (PVOID)(((U64)gdt_desc.idtgdt_base) + gdt_desc.idtgdt_limit); + gdt = gdt_desc.idtgdt_base; + + if (seg & IS_LDT_BIT) { + *low = 0; + *high = 0; + SEP_DRV_LOG_TRACE_OUT("FALSE [%u, %u] (IS_LDT_BIT).", *low, + *high); + return FALSE; + } + + // segment offset is based on dropping the bottom 3 bits... 
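+        // (For reference, the architectural x86 selector layout this relies
+        //  on: bits 0-1 = RPL, bit 2 = table indicator (LDT when set,
+        //  rejected above via IS_LDT_BIT), bits 3+ = descriptor index, so
+        //  e.g. selector 0x10 indexes gdt[2].)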
+ csd = (CodeDescriptor *)&(gdt[seg >> SEGMENT_SHIFT]); + + if (((PVOID)csd) >= gdt_max_addr) { + SEP_DRV_LOG_WARNING_TRACE_OUT("FALSE (segment too big in get_CSD(0x%x)!).", + seg); + return FALSE; + } + + *low = csd->u1.lowWord; + *high = csd->u2.highWord; + + SEP_DRV_LOG_TRACE("Seg 0x%x, low %08x, high %08x, reserved_0: %d.", seg, + *low, *high, csd->u2.s2.reserved_0); + SEP_DRV_LOG_TRACE_OUT("TRUE [%u, %u].", *low, *high); + + return TRUE; +} +#endif + +asmlinkage VOID +PMI_Interrupt_Handler(struct pt_regs *regs) +{ + SampleRecordPC *psamp; + CPU_STATE pcpu; + BUFFER_DESC bd; +#if defined(DRV_IA32) + U32 csdlo; // low half code seg descriptor + U32 csdhi; // high half code seg descriptor + U32 seg_cs; // code seg selector +#endif + DRV_MASKS_NODE event_mask; + U32 this_cpu; + U32 dev_idx; + DISPATCH dispatch; + DEV_CONFIG pcfg; + U32 i; + U32 is_64bit_addr = FALSE; + U32 pid; + U32 tid; + U64 tsc; + U32 desc_id; + EVENT_DESC evt_desc; + U32 accept_interrupt = 1; +#if defined(SECURE_SEP) + uid_t l_uid; +#endif + U64 lbr_tos_from_ip = 0; + U32 unc_dev_idx; + DEV_UNC_CONFIG pcfg_unc = NULL; + DISPATCH dispatch_unc = NULL; + U32 read_unc_evt_counts_from_intr = 0; + + // needs to be before function calls for the tracing to make sense + // may later want to separate the INTERRUPT_IN from the PID/TID logging + SEP_DRV_LOG_INTERRUPT_IN("PID: %d, TID: %d.", current->pid, GET_CURRENT_TGID()); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + bd = &cpu_buf[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + // needs to be before dispatch->freeze to ensure printk is never called from an interrupt + SYS_Locked_Inc(&CPU_STATE_in_interrupt(pcpu)); + + // Disable the counter control + dispatch->freeze(NULL); + + CPU_STATE_nmi_handled(pcpu)++; + +#if defined(SECURE_SEP) + l_uid = DRV_GET_UID(current); + accept_interrupt = (l_uid == uid); +#endif + dispatch->check_overflow(&event_mask); + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING || + CPU_STATE_accept_interrupt(pcpu) != 1) { + goto pmi_cleanup; + } + + pid = GET_CURRENT_TGID(); + tid = current->pid; + + if (DRV_CONFIG_target_pid(drv_cfg) > 0 && + pid != DRV_CONFIG_target_pid(drv_cfg)) { + accept_interrupt = 0; + } + + if (accept_interrupt == 0) { + goto pmi_cleanup; + } + UTILITY_Read_TSC(&tsc); + if (multi_pebs_enabled && PEBS_Get_Num_Records_Filled() > 0) { + PEBS_Flush_Buffer(NULL); + } + + SEP_DRV_LOG_TRACE("Nb overflowed events: %d.", event_mask.masks_num); + for (i = 0; i < event_mask.masks_num; i++) { + if (DRV_EVENT_MASK_collect_on_ctx_sw( + &event_mask.eventmasks[i])) { + if (CPU_STATE_last_thread_id(pcpu) == tid) { + continue; + } else { + CPU_STATE_last_thread_id(pcpu) = tid; + } + } + if (multi_pebs_enabled && + (DRV_EVENT_MASK_precise(&event_mask.eventmasks[i]))) { + continue; + } + if (DRV_CONFIG_mixed_ebc_available(drv_cfg)) { + desc_id = DRV_EVENT_MASK_desc_id( + &event_mask.eventmasks[i]); + } else { + if (DRV_CONFIG_event_based_counts(drv_cfg) == 0) { + desc_id = COMPUTE_DESC_ID( + DRV_EVENT_MASK_event_idx( + &event_mask.eventmasks[i])); + } else { + desc_id = CPU_STATE_current_group(pcpu); + } + } + + CPU_STATE_num_ebs_sampled(pcpu)++; + + evt_desc = desc_data[desc_id]; + psamp = (SampleRecordPC *)OUTPUT_Reserve_Buffer_Space( + bd, EVENT_DESC_sample_size(evt_desc), + (NMI_mode) ? 
TRUE : FALSE, !SEP_IN_NOTIFICATION); + if (!psamp) { + CPU_STATE_num_dropped_ebs_samples(pcpu)++; + continue; + } + + lbr_tos_from_ip = 0; + CPU_STATE_num_samples(pcpu) += 1; + SAMPLE_RECORD_descriptor_id(psamp) = desc_id; + SAMPLE_RECORD_tsc(psamp) = tsc; + SAMPLE_RECORD_pid_rec_index_raw(psamp) = 1; + SAMPLE_RECORD_pid_rec_index(psamp) = pid; + SAMPLE_RECORD_tid(psamp) = tid; + SAMPLE_RECORD_cpu_num(psamp) = (U16)this_cpu; +#if defined(DRV_IA32) + SAMPLE_RECORD_eip(psamp) = REGS_eip(regs); + SAMPLE_RECORD_eflags(psamp) = REGS_eflags(regs); + SAMPLE_RECORD_cs(psamp) = (U16)REGS_xcs(regs); + + if (SAMPLE_RECORD_eflags(psamp) & EFLAGS_V86_MASK) { + csdlo = 0; + csdhi = 0; + } else { + seg_cs = SAMPLE_RECORD_cs(psamp); + SYS_Get_CSD(seg_cs, &csdlo, &csdhi); + } + SAMPLE_RECORD_csd(psamp).u1.lowWord = csdlo; + SAMPLE_RECORD_csd(psamp).u2.highWord = csdhi; +#elif defined(DRV_EM64T) + SAMPLE_RECORD_cs(psamp) = (U16)REGS_cs(regs); + + pmi_Get_CSD(SAMPLE_RECORD_cs(psamp), + &SAMPLE_RECORD_csd(psamp).u1.lowWord, + &SAMPLE_RECORD_csd(psamp).u2.highWord); +#endif + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_pid_rec_index(psamp) %x.", + SAMPLE_RECORD_pid_rec_index(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_tid(psamp) %x.", + SAMPLE_RECORD_tid(psamp)); +#if defined(DRV_IA32) + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp) %x.", + SAMPLE_RECORD_eip(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp) %x.", + SAMPLE_RECORD_eflags(psamp)); +#endif + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_cpu_num(psamp) %x.", + SAMPLE_RECORD_cpu_num(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_cs(psamp) %x.", + SAMPLE_RECORD_cs(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_csd(psamp).lowWord %x.", + SAMPLE_RECORD_csd(psamp).u1.lowWord); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_csd(psamp).highWord %x.", + SAMPLE_RECORD_csd(psamp).u2.highWord); + +#if defined(DRV_EM64T) + is_64bit_addr = + (SAMPLE_RECORD_csd(psamp).u2.s2.reserved_0 == 1); + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = REGS_rip(regs); + SAMPLE_RECORD_ipsr(psamp) = + (REGS_eflags(regs) & 0xffffffff) | + (((U64)SAMPLE_RECORD_csd(psamp).u2.s2.dpl) + << 32); + SAMPLE_RECORD_ia64_pc(psamp) = TRUE; + } else { + SAMPLE_RECORD_eip(psamp) = REGS_rip(regs); + SAMPLE_RECORD_eflags(psamp) = REGS_eflags(regs); + SAMPLE_RECORD_ia64_pc(psamp) = FALSE; + + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eip(psamp) 0x%x.", + SAMPLE_RECORD_eip(psamp)); + SEP_DRV_LOG_TRACE("SAMPLE_RECORD_eflags(psamp) %x.", + SAMPLE_RECORD_eflags(psamp)); + } +#endif + + SAMPLE_RECORD_event_index(psamp) = + DRV_EVENT_MASK_event_idx(&event_mask.eventmasks[i]); + if (DEV_CONFIG_pebs_mode(pcfg) && + DRV_EVENT_MASK_precise(&event_mask.eventmasks[i])) { + if (EVENT_DESC_pebs_offset(evt_desc) || + EVENT_DESC_latency_offset_in_sample(evt_desc)) { + lbr_tos_from_ip = PEBS_Fill_Buffer((S8 *)psamp, + evt_desc, 0); + } + PEBS_Modify_IP((S8 *)psamp, is_64bit_addr, 0); + PEBS_Modify_TSC((S8 *)psamp, 0); + } + if (DEV_CONFIG_collect_lbrs(pcfg) && + DRV_EVENT_MASK_lbr_capture(&event_mask.eventmasks[i]) && + EVENT_DESC_lbr_offset(evt_desc) && + !DEV_CONFIG_apebs_collect_lbrs(pcfg)) { + lbr_tos_from_ip = dispatch->read_lbrs( + !DEV_CONFIG_store_lbrs(pcfg) ? 
+ NULL : + ((S8 *)(psamp) + + EVENT_DESC_lbr_offset(evt_desc))); + } + if (DRV_EVENT_MASK_branch(&event_mask.eventmasks[i]) && + DEV_CONFIG_precise_ip_lbrs(pcfg) && lbr_tos_from_ip) { + if (is_64bit_addr) { + SAMPLE_RECORD_iip(psamp) = lbr_tos_from_ip; + SEP_DRV_LOG_TRACE("UPDATED SAMPLE_RECORD_iip(psamp) 0x%llx.", + SAMPLE_RECORD_iip(psamp)); + } else { + SAMPLE_RECORD_eip(psamp) = (U32)lbr_tos_from_ip; + SEP_DRV_LOG_TRACE("UPDATED SAMPLE_RECORD_eip(psamp) 0x%x.", + SAMPLE_RECORD_eip(psamp)); + } + } + if (DEV_CONFIG_power_capture(pcfg)) { + dispatch->read_power( + ((S8 *)(psamp) + + EVENT_DESC_power_offset_in_sample(evt_desc))); + } + + if (DRV_CONFIG_event_based_counts(drv_cfg) && + DRV_EVENT_MASK_trigger(&event_mask.eventmasks[i])) { + dispatch->read_counts( + (S8 *)psamp, + DRV_EVENT_MASK_event_idx( + &event_mask.eventmasks[i])); + } + if (DEV_CONFIG_enable_perf_metrics(pcfg) && + DRV_EVENT_MASK_perf_metrics_capture( + &event_mask.eventmasks[i])) { + dispatch->read_metrics( + (S8 *)(psamp) + + EVENT_DESC_perfmetrics_offset(evt_desc)); + } + if (DRV_CONFIG_enable_p_state(drv_cfg)) { + if (DRV_CONFIG_read_pstate_msrs(drv_cfg) && + (DRV_CONFIG_p_state_trigger_index(drv_cfg) == -1 || + SAMPLE_RECORD_event_index(psamp) == + DRV_CONFIG_p_state_trigger_index(drv_cfg))) { + SEPDRV_P_STATE_Read( + (S8 *)(psamp) + EVENT_DESC_p_state_offset(evt_desc), pcpu); + } + if (!DRV_CONFIG_event_based_counts(drv_cfg) && + CPU_STATE_p_state_counting(pcpu)) { + dispatch->read_counts( + (S8 *)psamp, + DRV_EVENT_MASK_event_idx(&event_mask.eventmasks[i])); + } + } + + if (DRV_CONFIG_unc_collect_in_intr_enabled(drv_cfg) && + DRV_EVENT_MASK_trigger(&event_mask.eventmasks[i])) { + for (unc_dev_idx = num_core_devs; + unc_dev_idx < num_devices; unc_dev_idx++) { + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg( + &devices[unc_dev_idx]); + dispatch_unc = LWPMU_DEVICE_dispatch( + &devices[unc_dev_idx]); + + if (pcfg_unc && + DEV_UNC_CONFIG_device_with_intr_events(pcfg_unc) && + dispatch_unc && + dispatch_unc->trigger_read) { + read_unc_evt_counts_from_intr = 1; + dispatch_unc->trigger_read( + (S8 *)(psamp) + EVENT_DESC_uncore_ebc_offset(evt_desc), + unc_dev_idx, + read_unc_evt_counts_from_intr); + } + } + } + } + +pmi_cleanup: + if (DEV_CONFIG_pebs_mode(pcfg)) { + if (!multi_pebs_enabled) { + PEBS_Reset_Index(this_cpu); + } else { + if (cpu_sideband_buf) { + OUTPUT outbuf = &BUFFER_DESC_outbuf( + &cpu_sideband_buf[this_cpu]); + if (OUTPUT_signal_full(outbuf) && + !OUTPUT_tasklet_queued(outbuf)) { + SEP_DRV_LOG_TRACE("Interrupt-driven sideband buffer flush tasklet scheduling."); + OUTPUT_tasklet_queued(outbuf) = TRUE; + tasklet_schedule( + &CPU_STATE_nmi_tasklet(pcpu)); + } + } + } + } + + // Reset the data counters + if (CPU_STATE_trigger_count(pcpu) == 0) { + dispatch->swap_group(FALSE); + } + // Re-enable the counter control + dispatch->restart(NULL); + // do not use SEP_DRV_LOG_X (where X != INTERRUPT) below this + SYS_Locked_Dec(&CPU_STATE_in_interrupt(pcpu)); + + SEP_DRV_LOG_INTERRUPT_OUT(""); + return; +} + diff --git a/drivers/platform/x86/sepdk/sep/pmu_list.c b/drivers/platform/x86/sepdk/sep/pmu_list.c new file mode 100644 index 00000000000000..cde05f85f94d9f --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/pmu_list.c @@ -0,0 +1,841 @@ +/**** + Copyright (C) 2019 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. 
+
+        SEP Development Kit is free software; you can redistribute it
+        and/or modify it under the terms of the GNU General Public License
+        version 2 as published by the Free Software Foundation.
+
+        SEP Development Kit is distributed in the hope that it will be useful,
+        but WITHOUT ANY WARRANTY; without even the implied warranty of
+        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+        GNU General Public License for more details.
+
+        As a special exception, you may use this file as part of a free software
+        library without restriction. Specifically, if other files instantiate
+        templates or use macros or inline functions from this file, or you compile
+        this file and link it with other files to produce an executable, this
+        file does not by itself cause the resulting executable to be covered by
+        the GNU General Public License. This exception does not however
+        invalidate any other reasons why the executable file might be covered by
+        the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+#include "inc/control.h"
+#include "inc/utility.h"
+#include "inc/msrdefs.h"
+
+#include "inc/pmu_info_struct.h"
+#include "inc/pmu_info_pci.h"
+#include "inc/pmu_info_mmio.h"
+#include "inc/pmu_info_msr.h"
+#include "inc/pmu_info.h"
+#include "inc/pmu_list.h"
+
+#define USE_RANGE_OPTIMIZATION
+
+static S32 pmu_info_index = -1;
+static DRV_BOOL arch_pmu_info_used = FALSE;
+
+static PMU_SEARCH_NODE *msr_root = NULL;
+static PMU_SEARCH_NODE *pci_root = NULL;
+static PMU_SEARCH_NODE *mmio_root = NULL;
+
+static void
+pmu_list_Lookup_PMU_Info(const PMU_INFO_NODE *pmu_list,
+                         U32 family,
+                         U32 model,
+                         U32 stepping)
+{
+        S32 i = 0;
+
+        while (!(pmu_list[i].family == 0 && pmu_list[i].model == 0 &&
+                 pmu_list[i].stepping_to == 0)) {
+                if (pmu_list[i].family == family &&
+                    pmu_list[i].model == model &&
+                    pmu_list[i].stepping_from <= stepping &&
+                    pmu_list[i].stepping_to >= stepping) {
+                        pmu_info_index = i;
+                        return;
+                }
+                i++;
+        }
+}
+
+/****************************************************************************************
+ * Common helper functions for the search algorithm
+ ****************************************************************************************/
+
+static U16
+pmu_list_Max_Height(PMU_SEARCH_NODE *node_left, PMU_SEARCH_NODE *node_right)
+{
+        if (node_left && node_right) {
+                return (node_left->height > node_right->height) ?
+ node_left->height : + node_right->height; + } else if (node_left) { + return node_left->height; + } else if (node_right) { + return node_right->height; + } + return 0; +} + +static PMU_SEARCH_NODE * +pmu_list_Right_Rotate(PMU_SEARCH_NODE *node) +{ + PMU_SEARCH_NODE *nn, *r_child_nn; + + nn = node->left; + r_child_nn = nn->right; + + // Rotate + nn->right = node; // node becomes right child + node->left = r_child_nn; // original right child becomes left child + + // update height + node->height = 1 + pmu_list_Max_Height(node->left, node->right); + nn->height = 1 + pmu_list_Max_Height(nn->left, nn->right); + + return nn; +} + +static PMU_SEARCH_NODE * +pmu_list_Left_Rotate(PMU_SEARCH_NODE *node) +{ + PMU_SEARCH_NODE *nn, *l_child_nn; + + nn = node->right; + l_child_nn = nn->left; + + // Rotate + nn->left = node; // node becomes left child + node->right = l_child_nn; // original left child becomes right child + + // update height + node->height = 1 + pmu_list_Max_Height(node->left, node->right); + nn->height = 1 + pmu_list_Max_Height(nn->left, nn->right); + + return nn; +} + +static PMU_SEARCH_NODE * +pmu_list_Create_Node(U64 key, U16 range, void *addr) +{ + PMU_SEARCH_NODE *temp = (PMU_SEARCH_NODE *)CONTROL_Allocate_Memory( + sizeof(PMU_SEARCH_NODE)); + temp->key = key; + temp->range = range; + temp->left = NULL; + temp->right = NULL; + temp->height = 1; + temp->addr = addr; + SEP_DRV_LOG_TRACE("Added :: < %x,%x>", key, range); + return temp; +} + +static void +pmu_list_Delete_Tree(PMU_SEARCH_NODE *node) +{ + if (node == NULL) { + return; + } + pmu_list_Delete_Tree(node->left); + pmu_list_Delete_Tree(node->right); + + node->left = NULL; + node->right = NULL; + SEP_DRV_LOG_TRACE("Delete :: <%x, %x>", node->key, + node->range); + CONTROL_Free_Memory(node); +} + +/**************************************************************************************** + * Range is not used: for PCI and MMIO + ****************************************************************************************/ + +static PMU_SEARCH_NODE * +pmu_list_Balance_Tree(PMU_SEARCH_NODE *node, U64 key) +{ + S32 height_delta = 0; + + if (node->left && node->right) { + height_delta = node->left->height - node->right->height; + } else if (node->left) { + height_delta = node->left->height; + } else if (node->right) { + height_delta = 0 - (node->right->height); + } + + if (height_delta == 0) { + // tree is balanced + return node; + } + // if Delta > 1, balance left tree + else if ((height_delta > 1) && (node->key > key)) { + node = pmu_list_Right_Rotate(node); + } else if ((height_delta > 1) && (node->key < key)) { + node->left = pmu_list_Left_Rotate(node->left); + node = pmu_list_Right_Rotate(node); + } + // if Delta < -1, balance right tree + else if ((height_delta < -1) && (node->key < key)) { + node = pmu_list_Left_Rotate(node); + } else if ((height_delta < -1) && (node->key > key)) { + node->right = pmu_list_Right_Rotate(node->right); + node = pmu_list_Left_Rotate(node); + } + return node; +} + +static PMU_SEARCH_NODE * +pmu_list_Insert_Node(PMU_SEARCH_NODE *node, U64 key, void *addr) +{ + if (node == NULL) { + // make it root, range = 0 + node = pmu_list_Create_Node(key, 0, addr); + } else if (node->key < key) { + // insert to right sub tree + node->right = pmu_list_Insert_Node(node->right, key, addr); + } else if (node->key > key) { + // insert to left sub tree + node->left = pmu_list_Insert_Node(node->left, key, addr); + } else { + // do nothing + return node; + } + + // update height + node->height = 1 + 
pmu_list_Max_Height(node->left, node->right);
+
+        // Balance the tree
+        return pmu_list_Balance_Tree(node, key);
+}
+
+static PMU_SEARCH_NODE *
+pmu_list_Binary_Search(PMU_SEARCH_NODE *node, U64 key)
+{
+        if (node == NULL) {
+                return NULL;
+        }
+        if (node->key == key) {
+                return node;
+        } else if (node->key < key) {
+                return pmu_list_Binary_Search(node->right, key);
+        } else {
+                return pmu_list_Binary_Search(node->left, key);
+        }
+}
+
+/****************************************************************************************
+ * Range is used: for MSR
+ ****************************************************************************************/
+
+#if defined(USE_RANGE_OPTIMIZATION)
+static PMU_SEARCH_NODE *
+pmu_list_Balance_Tree_Range(PMU_SEARCH_NODE *node, U64 key, U16 range)
+{
+        S32 height_delta = 0;
+
+        if (node->left && node->right) {
+                height_delta = (node->left->height) - (node->right->height);
+        } else if (node->left) {
+                height_delta = node->left->height;
+        } else if (node->right) {
+                height_delta = 0 - (node->right->height);
+        }
+
+        if (height_delta == 0) {
+                // tree is balanced
+                return node;
+        }
+        // if Delta > 1, balance left tree
+        else if ((height_delta > 1) &&
+                 ((node->key > key) && ((node->key + range) > key))) {
+                node = pmu_list_Right_Rotate(node);
+        } else if ((height_delta > 1) &&
+                   ((node->key < key) && ((node->key + range) < key))) {
+                node->left = pmu_list_Left_Rotate(node->left);
+                node = pmu_list_Right_Rotate(node);
+        }
+        // if Delta < -1, balance right tree
+        else if ((height_delta < -1) &&
+                 ((node->key < key) && ((node->key + range) < key))) {
+                node = pmu_list_Left_Rotate(node);
+        } else if ((height_delta < -1) &&
+                   ((node->key > key) && ((node->key + range) > key))) {
+                node->right = pmu_list_Right_Rotate(node->right);
+                node = pmu_list_Left_Rotate(node);
+        }
+        return node;
+}
+
+static PMU_SEARCH_NODE *
+pmu_list_Insert_Node_Range(PMU_SEARCH_NODE *node, U64 key, U16 range, void *addr)
+{
+        if (node == NULL) {
+                // make it root
+                node = pmu_list_Create_Node(key, range, addr);
+        } else if (node->key < key) {
+                if (node->key + node->range < key) {
+                        // case 6: new key and range are greater than existing key and range
+                        // (new_key > old_key) & (new_key > old_key + old_range)
+                        // insert to right subtree (new_key, new_range)
+                        node->right = pmu_list_Insert_Node_Range(
+                                node->right, key, range, addr);
+                } else if (node->key + node->range < key + range) {
+                        // case 3: <new_key, new_range> overlaps the top of <old_key, old_range>
+                        // (old_key + old_range > new_key > old_key) &
+                        // (new_key + new_range > old_key + old_range)
+                        // update range: (old_key, <new_key + new_range - old_key>)
+                        SEP_DRV_LOG_TRACE(
+                                "Case 3: Updated :: <%x, %x> -> <%x, %x>",
+                                node->key, node->range, node->key,
+                                ((key + range) - (node->key)));
+                        node->range = (U16)(key - node->key) + range;
+                        return node;
+                } else {
+                        // case 1: <new_key, new_range> is a subset of <old_key, old_range>,
+                        // (old_key + old_range > new_key > old_key) &
+                        // (new_key + new_range < old_key + old_range)
+                        // do nothing
+                        return node;
+                }
+        } else if (node->key > key) {
+                if (node->key > key + range) {
+                        // case 5: new key and range are less than existing key, i.e.,
+                        // (new_key < old_key) & (new_key + new_range < old_key)
+                        // insert to left subtree (new_key, new_range)
+                        node->left = pmu_list_Insert_Node_Range(node->left, key,
+                                                                range, addr);
+                } else if (node->key + node->range < key + range) {
+                        // case 2: <new_key, new_range> is a superset of <old_key, old_range>
+                        // (new_key < old_key) & (new_key + new_range > old_key + old_range)
+                        // insert to left subtree (new_key, <old_key - new_key - 1>) and
+                        // update current node (old_key, <new_key + new_range - old_key>)
+                        SEP_DRV_LOG_TRACE(
+                                "Case 2: Split to new :: <%x, %x> -> <%x, %x>",
+                                key, range, key, (node->key - key - 1));
+                        node->left = pmu_list_Insert_Node_Range(
+                                node->left, key, ((U16)(node->key - key) - 1),
+                                addr);
+                        if (node->key + node->range < key + range) {
+                                SEP_DRV_LOG_TRACE(
+                                        "Case 2: Updated :: <%x, %x> -> <%x, %x>",
+                                        node->key, node->range, node->key,
+                                        ((key + range) - (node->key)));
+                                node->range = range - (U16)(node->key - key);
+                        }
+                } else {
+                        // case 4: <new_key, new_range> overlaps the bottom of <old_key, old_range>
+                        // (new_key < old_key) & (new_key + new_range > old_key)
+                        // insert to left tree (new_key, <old_key - new_key - 1>)
+                        SEP_DRV_LOG_TRACE("Case 4: Split to new :: <%x, %x> -> <%x, %x>",
+                                          key, range, key,
+                                          (node->key - key - 1));
+                        node->left = pmu_list_Insert_Node_Range(
+                                node->left, key, ((U16)(node->key - key) - 1),
+                                addr);
+                }
+        } else {
+                if (range > node->range) {
+                        // case 7: (new_key == old_key) & (new_range > old_range)
+                        // update current node (old_key, new_range)
+                        SEP_DRV_LOG_TRACE(
+                                "Case 7: Updated :: <%x, %x> -> <%x, %x> ",
+                                node->key, node->range, node->key, range);
+                        node->range = range;
+                }
+                return node;
+        }
+
+        // update height
+        node->height = 1 + pmu_list_Max_Height(node->left, node->right);
+
+        // Balance the tree
+        return pmu_list_Balance_Tree_Range(node, key, range);
+}
+
+static PMU_SEARCH_NODE *
+pmu_list_Binary_Search_Range(PMU_SEARCH_NODE *node, U64 key)
+{
+        if (node == NULL) {
+                return NULL;
+        }
+
+        if ((key >= (node->key)) && (key <= (node->key + node->range))) {
+                // key found
+                return node;
+        } else if (key > (node->key)) {
+                // search right tree
+                return pmu_list_Binary_Search_Range(node->right, key);
+        } else if (key < node->key) {
+                // search left tree
+                return pmu_list_Binary_Search_Range(node->left, key);
+        } else {
+                // unexpected case - something wrong
+                return NULL;
+        }
+}
+
+static OS_STATUS
+pmu_list_Create_MSR_Tree_Range(const PMU_INFO_NODE *pmu_list)
+{
+        S32 j = 0;
+        PMU_MSR_INFO_NODE *list;
+
+        if (pmu_info_index == -1) {
+                return -1;
+        }
+        if (pmu_list[pmu_info_index].msr_info_list == NULL) {
+                return -1;
+        }
+        while (pmu_list[pmu_info_index].msr_info_list[j] != 0) {
+                list = pmu_list[pmu_info_index].msr_info_list[j++];
+
+                while ((*list).msr_id != 0x0) {
+                        SEP_DRV_LOG_TRACE("Incoming :: <%x, %x> ",
+                                          (*list).msr_id, (*list).range);
+                        msr_root = pmu_list_Insert_Node_Range(msr_root,
+                                                              (*list).msr_id,
+                                                              (*list).range,
+                                                              (void *)list);
+                        list++;
+                }
+        }
+        return 0;
+}
+#else
+
+static OS_STATUS
+pmu_list_Create_MSR_Tree(const PMU_INFO_NODE *pmu_list)
+{
+        S32 j = 0;
+        PMU_MSR_INFO_NODE *list;
+
+        if (pmu_info_index == -1) {
+                return -1;
+        }
+        if (pmu_list[pmu_info_index].msr_info_list == NULL) {
+                return -1;
+        }
+        while (pmu_list[pmu_info_index].msr_info_list[j] != 0) {
+                list = pmu_list[pmu_info_index].msr_info_list[j++];
+
+                while ((*list).msr_id != 0) {
+                        if ((*list).range != 0) {
+                                // populate an entry for each MSR value in the range
+                                S32 i = 0;
+                                for (i = 0; i <= (*list).range; i++) {
+                                        msr_root = pmu_list_Insert_Node(
+                                                msr_root, (*list).msr_id + i,
+                                                (void *)list);
+                                }
+                        } else {
+                                msr_root = pmu_list_Insert_Node(
+                                        msr_root, (*list).msr_id, (void *)list);
+                        }
+                        list++;
+                }
+        }
+        return 0;
+}
+#endif
+
+static void
+pmu_list_Add_Detected_MSR(void)
+{
+        U64 rax, rbx, rcx, rdx;
+        U16 num_fixed_ctrs = 0;
+        U16 num_gp_ctrs = 0;
+
+        SEP_DRV_LOG_TRACE_IN("");
+
+        UTILITY_Read_Cpuid(0xA, &rax, &rbx, &rcx, &rdx);
+        num_gp_ctrs = ((U32)(rax >> 8)) & 0xFF;
+        num_fixed_ctrs = ((U32)(rdx)) & 0x1F;
+
+#if defined(USE_RANGE_OPTIMIZATION)
+        msr_root = pmu_list_Insert_Node_Range(msr_root, IA32_PMC0, num_gp_ctrs,
+                                              NULL);
+        msr_root =
pmu_list_Insert_Node_Range(msr_root, IA32_FIXED_CTR0, + num_fixed_ctrs, NULL); +#else + for (U32 i = 0; i < num_gp_ctrs; i++) { + msr_root = pmu_list_Insert_Node(msr_root, IA32_PMC0 + i, NULL); + } + for (U32 i = 0; i < num_fixed_ctrs; i++) { + msr_root = pmu_list_Insert_Node(msr_root, IA32_FIXED_CTR0 + i, + NULL); + } +#endif + + SEP_DRV_LOG_TRACE_OUT(""); +} + +extern DRV_BOOL +PMU_LIST_Check_MSR(U32 msr_id) +{ + PMU_SEARCH_NODE *temp; + + SEP_DRV_LOG_TRACE_IN(""); + + if (pmu_info_index == -1 || msr_root == NULL) { + SEP_DRV_LOG_TRACE_OUT("Success"); + return FALSE; + } + +#if !defined(USE_RANGE_OPTIMIZATION) + temp = pmu_list_Binary_Search(msr_root, msr_id); +#else + temp = pmu_list_Binary_Search_Range(msr_root, msr_id); +#endif + + // returning search node so that it can be used if any reference to + // static node is required + if (temp == NULL) { + SEP_DRV_LOG_TRACE_OUT("Failure"); + return FALSE; + } else { + SEP_DRV_LOG_TRACE_OUT("Success"); + return TRUE; + } +} + +extern DRV_BOOL +PMU_LIST_Check_PCI(U8 bus_id, U8 dev_num, U8 func_num, U32 offset) +{ + PMU_PCI_INFO_NODE key; + PMU_SEARCH_NODE *temp; + + SEP_DRV_LOG_TRACE_IN(""); + + if (pmu_info_index == -1 || pci_root == NULL) { + SEP_DRV_LOG_TRACE_OUT("Success"); + return FALSE; + } + + SEP_DRV_MEMSET(&key, 0, sizeof(PMU_PCI_INFO_NODE)); + + key.u.s.dev = dev_num; + key.u.s.func = func_num; + key.u.s.offset = offset; + + temp = pmu_list_Binary_Search(pci_root, key.u.reg); + if (temp != NULL) { + SEP_DRV_LOG_TRACE_OUT("Success"); + return TRUE; + } else { + SEP_DRV_LOG_TRACE_OUT("Success"); + return FALSE; + } +} + +extern DRV_BOOL +PMU_LIST_Check_MMIO(PMU_MMIO_BAR_INFO_NODE primary, + PMU_MMIO_BAR_INFO_NODE secondary, + U32 offset) +{ + PMU_SEARCH_NODE *temp; + U64 key; + PMU_MMIO_UNIT_INFO_NODE *unit_info = NULL; + DRV_BOOL ret = FALSE; + + SEP_DRV_LOG_TRACE_IN(""); + + if (pmu_info_index == -1 || mmio_root == NULL) { + SEP_DRV_LOG_TRACE_OUT("Success"); + return FALSE; + } + + if (primary.bar_prog_type == MMIO_SINGLE_BAR_TYPE) { + key = (U64)primary.u.reg << 32 | offset; + } else if (primary.bar_prog_type == MMIO_DUAL_BAR_TYPE) { + key = (U64)secondary.u.reg << 32 | offset; + } else if (primary.bar_prog_type == MMIO_DIRECT_BAR_TYPE) { + key = (U64)primary.mask << 16 | offset; + } else { + SEP_DRV_LOG_TRACE("Invalid BAR prog type %d", + primary.bar_prog_type); + SEP_DRV_LOG_TRACE_OUT("Success"); + return FALSE; + } + + temp = pmu_list_Binary_Search(mmio_root, key); + if (temp != NULL) { + if (primary.bar_prog_type == MMIO_DIRECT_BAR_TYPE) { + ret = TRUE; + } else if (primary.bar_prog_type == MMIO_SINGLE_BAR_TYPE) { + unit_info = (PMU_MMIO_UNIT_INFO_NODE *)temp->addr; + if (unit_info && + (unit_info->primary.mask == primary.mask) && + (unit_info->primary.shift == primary.shift)) { + ret = TRUE; + } + } else if (primary.bar_prog_type == MMIO_DUAL_BAR_TYPE) { + unit_info = (PMU_MMIO_UNIT_INFO_NODE *)temp->addr; + if (unit_info && + (unit_info->secondary.mask == secondary.mask) && + (unit_info->secondary.shift == secondary.shift) && + (unit_info->primary.u.s.offset == + primary.u.s.offset) && + (unit_info->primary.mask == primary.mask) && + (unit_info->primary.shift == primary.shift)) { + ret = TRUE; + } + } + } + + SEP_DRV_LOG_TRACE_OUT("Success"); + return ret; +} + +extern OS_STATUS +PMU_LIST_Initialize(S32 *idx) +{ + U64 rax, rbx, rcx, rdx; + U32 family, model, stepping; + U16 perfmon_ver; + + SEP_DRV_LOG_TRACE_IN(""); + + UTILITY_Read_Cpuid(0x1, &rax, &rbx, &rcx, &rdx); + + family = (U32)(rax >> 8 & 0x0f); + model = (U32)(rax 
>> 12 & 0xf0); /* extended model bits */
+	model |= (U32)(rax >> 4 & 0x0f);
+	stepping = (U32)(rax & 0x0f);
+
+	pmu_info_index = -1;
+	pmu_list_Lookup_PMU_Info(pmu_info_list, family, model, stepping);
+
+	if (pmu_info_index == -1) {
+		UTILITY_Read_Cpuid(0xA, &rax, &rbx, &rcx, &rdx);
+		perfmon_ver = (U16)(rax & 0xFF);
+		if (family == 0x6 && perfmon_ver >= 4) {
+			arch_pmu_info_used = TRUE;
+			if (perfmon_ver > MAX_PERFMON_VERSION) {
+				pmu_info_index = MAX_PERFMON_VERSION;
+			} else {
+				pmu_info_index = perfmon_ver;
+			}
+		}
+	}
+
+	if (idx) {
+		*idx = pmu_info_index;
+	}
+
+	SEP_DRV_LOG_LOAD(
+		"PMU check enabled! F%x.M%x.S%x index=%d drv_type=%s arch_pmu_info_used=%s\n",
+		family, model, stepping, pmu_info_index, drv_type_str,
+		(arch_pmu_info_used) ? "yes" : "no");
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+	return OS_SUCCESS;
+}
+
+extern OS_STATUS
+PMU_LIST_Build_MSR_List(void)
+{
+	S32 status = OS_SUCCESS;
+	PMU_MSR_INFO_NODE **msr_info_list = NULL;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	// Check the index first: it is -1 when no PMU info was matched,
+	// and must not be used to index the info tables in that case.
+	if (pmu_info_index == -1) {
+		SEP_DRV_LOG_LOAD("No MSR list information detected!\n");
+		SEP_DRV_LOG_TRACE_OUT("Success");
+		return status;
+	}
+
+	if (arch_pmu_info_used) {
+		msr_info_list = architectural_pmu_info_list[pmu_info_index]
+					.msr_info_list;
+	} else {
+		msr_info_list = pmu_info_list[pmu_info_index].msr_info_list;
+	}
+
+	if (!msr_info_list) {
+		SEP_DRV_LOG_LOAD("No MSR list information detected!\n");
+		SEP_DRV_LOG_TRACE_OUT("Success");
+		return status;
+	}
+
+#if !defined(USE_RANGE_OPTIMIZATION)
+	status = pmu_list_Create_MSR_Tree((arch_pmu_info_used) ?
+						  architectural_pmu_info_list :
+						  pmu_info_list);
+#else
+	status = pmu_list_Create_MSR_Tree_Range(
+		(arch_pmu_info_used) ? architectural_pmu_info_list :
+				       pmu_info_list);
+#endif
+
+	pmu_list_Add_Detected_MSR();
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+
+	return status;
+}
+
+extern OS_STATUS
+PMU_LIST_Build_PCI_List(void)
+{
+	U32 unit_idx = 0;
+	U32 reg_idx = 0;
+	PMU_PCI_INFO_NODE key;
+	PMU_PCI_UNIT_INFO_NODE *unit_info_list = NULL;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	// As above, validate pmu_info_index before indexing the info tables.
+	if (pmu_info_index == -1) {
+		SEP_DRV_LOG_LOAD("No PCI list information detected!\n");
+		SEP_DRV_LOG_TRACE_OUT("Success");
+		return OS_SUCCESS;
+	}
+
+	if (arch_pmu_info_used) {
+		unit_info_list = architectural_pmu_info_list[pmu_info_index]
+					 .pci_info_list;
+	} else {
+		unit_info_list = pmu_info_list[pmu_info_index].pci_info_list;
+	}
+
+	if (!unit_info_list) {
+		SEP_DRV_LOG_LOAD("No PCI list information detected!\n");
+		SEP_DRV_LOG_TRACE_OUT("Success");
+		return OS_SUCCESS;
+	}
+
+	SEP_DRV_MEMSET(&key, 0, sizeof(PMU_PCI_INFO_NODE));
+
+	while (unit_info_list[unit_idx]
+		       .reg_offset_list) { //Iterate through unit list
+		reg_idx = 0;
+		key.u.s.dev = (U8)unit_info_list[unit_idx].dev;
+		key.u.s.func = (U8)unit_info_list[unit_idx].func;
+
+		while (unit_info_list[unit_idx].reg_offset_list[reg_idx] !=
+		       0x0) { //Iterate through offset list
+			key.u.s.offset = unit_info_list[unit_idx]
+						 .reg_offset_list[reg_idx];
+			pci_root = pmu_list_Insert_Node(
+				pci_root, key.u.reg,
+				(void *)(&unit_info_list[unit_idx]));
+			reg_idx++;
+		}
+		unit_idx++;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+	return OS_SUCCESS;
+}
+
+extern OS_STATUS
+PMU_LIST_Build_MMIO_List(void)
+{
+	U32 unit_idx = 0;
+	U32 reg_idx = 0;
+	U64 key;
+	PMU_MMIO_UNIT_INFO_NODE *unit_info_list = NULL;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	// As above, validate pmu_info_index before indexing the info tables.
+	if (pmu_info_index == -1) {
+		SEP_DRV_LOG_LOAD("No MMIO list information detected!\n");
+		SEP_DRV_LOG_TRACE_OUT("Success");
+		return OS_SUCCESS;
+	}
+
+	if (arch_pmu_info_used) {
+		unit_info_list = architectural_pmu_info_list[pmu_info_index]
+					 .mmio_info_list;
+	} else {
+		unit_info_list = pmu_info_list[pmu_info_index].mmio_info_list;
+	}
+
+	if (!unit_info_list) {
+		SEP_DRV_LOG_LOAD("No MMIO list information detected!\n");
+		SEP_DRV_LOG_TRACE_OUT("Success");
+		return OS_SUCCESS;
+	}
+
+	SEP_DRV_MEMSET(&key, 0, sizeof(U64));
+
+	//Iterate through unit list
+	while
(unit_info_list[unit_idx].reg_offset_list) { + reg_idx = 0; + //Iterate through offset list + while (unit_info_list[unit_idx].reg_offset_list[reg_idx] != 0x0) { + switch (unit_info_list[unit_idx].primary.bar_prog_type) { + case MMIO_SINGLE_BAR_TYPE: + key = (U64)unit_info_list[unit_idx].primary.u.reg + << 32 | + (U64)unit_info_list[unit_idx] + .reg_offset_list[reg_idx]; + mmio_root = pmu_list_Insert_Node( + mmio_root, key, + (void *)(&unit_info_list[unit_idx])); + break; + case MMIO_DUAL_BAR_TYPE: + key = (U64)unit_info_list[unit_idx] + .secondary.u.reg + << 32 | + (U64)unit_info_list[unit_idx] + .reg_offset_list[reg_idx]; + mmio_root = pmu_list_Insert_Node( + mmio_root, key, + (void *)(&unit_info_list[unit_idx])); + break; + case MMIO_DIRECT_BAR_TYPE: + key = (U64)unit_info_list[unit_idx].primary.mask + << 16 | + (U64)unit_info_list[unit_idx] + .reg_offset_list[reg_idx]; + mmio_root = pmu_list_Insert_Node( + mmio_root, key, + (void *)(&unit_info_list[unit_idx])); + break; + } + reg_idx++; + } + unit_idx++; + } + + SEP_DRV_LOG_TRACE_OUT("Success"); + return OS_SUCCESS; +} + +extern OS_STATUS +PMU_LIST_Add_To_MMIO_List(U64 key, void *addr) +{ + SEP_DRV_LOG_TRACE_IN(""); + mmio_root = pmu_list_Insert_Node(mmio_root, key, addr); + SEP_DRV_LOG_TRACE_OUT("Success"); + return OS_SUCCESS; +} + +extern OS_STATUS +PMU_LIST_Clean_Up(void) +{ + SEP_DRV_LOG_TRACE_IN(""); + + pmu_info_index = -1; + + if (msr_root) { + pmu_list_Delete_Tree(msr_root); + msr_root = NULL; + } + + if (pci_root) { + pmu_list_Delete_Tree(pci_root); + pci_root = NULL; + } + + if (mmio_root) { + pmu_list_Delete_Tree(mmio_root); + mmio_root = NULL; + } + + SEP_DRV_LOG_TRACE_OUT("Success"); + return OS_SUCCESS; +} + diff --git a/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c b/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c new file mode 100644 index 00000000000000..3f9597aea6d10c --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/sepdrv_p_state.c @@ -0,0 +1,97 @@ +/**** + Copyright (C) 2013 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "inc/control.h" +#include "inc/utility.h" +#include "inc/sepdrv_p_state.h" + +/*! 
+ * @fn OS_STATUS SEPDRV_P_STATE_Read
+ *
+ * @brief Reads the APERF and MPERF counters into the buffer provided
+ *
+ * @param buffer - buffer to read the counts into
+ *
+ * @param pcpu - pcpu struct that contains the previous APERF/MPERF values
+ *
+ * @return OS_SUCCESS if the read succeeded, otherwise an error code
+ *
+ * @note
+ */
+extern OS_STATUS
+SEPDRV_P_STATE_Read(S8 *buffer, CPU_STATE pcpu)
+{
+	U64 *samp = (U64 *)buffer;
+	U64 new_APERF = 0;
+	U64 new_MPERF = 0;
+
+	SEP_DRV_LOG_TRACE_IN("Buffer: %p, pcpu: %p.", buffer, pcpu);
+
+	if ((samp == NULL) || (pcpu == NULL)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("OS_INVALID (!samp || !pcpu).");
+		return OS_INVALID;
+	}
+
+	new_APERF = SYS_Read_MSR(DRV_APERF_MSR);
+	new_MPERF = SYS_Read_MSR(DRV_MPERF_MSR);
+
+	if (CPU_STATE_last_p_state_valid(pcpu)) {
+		// There is a previous APERF/MPERF value; compute the deltas.
+		// Unsigned 64-bit subtraction yields the correct delta even
+		// when a counter has wrapped around since the last read.
+		samp[1] = new_APERF - CPU_STATE_last_aperf(pcpu);
+		samp[0] = new_MPERF - CPU_STATE_last_mperf(pcpu);
+	} else {
+		// There are no previous valid APERF/MPERF values, so no
+		// delta calculation is possible for this first read.
+		CPU_STATE_last_p_state_valid(pcpu) = TRUE;
+		samp[0] = 0;
+		samp[1] = 0;
+	}
+
+	CPU_STATE_last_aperf(pcpu) = new_APERF;
+	CPU_STATE_last_mperf(pcpu) = new_MPERF;
+
+	SEP_DRV_LOG_TRACE_OUT("OS_SUCCESS.");
+	return OS_SUCCESS;
+}
+
diff --git a/drivers/platform/x86/sepdk/sep/silvermont.c b/drivers/platform/x86/sepdk/sep/silvermont.c
new file mode 100644
index 00000000000000..d84c87bb487e17
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/silvermont.c
@@ -0,0 +1,1164 @@
+/****
+ Copyright (C) 2011 Intel Corporation. All Rights Reserved.
+
+ This file is part of SEP Development Kit.
+
+ SEP Development Kit is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation.
+
+ SEP Development Kit is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ As a special exception, you may use this file as part of a free software
+ library without restriction. Specifically, if other files instantiate
+ templates or use macros or inline functions from this file, or you compile
+ this file and link it with other files to produce an executable, this
+ file does not by itself cause the resulting executable to be covered by
+ the GNU General Public License. This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "lwpmudrv.h"
+#include "utility.h"
+#include "control.h"
+#include "output.h"
+#include "silvermont.h"
+#include "ecb_iterators.h"
+#include "pebs.h"
+#include "apic.h"
+
+extern U64 *read_counter_info;
+extern DRV_CONFIG drv_cfg;
+extern U64 *interrupt_counts;
+extern DRV_SETUP_INFO_NODE req_drv_setup_info;
+extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
+static U32 restore_reg_addr[3];
+
+typedef struct SADDR_S {
+	S64 addr : SILVERMONT_LBR_DATA_BITS;
+} SADDR;
+
+#define SADDR_addr(x) ((x).addr)
+#define ADD_ERRATA_FIX_FOR_FIXED_CTR0
+#define MSR_ENERGY_MULTIPLIER 0x606 // Energy Multiplier MSR
+
+#if defined(DRV_IA32)
+#define ENABLE_IA32_PERFEVTSEL0_CTR 0x00400000
+#define ENABLE_FIXED_CTR0 0x00000003
+#elif defined(DRV_EM64T)
+#define ENABLE_IA32_PERFEVTSEL0_CTR 0x0000000000400000
+#define ENABLE_FIXED_CTR0 0x0000000000000003
+#else
+#error "Unexpected Architecture seen"
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn void silvermont_Write_PMU(param)
+ *
+ * @param param dummy parameter which is not used
+ *
+ * @return None No return needed
+ *
+ * @brief Initial set up of the PMU registers
+ *
+ * Special Notes
+ * Initial write of PMU registers.
+ * Walk through the entries and write the value of each register accordingly.
+ * Assumption: For CCCR registers the enable bit is set to value 0.
+ * When current_group is 0, this is the first time this routine is called;
+ * initialize the locks and set up the EM tables.
+ */
+static VOID
+silvermont_Write_PMU(VOID *param)
+{
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 dev_idx;
+	DEV_CONFIG pcfg;
+	DISPATCH dispatch;
+	EVENT_CONFIG ec;
+
+	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+	ec = LWPMU_DEVICE_ec(&devices[dev_idx]);
+	dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]);
+
+	if (!DEV_CONFIG_num_events(pcfg)) {
+		SEP_DRV_LOG_TRACE_OUT("No events for this device.");
+		return;
+	}
+
+	if (CPU_STATE_current_group(pcpu) == 0) {
+		if (EVENT_CONFIG_mode(ec) != EM_DISABLED) {
+			U32 index;
+			U32 st_index;
+			U32 j;
+
+			/* Save all the initialization values away into an array for Event Multiplexing. */
+			for (j = 0; j < EVENT_CONFIG_num_groups(ec); j++) {
+				CPU_STATE_current_group(pcpu) = j;
+				st_index = CPU_STATE_current_group(pcpu) *
+					   EVENT_CONFIG_max_gp_events(ec);
+				FOR_EACH_REG_CORE_OPERATION (
+					pecb, i, PMU_OPERATION_DATA_GP) {
+					index = st_index + i -
+						ECB_operations_register_start(
+							pecb,
+							PMU_OPERATION_DATA_GP);
+					CPU_STATE_em_tables(pcpu)[index] =
+						ECB_entries_reg_value(pecb, i);
+				}
+				END_FOR_EACH_REG_CORE_OPERATION;
+			}
+			/* Reset the current group to the very first one. */
+			CPU_STATE_current_group(pcpu) =
+				this_cpu % EVENT_CONFIG_num_groups(ec);
+		}
+	}
+
+	if (dispatch->hw_errata) {
+		dispatch->hw_errata();
+	}
+
+	FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_ALL_REG) {
+		/*
+		 * Writing the GLOBAL Control register enables the PMU to start counting.
+		 * So write 0 into the register to prevent any counting from starting.
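+		 * Counting is (re)enabled later, by silvermont_Enable_PMU,
+		 * once the driver state is RUNNING.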
+ */ + if (i == ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + /* + * PEBS is enabled for this collection session + */ + if (DRV_SETUP_INFO_pebs_accessible(&req_drv_setup_info) && + i == ECB_SECTION_REG_INDEX(pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS) && + ECB_entries_reg_value(pecb, i)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + continue; + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); +#if defined(MYDEBUG) + { + U64 val = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Write reg 0x%x --- value 0x%llx -- read 0x%llx.", + ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i), val); + } +#endif + } + END_FOR_EACH_REG_CORE_OPERATION; + +#if defined(ADD_ERRATA_FIX_FOR_FIXED_CTR0) + { + U64 fixed_ctr0 = SYS_Read_MSR(IA32_FIXED_CTRL); + fixed_ctr0 = (fixed_ctr0 & (ENABLE_FIXED_CTR0)); + if (fixed_ctr0 != 0x0) { + U64 val = SYS_Read_MSR(IA32_PERFEVTSEL0); + val |= ENABLE_IA32_PERFEVTSEL0_CTR; + SYS_Write_MSR(IA32_PERFEVTSEL0, val); + } + } +#endif + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void silvermont_Disable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Zero out the global control register. This automatically disables the PMU counters. + * + */ +static VOID +silvermont_Disable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("No programming for this device in this group."); + return; + } + + if (GET_DRIVER_STATE() != DRV_STATE_RUNNING) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void silvermont_Enable_PMU(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Set the enable bit for all the Control registers + * + */ +static VOID +silvermont_Enable_PMU(PVOID param) +{ + /* + * Get the value from the event block + * 0 == location of the global control reg for this block. 
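+	 * (The code below locates it via ECB_SECTION_REG_INDEX with
+	 * GLOBAL_CTRL_REG_INDEX rather than hard-coding slot 0.)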
+ * Generalize this location awareness when possible + */ + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (KVM_guest_mode) { + SYS_Write_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + 0LL); + } + if (GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + APIC_Enable_Pmi(); + if (CPU_STATE_reset_mask(pcpu)) { + SEP_DRV_LOG_TRACE("Overflow reset mask %llx.", + CPU_STATE_reset_mask(pcpu)); + // Reinitialize the global overflow control register + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + CPU_STATE_reset_mask(pcpu) = 0LL; + } + if (CPU_STATE_group_swap(pcpu)) { + CPU_STATE_group_swap(pcpu) = 0; + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + if (DEV_CONFIG_pebs_mode(pcfg)) { + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + PEBS_ENABLE_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + } + SYS_Write_MSR( + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + ECB_entries_reg_value( + pecb, + ECB_SECTION_REG_INDEX( + pecb, DEBUG_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); +#if defined(MYDEBUG) + { + U64 val; + val = SYS_Read_MSR(ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS))); + SEP_DRV_LOG_TRACE("Write reg 0x%x--- read 0x%llx.", + ECB_entries_reg_id( + pecb, + ECB_SECTION_REG_INDEX( + pecb, + GLOBAL_CTRL_REG_INDEX, + PMU_OPERATION_GLOBAL_REGS)), + val); + } +#endif + } + } + SEP_DRV_LOG_TRACE("Reenabled PMU with value 0x%llx.", + ECB_entries_reg_value(pecb, 0)); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn silvermont_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. 
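+ * Each counter value is stored at the offset computed by
+ * EMON_BUFFER_CORE_EVENT_OFFSET() from the per-thread offset map in
+ * emon_buffer_driver_helper, so the buffer layout matches what the
+ * EMON reader expects.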
+ * + */ +static void +silvermont_Read_PMU_Data(PVOID param, U32 dev_idx) +{ + U32 j; + U64 *buffer = (U64 *)param; + U32 this_cpu; + CPU_STATE pcpu; + ECB pecb; + U32 cur_grp; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + SEP_DRV_LOG_TRACE("PMU control_data 0x%p, buffer 0x%p.", + LWPMU_DEVICE_PMU_register_data(&devices[dev_idx]), + buffer); + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + j = EMON_BUFFER_CORE_EVENT_OFFSET( + EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map( + emon_buffer_driver_helper)[this_cpu], + ECB_entries_core_event_id(pecb, i)); + + buffer[j] = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, event_id=%u", j, + buffer[j], this_cpu, + ECB_entries_core_event_id(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn void silvermont_Check_Overflow(masks) + * + * @param masks the mask structure to populate + * + * @return None No return needed + * + * @brief Called by the data processing method to figure out which registers have overflowed. + * + */ +static void +silvermont_Check_Overflow(DRV_MASKS masks) +{ + U32 index; + U64 overflow_status = 0; + U32 this_cpu; + BUFFER_DESC bd; + CPU_STATE pcpu; + ECB pecb; + U32 dev_idx; + U32 cur_grp; + DEV_CONFIG pcfg; + DISPATCH dispatch; + U64 overflow_status_clr = 0; + DRV_EVENT_MASK_NODE event_flag; + + SEP_DRV_LOG_TRACE_IN("Masks: %p.", masks); + + this_cpu = CONTROL_THIS_CPU(); + bd = &cpu_buf[this_cpu]; + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + cur_grp = CPU_STATE_current_group(pcpu); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + // initialize masks + DRV_MASKS_masks_num(masks) = 0; + + overflow_status = SYS_Read_MSR(ECB_entries_reg_id( + pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_STATUS_REG_INDEX, + PMU_OPERATION_GLOBAL_STATUS))); + + if (DEV_CONFIG_pebs_mode(pcfg)) { + overflow_status = PEBS_Overflowed(this_cpu, overflow_status, 0); + } + overflow_status_clr = overflow_status; + + if (dispatch->check_overflow_gp_errata) { + overflow_status = dispatch->check_overflow_gp_errata( + pecb, &overflow_status_clr); + } + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, status 0x%llx.", this_cpu, + overflow_status); + index = 0; + BUFFER_DESC_sample_count(bd) = 0; + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_fixed_reg_get(pecb, i)) { + index = i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_FIXED) + + 0x20; + if (dispatch->check_overflow_errata) { + overflow_status = + dispatch->check_overflow_errata( + pecb, i, overflow_status); + } + } else if (ECB_entries_is_gp_reg_get(pecb, i)) { + index = i - ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + } else { + continue; + } + if (overflow_status & ((U64)1 << index)) { + SEP_DRV_LOG_TRACE("Overflow: cpu: %d, index %d.", + this_cpu, index); + SEP_DRV_LOG_TRACE("Register 0x%x --- val 0%llx.", + 
ECB_entries_reg_id(pecb, i), + SYS_Read_MSR( + ECB_entries_reg_id(pecb, i))); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + + if (DRV_CONFIG_enable_cp_mode(drv_cfg)) { + /* Increment the interrupt count. */ + if (interrupt_counts) { + interrupt_counts + [this_cpu * DRV_CONFIG_num_events( + drv_cfg) + + ECB_entries_event_id_index( + pecb, i)] += 1; + } + } + + DRV_EVENT_MASK_bitFields1(&event_flag) = (U8)0; + if (ECB_entries_fixed_reg_get(pecb, i)) { + CPU_STATE_p_state_counting(pcpu) = 1; + } + if (ECB_entries_precise_get(pecb, i)) { + DRV_EVENT_MASK_precise(&event_flag) = 1; + } + if (ECB_entries_lbr_value_get(pecb, i)) { + DRV_EVENT_MASK_lbr_capture(&event_flag) = 1; + } + if (ECB_entries_uncore_get(pecb, i)) { + DRV_EVENT_MASK_uncore_capture(&event_flag) = 1; + } + if (ECB_entries_em_trigger_get(pecb, i)) { + DRV_EVENT_MASK_trigger(&event_flag) = 1; + } + + if (DRV_MASKS_masks_num(masks) < MAX_OVERFLOW_EVENTS) { + DRV_EVENT_MASK_bitFields1( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + DRV_EVENT_MASK_bitFields1(&event_flag); + DRV_EVENT_MASK_event_idx( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_event_id_index(pecb, i); + DRV_EVENT_MASK_desc_id( + DRV_MASKS_eventmasks(masks) + + DRV_MASKS_masks_num(masks)) = + ECB_entries_desc_id(pecb, i); + DRV_MASKS_masks_num(masks)++; + } else { + SEP_DRV_LOG_ERROR("The array for event masks is full."); + } + + SEP_DRV_LOG_TRACE("Overflow -- 0x%llx, index 0x%llx.", + overflow_status, (U64)1 << index); + SEP_DRV_LOG_TRACE("Slot# %d, reg_id 0x%x, index %d.", i, + ECB_entries_reg_id(pecb, i), index); + if (ECB_entries_event_id_index(pecb, i) == + CPU_STATE_trigger_event_num(pcpu)) { + CPU_STATE_trigger_count(pcpu)--; + } + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + CPU_STATE_reset_mask(pcpu) = overflow_status_clr; + // Reinitialize the global overflow control register + SYS_Write_MSR(IA32_PERF_GLOBAL_OVF_CTRL, overflow_status_clr); + + SEP_DRV_LOG_TRACE_OUT(""); +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn silvermont_Swap_Group(restart) + * + * @param restart dummy parameter which is not used + * + * @return None No return needed + * + * @brief Perform the mechanics of swapping the event groups for event mux operations + * + * Special Notes + * Swap function for event multiplexing. + * Freeze the counting. + * Swap the groups. + * Enable the counting. 
+ * Reset the event trigger count + * + */ +static VOID +silvermont_Swap_Group(DRV_BOOL restart) +{ + U32 index; + U32 next_group; + U32 st_index; + U32 this_cpu = CONTROL_THIS_CPU(); + CPU_STATE pcpu = &pcb[this_cpu]; + U32 dev_idx; + DEV_CONFIG pcfg; + DISPATCH dispatch; + EVENT_CONFIG ec; + + SEP_DRV_LOG_TRACE_IN("Dummy restart: %u.", restart); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + dispatch = LWPMU_DEVICE_dispatch(&devices[dev_idx]); + ec = LWPMU_DEVICE_ec(&devices[dev_idx]); + st_index = + CPU_STATE_current_group(pcpu) * EVENT_CONFIG_max_gp_events(ec); + next_group = (CPU_STATE_current_group(pcpu) + 1); + + if (!DEV_CONFIG_num_events(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("No events for this device."); + return; + } + + if (next_group >= EVENT_CONFIG_num_groups(ec)) { + next_group = 0; + } + + SEP_DRV_LOG_TRACE("Current group : 0x%x.", + CPU_STATE_current_group(pcpu)); + SEP_DRV_LOG_TRACE("Next group : 0x%x.", next_group); + + // Save the counters for the current group + if (!DRV_CONFIG_event_based_counts(drv_cfg)) { + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_GP) { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + CPU_STATE_em_tables(pcpu)[index] = + SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SEP_DRV_LOG_TRACE("Saved value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + CPU_STATE_current_group(pcpu) = next_group; + + if (dispatch->hw_errata) { + dispatch->hw_errata(); + } + + // First write the GP control registers (eventsel) + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_CTRL_GP) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_event_based_counts(drv_cfg)) { + // In EBC mode, reset the counts for all events except for trigger event + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_event_id_index(pecb, i) != + CPU_STATE_trigger_event_num(pcpu)) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + } else { + // Then write the gp count registers + st_index = CPU_STATE_current_group(pcpu) * + EVENT_CONFIG_max_gp_events(ec); + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_GP) { + index = st_index + i - + ECB_operations_register_start( + pecb, PMU_OPERATION_DATA_GP); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + SEP_DRV_LOG_TRACE("Restore value for reg 0x%x : 0x%llx.", + ECB_entries_reg_id(pecb, i), + CPU_STATE_em_tables(pcpu)[index]); + } + END_FOR_EACH_REG_CORE_OPERATION; + } + + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_OCR) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_CORE_OPERATION; + + /* + * reset the em factor when a group is swapped + */ + CPU_STATE_trigger_count(pcpu) = EVENT_CONFIG_em_factor(ec); + + /* + * The enable routine needs to rewrite the control registers + */ + CPU_STATE_reset_mask(pcpu) = 0LL; + CPU_STATE_group_swap(pcpu) = 1; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn silvermont_Initialize(params)
+ *
+ * @param params dummy parameter which is not used
+ *
+ * @return None No return needed
+ *
+ * @brief Initialize the PMU in preparation for collection
+ *
+ * Special Notes
+ * Saves the relevant PMU state (minimal set of MSRs required
+ * to avoid conflicts with other Linux tools, such as Oprofile).
+ * This function should be called in parallel across all CPUs
+ * prior to the start of sampling, before PMU state is changed.
+ *
+ */
+static VOID
+silvermont_Initialize(VOID *param)
+{
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	ECB pecb;
+	U32 dev_idx;
+	U32 cur_grp;
+	DEV_CONFIG pcfg;
+
+	SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	dev_idx = core_to_dev_map[this_cpu];
+	pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+
+	if (pcb == NULL) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb).");
+		return;
+	}
+
+	pcpu = &pcb[this_cpu];
+	cur_grp = CPU_STATE_current_group(pcpu);
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+
+	CPU_STATE_pmu_state(pcpu) = pmu_state + (this_cpu * 3);
+	if (CPU_STATE_pmu_state(pcpu) == NULL) {
+		SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to save PMU state on CPU %d!",
+					      this_cpu);
+		return;
+	}
+
+	restore_reg_addr[0] = ECB_entries_reg_id(
+		pecb, ECB_SECTION_REG_INDEX(pecb, DEBUG_CTRL_REG_INDEX,
+					    PMU_OPERATION_GLOBAL_REGS));
+	restore_reg_addr[1] = ECB_entries_reg_id(
+		pecb, ECB_SECTION_REG_INDEX(pecb, GLOBAL_CTRL_REG_INDEX,
+					    PMU_OPERATION_GLOBAL_REGS));
+	restore_reg_addr[2] = ECB_entries_reg_id(
+		pecb, ECB_SECTION_REG_INDEX(pecb, FIXED_CTRL_REG_INDEX,
+					    PMU_OPERATION_GLOBAL_REGS));
+
+	// save the original PMU state on this CPU (NOTE: must only be called ONCE per collection)
+	CPU_STATE_pmu_state(pcpu)[0] = SYS_Read_MSR(restore_reg_addr[0]);
+	CPU_STATE_pmu_state(pcpu)[1] = SYS_Read_MSR(restore_reg_addr[1]);
+	CPU_STATE_pmu_state(pcpu)[2] = SYS_Read_MSR(restore_reg_addr[2]);
+
+	SEP_DRV_LOG_TRACE("Saving PMU state on CPU %d:", this_cpu);
+	SEP_DRV_LOG_TRACE("  msr_val(IA32_DEBUG_CTRL)=0x%llx.",
+			  CPU_STATE_pmu_state(pcpu)[0]);
+	SEP_DRV_LOG_TRACE("  msr_val(IA32_PERF_GLOBAL_CTRL)=0x%llx.",
+			  CPU_STATE_pmu_state(pcpu)[1]);
+	SEP_DRV_LOG_TRACE("  msr_val(IA32_FIXED_CTRL)=0x%llx.",
+			  CPU_STATE_pmu_state(pcpu)[2]);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn silvermont_Destroy(params)
+ *
+ * @param params dummy parameter which is not used
+ *
+ * @return None No return needed
+ *
+ * @brief Reset the PMU after collection
+ *
+ * Special Notes
+ * Restores the PMU state previously saved in silvermont_Initialize.
+ * This function should be called in parallel across all CPUs
+ * after sampling collection ends/terminates.
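+ * Note that the three control MSRs recorded in restore_reg_addr are
+ * cleared to 0 here rather than rewritten with the saved values.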
+ * + */ +static VOID +silvermont_Destroy(VOID *param) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + if (pcb == NULL) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pcb)."); + return; + } + + preempt_disable(); + this_cpu = CONTROL_THIS_CPU(); + preempt_enable(); + pcpu = &pcb[this_cpu]; + + if (CPU_STATE_pmu_state(pcpu) == NULL) { + SEP_DRV_LOG_WARNING_TRACE_OUT("Unable to restore PMU state on CPU %d!", + this_cpu); + return; + } + + SEP_DRV_LOG_TRACE("Clearing PMU state on CPU %d:", this_cpu); + SEP_DRV_LOG_TRACE(" msr_val(IA32_DEBUG_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_PERF_GLOBAL_CTRL)=0x0."); + SEP_DRV_LOG_TRACE(" msr_val(IA32_FIXED_CTRL)=0."); + + SYS_Write_MSR(restore_reg_addr[0], 0); + SYS_Write_MSR(restore_reg_addr[1], 0); + SYS_Write_MSR(restore_reg_addr[2], 0); + + CPU_STATE_pmu_state(pcpu) = NULL; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * @fn silvermont_Read_LBRs(buffer) + * + * @param IN buffer - pointer to the buffer to write the data into + * @return None + * + * @brief Read all the LBR registers into the buffer provided and return + * + */ +static U64 +silvermont_Read_LBRs(VOID *buffer) +{ + U32 i, count = 0; + U64 *lbr_buf = NULL; + U64 value; + U64 tos_ip_addr = 0; + U64 tos_ptr = 0; + SADDR saddr; + U32 this_cpu; + U32 dev_idx; + LBR lbr; + DEV_CONFIG pcfg; + + SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer); + + this_cpu = CONTROL_THIS_CPU(); + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + lbr = LWPMU_DEVICE_lbr(&devices[dev_idx]); + + if (lbr == NULL) { + SEP_DRV_LOG_TRACE_OUT("No lbr for this device."); + return tos_ip_addr; + } + + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf = (U64 *)buffer; + } + + for (i = 0; i < LBR_num_entries(lbr); i++) { + value = SYS_Read_MSR(LBR_entries_reg_id(lbr, i)); + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + *lbr_buf = value; + } + SEP_DRV_LOG_TRACE("LBR %u, 0x%llx.", i, value); + if (i == 0) { + tos_ptr = value; + } else { + if (LBR_entries_etype(lbr, i) == LBR_ENTRY_FROM_IP) { + if (tos_ptr == count) { + SADDR_addr(saddr) = + value & SILVERMONT_LBR_BITMASK; + tos_ip_addr = (U64)SADDR_addr( + saddr); // Add signed extension + SEP_DRV_LOG_TRACE("Tos_ip_addr %llu, 0x%llx.", + tos_ptr, value); + } + count++; + } + } + if (buffer && DEV_CONFIG_store_lbrs(pcfg)) { + lbr_buf++; + } + } + + SEP_DRV_LOG_TRACE_OUT("Res: %llu.", tos_ip_addr); + return tos_ip_addr; +} + +static VOID +silvermont_Clean_Up(VOID *param) +{ + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS (pecb, i, + PMU_OPERATION_ALL_REG) { + if (ECB_entries_clean_up_get(pecb, i)) { + SEP_DRV_LOG_TRACE("Clean up set --- RegId --- %x.", + ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION_IN_ALL_GRPS; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn silvermont_Read_Counts(param, id) + * + * @param param The read thread node to process + * @param id The event id for the which the sample is generated + * + * @return None No return needed + * + * @brief Read CPU event based counts data and store into the buffer param; + * For the case of the trigger event, store the SAV value. 
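+ * The SAV (sample-after value) is reconstructed from the programmed
+ * reload value as ~(reload - 1), masked to the counter width.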
+ */ +static VOID +silvermont_Read_Counts(PVOID param, U32 id) +{ + U64 *data; + U32 this_cpu; + CPU_STATE pcpu; + U32 dev_idx; + DEV_CONFIG pcfg; + U32 event_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_idx = core_to_dev_map[this_cpu]; + pcfg = LWPMU_DEVICE_pcfg(&devices[dev_idx]); + + if (!DEV_CONFIG_num_events(pcfg)) { + SEP_DRV_LOG_TRACE_OUT("No events for this device."); + return; + } + + if (DEV_CONFIG_ebc_group_id_offset(pcfg)) { + // Write GroupID + data = (U64 *)((S8 *)param + + DEV_CONFIG_ebc_group_id_offset(pcfg)); + *data = CPU_STATE_current_group(pcpu) + 1; + } + + FOR_EACH_REG_CORE_OPERATION (pecb, i, PMU_OPERATION_DATA_ALL) { + if (ECB_entries_counter_event_offset(pecb, i) == 0) { + continue; + } + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, i)); + event_id = ECB_entries_event_id_index(pecb, i); + if (event_id == id) { + *data = ~(ECB_entries_reg_value(pecb, i) - 1) & + ECB_entries_max_bits(pecb, i); + } else { + *data = SYS_Read_MSR(ECB_entries_reg_id(pecb, i)); + SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL); + } + } + END_FOR_EACH_REG_CORE_OPERATION; + + if (DRV_CONFIG_enable_p_state(drv_cfg)) { + CPU_STATE_p_state_counting(pcpu) = 0; + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn U64 silvermont_Platform_Info + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param void + * + * @return value read from the register + * + * Special Notes: + * + */ +static void +silvermont_Platform_Info(PVOID data) +{ + U64 index = 0; + DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; + U64 value = 0; + U64 clock_value = 0; + U64 energy_multiplier; + + SEP_DRV_LOG_TRACE_IN("Data: %p.", data); + + if (!platform_data) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data)."); + return; + } + +#define IA32_MSR_PLATFORM_INFO 0xCE + value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); + +#define IA32_MSR_PSB_CLOCK_STS 0xCD +#define FREQ_MASK_BITS 0x03 + + clock_value = SYS_Read_MSR(IA32_MSR_PSB_CLOCK_STS); + index = clock_value & FREQ_MASK_BITS; + DRV_PLATFORM_INFO_info(platform_data) = value; + DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = index; + +#undef IA32_MSR_PLATFORM_INFO +#undef IA32_MSR_PSB_CLOCK_STS +#undef FREQ_MASK_BITS + energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER); + SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier); + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = + (U32)(energy_multiplier & 0x00001F00) >> 8; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn VOID knights_Platform_Info + * + * @brief Reads the MSR_PLATFORM_INFO register if present + * + * @param void + * + * @return value read from the register + * + * Special Notes: + * + */ +static VOID +knights_Platform_Info(PVOID data) +{ + DRV_PLATFORM_INFO platform_data = (DRV_PLATFORM_INFO)data; + U64 value = 0; + U64 energy_multiplier; + + SEP_DRV_LOG_TRACE_IN("Data: %p.", data); + + if (!platform_data) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!platform_data)."); + return; + } + +#define IA32_MSR_PLATFORM_INFO 0xCE + value = SYS_Read_MSR(IA32_MSR_PLATFORM_INFO); + + DRV_PLATFORM_INFO_info(platform_data) = value; + DRV_PLATFORM_INFO_ddr_freq_index(platform_data) = 0; + energy_multiplier = SYS_Read_MSR(MSR_ENERGY_MULTIPLIER); + SEP_DRV_LOG_TRACE("MSR_ENERGY_MULTIPLIER: %llx.", energy_multiplier); + DRV_PLATFORM_INFO_energy_multiplier(platform_data) = + (U32)(energy_multiplier & 0x00001F00) >> 8; + + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE silvermont_dispatch = { + .init = silvermont_Initialize, + .fini = silvermont_Destroy, + .write = silvermont_Write_PMU, + .freeze = silvermont_Disable_PMU, + .restart = silvermont_Enable_PMU, + .read_data = silvermont_Read_PMU_Data, + .check_overflow = silvermont_Check_Overflow, + .swap_group = silvermont_Swap_Group, + .read_lbrs = silvermont_Read_LBRs, + .cleanup = silvermont_Clean_Up, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = silvermont_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = silvermont_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; + +DISPATCH_NODE knights_dispatch = { + .init = silvermont_Initialize, + .fini = silvermont_Destroy, + .write = silvermont_Write_PMU, + .freeze = silvermont_Disable_PMU, + .restart = silvermont_Enable_PMU, + .read_data = silvermont_Read_PMU_Data, + .check_overflow = silvermont_Check_Overflow, + .swap_group = silvermont_Swap_Group, + .read_lbrs = silvermont_Read_LBRs, + .cleanup = silvermont_Clean_Up, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = silvermont_Read_Counts, + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = knights_Platform_Info, + .trigger_read = NULL, + .scan_for_uncore = NULL, + .read_metrics = NULL +}; + diff --git a/drivers/platform/x86/sepdk/sep/sys32.S b/drivers/platform/x86/sepdk/sep/sys32.S new file mode 100644 index 00000000000000..3aaf9e790e9616 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/sys32.S @@ -0,0 +1,197 @@ +# Copyright (C) 2002 Intel Corporation. All Rights Reserved. +# +# This file is part of SEP Development Kit +# +# SEP Development Kit is free software; you can redistribute it +# and/or modify it under the terms of the GNU General Public License +# version 2 as published by the Free Software Foundation. +# +# SEP Development Kit is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with SEP Development Kit; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# As a special exception, you may use this file as part of a free software +# library without restriction. 
Specifically, if other files instantiate
+# templates or use macros or inline functions from this file, or you compile
+# this file and link it with other files to produce an executable, this
+# file does not by itself cause the resulting executable to be covered by
+# the GNU General Public License. This exception does not however
+# invalidate any other reasons why the executable file might be covered by
+# the GNU General Public License.
+
+
+#include <linux/version.h>
+#include <asm/segment.h>
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,20)
+#define USE_KERNEL_PERCPU_SEGMENT_GS
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) && LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,29)
+#define USE_KERNEL_PERCPU_SEGMENT_FS
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+#define USE_KERNEL_PERCPU_SEGMENT_FS
+#define USE_KERNEL_PERCPU_SEGMENT_GS
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+#if !defined(__KERNEL_PERCPU)
+#define __KERNEL_PERCPU __KERNEL_PDA
+#endif
+#endif
+
+#if defined(USE_KERNEL_PERCPU_SEGMENT_GS)
+#if defined(__KERNEL_STACK_CANARY)
+#define SEP_GS_SEG_VALUE __KERNEL_STACK_CANARY
+#else
+#define SEP_GS_SEG_VALUE __KERNEL_PERCPU
+#endif
+#endif
+
+#***********************************************************************
+#
+# SYS_IO_Delay
+#     Add small delay
+#
+# Entry: none
+#
+# Exit: none
+#
+# void SYS_IO_Delay();
+#
+#***********************************************************************
+	.text
+	.align 4
+	.global SYS_IO_Delay
+SYS_IO_Delay:
+	ret
+
+	.global SYS_Get_cs
+SYS_Get_cs:
+	mov %cs, %ax
+	andl $0x0000ffff, %eax
+	ret
+
+	.global SYS_Get_TSC
+SYS_Get_TSC:
+	rdtsc
+	ret
+	.text
+	.align 4
+	.global SYS_Perfvec_Handler
+SYS_Perfvec_Handler:
+	# This is the same as KERNEL's
+	pushl %eax		# Filler for Error Code
+
+	cld
+	pushl %es		# SAVE_ALL macro to access pt_regs
+	pushl %ds		# inside our ISR.
+#if defined(USE_KERNEL_PERCPU_SEGMENT_GS)
+	pushl %gs
+#endif
+#if defined(USE_KERNEL_PERCPU_SEGMENT_FS)
+	pushl %fs
+#endif
+	pushl %eax
+	pushl %ebp
+	pushl %edi
+	pushl %esi
+	pushl %edx
+	pushl %ecx
+	pushl %ebx
+
+	movl $(__KERNEL_DS), %edx	# Use KERNEL DS selector
+	movl %edx,%ds			# Make sure we set Kernel
+	movl %edx,%es			# DS into local DS and ES
+
+#if defined(USE_KERNEL_PERCPU_SEGMENT_GS)
+	movl $(SEP_GS_SEG_VALUE), %edx	# Use kernel percpu segment
+	movl %edx,%gs			# ... and load it into %gs
+#endif
+#if defined(USE_KERNEL_PERCPU_SEGMENT_FS)
+	movl $(__KERNEL_PERCPU), %edx	# Use kernel percpu segment
+	movl %edx,%fs			# ... and load it into %fs
+#endif
+
+	movl %esp, %ebx		# get ready to put *pt_regs on stack
+
+	pushl %ebx		# put *pt_regs on the stack
+	call PMI_Interrupt_Handler
+	addl $0x4, %esp		# pop to nowhere...
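+
+	# PMI_Interrupt_Handler has returned; as on the 64-bit path, the C
+	# code is responsible for ACK'ing the APIC. Unwind the frame in the
+	# reverse order of the pushes above.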
+ + pop %ebx # restore register set + pop %ecx + pop %edx + pop %esi + pop %edi + pop %ebp + pop %eax +#if defined(USE_KERNEL_PERCPU_SEGMENT_FS) + pop %fs +#endif +#if defined(USE_KERNEL_PERCPU_SEGMENT_GS) + pop %gs +#endif + pop %ds + pop %es + pop %eax + + iret +# ---------------------------------------------------------------------------- +# name: get_CSD +# +# description: get the CS descriptor +# +# input: code segment selector +# +# output: code segment descriptor +# ---------------------------------------------------------------------------- + .text + .align 4 + .globl SYS_Get_CSD + +SYS_Get_CSD: + pushl %ebp + movl %esp, %ebp + pushal # save regs + + subl $8,%esp + xorl %eax, %eax + movw 8(%ebp), %ax # eax.lo = cs + sgdt (%esp) # store gdt reg + leal (%esp), %ebx # ebx = gdt reg ptr + movl 2(%ebx), %ecx # ecx = gdt base + xorl %edx, %edx + movw %ax, %dx + andl $4, %edx + cmpl $0, %edx # test ti. GDT? + jz .bsr_10 # ..yes + xorl %edx, %edx + sldt %dx # ..no dx=ldtsel + andb $0xf8, %dl # clear ti,rpl + addl 2(%ebx), %edx # add gdt base + movb 7(%edx), %cl # ecx = ldt base + shll $8, %ecx # .. + movb 4(%edx), %cl # .. + shll $16, %ecx # .. + movw 2(%edx), %cx # .. +.bsr_10: + andb $0xf8, %al # clear ti & rpl + addl %eax, %ecx # add to gdt/ldt + movl (%ecx), %eax # copy code seg + movl 12(%ebp), %edx # ..descriptor (csdlo) + movl %eax, (%edx) # ..descriptor (csdlo) + movl 4(%ecx), %eax # ..from gdt or + movl 16(%ebp), %edx # ..ldt to sample (csdhi) + movl %eax, (%edx) # ..ldt to sample (csdhi) + addl $8,%esp + popal # restore regs + leave + ret diff --git a/drivers/platform/x86/sepdk/sep/sys64.S b/drivers/platform/x86/sepdk/sep/sys64.S new file mode 100644 index 00000000000000..84813f8c054472 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/sys64.S @@ -0,0 +1,147 @@ +# Copyright (C) 2002 Intel Corporation. All Rights Reserved. +# +# This file is part of SEP Development Kit +# +# SEP Development Kit is free software; you can redistribute it +# and/or modify it under the terms of the GNU General Public License +# version 2 as published by the Free Software Foundation. +# +# SEP Development Kit is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with SEP Development Kit; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# As a special exception, you may use this file as part of a free software +# library without restriction. Specifically, if other files instantiate +# templates or use macros or inline functions from this file, or you compile +# this file and link it with other files to produce an executable, this +# file does not by itself cause the resulting executable to be covered by +# the GNU General Public License. This exception does not however +# invalidate any other reasons why the executable file might be covered by +# the GNU General Public License. 
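+
+# 64-bit assembly helpers for the SEP driver: SYS_Get_GDT_Base,
+# SYS_IO_Delay, and the SYS_Perfvec_Handler entry point for the local
+# APIC PERF interrupt vector.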
+ +#include "../inc/asm_helper.h" +#include +#include + +.text + + +#*********************************************************************** +# +# SYS_Get_GDT_Base +# Get the GDT Desc address +# +# Entry: pointer to location to store gdt Desc +# +# Exit: none +# +# void SYS_Get_GDT_Base(U64 *pGdtDesc); +# +#*********************************************************************** + .global SYS_Get_GDT_Base +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) +SYM_FUNC_START(SYS_Get_GDT_Base) +#else +ENTRY(SYS_Get_GDT_Base) +#endif + SGDT (%rdi) + ret +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) +SYM_FUNC_END(SYS_Get_GDT_Base) +#else +ENDPROC(SYS_Get_GDT_Base) +#endif + +#*********************************************************************** +# +# SYS_Get_TSC +# Get the current TSC +# +# Entry: pointer to location to store gdt Desc +# +# Exit: none +# +# void SYS_Get_TSC(U64 *tsc); +# +#*********************************************************************** +# .global SYS_Get_TSC +#SYS_Get_TSC: +# rdtsc +# ret + +#*********************************************************************** +# +# SYS_IO_Delay +# Add a short delay to the instruction stream +# +# Entry: none +# +# Exit: none +# +# void SYS_IO_Delay(void); +# +#*********************************************************************** + .global SYS_IO_Delay +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) +SYM_FUNC_START(SYS_IO_Delay) +#else +ENTRY(SYS_IO_Delay) +#endif + ret +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) +SYM_FUNC_END(SYS_IO_Delay) +#else +ENDPROC(SYS_IO_Delay) +#endif + +# ---------------------------------------------------------------------------- +# name: SYS_PerfVec_Handler +# +# description: ISR entry for local APIC PERF interrupt vector +# +# Input: n/a +# +# Output: n/a +# ---------------------------------------------------------------------------- + + .global SYS_Perfvec_Handler +SYS_Perfvec_Handler: + CFI_STARTPROC + pushq %rax // fake an error code... + cld // cause the kernel likes it this way... + + SAVE_ALL // Save the world! + + movl $MSR_GS_BASE,%ecx // for the moment, do the safe swapgs check + rdmsr + xorl %ebx,%ebx // assume no swapgs (ebx == 0) + testl %edx,%edx + js 1f + swapgs + movl $1,%ebx // ebx == 1 means we did a swapgs +1: movq %rsp,%rdi // pt_regs is the first argument + + // + // ebx is zero if no swap, one if swap + // ebx is preserved in C calling convention... + // + // NOTE: the C code is responsible for ACK'ing the APIC!!! + // + call PMI_Interrupt_Handler + + // + // Don't want an interrupt while we are doing the swapgs stuff + // + cli + testl %ebx,%ebx + jz 2f + swapgs +2: RESTORE_ALL + popq %rax + iretq + CFI_ENDPROC diff --git a/drivers/platform/x86/sepdk/sep/sys_info.c b/drivers/platform/x86/sepdk/sep/sys_info.c new file mode 100644 index 00000000000000..44d4e39dc6b470 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/sys_info.c @@ -0,0 +1,1373 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+ As a special exception, you may use this file as part of a free software
+ library without restriction. Specifically, if other files instantiate
+ templates or use macros or inline functions from this file, or you compile
+ this file and link it with other files to produce an executable, this
+ file does not by itself cause the resulting executable to be covered by
+ the GNU General Public License. This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+#include "lwpmudrv.h"
+#include "control.h"
+#include "utility.h"
+#include "apic.h"
+#include "sys_info.h"
+
+#define VTSA_CPUID VTSA_CPUID_X86
+
+extern U64 total_ram;
+static IOCTL_SYS_INFO *ioctl_sys_info;
+static size_t ioctl_sys_info_size;
+static U32 *cpuid_entry_count;
+static U32 *cpuid_total_count;
+U32 *cpu_built_sysinfo;
+
+static U32 cpu_threads_per_core = 1;
+extern U64 max_rmid;
+
+#define VTSA_NA64 ((U64)-1)
+#define VTSA_NA32 ((U32)-1)
+#define VTSA_NA ((U32)-1)
+
+#define SYS_INFO_NUM_SETS(rcx) ((rcx) + 1)
+#define SYS_INFO_LINE_SIZE(rbx) (((rbx)&0xfff) + 1)
+#define SYS_INFO_LINE_PARTITIONS(rbx) ((((rbx) >> 12) & 0x3ff) + 1)
+#define SYS_INFO_NUM_WAYS(rbx) ((((rbx) >> 22) & 0x3ff) + 1)
+
+#define SYS_INFO_CACHE_SIZE(rcx, rbx) \
+	(SYS_INFO_NUM_SETS((rcx)) * SYS_INFO_LINE_SIZE((rbx)) * \
+	 SYS_INFO_LINE_PARTITIONS((rbx)) * SYS_INFO_NUM_WAYS((rbx)))
+
+#define MSR_FB_PCARD_ID_FUSE 0x17 // platform id fuses MSR
+
+#define LOW_PART(x) (x & 0xFFFFFFFF)
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static U64 sys_info_nbits(number)
+ *
+ * @param number - the number to check
+ * @return the number of bits.
+ *
+ * @brief This routine gets the number of useful bits for the given number.
+ * It will round the number up to a power of 2, and adjust to a 0-based count.
+ * sys_info_nbits(0x3) = 2
+ * sys_info_nbits(0x4) = 2
+ *
+ */
+static U64
+sys_info_nbits(U64 number)
+{
+	U64 i;
+
+	SEP_DRV_LOG_TRACE_IN("Number: %llx.",
+			     number); // is %llu portable in the kernel?
+
+	if (number < 2) {
+		SEP_DRV_LOG_TRACE_OUT("Res: %u. (early exit)", (U32)number);
+		return number;
+	}
+
+	// adjust to 0 based number, and round up to power of 2
+	number--;
+	for (i = 0; number > 0; i++) {
+		number >>= 1;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", (U32)i);
+	return i;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static U64 sys_info_bitmask(nbits)
+ *
+ * @param number - the number of bits
+ * @return the bit mask for the nbits number
+ *
+ * @brief This routine gets the bitmask for the nbits number.
+ */
+static U64
+sys_info_bitmask(U64 nbits)
+{
+	U64 mask = 0;
+
+	SEP_DRV_LOG_TRACE_IN("Nbits: %u.", (U32)nbits);
+
+	// Use a 64-bit shift so the mask is correct for nbits >= 32;
+	// a plain (1 << nbits) would be an int-width shift and overflow.
+	mask = ((U64)1 << nbits);
+	mask--;
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %llx.", mask);
+
+	return mask;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static U32 sys_info_Get_Num_Cpuid_Funcs(basic_funcs, basic_4_funcs, extended_funcs)
+ *
+ * @param basic_funcs - pointer to the number of basic functions
+ * @param basic_4_funcs - pointer to the number of basic function 4 sub-leaves
+ * @param extended_funcs - pointer to the number of extended functions
+ * @return total number of cpuid functions
+ *
+ * @brief This routine gets the number of basic and extended cpuid functions.
+ *
+ */
+static U32
+sys_info_Get_Num_Cpuid_Funcs(OUT U32 *basic_funcs,
+			     OUT U32 *basic_4_funcs,
+			     OUT U32 *extended_funcs)
+{
+	U64 num_basic_funcs = 0x0LL;
+	U64 num_basic_4_funcs = 0x0LL;
+	U64 num_extended_funcs = 0x0LL;
+	U64 rax;
+	U64 rbx;
+	U64 rcx;
+	U64 rdx;
+	U64 i;
+	U32 res;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	UTILITY_Read_Cpuid(0, &num_basic_funcs, &rbx, &rcx, &rdx);
+	UTILITY_Read_Cpuid(0x80000000, &num_extended_funcs, &rbx, &rcx, &rdx);
+
+	if (num_extended_funcs & 0x80000000) {
+		num_extended_funcs -= 0x80000000;
+	}
+
+	//
+	// make sure num_extended_funcs is not bogus
+	//
+	if (num_extended_funcs > 0x1000) {
+		num_extended_funcs = 0;
+	}
+
+	//
+	// if the number of basic funcs is at least 4, figure out how many
+	// times we should call CPUID with eax = 0x4.
+	//
+	num_basic_4_funcs = 0;
+	if (num_basic_funcs >= 4) {
+		for (i = 0, rax = (U64)-1; (rax & 0x1f) != 0; i++) {
+			rcx = i;
+			UTILITY_Read_Cpuid(4, &rax, &rbx, &rcx, &rdx);
+		}
+		num_basic_4_funcs = i - 1;
+	}
+	if (num_basic_funcs >= 0xb) {
+		i = 0;
+		do {
+			rcx = i;
+			UTILITY_Read_Cpuid(0xb, &rax, &rbx, &rcx, &rdx);
+			i++;
+		} while (!(LOW_PART(rax) == 0 && LOW_PART(rbx) == 0));
+		num_basic_4_funcs += i;
+	}
+	if (num_basic_funcs >= 0xf) {
+		num_basic_4_funcs += 1; // subleafs 0x0, 0x1
+	}
+
+	if (num_basic_funcs >= 0x10) {
+		num_basic_4_funcs += 3; // subleafs 0x0 - 0x3
+	}
+
+	SEP_DRV_LOG_TRACE("Num_basic_4_funcs = %llx.", num_basic_4_funcs);
+
+	//
+	// adjust the numbers to include the 0 and 0x80000000 functions.
+	//
+	num_basic_funcs++;
+	num_extended_funcs++;
+
+	SEP_DRV_LOG_TRACE("num_basic_funcs: %llx, num_extended_funcs: %llx.",
+			  num_basic_funcs, num_extended_funcs);
+
+	//
+	// fill in the parameters for the caller
+	//
+	if (basic_funcs != NULL) {
+		*basic_funcs = (U32)num_basic_funcs;
+	}
+	if (basic_4_funcs != NULL) {
+		*basic_4_funcs = (U32)num_basic_4_funcs;
+	}
+	if (extended_funcs != NULL) {
+		*extended_funcs = (U32)num_extended_funcs;
+	}
+
+	res = (U32)(num_basic_funcs + num_basic_4_funcs + num_extended_funcs);
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", res);
+	return res;
+}
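+
+/*
+ * Illustrative sketch (compiled out): counting CPUID leaf-4 sub-leaves the
+ * same way sys_info_Get_Num_Cpuid_Funcs() does -- keep incrementing ECX
+ * until the cache-type field in EAX[4:0] reads 0.  The function name is
+ * hypothetical.
+ */
+#if 0
+static U32
+sys_info_example_count_leaf4_subleafs(void)
+{
+	U64 rax = (U64)-1, rbx, rcx, rdx;
+	U32 n;
+
+	for (n = 0; (rax & 0x1f) != 0; n++) {
+		rcx = n;
+		UTILITY_Read_Cpuid(4, &rax, &rbx, &rcx, &rdx);
+	}
+	return n - 1; /* the last iteration saw cache type == 0 */
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!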
+ * @fn static void sys_info_Get_Cpuid_Entry_Count(buffer)
+ *
+ * @param buffer - pointer to the buffer to hold the info
+ * @return None
+ *
+ * @brief Service routine to query the CPU for the number of entries needed
+ *
+ */
+static VOID
+sys_info_Get_Cpuid_Entry_Count(PVOID buffer)
+{
+	U32 current_processor;
+	U32 *current_cpu_buffer;
+
+	SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer);
+
+	preempt_disable();
+	current_processor = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	SEP_DRV_LOG_TRACE("Beginning on CPU %u.", current_processor);
+
+	current_cpu_buffer =
+		(U32 *)((U8 *)buffer + current_processor * sizeof(U32));
+
+#if defined(ALLOW_ASSERT)
+	ASSERT(((U8 *)current_cpu_buffer + sizeof(U32)) <=
+	       ((U8 *)current_cpu_buffer +
+		GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32)));
+#endif
+	*current_cpu_buffer = sys_info_Get_Num_Cpuid_Funcs(NULL, NULL, NULL);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static U32 sys_info_Get_Cpuid_Buffer_Size(cpuid_entries)
+ *
+ * @param cpuid_entries - number of cpuid entries
+ * @return size of buffer needed in bytes
+ *
+ * @brief This routine returns the number of bytes needed to hold the
+ * @brief CPU_CS_INFO structure.
+ *
+ */
+static U32
+sys_info_Get_Cpuid_Buffer_Size(U32 cpuid_entries)
+{
+	U32 cpuid_size;
+	U32 buffer_size;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	cpuid_size = sizeof(VTSA_CPUID);
+
+	buffer_size =
+		sizeof(IOCTL_SYS_INFO) + sizeof(VTSA_GEN_ARRAY_HDR) +
+		sizeof(VTSA_NODE_INFO) + sizeof(VTSA_GEN_ARRAY_HDR) +
+		GLOBAL_STATE_num_cpus(driver_state) * sizeof(VTSA_GEN_PER_CPU) +
+		GLOBAL_STATE_num_cpus(driver_state) *
+			sizeof(VTSA_GEN_ARRAY_HDR) +
+		cpuid_entries * cpuid_size;
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", buffer_size);
+
+	return buffer_size;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void sys_info_Fill_CPUID(...)
+ *
+ * @param num_cpuids,
+ * @param basic_funcs,
+ * @param extended_funcs,
+ * @param cpu,
+ * @param *current_cpuid
+ * @param *gen_per_cpu,
+ * @param *local_gpc
+ *
+ * @return None
+ *
+ * @brief This routine is called to build per cpu information.
+ * @brief Fills in the cpuid for the processor in the right location in the buffer.
+ *
+ */
+static void
+sys_info_Fill_CPUID(U32 num_cpuids,
+		    U32 basic_funcs,
+		    U32 extended_funcs,
+		    U32 cpu,
+		    VTSA_CPUID *current_cpuid,
+		    VTSA_GEN_PER_CPU *gen_per_cpu,
+		    VTSA_GEN_PER_CPU *local_gpc)
+{
+	U32 i, index, j;
+	U64 cpuid_function;
+	U64 rax, rbx, rcx, rdx;
+	VTSA_CPUID *cpuid_el;
+	U32 shift_nbits_core = 0;
+	U32 shift_nbits_pkg = 0;
+	U32 shift_nbits_module = 0;
+	U32 family = 0;
+	U32 model = 0;
+	DRV_BOOL ht_supported = FALSE;
+	U32 apic_id = 0;
+	U32 num_logical_per_physical = 0;
+	U32 cores_per_die = 1;
+	U32 thread_id = 0;
+	U32 core_id = 0;
+	U32 package_id = 0;
+	U32 module_id = 0;
+	U32 cores_sharing_cache = 0;
+	U32 cache_mask_width = 0;
+	U32 num_cores = 0;
+	U32 level_type = 0;
+	U32 num_dies_per_pkg = 0;
+	DRV_BOOL multi_die = FALSE;
+	U32 num_sub_leafs = 0;
+	DRV_BOOL is_function_1f_available = FALSE;
+	DRV_BOOL is_hetero_platform = FALSE;
+
+	SEP_DRV_LOG_TRACE_IN("CPU: %u.", cpu);
+
+	apic_id = CPU_STATE_apic_id(&pcb[cpu]);
+	SEP_DRV_LOG_DETECTION("cpu %u apic_id 0x%x.", cpu, apic_id);
+
+	for (i = 0, index = 0; index < num_cpuids; i++) {
+		cpuid_function = (i < basic_funcs)
+					 ? i
+					 : (0x80000000 + i - basic_funcs);
+
+		if (cpuid_function == 0x4) {
+			for (j = 0, rax = (U64)-1; (rax & 0x1f) != 0; j++) {
+				rcx = j;
+				UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx,
+						   &rcx, &rdx);
+				cpuid_el = &current_cpuid[index];
+				index++;
+
+#if defined(ALLOW_ASSERT)
+				ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID)) <=
+				       cpuid_buffer_limit);
+#endif
+
+				VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) =
+					(U32)cpuid_function;
+				VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax;
+				VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx;
+				VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx;
+				VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx;
+				SEP_DRV_LOG_TRACE(
+					"cpu=%u, func=0x%x - rax=0x%x, rbx=0x%x, rcx=0x%x, rdx=0x%x",
+					cpu, (U32)cpuid_function, (U32)rax,
+					(U32)rbx, (U32)rcx, (U32)rdx);
+
+				if ((rax & 0x1f) != 0) {
+					local_gpc = &gen_per_cpu[cpu];
+					// cache level field is EAX[7:5]: 2 = L2
+					if (((rax >> 5) & 0x3) == 2) {
+						VTSA_GEN_PER_CPU_cpu_cache_L2(local_gpc) =
+							(U32)(SYS_INFO_CACHE_SIZE(rcx, rbx) >> 10);
+						SEP_DRV_LOG_TRACE(
+							"L2 Cache: %x.",
+							VTSA_GEN_PER_CPU_cpu_cache_L2(local_gpc));
+						cores_sharing_cache =
+							((U16)(rax >> 14) & 0xfff) + 1;
+						SEP_DRV_LOG_TRACE(
+							"CORES_SHARING_CACHE=%d j=%d cpu=%d.",
+							cores_sharing_cache, j, cpu);
+					}
+
+					// cache level field is EAX[7:5]: 3 = L3
+					if (((rax >> 5) & 0x3) == 3) {
+						VTSA_GEN_PER_CPU_cpu_cache_L3(local_gpc) =
+							(U32)(SYS_INFO_CACHE_SIZE(rcx, rbx) >> 10);
+						SEP_DRV_LOG_TRACE(
+							"L3 Cache: %x.",
+							VTSA_GEN_PER_CPU_cpu_cache_L3(local_gpc));
+					}
+				}
+				if (j == 0) {
+					cores_per_die =
+						((U16)(rax >> 26) & 0x3f) + 1;
+				}
+			}
+			if (cores_sharing_cache != 0) {
+				cache_mask_width = (U32)sys_info_nbits(
+					cores_sharing_cache);
+				SEP_DRV_LOG_TRACE("CACHE MASK WIDTH=%x.",
+						  cache_mask_width);
+			}
+		} else if (cpuid_function == 0xb || cpuid_function == 0x1f) {
+			// Per the SDM, CPUID leaf 1FH is a preferred superset of leaf 0BH.
+			// Since we loop through the CPUID leaves sequentially, if 1FH is
+			// available it replaces the info from 0BH.
+			if (cpuid_function == 0x1f) {
+				is_function_1f_available = TRUE;
+			}
+
+			j = 0;
+			do {
+				rcx = j;
+				UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx,
+						   &rcx, &rdx);
+				cpuid_el = &current_cpuid[index];
+				index++;
+
+#if defined(ALLOW_ASSERT)
+				ASSERT(((U8 *)cpuid_el +
+					sizeof(VTSA_CPUID_X86)) <=
+				       cpuid_buffer_limit);
+#endif
+
+				VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) =
+					(U32)cpuid_function;
+				VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax;
+				VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx;
+				VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx;
+				VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx;
+				SEP_DRV_LOG_TRACE(
+					"cpu=%u, func=0x%x, j=%u - rax=0x%x, rbx=0x%x, rcx=0x%x, rdx=0x%x",
+					cpu, (U32)cpuid_function, j, (U32)rax,
+					(U32)rbx, (U32)rcx, (U32)rdx);
+
+				level_type = (U32)(rcx >> 8 & 0xff);
+				if (level_type == 1) {
+					// No. of bits to shift APIC ID to get the Core ID
+					shift_nbits_core = rax & 0x1f;
+				}
+
+				if (cpuid_function == 0xb) {
+					if (level_type == 2) {
+						// No. of bits to shift APIC ID to get the Pkg ID
+						shift_nbits_pkg = rax & 0x1f;
+					}
+				} else if (cpuid_function == 0x1f) {
+					if (level_type == 2) {
+						// No. of bits to shift APIC ID to get the Module ID
+						shift_nbits_module = rax & 0x1f;
+						// No. of bits to shift APIC ID to get the Pkg ID
+						shift_nbits_pkg = rax & 0x1f;
+					}
+					if (level_type == 3 ||
+					    level_type == 4 ||
+					    level_type == 5) {
+						if (level_type == 5) {
+							multi_die = TRUE;
+						}
+						// No. of bits to shift APIC ID to get the Pkg ID
+						shift_nbits_pkg = rax & 0x1f;
+					}
+				}
+
+				j++;
+			} while (!(LOW_PART(rax) == 0 && LOW_PART(rbx) == 0));
+		} else if (cpuid_function == 0xf || cpuid_function == 0x10) {
+			num_sub_leafs = (cpuid_function == 0xf) ? 2 : 4;
+
+			for (j = 0; j < num_sub_leafs; j++) {
+				rcx = j;
+				UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx,
+						   &rcx, &rdx);
+				cpuid_el = &current_cpuid[index];
+				index++;
+#if defined(ALLOW_ASSERT)
+				ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID)) <=
+				       cpuid_buffer_limit);
+#endif
+
+				VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) =
+					(U32)cpuid_function;
+				VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax;
+				VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx;
+				VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx;
+				VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx;
+				if (cpuid_function == 0xf && j == 0x0) {
+					max_rmid = rbx;
+				}
+				SEP_DRV_LOG_TRACE(
+					"cpu=%u, leaf=0x%x, sleaf=0x%x - rax=0x%x, rbx=0x%x, rcx=0x%x, rdx=0x%x",
+					cpu, (U32)cpuid_function, j, (U32)rax,
+					(U32)rbx, (U32)rcx, (U32)rdx);
+			}
+		} else {
+			rcx = 0;
+			UTILITY_Read_Cpuid(cpuid_function, &rax, &rbx, &rcx,
+					   &rdx);
+			cpuid_el = &current_cpuid[index];
+			index++;
+
+			if (cpuid_function == 0x7) {
+				is_hetero_platform =
+					(rdx >> 15) & 1 ? TRUE : FALSE;
+			}
+
+			SEP_DRV_LOG_TRACE(
+				"Cpu %u: num_cpuids = %u i = %u index = %u.",
+				cpu, num_cpuids, i, index);
+
+#if defined(ALLOW_ASSERT)
+			ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID_X86)) <=
+			       cpuid_buffer_limit);
+
+			ASSERT(((U8 *)cpuid_el + sizeof(VTSA_CPUID_X86)) <=
+			       ((U8 *)current_cpuid +
+				(num_cpuids * sizeof(VTSA_CPUID_X86))));
+#endif
+
+			VTSA_CPUID_X86_cpuid_eax_input(cpuid_el) =
+				(U32)cpuid_function;
+			VTSA_CPUID_X86_cpuid_eax(cpuid_el) = (U32)rax;
+			VTSA_CPUID_X86_cpuid_ebx(cpuid_el) = (U32)rbx;
+			VTSA_CPUID_X86_cpuid_ecx(cpuid_el) = (U32)rcx;
+			VTSA_CPUID_X86_cpuid_edx(cpuid_el) = (U32)rdx;
+			SEP_DRV_LOG_TRACE(
+				"cpu=%u, func=0x%x - rax=0x%x, rbx=0x%x, rcx=0x%x, rdx=0x%x",
+				cpu, (U32)cpuid_function, (U32)rax, (U32)rbx,
+				(U32)rcx, (U32)rdx);
+
+			if (cpuid_function == 0) {
+				// "Genu" / "ineI" / "ntel" vendor string check
+				if ((U32)rbx == 0x756e6547 &&
+				    (U32)rcx == 0x6c65746e &&
+				    (U32)rdx == 0x49656e69) {
+					VTSA_GEN_PER_CPU_platform_id(local_gpc) =
+						SYS_Read_MSR(MSR_FB_PCARD_ID_FUSE);
+				}
+			} else if (cpuid_function == 1) {
+				family = (U32)(rax >> 8 & 0x0f);
+				/* extended model bits */
+				model = (U32)(rax >> 12 & 0xf0);
+				model |= (U32)(rax >> 4 & 0x0f);
+				ht_supported = (rdx >> 28) & 1 ?
TRUE : FALSE; + num_logical_per_physical = + (U32)((rbx & 0xff0000) >> 16); + if (num_logical_per_physical == 0) { + num_logical_per_physical = 1; + } + } else if (cpuid_function == 0xa) { + VTSA_GEN_PER_CPU_arch_perfmon_ver(local_gpc) = + (U32)(rax & 0xFF); + VTSA_GEN_PER_CPU_num_gp_counters(local_gpc) = + (U32)((rax >> 8) & 0xFF); + VTSA_GEN_PER_CPU_num_fixed_counters(local_gpc) = + (U32)(rdx & 0x1F); + } else if (cpuid_function == 0x1a && + is_hetero_platform) { + VTSA_GEN_PER_CPU_cpu_core_type(local_gpc) = + (U32)((rax >> 24) & 0xFF); + } + } + } + + // set cpu_cache_L2 if not already set using 0x80000006 function + if (gen_per_cpu[cpu].cpu_cache_L2 == VTSA_NA && extended_funcs >= 6) { + UTILITY_Read_Cpuid(0x80000006, &rax, &rbx, &rcx, &rdx); + VTSA_GEN_PER_CPU_cpu_cache_L2(local_gpc) = (U32)(rcx >> 16); + } + + if (!ht_supported || num_logical_per_physical == cores_per_die) { + threads_per_core[cpu] = 1; + thread_id = 0; + } else { + // each core has 4 threads for MIC system, otherwise, it has 2 threads when ht is enabled + threads_per_core[cpu] = cpu_threads_per_core; + thread_id = (U16)(apic_id & (cpu_threads_per_core - 1)); + } + + package_id = apic_id >> shift_nbits_pkg; + + if (is_function_1f_available) { + core_id = + (apic_id >> shift_nbits_core) & + sys_info_bitmask(shift_nbits_module - shift_nbits_core); + module_id = + (apic_id >> shift_nbits_module) & + sys_info_bitmask(shift_nbits_pkg - shift_nbits_module); + if (multi_die) { + // num_dies in a pkg = 2 ^ (CPUID bit width for dies) + num_dies_per_pkg = (U32)1 << (shift_nbits_pkg - + shift_nbits_module); + package_id = + (num_dies_per_pkg * package_id) + module_id; + // In the examples below, num_dies is same num_modules, die_id is same as module_id + // eg: num_dies=2, init_pkg_id=0, die_id=0 -> final_pkg_id = 0 + // num_dies=2, init_pkg_id=2, die_id=1 -> final_pkg_id = (2 * 2) + 1 = 5 + } + } else { + core_id = (apic_id >> shift_nbits_core) & + sys_info_bitmask(shift_nbits_pkg - shift_nbits_core); + + if (cache_mask_width) { + module_id = (U32)(core_id / 2); + } + } + + SEP_DRV_LOG_DETECTION( + "MODULE ID=%d CORE ID=%d for cpu=%d PACKAGE ID=%d.", module_id, + core_id, cpu, package_id); + SEP_DRV_LOG_DETECTION("Num_logical_per_physical=%d cores_per_die=%d.", + num_logical_per_physical, cores_per_die); + SEP_DRV_LOG_DETECTION("Package_id %d, apic_id %x.", package_id, + apic_id); + SEP_DRV_LOG_DETECTION( + "Sys_info_nbits[cores_per_die,threads_per_core[%u]]: [%lld,%lld].", + cpu, sys_info_nbits(cores_per_die), + sys_info_nbits(threads_per_core[cpu])); + + VTSA_GEN_PER_CPU_cpu_intel_processor_number(local_gpc) = VTSA_NA32; + VTSA_GEN_PER_CPU_cpu_package_num(local_gpc) = (U16)package_id; + VTSA_GEN_PER_CPU_cpu_core_num(local_gpc) = (U16)core_id; + VTSA_GEN_PER_CPU_cpu_hw_thread_num(local_gpc) = (U16)thread_id; + VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) = + (U16)threads_per_core[cpu]; + VTSA_GEN_PER_CPU_cpu_module_num(local_gpc) = (U16)module_id; + num_cores = GLOBAL_STATE_num_cpus(driver_state) / threads_per_core[cpu]; + + core_to_package_map[cpu] = package_id; + core_to_phys_core_map[cpu] = core_id; + core_to_thread_map[cpu] = thread_id; + core_to_module_map[cpu] = module_id; + + if (num_packages < package_id + 1) { + num_packages = package_id + 1; + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
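+ * Worked example for the shift/mask logic above (illustrative; the leaf
+ * values are assumed): if CPUID 0x1F reports 1 thread bit (level 1),
+ * 4 core+thread bits (level 2), and 5 total bits below the package
+ * (level 5), then for apic_id = 0x2D:
+ *
+ *	thread  = 0x2D & 0x1        = 1
+ *	core    = (0x2D >> 1) & 0x7 = 6
+ *	die     = (0x2D >> 4) & 0x1 = 0
+ *	package = 0x2D >> 5         = 1
+ *
+ * which is exactly the arithmetic sys_info_Fill_CPUID performs.
+ */
+
+/* ------------------------------------------------------------------------- */
+/*!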
+ * @fn static void sys_info_Update_Hyperthreading_Info(buffer)
+ *
+ * @param buffer - points to the base of the GEN_PER_CPU structure
+ * @return None
+ *
+ * @brief This routine is called to update per cpu information based on HT ON/OFF.
+ *
+ */
+static VOID
+sys_info_Update_Hyperthreading_Info(VOID *buffer)
+{
+	U32 cpu;
+	VTSA_GEN_PER_CPU *gen_per_cpu, *local_gpc;
+	U32 i = 0;
+	U32 num_cores = 0;
+	U32 max_thread_id = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	preempt_disable();
+	cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	// get the GEN_PER_CPU entry for the current processor.
+	gen_per_cpu = (VTSA_GEN_PER_CPU *)buffer;
+
+	// Update GEN_PER_CPU
+	local_gpc = &(gen_per_cpu[cpu]);
+
+	// check how many CPUs are on thread 0, so we know the number of physical cores.
+	for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
+		// If the cpu is offline, move on to the next cpu
+		if (!VTSA_GEN_PER_CPU_cpu_intel_processor_number(
+			    &(gen_per_cpu[i]))) {
+			continue;
+		}
+		if (core_to_thread_map[i] == 0) {
+			num_cores++;
+		}
+		if (core_to_thread_map[i] > max_thread_id) {
+			max_thread_id = core_to_thread_map[i];
+		}
+	}
+	SEP_DRV_LOG_DETECTION("num_cores %u, max_thread_id %u", num_cores,
+			      max_thread_id);
+
+	// Apply a heuristic to detect the HT OFF and CPU offline configuration
+	if (max_thread_id == 0 &&
+	    GLOBAL_STATE_num_cpus(driver_state) == num_cores) {
+		// This is the case where HT is turned off by the BIOS
+		SEP_DRV_LOG_DETECTION("Hyperthreading OFF");
+		if (cpu_threads_per_core > 1 &&
+		    VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) != 1) {
+			SEP_DRV_LOG_DETECTION("Updating info");
+			threads_per_core[cpu] = 1;
+			VTSA_GEN_PER_CPU_cpu_threads_per_core(local_gpc) =
+				(U16)threads_per_core[cpu];
+			VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc) =
+				(U16)(num_cores / 2);
+			GLOBAL_STATE_num_modules(driver_state) =
+				VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc);
+		}
+	} else {
+		SEP_DRV_LOG_DETECTION("Hyperthreading ON");
+	}
+
+	SEP_DRV_LOG_DETECTION("%u HW threads are offlined",
+			      GLOBAL_STATE_num_cpus(driver_state) -
+				      GLOBAL_STATE_active_cpus(driver_state));
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void sys_info_Build_Percpu(buffer)
+ *
+ * @param buffer - points to the base of the GEN_PER_CPU structure
+ * @return None
+ *
+ * @brief This routine is called to build per cpu information.
+ *
+ */
+static VOID
+sys_info_Build_Percpu(VOID *buffer)
+{
+	U32 basic_funcs, basic_4_funcs, extended_funcs;
+	U32 num_cpuids;
+	U32 cpu;
+	VTSA_CPUID *current_cpuid;
+	VTSA_GEN_ARRAY_HDR *cpuid_gen_array_hdr;
+	VTSA_GEN_PER_CPU *gen_per_cpu, *local_gpc;
+	VTSA_FIXED_SIZE_PTR *fsp;
+	U8 *cpuid_gen_array_hdr_base;
+#if defined(ALLOW_ASSERT)
+	U8 *cpuid_buffer_limit;
+#endif
+
+	SEP_DRV_LOG_TRACE_IN("Buffer: %p.", buffer);
+
+	preempt_disable();
+	cpu = CONTROL_THIS_CPU();
+	preempt_enable();
+
+	num_cpuids = (U32)sys_info_Get_Num_Cpuid_Funcs(
+		&basic_funcs, &basic_4_funcs, &extended_funcs);
+
+	// get the GEN_PER_CPU entry for the current processor.
+ gen_per_cpu = (VTSA_GEN_PER_CPU *)buffer; + SEP_DRV_LOG_TRACE("cpu %x: gen_per_cpu = %p.", cpu, gen_per_cpu); + + // get GEN_ARRAY_HDR and cpuid array base + cpuid_gen_array_hdr_base = + (U8 *)gen_per_cpu + + GLOBAL_STATE_num_cpus(driver_state) * sizeof(VTSA_GEN_PER_CPU); + + SEP_DRV_LOG_TRACE("cpuid_gen_array_hdr_base = %p.", + cpuid_gen_array_hdr_base); + SEP_DRV_LOG_TRACE("cpu = %x.", cpu); + SEP_DRV_LOG_TRACE("cpuid_total_count[cpu] = %x.", + cpuid_total_count[cpu]); + SEP_DRV_LOG_TRACE("sizeof(VTSA_CPUID) = %lx.", sizeof(VTSA_CPUID)); + + cpuid_gen_array_hdr = + (VTSA_GEN_ARRAY_HDR *)((U8 *)cpuid_gen_array_hdr_base + + sizeof(VTSA_GEN_ARRAY_HDR) * cpu + + cpuid_total_count[cpu] * + sizeof(VTSA_CPUID)); + + // get current cpuid array base. + current_cpuid = (VTSA_CPUID *)((U8 *)cpuid_gen_array_hdr + + sizeof(VTSA_GEN_ARRAY_HDR)); +#if defined(ALLOW_ASSERT) + // get the absolute buffer limit + cpuid_buffer_limit = + (U8 *)ioctl_sys_info + + GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)); +#endif + + // + // Fill in GEN_PER_CPU + // + local_gpc = &(gen_per_cpu[cpu]); + + if (VTSA_GEN_PER_CPU_cpu_intel_processor_number(local_gpc)) { + SEP_DRV_LOG_TRACE_OUT( + "Early exit (VTSA_GEN_PER_CPU_cpu_intel_processor_number)."); + return; + } + VTSA_GEN_PER_CPU_cpu_number(local_gpc) = cpu; + VTSA_GEN_PER_CPU_cpu_core_type(local_gpc) = 0; + VTSA_GEN_PER_CPU_cpu_speed_mhz(local_gpc) = VTSA_NA32; + VTSA_GEN_PER_CPU_cpu_fsb_mhz(local_gpc) = VTSA_NA32; + + fsp = &VTSA_GEN_PER_CPU_cpu_cpuid_array(local_gpc); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = + (U64)((U8 *)cpuid_gen_array_hdr - + (U8 *)&IOCTL_SYS_INFO_sys_info(ioctl_sys_info)); + + /* + * Get the time stamp difference between this cpu and cpu 0. + * This value will be used by user mode code to generate standardize + * time needed for sampling over time (SOT) functionality. + */ + VTSA_GEN_PER_CPU_cpu_tsc_offset(local_gpc) = TSC_SKEW(cpu); + + // + // fill GEN_ARRAY_HDR + // + fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(cpuid_gen_array_hdr); + VTSA_GEN_ARRAY_HDR_hdr_size(cpuid_gen_array_hdr) = + sizeof(VTSA_GEN_ARRAY_HDR); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; + VTSA_GEN_ARRAY_HDR_array_num_entries(cpuid_gen_array_hdr) = num_cpuids; + VTSA_GEN_ARRAY_HDR_array_entry_size(cpuid_gen_array_hdr) = + sizeof(VTSA_CPUID); + VTSA_GEN_ARRAY_HDR_array_type(cpuid_gen_array_hdr) = GT_CPUID; +#if defined(DRV_IA32) + VTSA_GEN_ARRAY_HDR_array_subtype(cpuid_gen_array_hdr) = GST_X86; +#elif defined(DRV_EM64T) + VTSA_GEN_ARRAY_HDR_array_subtype(cpuid_gen_array_hdr) = GST_EM64T; +#endif + + // + // fill out cpu id information + // + sys_info_Fill_CPUID(num_cpuids, basic_funcs, extended_funcs, cpu, + current_cpuid, gen_per_cpu, local_gpc); + /* + * Mark cpu info on this cpu as successfully built + */ + cpu_built_sysinfo[cpu] = 1; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
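+ * Buffer layout sketch (illustrative summary): sys_info_Build_Percpu and
+ * the routines above walk a single allocation laid out as
+ *
+ *	IOCTL_SYS_INFO
+ *	VTSA_GEN_ARRAY_HDR                  (node array header)
+ *	VTSA_NODE_INFO
+ *	VTSA_GEN_ARRAY_HDR                  (per-cpu array header)
+ *	VTSA_GEN_PER_CPU  x num_cpus
+ *	{ VTSA_GEN_ARRAY_HDR + VTSA_CPUID x n } per cpu
+ *
+ * matching the size computed by sys_info_Get_Cpuid_Buffer_Size.
+ */
+
+/* ------------------------------------------------------------------------- */
+/*!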
+ * @fn static void sys_info_Get_Processor_Info(param)
+ *
+ * @param param - unused dispatch parameter
+ * @return None
+ *
+ * @brief This routine is called to get global information on the processor
+ * in general; it includes:
+ *              cpu_threads_per_core
+ *
+ */
+static VOID
+sys_info_Get_Processor_Info(VOID *param)
+{
+	U64 rax;
+	U64 rbx;
+	U64 rcx;
+	U64 rdx;
+	U32 family;
+	U32 model;
+	DRV_BOOL ht_supported = FALSE;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	// read cpuid with function 1 to find the family/model
+	UTILITY_Read_Cpuid(1, &rax, &rbx, &rcx, &rdx);
+	family = (U32)(rax >> 8 & 0x0f);
+	model = (U32)(rax >> 12 & 0xf0); /* extended model bits */
+	model |= (U32)(rax >> 4 & 0x0f);
+	if (is_Knights_family(family, model)) {
+		cpu_threads_per_core = 4;
+	} else {
+		ht_supported = (rdx >> 28) & 1 ? TRUE : FALSE;
+		if (ht_supported) {
+			cpu_threads_per_core = 2;
+		} else {
+			cpu_threads_per_core = 1;
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern U32 SYS_INFO_Build(void)
+ *
+ * @param None
+ * @return the buffer size needed to transfer the system information
+ *
+ * @brief This is the driver routine that constructs the VTSA_SYS_INFO
+ * @brief structure used to report system information into the tb5 file
+ *
+ */
+extern U32
+SYS_INFO_Build(VOID)
+{
+	VTSA_GEN_ARRAY_HDR *gen_array_hdr;
+	VTSA_NODE_INFO *node_info;
+	VTSA_SYS_INFO *sys_info;
+	VTSA_FIXED_SIZE_PTR *fsp;
+	U8 *gen_per_cpu;
+	U32 buffer_size;
+	U32 total_cpuid_entries;
+	U32 i;
+	struct sysinfo k_sysinfo;
+	int me;
+	U32 res;
+	VTSA_GEN_PER_CPU *local_gpc;
+	U32 *pkg_id_list;
+	U32 max_pkg_id;
+	U32 num_modules = 0;
+	U32 module_mask = 0;
+	U32 modules = 0;
+	U32 prev_core_num = 0;
+	U32 prev_module_num = 0;
+	U32 max_module_id = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+	SEP_DRV_LOG_TRACE("Entered.");
+
+	if (ioctl_sys_info) {
+		/* The sys info has already been computed. 
Do not redo */ + buffer_size = + GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)); + return buffer_size - sizeof(GENERIC_IOCTL); + } + + si_meminfo(&k_sysinfo); + + buffer_size = GLOBAL_STATE_num_cpus(driver_state) * sizeof(U32); + cpu_built_sysinfo = CONTROL_Allocate_Memory(buffer_size); + if (cpu_built_sysinfo == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Cpu_built_sysinfo memory alloc failed!"); + return 0; + } + + cpuid_entry_count = CONTROL_Allocate_Memory(buffer_size); + if (cpuid_entry_count == NULL) { + cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory alloc failed for cpuid_entry_count!"); + return 0; + } + + cpuid_total_count = CONTROL_Allocate_Memory(buffer_size); + if (cpuid_total_count == NULL) { + cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo); + cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory alloc failed for cpuid_total_count!"); + return 0; + } + + // checking on family-model to set threads_per_core as 4: MIC, 2: ht-on; 1: rest + sys_info_Get_Processor_Info(NULL); + CONTROL_Invoke_Parallel(sys_info_Get_Cpuid_Entry_Count, + (VOID *)cpuid_entry_count); + total_cpuid_entries = 0; + for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { + //if cpu is offline, set its cpuid count same as cpu0 + if (cpuid_entry_count[i] == 0) { + cpuid_entry_count[i] = cpuid_entry_count[0]; + cpu_built_sysinfo[i] = 0; + } + cpuid_total_count[i] = total_cpuid_entries; + total_cpuid_entries += cpuid_entry_count[i]; + } + + ioctl_sys_info_size = + sys_info_Get_Cpuid_Buffer_Size(total_cpuid_entries); + ioctl_sys_info = CONTROL_Allocate_Memory(ioctl_sys_info_size); + if (ioctl_sys_info == NULL) { + cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count); + cpuid_total_count = CONTROL_Free_Memory(cpuid_total_count); + + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Memory alloc failed for ioctl_sys_info!"); + // return STATUS_INSUFFICIENT_RESOURCES; + return 0; + } + + // + // fill in ioctl and cpu_cs_info fields. 
+ // + GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) = + ioctl_sys_info_size; + GENERIC_IOCTL_ret(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) = VT_SUCCESS; + + sys_info = &IOCTL_SYS_INFO_sys_info(ioctl_sys_info); + VTSA_SYS_INFO_min_app_address(sys_info) = VTSA_NA64; + VTSA_SYS_INFO_max_app_address(sys_info) = VTSA_NA64; + VTSA_SYS_INFO_page_size(sys_info) = k_sysinfo.mem_unit; + VTSA_SYS_INFO_allocation_granularity(sys_info) = k_sysinfo.mem_unit; + + // + // offset from ioctl_sys_info + // + VTSA_FIXED_SIZE_PTR_is_ptr(&VTSA_SYS_INFO_node_array(sys_info)) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(&VTSA_SYS_INFO_node_array(sys_info)) = + sizeof(VTSA_SYS_INFO); + + // + // fill in node_info array header + // + gen_array_hdr = + (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + + VTSA_FIXED_SIZE_PTR_fs_offset( + &VTSA_SYS_INFO_node_array( + sys_info))); + + SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); + fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gen_array_hdr); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; + + VTSA_GEN_ARRAY_HDR_hdr_size(gen_array_hdr) = sizeof(VTSA_GEN_ARRAY_HDR); + VTSA_GEN_ARRAY_HDR_array_num_entries(gen_array_hdr) = 1; + VTSA_GEN_ARRAY_HDR_array_entry_size(gen_array_hdr) = + sizeof(VTSA_NODE_INFO); + VTSA_GEN_ARRAY_HDR_array_type(gen_array_hdr) = GT_NODE; + VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_UNK; + + // + // fill in node_info + // + node_info = (VTSA_NODE_INFO *)((U8 *)gen_array_hdr + + sizeof(VTSA_GEN_ARRAY_HDR)); + SEP_DRV_LOG_TRACE("Node_info = %p.", node_info); + + VTSA_NODE_INFO_node_type_from_shell(node_info) = VTSA_NA32; + + VTSA_NODE_INFO_node_id(node_info) = VTSA_NA32; + VTSA_NODE_INFO_node_num_available(node_info) = + GLOBAL_STATE_num_cpus(driver_state); + VTSA_NODE_INFO_node_num_used(node_info) = VTSA_NA32; + total_ram = k_sysinfo.totalram << PAGE_SHIFT; + VTSA_NODE_INFO_node_physical_memory(node_info) = total_ram; + + fsp = &VTSA_NODE_INFO_node_percpu_array(node_info); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = sizeof(VTSA_SYS_INFO) + + sizeof(VTSA_GEN_ARRAY_HDR) + + sizeof(VTSA_NODE_INFO); + // + // fill in gen_per_cpu array header + // + gen_array_hdr = + (VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info + + VTSA_FIXED_SIZE_PTR_fs_offset(fsp)); + SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr); + + fsp = &VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gen_array_hdr); + VTSA_FIXED_SIZE_PTR_is_ptr(fsp) = 0; + VTSA_FIXED_SIZE_PTR_fs_offset(fsp) = 0; + + VTSA_GEN_ARRAY_HDR_hdr_size(gen_array_hdr) = sizeof(VTSA_GEN_ARRAY_HDR); + VTSA_GEN_ARRAY_HDR_array_num_entries(gen_array_hdr) = + GLOBAL_STATE_num_cpus(driver_state); + VTSA_GEN_ARRAY_HDR_array_entry_size(gen_array_hdr) = + sizeof(VTSA_GEN_PER_CPU); + VTSA_GEN_ARRAY_HDR_array_type(gen_array_hdr) = GT_PER_CPU; + +#if defined(DRV_IA32) + VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_X86; +#elif defined(DRV_EM64T) + VTSA_GEN_ARRAY_HDR_array_subtype(gen_array_hdr) = GST_EM64T; +#endif + + gen_per_cpu = (U8 *)gen_array_hdr + sizeof(VTSA_GEN_ARRAY_HDR); + + me = 0; + CONTROL_Invoke_Parallel(APIC_Init, NULL); + CONTROL_Invoke_Parallel(sys_info_Build_Percpu, (VOID *)gen_per_cpu); + CONTROL_Invoke_Parallel(sys_info_Update_Hyperthreading_Info, + (VOID *)gen_per_cpu); + + /* + * Make package IDs consecutive + */ + pkg_id_list = CONTROL_Allocate_Memory(sizeof(U32) * num_packages); + if (pkg_id_list && gen_per_cpu) { + for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { + local_gpc = &(((VTSA_GEN_PER_CPU *)gen_per_cpu)[i]); + if 
(local_gpc) {
+				max_pkg_id = VTSA_GEN_PER_CPU_cpu_package_num(
+					local_gpc);
+				if (!pkg_id_list[max_pkg_id]) {
+					pkg_id_list[max_pkg_id] = 1;
+				}
+			}
+		}
+		// Find the adjusted package IDs
+		max_pkg_id = 0;
+		for (i = 0; i < num_packages; i++) {
+			if (pkg_id_list[i]) {
+				pkg_id_list[i] = max_pkg_id;
+				max_pkg_id++;
+			}
+		}
+		// Update num_packages accordingly as the package IDs are updated
+		if (num_packages != max_pkg_id) {
+			SEP_DRV_LOG_DETECTION("num_packages=%u, max_pkg_id=%u",
+					      num_packages, max_pkg_id);
+			num_packages = max_pkg_id;
+		}
+		// Update the package ID
+		for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
+			local_gpc = &(((VTSA_GEN_PER_CPU *)gen_per_cpu)[i]);
+			if (local_gpc) {
+				if (VTSA_GEN_PER_CPU_cpu_package_num(local_gpc) !=
+				    pkg_id_list[VTSA_GEN_PER_CPU_cpu_package_num(
+					    local_gpc)]) {
+					SEP_DRV_LOG_DETECTION(
+						"Update the package ID from %u to %u",
+						VTSA_GEN_PER_CPU_cpu_package_num(
+							local_gpc),
+						pkg_id_list[VTSA_GEN_PER_CPU_cpu_package_num(
+							local_gpc)]);
+					VTSA_GEN_PER_CPU_cpu_package_num(local_gpc) =
+						pkg_id_list[VTSA_GEN_PER_CPU_cpu_package_num(
+							local_gpc)];
+					core_to_package_map[i] =
+						VTSA_GEN_PER_CPU_cpu_package_num(
+							local_gpc);
+				}
+			}
+		}
+		pkg_id_list = CONTROL_Free_Memory(pkg_id_list);
+	}
+
+	// Determine the number of modules
+	for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
+		local_gpc = &(((VTSA_GEN_PER_CPU *)gen_per_cpu)[i]);
+		module_mask = (1 << VTSA_GEN_PER_CPU_cpu_module_num(local_gpc));
+		if (!(modules & module_mask)) {
+			num_modules++;
+		}
+		modules |= module_mask;
+		if (VTSA_GEN_PER_CPU_cpu_package_num(local_gpc) == 0 &&
+		    VTSA_GEN_PER_CPU_cpu_hw_thread_num(local_gpc) == 0 &&
+		    VTSA_GEN_PER_CPU_cpu_module_num(local_gpc) !=
+			    prev_module_num &&
+		    VTSA_GEN_PER_CPU_cpu_core_num(local_gpc) < prev_core_num) {
+			// The core ID resets across the module boundary, so
+			// module enumeration is supported
+			module_enum_supported = 1;
+		}
+	}
+	SEP_DRV_LOG_TRACE("MODULE COUNT=%d.", num_modules);
+	GLOBAL_STATE_num_modules(driver_state) = num_modules;
+
+	for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
+		if (core_to_module_map[i] > max_module_id) {
+			max_module_id = core_to_module_map[i];
+		}
+	}
+
+	cores_per_module =
+		CONTROL_Allocate_Memory(sizeof(U32) * (max_module_id + 1));
+	if (cores_per_module == NULL) {
+		cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count);
+		cpuid_total_count = CONTROL_Free_Memory(cpuid_total_count);
+
+		SEP_DRV_LOG_ERROR_TRACE_OUT(
+			"Memory alloc failed for cores_per_module!");
+		// return STATUS_INSUFFICIENT_RESOURCES;
+		return 0;
+	}
+	// Update the per CPU num_modules
+	for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
+		local_gpc = &(((VTSA_GEN_PER_CPU *)gen_per_cpu)[i]);
+		VTSA_GEN_PER_CPU_cpu_num_modules(local_gpc) =
+			(U16)(num_modules);
+		if (VTSA_GEN_PER_CPU_cpu_package_num(local_gpc) == 0 &&
+		    VTSA_GEN_PER_CPU_cpu_hw_thread_num(local_gpc) == 0) {
+			cores_per_module[core_to_module_map[i]]++;
+		}
+	}
+
+	// Determine the max threads per core and cores per module
+	for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) {
+		if (threads_per_core[i] > max_threads_per_core) {
+			max_threads_per_core = threads_per_core[i];
+		}
+	}
+	for (i = 0; i < num_modules; i++) {
+		if (cores_per_module[i] > max_cores_per_module) {
+			max_cores_per_module = cores_per_module[i];
+		}
+	}
+
+	/*
+	 * Cleanup - deallocate memory that is no longer needed
+	 */
+	cpuid_entry_count = CONTROL_Free_Memory(cpuid_entry_count);
+
+	res = ioctl_sys_info_size - sizeof(GENERIC_IOCTL);
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", res);
+	return res;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void SYS_INFO_Transfer(buf_usr_to_drv, len_usr_to_drv)
+ *
+ * @param buf_usr_to_drv - pointer to the buffer to write the data into
+ * @param len_usr_to_drv - length of the buffer passed in
+ *
+ * @brief Transfer the data collected via the SYS_INFO_Build routine
+ * @brief back to the caller.
+ *
+ */
+extern VOID
+SYS_INFO_Transfer(PVOID buf_usr_to_drv, unsigned long len_usr_to_drv)
+{
+	unsigned long exp_size;
+	ssize_t unused;
+
+	SEP_DRV_LOG_TRACE_IN("Buffer: %p, buffer_len: %u.", buf_usr_to_drv,
+			     (U32)len_usr_to_drv);
+
+	if (ioctl_sys_info == NULL || len_usr_to_drv == 0) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT(
+			"Ioctl_sys_info is NULL or len_usr_to_drv is 0!");
+		return;
+	}
+	exp_size = GENERIC_IOCTL_size(&IOCTL_SYS_INFO_gen(ioctl_sys_info)) -
+		   sizeof(GENERIC_IOCTL);
+	if (len_usr_to_drv < exp_size) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Insufficient Space!");
+		return;
+	}
+	// copy exp_size bytes (not len_usr_to_drv) so an oversized user
+	// buffer cannot cause a read past the end of ioctl_sys_info
+	unused = copy_to_user(buf_usr_to_drv,
+			      &(IOCTL_SYS_INFO_sys_info(ioctl_sys_info)),
+			      exp_size);
+	if (unused) {
+		// no-op ... eliminates the "variable not used" compiler warning
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void SYS_INFO_Destroy(void)
+ *
+ * @param None
+ * @return None
+ *
+ * @brief Free any memory associated with the sys info before unloading the driver
+ *
+ */
+extern VOID
+SYS_INFO_Destroy(void)
+{
+	SEP_DRV_LOG_TRACE_IN("");
+
+	cpuid_total_count = CONTROL_Free_Memory(cpuid_total_count);
+	cpu_built_sysinfo = CONTROL_Free_Memory(cpu_built_sysinfo);
+	ioctl_sys_info = CONTROL_Free_Memory(ioctl_sys_info);
+	ioctl_sys_info_size = 0;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void SYS_INFO_Build_Cpu(PVOID param)
+ *
+ * @param PVOID param
+ * @return None
+ *
+ * @brief call routine to populate cpu info
+ *
+ */
+extern VOID
+SYS_INFO_Build_Cpu(PVOID param)
+{
+	VTSA_GEN_ARRAY_HDR *gen_array_hdr;
+	VTSA_NODE_INFO *node_info;
+	VTSA_SYS_INFO *sys_info;
+	VTSA_FIXED_SIZE_PTR *fsp;
+	U8 *gen_per_cpu;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (!ioctl_sys_info) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Ioctl_sys_info is null!");
+		return;
+	}
+	sys_info = &IOCTL_SYS_INFO_sys_info(ioctl_sys_info);
+	gen_array_hdr =
+		(VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info +
+				       VTSA_FIXED_SIZE_PTR_fs_offset(
+					       &VTSA_SYS_INFO_node_array(
+						       sys_info)));
+	SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr);
+
+	node_info = (VTSA_NODE_INFO *)((U8 *)gen_array_hdr +
+				       sizeof(VTSA_GEN_ARRAY_HDR));
+	SEP_DRV_LOG_TRACE("Node_info = %p.", node_info);
+	fsp = &VTSA_NODE_INFO_node_percpu_array(node_info);
+
+	gen_array_hdr =
+		(VTSA_GEN_ARRAY_HDR *)((U8 *)sys_info +
+				       VTSA_FIXED_SIZE_PTR_fs_offset(fsp));
+	SEP_DRV_LOG_TRACE("Gen_array_hdr = %p.", gen_array_hdr);
+	gen_per_cpu = (U8 *)gen_array_hdr + sizeof(VTSA_GEN_ARRAY_HDR);
+
+	sys_info_Build_Percpu((VOID *)gen_per_cpu);
+	sys_info_Update_Hyperthreading_Info((VOID *)gen_per_cpu);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
diff --git a/drivers/platform/x86/sepdk/sep/unc_common.c b/drivers/platform/x86/sepdk/sep/unc_common.c
new file mode 100644
index 00000000000000..1c5ac04d433ef4
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/unc_common.c
@@ -0,0 +1,998 @@
+/****
+    Copyright (C) 2012 Intel Corporation. 
All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/pci.h" +#include "inc/unc_common.h" +#include "inc/utility.h" +#include "inc/pmu_info_struct.h" +#include "inc/pmu_list.h" + +extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology; +extern PLATFORM_TOPOLOGY_PROG_NODE platform_topology_prog_node; +extern UNCORE_DISCOVERY_TABLE_LIST_NODE uncore_discovery_tables; +extern U64 *read_counter_info; +#if defined(DRV_PMT_ENABLE) +extern PMT_TOPOLOGY_DISCOVERY_LIST_NODE pmt_topology; +extern DRV_PMT_TELEM_DEV_NODE pmt_devices[MAX_PMT_DEVICES]; +extern U32 pmt_dev_index; +#endif + +/* this is the table to keep pci_bus structure for PCI devices + * for both pci config access and mmio access + */ +UNC_PCIDEV_NODE unc_pcidev_map[MAX_DEVICES]; + +#define GET_PACKAGE_NUM(device_type, cpu) \ + (((device_type) == DRV_SINGLE_INSTANCE) ? 0 : core_to_package_map[cpu]) + +/************************************************************/ +/* + * unc common Dispatch functions + * + ************************************************************/ +extern void +UNC_COMMON_Dummy_Func(PVOID param) +{ + UNREFERENCED_PARAMETER(param); + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + SEP_DRV_LOG_TRACE_OUT("Empty function."); + return; +} + +/************************************************************/ +/* + * UNC common PCI based API + * + ************************************************************/ + +/*! + * @fn OS_STATUS UNC_COMMON_Add_Bus_Map + * + * @brief This code discovers which package's data is read off of which bus. + * + * @param None + * + * @return OS_STATUS + * + * Special Notes: + * This probably will move to the UBOX once that is programmed. + */ +OS_STATUS +UNC_COMMON_Add_Bus_Map(U32 uncore_did, U32 dev_node, U32 bus_no, U32 domain_no) +{ + U32 i = 0; + U32 entries = 0; + + UNREFERENCED_PARAMETER(uncore_did); + SEP_DRV_LOG_TRACE_IN(""); + + if (!UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node]))) { + // allocate array for holding bus mapping + // package based device: an entry per package, all units in the same package are in the same bus. 
+		// system based device: an entry per unit if on a different bus
+		entries = GET_MAX_PCIDEV_ENTRIES(num_packages);
+		UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node])) =
+			CONTROL_Allocate_Memory(entries * sizeof(S32));
+		if (UNC_PCIDEV_busno_list(&(unc_pcidev_map[dev_node])) == NULL) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("Memory allocation failure!");
+			return OS_NO_MEM;
+		}
+		UNC_PCIDEV_domainno_list(&(unc_pcidev_map[dev_node])) =
+			CONTROL_Allocate_Memory(entries * sizeof(S32));
+		if (UNC_PCIDEV_domainno_list(&(unc_pcidev_map[dev_node])) == NULL) {
+			SEP_DRV_LOG_ERROR_TRACE_OUT("Memory allocation failure!");
+			return OS_NO_MEM;
+		}
+		UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 0;
+		UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = entries;
+		for (i = 0; i < entries; i++) {
+			UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) =
+				INVALID_BUS_NUMBER;
+		}
+	} else {
+		entries = UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node]));
+	}
+
+	for (i = 0; i < UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])); i++) {
+		if ((UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) == (S32)bus_no) &&
+		    (UNC_PCIDEV_domainno_entry(&(unc_pcidev_map[dev_node]), i) == (S32)domain_no)) {
+			SEP_DRV_LOG_TRACE_OUT("Already in the map, another unit, no add.");
+			return OS_SUCCESS;
+		}
+	}
+	if (i < entries) {
+		UNC_PCIDEV_busno_entry(&(unc_pcidev_map[dev_node]), i) = (S32)bus_no;
+		UNC_PCIDEV_domainno_entry(&(unc_pcidev_map[dev_node]), i) = (S32)domain_no;
+		UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))++;
+		SEP_DRV_LOG_TRACE_OUT("Add pkgno=%d num_packages=%d domainno=%x busno=%x devnode=%d.",
+				      i, num_packages, domain_no, bus_no, dev_node);
+		return OS_SUCCESS;
+	}
+	SEP_DRV_LOG_ERROR_TRACE_OUT("Exceeded max map entries, dropping this bus map!");
+	return OS_NO_MEM;
+}
+
+extern OS_STATUS
+UNC_COMMON_Init(void)
+{
+	U32 i = 0;
+
+	for (i = 0; i < MAX_DEVICES; i++) {
+		SEP_DRV_MEMSET(&(unc_pcidev_map[i]), 0, sizeof(UNC_PCIDEV_NODE));
+	}
+
+	SEP_DRV_MEMSET((char *)&uncore_topology, 0, sizeof(UNCORE_TOPOLOGY_INFO_NODE));
+	SEP_DRV_MEMSET((char *)&platform_topology_prog_node, 0,
+		       sizeof(PLATFORM_TOPOLOGY_PROG_NODE));
+#if defined(DRV_PMT_ENABLE)
+	SEP_DRV_MEMSET((char *)&pmt_topology, 0, sizeof(PMT_TOPOLOGY_DISCOVERY_LIST_NODE));
+#endif
+
+	return OS_SUCCESS;
+}
+
+/*!
+ * @fn extern VOID UNC_COMMON_Clean_Up(void)
+ *
+ * @brief Clear out our programming
+ *
+ * @param None
+ *
+ * @return None
+ */
+extern void
+UNC_COMMON_Clean_Up(void)
+{
+	U32 i = 0;
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if (UNC_PCIDEV_busno_list(&(unc_pcidev_map[i]))) {
+			UNC_PCIDEV_busno_list(&(unc_pcidev_map[i])) =
+				CONTROL_Free_Memory(UNC_PCIDEV_busno_list(&(unc_pcidev_map[i])));
+		}
+		if (UNC_PCIDEV_domainno_list(&(unc_pcidev_map[i]))) {
+			UNC_PCIDEV_domainno_list(&(unc_pcidev_map[i])) =
+				CONTROL_Free_Memory(UNC_PCIDEV_domainno_list(&(unc_pcidev_map[i])));
+		}
+		if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[i]))) {
+			UNC_PCIDEV_mmio_map(&(unc_pcidev_map[i])) =
+				CONTROL_Free_Memory(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[i])));
+		}
+		SEP_DRV_MEMSET(&(unc_pcidev_map[i]), 0, sizeof(UNC_PCIDEV_NODE));
+	}
+	return;
+}
+
+#if defined(DRV_PMT_ENABLE)
+/*!
+ * @fn extern U32 UNC_COMMON_Scan_PMT_Device()
+ *
+ * @brief Map the PMT devices discovered and notified by the pmt driver
+ *
+ * @param None
+ *
+ * @return TRUE if a mapping is found, FALSE otherwise
+ */
+extern U32
+UNC_COMMON_Scan_PMT_Device(void)
+{
+	U32 i = 0;
+	U32 j = 0;
+	U32 k = 0;
+
+	for (i = 0; i < PMT_TOPOLOGY_DISCOVERY_LIST_num_pmt_devices(&pmt_topology); i++) {
+		for (j = 0; j < PMT_TOPOLOGY_DISCOVERY_LIST_num_tiles(&pmt_topology, i); j++) {
+			for (k = 0; k < pmt_dev_index; k++) {
+				if (PMT_TOPOLOGY_DISCOVERY_LIST_tile_domain(&pmt_topology, i, j) ==
+					    pmt_devices[k].domain &&
+				    //PMT_TOPOLOGY_DISCOVERY_LIST_tile_bus(&pmt_topology, i, j) == pmt_devices[k].bus &&
+				    PMT_TOPOLOGY_DISCOVERY_LIST_tile_device(&pmt_topology, i, j) ==
+					    pmt_devices[k].device &&
+				    PMT_TOPOLOGY_DISCOVERY_LIST_tile_function(
+					    &pmt_topology, i, j) == pmt_devices[k].function &&
+				    PMT_TOPOLOGY_DISCOVERY_LIST_tile_oobmsm_deviceid(
+					    &pmt_topology, i, j) == pmt_devices[k].device_id &&
+				    PMT_TOPOLOGY_DISCOVERY_LIST_tile_pmt_endpoint_guid(
+					    &pmt_topology, i, j) == pmt_devices[k].guid &&
+				    !pmt_devices[k].found) {
+					pmt_devices[k].dev_idx = i;
+					SEP_DRV_LOG_DETECTION("PMT Device index=%i bus=%x dev=%x func=%x devid=%x venid=%x found\n",
+							      k, pmt_devices[k].bus,
+							      pmt_devices[k].device,
+							      pmt_devices[k].function,
+							      pmt_devices[k].device_id,
+							      pmt_devices[k].vendor_id);
+					pmt_devices[k].found = 1;
+					PMT_TOPOLOGY_DISCOVERY_LIST_topology_detected(
+						&pmt_topology) = 1;
+					PMT_TOPOLOGY_DISCOVERY_LIST_num_entries_found(
+						&pmt_topology, i)++;
+					PMT_TOPOLOGY_DISCOVERY_LIST_tile_found(
+						&pmt_topology, i, j) = 1;
+					PMT_TOPOLOGY_DISCOVERY_LIST_tile_unit_id(
+						&pmt_topology, i, j) = k;
+					PMT_TOPOLOGY_DISCOVERY_LIST_tile_bus(&pmt_topology, i, j) =
+						pmt_devices[k].bus;
+					//PMT_TOPOLOGY_DISCOVERY_LIST_tile_id(&pmt_topology, i, j) = i;
+					SEP_DRV_LOG_DETECTION("Found CTA device 0x%x at DBDF(%x:%x:%x:%x) [%u unit(s) so far].",
+							      pmt_devices[k].device_id,
+							      pmt_devices[k].domain,
+							      pmt_devices[k].bus,
+							      pmt_devices[k].device,
+							      pmt_devices[k].function,
+							      PMT_TOPOLOGY_DISCOVERY_LIST_num_entries_found(
+								      &pmt_topology, i));
+					break;
+				}
+			}
+		}
+	}
+
+	return TRUE;
+}
+#endif
+
+/*!
+ * @fn extern VOID UNC_COMMON_PCI_Scan_For_Uncore(VOID*)
+ *
+ * @brief Scan all PCI domains and buses for uncore PMU devices whose device
+ *        IDs match the uncore topology table for this device node, and
+ *        record the resulting domain/bus mapping.
+ *
+ * @param param - dispatch parameter (unused)
+ * @param dev_node - uncore device node index
+ * @param callback - device callback (unused)
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+
+extern VOID
+UNC_COMMON_PCI_Scan_For_Uncore(PVOID param, U32 dev_node, DEVICE_CALLBACK callback)
+{
+	U32 device_id;
+	U32 value;
+	U32 vendor_id;
+	U32 busno;
+	U32 domainno;
+	U32 j, k, l;
+	U32 device_found = 0;
+	UNREFERENCED_PARAMETER(param);
+	UNREFERENCED_PARAMETER(callback);
+
+	SEP_DRV_LOG_TRACE_IN("Dummy param: %p, dev_node: %u, callback: %p.",
+			     param, dev_node, callback);
+
+	for (domainno = 0; domainno < num_pci_domains; domainno++) {
+		for (busno = 0; busno < MAX_BUSNO; busno++) {
+			for (j = 0; j < MAX_PCI_DEVNO; j++) {
+				if (!(UNCORE_TOPOLOGY_INFO_pcidev_valid(
+					    &uncore_topology, dev_node, j))) {
+					continue;
+				}
+				for (k = 0; k < MAX_PCI_FUNCNO; k++) {
+					if (!(UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(
+						    &uncore_topology, dev_node, j, k))) {
+						continue;
+					}
+					device_found = 0;
+					// Assume a single pci domain is connected with multiple
+					// sockets with consecutive package IDs if the number of
+					// pci domains is smaller than the number of packages
+					value = PCI_Read_U32_Valid(domainno, busno, j, k, 0, 0);
+					CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id,
+									     device_id);
+					SEP_DRV_LOG_DETECTION("Uncore deviceID=0x%x DBDF(%x:%x:%x:%x).",
+							      device_id, domainno, busno, j, k);
+
+					for (l = 0;
+					     l < UNCORE_TOPOLOGY_INFO_num_deviceid_entries(
+							 &uncore_topology, dev_node);
+					     l++) {
+						if (UNCORE_TOPOLOGY_INFO_deviceid(
+							    &uncore_topology, dev_node, l) ==
+						    device_id) {
+							device_found = 1;
+							break;
+						}
+					}
+					if (device_found) {
+						if (UNC_COMMON_Add_Bus_Map(device_id, dev_node,
+									   busno, domainno) ==
+						    OS_SUCCESS) {
+							UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(
+								&uncore_topology, dev_node, j, k)++;
+							SEP_DRV_LOG_DETECTION("Found device 0x%x at DBDF(%x:%x:%x:%x) [%u unit(s) so far].",
+									      device_id, domainno,
+									      busno, j, k,
+									      UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(
										      &uncore_topology,
										      dev_node, j, k));
+						}
+					}
+				}
+			}
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
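+/*
+ * Illustrative sketch (compiled out; the field masks are the generic PCIe
+ * extended-capability layout, not values taken from this driver's config
+ * structure): walking the PCI extended capability list the way
+ * UNC_COMMON_PCI_Scan_For_CapId below does -- start at offset 0x100 and
+ * follow the next-pointer field until it is 0.  The function name is
+ * hypothetical.
+ */
+#if 0
+static void
+unc_example_walk_ext_caps(U32 domain, U32 bus, U32 dev, U32 func)
+{
+	U32 offset = 0x100;
+	U32 value;
+
+	while ((value = PCI_Read_U32_Valid(domain, bus, dev, func, offset, 0))) {
+		U32 cap_id = value & 0xffff;      /* capability ID       */
+		U32 next = (value >> 20) & 0xfff; /* next cap offset     */
+
+		/* ... match cap_id against the one being searched for ... */
+		if (!next || next >= 0x1000) {
+			break;
+		}
+		offset = next;
+	}
+}
+#endif
+
+/*!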
+ * @fn extern VOID UNC_COMMON_PCI_Scan_For_CapId(UNCORE_DISCOVERY_DVSEC_CONFIG, DRV_PCI_DEVICE_ENTRY)
+ *
+ * @brief Walk through the PCICFG space, identify all the matching capability IDs
+ *        from all sockets and domains, and populate the corresponding BARs into
+ *        the output array bar_list
+ *
+ * @param config - pointer to an UNCORE_DISCOVERY_DVSEC_CONFIG structure
+ * @param bar_list - list returned from the function with the BAR addresses
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+extern VOID
+UNC_COMMON_PCI_Scan_For_CapId(UNCORE_DISCOVERY_DVSEC_CONFIG config, DRV_PCI_DEVICE_ENTRY bar_list)
+{
+	U32 device_id;
+	U32 value;
+	U32 vendor_id;
+	U32 busno;
+	U32 domainno;
+	U32 j, k;
+	U32 bir = 0;
+	U32 bar_offset = 0;
+	U32 bar = 0;
+	U32 i = 0;
+	U32 offset = EXT_CAPID_START_OFFSET;
+	U32 next_offset = 0;
+	U32 cap_id = 0;
+
+	SEP_DRV_LOG_TRACE_IN("Config: %p, bar_list: %p.", config, bar_list);
+
+	for (domainno = 0; domainno < num_pci_domains; domainno++) {
+		for (busno = 0; busno < MAX_BUSNO; busno++) {
+			for (j = 0; j < MAX_PCI_DEVNO; j++) {
+				for (k = 0; k < MAX_PCI_FUNCNO; k++) {
+					value = PCI_Read_U32_Valid(domainno, busno, j, k, 0, 0);
+					CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id,
+									     device_id);
+					offset = EXT_CAPID_START_OFFSET;
+					while ((value = PCI_Read_U32_Valid(domainno, busno, j, k,
+									   offset, 0))) {
+						next_offset =
+							(value & EXT_CAPID_LIST_NEXT_ADDR_MASK) >>
+							EXT_CAPID_LIST_NEXT_ADDR_BITSHIFT;
+						cap_id = (value & EXT_CAPID_MASK);
+						if (cap_id ==
+						    UNCORE_DISCOVERY_DVSEC_pci_ext_cap_id(config)) {
+							value = PCI_Read_U32(
+								domainno, busno, j, k,
+								offset +
+									UNCORE_DISCOVERY_DVSEC_dvsec_offset(
+										config));
+							if ((value &
+							     UNCORE_DISCOVERY_DVSEC_dvsec_id_mask(
+								     config)) ==
+							    UNCORE_DISCOVERY_DVSEC_dvsec_id_pmon(
+								    config)) {
+								// read the BAR indicator register (BIR)
+								bir = PCI_Read_U32(
+									domainno, busno, j, k,
+									offset +
+										UNCORE_DISCOVERY_DVSEC_dvsec_offset(
+											config) +
+										4);
+								bir = bir &
+								      UNCORE_DISCOVERY_DVSEC_dvsec_bir_mask(
+									      config);
+								bar_offset = 0x10 + (bir * 4);
+								bar = PCI_Read_U32(domainno, busno,
+										   j, k, bar_offset);
+								bar = bar & ~(PAGE_SIZE - 1);
+								DRV_PCI_DEVICE_ENTRY_bar_address(
+									&bar_list[i]) = bar;
+								j = MAX_PCI_DEVNO;
+								k = MAX_PCI_FUNCNO;
+								SEP_DRV_LOG_TRACE("%u. BAR:0x%x", i,
+										  bar);
+								i++;
+								break;
+							}
+						}
+						if (!next_offset ||
+						    next_offset >= EXT_CAPID_END_OFFSET) {
+							break;
+						}
+						offset = next_offset;
+					}
+				}
+			}
+		}
+	}
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void unc_common_Read_MSR(PVOID param)
+ *
+ * @param param - pointer to an MSR_DATA node holding the MSR address
+ *
+ * @return none
+ *
+ * @brief Read the MSR at MSR_DATA_addr on the current CPU and store
+ * @brief the result in MSR_DATA_value.
+ *
+ * Special Notes
+ */
+static VOID
+unc_common_Read_MSR(PVOID param)
+{
+	MSR_DATA topo_msr = (MSR_DATA)param;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	if (!param) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("Error: param is null");
+		return;
+	}
+	MSR_DATA_value(topo_msr) = (U64)SYS_Read_MSR((U32)MSR_DATA_addr(topo_msr));
+
+	SEP_DRV_LOG_TRACE_OUT("");
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
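+
+/*
+ * Usage sketch (illustrative; the MSR address below is an assumed
+ * placeholder, not one this file programs): unc_common_Read_MSR is designed
+ * to be run on a chosen CPU via CONTROL_Invoke_Cpu,
+ *
+ *	MSR_DATA_NODE node;
+ *	MSR_DATA_addr(&node) = 0x1A0;
+ *	CONTROL_Invoke_Cpu(cpu, unc_common_Read_MSR, (VOID *)&node);
+ *	value = MSR_DATA_value(&node);
+ *
+ * which is exactly the pattern unc_common_Read_MSR_Package uses below.
+ */
+
+/*!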
+ * @fn static U64 unc_common_Read_MSR_Package(U32 msr_address, U32 package_num)
+ *
+ * @param msr_address - MSR address to be read
+ * @param package_num - package on which to read the MSR
+ *
+ * @return the MSR value read
+ *
+ * @brief Read the MSR value on a specific package
+ *
+ * Special Notes
+ */
+static U64
+unc_common_Read_MSR_Package(U32 msr_address, U32 package_num)
+{
+	S32 i;
+	U64 msr_value = 0ULL;
+	MSR_DATA_NODE topo_msr_node;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	// run the read on the first logical CPU that belongs to the package
+	for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
+		if (package_num == core_to_package_map[i]) {
+			MSR_DATA_addr(&topo_msr_node) = msr_address;
+#if !defined(DRV_OS_MAC)
+			CONTROL_Invoke_Cpu(i, unc_common_Read_MSR, (VOID *)&topo_msr_node);
+			msr_value = MSR_DATA_value(&topo_msr_node);
+#else
+			msr_value = SYS_Read_MSR(msr_address);
+#endif
+			break;
+		}
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+
+	return msr_value;
+}
+
+/*!
+ * @fn extern VOID UNC_COMMON_Get_Platform_Topology()
+ *
+ * @brief This function walks through the platform registers to retrieve
+ *        information and calculate the bus numbers. It reads the appropriate
+ *        pci_config regs and populates the PLATFORM_TOPOLOGY_PROG_NODE
+ *        structure with the reg values.
+ *
+ * @param U32 dev_node - Device no.
+ *
+ * @return None
+ *
+ * Special Notes:
+ *      device_num corresponds to the Memory controller
+ *      func_num corresponds to the Channel number
+ *      reg_offset corresponds to the dimm slot
+ */
+extern VOID
+UNC_COMMON_Get_Platform_Topology(U32 dev_node)
+{
+	U32 num_registers = 0;
+	U32 device_index = 0;
+	U32 bus_num = 0;
+	U32 domain_num = 0;
+	U32 i = 0;
+	U32 func_num = 0;
+	U32 num_pkgs = num_packages;
+	U32 device_num = 0;
+	U32 reg_offset = 0;
+	U32 len = 0;
+	U64 reg_value = 0;
+	U32 device_value = 0;
+	U64 reg_mask = 0;
+	U32 reg_id = 0;
+	U32 vendor_id;
+	U32 device_id;
+	U32 valid;
+	U32 reg_idx = 0;
+	U32 num_found_units = 0;
+	U64 physical_address = 0ULL;
+	U64 virtual_address = 0ULL;
+	U64 mmio_base1 = 0ULL;
+	U64 mmio_base2 = 0ULL;
+	SEP_MMIO_NODE tmp_map = { 0 };
+
+	PLATFORM_TOPOLOGY_REG_EXT topology_regs_ext = NULL;
+	U32 unc_topo_node = PLATFORM_TOPOLOGY_PROG_EXT_topology_device_device_index(
+		&platform_topology_prog_node, dev_node);
+
+	SEP_DRV_LOG_TRACE_IN("Dev_node: %u.", dev_node);
+	// Backward compatibility - the old version has fewer max devices (similar checks below)
+	if (dev_node < MAX_TOPOLOGY_DEV_OLD) {
+		PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid(&platform_topology_prog_node,
+								  dev_node) = 1;
+	}
+	PLATFORM_TOPOLOGY_PROG_EXT_topology_device_prog_valid(&platform_topology_prog_node,
+							      dev_node) = 1;
+
+	if (num_packages > MAX_PACKAGES) {
+		SEP_DRV_LOG_ERROR("Num_packages %d > MAX_PACKAGES, getting info for only %d packages.",
+				  num_packages, MAX_PACKAGES);
+		num_pkgs = MAX_PACKAGES;
+	}
+
+	num_registers = PLATFORM_TOPOLOGY_PROG_EXT_topology_device_num_registers(
+		&platform_topology_prog_node, dev_node);
+	topology_regs_ext = PLATFORM_TOPOLOGY_PROG_EXT_topology_topology_regs(
+		&platform_topology_prog_node, dev_node);
+	device_index = PLATFORM_TOPOLOGY_PROG_EXT_topology_device_device_index(
+		&platform_topology_prog_node, dev_node);
+
+	for (i = 0; i < num_pkgs; i++) {
+		for (len = 0; len < num_registers; len++) {
+			if (PLATFORM_TOPOLOGY_PROG_EXT_topology_device_distinct_buses(
+				    &platform_topology_prog_node, dev_node)) {
+				// bus numbers differ for devices within the package (e.g. 
iio) + reg_idx = PLATFORM_TOPOLOGY_REG_EXT_unit_index(topology_regs_ext, + len) + + i * num_found_units; //5; + } else { + //same bus numbers for all devices in the same package + reg_idx = i; + } + if (PLATFORM_TOPOLOGY_REG_EXT_reg_type(topology_regs_ext, len) == + PMU_REG_PROG_MSR) { + reg_id = PLATFORM_TOPOLOGY_REG_EXT_reg_id(topology_regs_ext, len); + + // Validation check from PMU list + if (!PMU_LIST_Check_MSR(reg_id)) { + SEP_DRV_LOG_ERROR("Received invalid MSR 0x%x. Skipped!", + reg_id); + continue; + } else { + SEP_DRV_LOG_TRACE("Verified MSR 0x%llx\n", reg_id); + } + + if (PLATFORM_TOPOLOGY_PROG_EXT_topology_device_scope( + &platform_topology_prog_node, dev_node) == + PACKAGE_EVENT) { + reg_value = unc_common_Read_MSR_Package(reg_id, i); + } else { + reg_value = SYS_Read_MSR(reg_id); + } + reg_mask = + PLATFORM_TOPOLOGY_REG_EXT_reg_mask(topology_regs_ext, len); + PLATFORM_TOPOLOGY_REG_EXT_reg_value(topology_regs_ext, len, i) = + reg_value & reg_mask; + SEP_DRV_LOG_DETECTION("Device ID=%d unc_topo_node=%d Read MSR 0x%x Val=%llx\n", + dev_node, unc_topo_node, + PLATFORM_TOPOLOGY_REG_EXT_reg_id( + topology_regs_ext, len), + PLATFORM_TOPOLOGY_REG_EXT_reg_value( + topology_regs_ext, len, i)); + } else if (PLATFORM_TOPOLOGY_REG_EXT_reg_type(topology_regs_ext, len) == + PMU_REG_PROG_PCI) { + if (!IS_BUS_MAP_VALID(unc_topo_node, reg_idx)) { + SEP_DRV_LOG_TRACE("BUS MAP invalid for device=%d reg_idx=%d\n", + dev_node, reg_idx); + continue; + } + if (!PLATFORM_TOPOLOGY_REG_EXT_device_valid(topology_regs_ext, + len)) { + SEP_DRV_LOG_TRACE("Device=%d reg_idx=%d invalid\n", + dev_node, reg_idx); + continue; + } + bus_num = GET_BUS_MAP(unc_topo_node, reg_idx); + domain_num = GET_DOMAIN_MAP(unc_topo_node, reg_idx); + device_num = + PLATFORM_TOPOLOGY_REG_EXT_device(topology_regs_ext, len); + func_num = + PLATFORM_TOPOLOGY_REG_EXT_function(topology_regs_ext, len); + reg_offset = + PLATFORM_TOPOLOGY_REG_EXT_reg_id(topology_regs_ext, len); + + // Validation check from PMU list + if (!PMU_LIST_Check_PCI((U8)bus_num, (U8)device_num, (U8)func_num, + reg_offset)) { + PLATFORM_TOPOLOGY_REG_EXT_device_valid(topology_regs_ext, + len) = 0; + SEP_DRV_LOG_ERROR("Received invalid PCI device DBDF(%x:%x:%d:%d) Off=%x Skipped!", + domain_num, bus_num, device_num, func_num, + reg_offset); + continue; + } else { + SEP_DRV_LOG_DETECTION("Verified PCI dev_node=%d unc_topo_node=%d reg_idx=%d DBDF(%x:%x:%d:%d) Off=%x\n", + dev_node, unc_topo_node, reg_idx, + domain_num, bus_num, device_num, + func_num, reg_offset); + } + + device_value = PCI_Read_U32_Valid(domain_num, bus_num, device_num, + func_num, 0, 0); + CHECK_IF_GENUINE_INTEL_DEVICE(device_value, vendor_id, device_id, + valid); + if (!valid) { + PLATFORM_TOPOLOGY_REG_EXT_device_valid(topology_regs_ext, + len) = 0; + } + if (PLATFORM_TOPOLOGY_REG_EXT_device_id(topology_regs_ext, len) && + PLATFORM_TOPOLOGY_REG_EXT_device_id(topology_regs_ext, len) != + device_id) { + continue; + } + PLATFORM_TOPOLOGY_REG_EXT_reg_value(topology_regs_ext, len, i) = + PCI_Read_U32_Valid(domain_num, bus_num, device_num, + func_num, reg_offset, PCI_INVALID_VALUE); + PLATFORM_TOPOLOGY_REG_EXT_bus_number(topology_regs_ext, len, i) = + (U16)bus_num; + PLATFORM_TOPOLOGY_REG_EXT_bus(topology_regs_ext, len) = + (U16)bus_num; + PLATFORM_TOPOLOGY_REG_EXT_domain_number(topology_regs_ext, len, i) = + (U16)domain_num; + SEP_DRV_LOG_DETECTION("PCI topology DBDF(%x:%x:%d:%d) val=%llx\n", + domain_num, bus_num, device_num, func_num, + PLATFORM_TOPOLOGY_REG_EXT_reg_value( + topology_regs_ext, len, 
i)); + if (i == 0 && PLATFORM_TOPOLOGY_REG_EXT_unit_index( + topology_regs_ext, len) >= num_found_units) { + num_found_units = PLATFORM_TOPOLOGY_REG_EXT_unit_index( + topology_regs_ext, len) + + 1; + } + } else if (PLATFORM_TOPOLOGY_REG_EXT_reg_type(topology_regs_ext, len) == + PMU_REG_PROG_MMIO) { + if (!IS_BUS_MAP_VALID(unc_topo_node, reg_idx)) { + SEP_DRV_LOG_TRACE("BUS MAP invalid for device=%d reg_idx=%d\n", + dev_node, reg_idx); + continue; + } + if (!PLATFORM_TOPOLOGY_REG_EXT_device_valid(topology_regs_ext, + len)) { + SEP_DRV_LOG_TRACE("Device=%d reg_idx=%d invalid\n", + dev_node, reg_idx); + continue; + } + bus_num = GET_BUS_MAP(unc_topo_node, reg_idx); + domain_num = GET_DOMAIN_MAP(unc_topo_node, reg_idx); + device_num = + PLATFORM_TOPOLOGY_REG_EXT_device(topology_regs_ext, len); + func_num = + PLATFORM_TOPOLOGY_REG_EXT_function(topology_regs_ext, len); + reg_offset = + PLATFORM_TOPOLOGY_REG_EXT_reg_id(topology_regs_ext, len); + + // Validation check from PMU list + if (!PMU_LIST_Check_PCI((U8)bus_num, (U8)device_num, (U8)func_num, + PLATFORM_TOPOLOGY_REG_EXT_main_bar_offset( + topology_regs_ext, len)) || + !PMU_LIST_Check_PCI( + (U8)bus_num, (U8)device_num, (U8)func_num, + PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_offset( + topology_regs_ext, len))) { + PLATFORM_TOPOLOGY_REG_EXT_device_valid(topology_regs_ext, + len) = 0; + SEP_DRV_LOG_ERROR("Received invalid MMIO device DBDF(%x:%x:%d:%d) Off=%llx Skipped!", + domain_num, bus_num, device_num, func_num, + PLATFORM_TOPOLOGY_REG_EXT_main_bar_offset( + topology_regs_ext, len)); + continue; + } + mmio_base1 = PCI_Read_U32(domain_num, bus_num, device_num, func_num, + PLATFORM_TOPOLOGY_REG_EXT_main_bar_offset( + topology_regs_ext, len)); + mmio_base1 = mmio_base1 & PLATFORM_TOPOLOGY_REG_EXT_main_bar_mask( + topology_regs_ext, len); + if (PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_offset( + topology_regs_ext, len)) { + mmio_base2 = PCI_Read_U32( + domain_num, bus_num, device_num, func_num, + PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_offset( + topology_regs_ext, len)); + mmio_base2 = mmio_base2 & + PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_mask( + topology_regs_ext, len); + } + physical_address = + ((mmio_base1 << PLATFORM_TOPOLOGY_REG_EXT_main_bar_shift( + topology_regs_ext, len)) | + (mmio_base2 + << PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_shift( + topology_regs_ext, len))) + + PLATFORM_TOPOLOGY_REG_EXT_base_mmio_offset( + topology_regs_ext, len); + PCI_Map_Memory(&tmp_map, physical_address, + PLATFORM_TOPOLOGY_REG_EXT_mmio_map_size( + topology_regs_ext, len)); + virtual_address = SEP_MMIO_NODE_virtual_address(&tmp_map); + PLATFORM_TOPOLOGY_REG_EXT_reg_value(topology_regs_ext, len, i) = + PCI_MMIO_Read_U32(virtual_address, reg_offset); + PCI_Unmap_Memory(&tmp_map); + SEP_DRV_LOG_DETECTION("MMIO base1=%llx base2=%llx phys=%llx, virt=%llx\n", + mmio_base1, mmio_base2, physical_address, + virtual_address); + SEP_DRV_LOG_DETECTION("MMIO dev_node=%d unc_topo_node=%d val = %llx bus=%x\n", + dev_node, unc_topo_node, + PLATFORM_TOPOLOGY_REG_EXT_reg_value( + topology_regs_ext, len, i), + bus_num); + } + } + if (PLATFORM_TOPOLOGY_PROG_EXT_topology_device_scope(&platform_topology_prog_node, + dev_node) == SYSTEM_EVENT) { + break; + } + } + SEP_DRV_LOG_TRACE_OUT(""); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn static void unc_common_Disc_Add_Allowlist_Key_Value(U64, U64)
+ *
+ * @param base_addr - base address of the MMIO uncore unit
+ * @param offset - offset from the base address
+ *
+ * @return None
+ *
+ * @brief Adds an entry to the MMIO allowlist
+ *
+ * Special Notes
+ */
+
+static void
+unc_common_Disc_Add_Allowlist_Key_Value(U64 base_addr, U64 offset)
+{
+	U64 key = 0;
+	SEP_DRV_LOG_TRACE_IN("");
+
+	key = base_addr << 16 | (U16)offset;
+	PMU_LIST_Add_To_MMIO_List(key, NULL);
+
+	SEP_DRV_LOG_TRACE_OUT("Success");
+}
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static void unc_common_Disc_Add_Allowlist_Entry(UNCORE_DISCOVERY_TABLE)
+ *
+ * @param UNCORE_DISCOVERY_TABLE pkg_table - pointer to discovery table
+ *
+ * @return None
+ *
+ * @brief Iterates the discovery entries to populate the allowlist
+ *
+ * Special Notes
+ */
+static void
+unc_common_Disc_Add_Allowlist_Entry(UNCORE_DISCOVERY_TABLE pkg_table)
+{
+	U32 i = 0;
+	U32 j = 0;
+	U32 max_blocks = 0;
+	UNCORE_DISCOVERY_UNIT unit_entry = NULL;
+	U64 unit_ctrl_addr = 0;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	max_blocks = (U32)UNCORE_DISCOVERY_GLOBAL_max_blocks(
+		&UNCORE_DISCOVERY_TABLE_discovery_global_node(pkg_table));
+
+	for (i = 0; i < max_blocks; i++) {
+		unit_entry = &UNCORE_DISCOVERY_TABLE_discovery_unit_node_entry(pkg_table, i);
+		if (UNCORE_DISCOVERY_UNIT_unit_type(unit_entry) == DISCOVERY_UNIT_CXLCM ||
+		    UNCORE_DISCOVERY_UNIT_unit_type(unit_entry) == DISCOVERY_UNIT_CXLDP) {
+			unit_ctrl_addr = UNCORE_DISCOVERY_UNIT_unit_ctrl_addr(unit_entry);
+			if (!unit_ctrl_addr) {
+				continue;
+			}
+			unc_common_Disc_Add_Allowlist_Key_Value(unit_ctrl_addr, 0x0);
+			unc_common_Disc_Add_Allowlist_Key_Value(
+				unit_ctrl_addr,
+				UNCORE_DISCOVERY_UNIT_unit_status_offset(unit_entry));
+			for (j = 0; j < UNCORE_DISCOVERY_UNIT_num_gp_ctrs(unit_entry); j++) {
+				unc_common_Disc_Add_Allowlist_Key_Value(
+					unit_ctrl_addr,
+					(UNCORE_DISCOVERY_UNIT_evtsel0_offset(unit_entry) + j * 8));
+				unc_common_Disc_Add_Allowlist_Key_Value(
+					unit_ctrl_addr,
+					(UNCORE_DISCOVERY_UNIT_ctr0_offset(unit_entry) + j * 8));
+			}
+		}
+	}
+	SEP_DRV_LOG_TRACE_OUT("Success");
+}
+
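+/*
+ * Editor's note: an illustrative sketch (not used by the driver) of the
+ * allowlist key layout built above -- the unit's control address shifted
+ * left by 16, OR'ed with the low 16 bits of the register offset. For a
+ * hypothetical unit at 0xfe000000 with counter offset 0x38, the key is
+ * (0xfe000000ULL << 16) | 0x38 = 0xFE0000000038.
+ */
+static U64
+example_mmio_allowlist_key(U64 unit_ctrl_addr, U64 reg_offset)
+{
+	return (unit_ctrl_addr << 16) | (U16)reg_offset;
+}
+
+/*!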
+ * @fn extern VOID UNC_COMMON_Read_Uncore_Discovery_Tables(void)
+ * @brief Fetch the BAR for the uncore discovery table;
+ *        using the BAR, read the discovery table and populate it
+ *
+ * @param None
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+extern VOID
+UNC_COMMON_Read_Uncore_Discovery_Tables(void)
+{
+	SEP_MMIO_NODE mmio_node = { 0 };
+	UNCORE_DISCOVERY_TABLE pkg_table = NULL;
+	U32 i = 0;
+	U32 stride_size, max_blocks;
+	static DRV_PCI_DEVICE_ENTRY_NODE bar_list[MAX_PACKAGES];
+	UNCORE_DISCOVERY_DVSEC_CONFIG_NODE config_node;
+	UNCORE_DISCOVERY_GLOBAL_NODE global_node;
+
+	SEP_DRV_LOG_TRACE_IN("");
+
+	SEP_DRV_MEMSET((char *)bar_list, 0, (MAX_PACKAGES * sizeof(DRV_PCI_DEVICE_ENTRY_NODE)));
+	config_node = UNCORE_DISCOVERY_dvsec_config_node(&uncore_discovery_tables);
+	PCI_FIND_DVSEC_CAPABILITY_BAR(&config_node, bar_list);
+
+	for (i = 0; i < num_packages; i++) {
+		if (DRV_PCI_DEVICE_ENTRY_bar_address(&bar_list[i])) {
+			pkg_table = &UNCORE_DISCOVERY_table_list_entry(&uncore_discovery_tables, i);
+			SEP_DRV_MEMCPY(&UNCORE_DISCOVERY_TABLE_discovery_entry_node(pkg_table),
+				       &bar_list[i], sizeof(DRV_PCI_DEVICE_ENTRY_NODE));
+			SEP_DRV_MEMSET((char *)&mmio_node, 0, sizeof(SEP_MMIO_NODE));
+			PCI_Map_Memory(&mmio_node, DRV_PCI_DEVICE_ENTRY_bar_address(&bar_list[i]),
+				       sizeof(UNCORE_DISCOVERY_GLOBAL_NODE));
+			global_node = *(UNCORE_DISCOVERY_GLOBAL)((
+				char *)(UIOP)SEP_MMIO_NODE_virtual_address(&mmio_node));
+			stride_size = (U32)UNCORE_DISCOVERY_GLOBAL_block_stride(&global_node);
+			max_blocks = (U32)UNCORE_DISCOVERY_GLOBAL_max_blocks(&global_node);
+			SEP_DRV_MEMCPY(&UNCORE_DISCOVERY_TABLE_discovery_global_node(pkg_table),
+				       &global_node, sizeof(UNCORE_DISCOVERY_GLOBAL_NODE));
+			PCI_Unmap_Memory(&mmio_node);
+			SEP_DRV_LOG_TRACE("stride_size: %u, max_blocks: %u, gbl_ctrl_addr: 0x%llx",
+					  stride_size, max_blocks,
+					  UNCORE_DISCOVERY_GLOBAL_global_ctrl_addr(
+						  &UNCORE_DISCOVERY_TABLE_discovery_global_node(
+							  pkg_table)));
+			if ((stride_size * 8) != sizeof(UNCORE_DISCOVERY_UNIT_NODE)) {
+				SEP_DRV_LOG_WARNING("Discovery unit stride %u does not match sizeof(UNCORE_DISCOVERY_UNIT_NODE) %u",
+						    (stride_size * 8),
+						    (U32)sizeof(UNCORE_DISCOVERY_UNIT_NODE));
+				return;
+			}
+			if ((sizeof(UNCORE_DISCOVERY_UNIT_NODE) * MAX_DISCOVERY_UNCORE_UNITS) <
+			    (max_blocks * stride_size * 8)) {
+				SEP_DRV_LOG_WARNING("Discovery table size %u greater than memory allocated in the kernel %u",
+						    (max_blocks * stride_size * 8),
+						    (U32)(sizeof(UNCORE_DISCOVERY_UNIT_NODE) *
+							  MAX_DISCOVERY_UNCORE_UNITS));
+				return;
+			}
+			SEP_DRV_MEMSET((char *)&mmio_node, 0, sizeof(SEP_MMIO_NODE));
+			PCI_Map_Memory(&mmio_node,
+				       DRV_PCI_DEVICE_ENTRY_bar_address(&bar_list[i]) +
+					       (8 * stride_size),
+				       (max_blocks * stride_size * 8));
+			SEP_DRV_MEMCPY(UNCORE_DISCOVERY_TABLE_discovery_unit_node(pkg_table),
+				       (char *)(UIOP)SEP_MMIO_NODE_virtual_address(&mmio_node),
+				       (max_blocks * stride_size * 8));
+			PCI_Unmap_Memory(&mmio_node);
+			unc_common_Disc_Add_Allowlist_Entry(pkg_table);
+
+#if defined(MYDEBUG)
+			{
+				U32 j = 0;
+				for (j = 0; j < max_blocks; j++) {
+					SEP_DRV_LOG_TRACE("%u:type:%u,id:%u,ad:0x%llx", j,
+							  UNCORE_DISCOVERY_UNIT_unit_type(
+								  &UNCORE_DISCOVERY_TABLE_discovery_unit_node_entry(
+									  pkg_table, j)),
+							  UNCORE_DISCOVERY_UNIT_unit_id(
+								  &UNCORE_DISCOVERY_TABLE_discovery_unit_node_entry(
+									  pkg_table, j)),
+							  UNCORE_DISCOVERY_UNIT_unit_ctrl_addr(
+								  &UNCORE_DISCOVERY_TABLE_discovery_unit_node_entry(
+									  pkg_table, j)));
+				}
+			}
+#endif
+		}
+	}
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
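+/*
+ * Editor's note: an illustrative sketch (not used by the driver) of the
+ * discovery-table layout the reader above assumes -- a global header in the
+ * first block, then max_blocks unit entries, each block_stride * 8 bytes
+ * wide. Physical address of unit entry i relative to the BAR:
+ */
+static U64
+example_discovery_unit_addr(U64 bar_address, U32 block_stride, U32 i)
+{
+	U64 entry_bytes = (U64)block_stride * 8; /* stride is in 8-byte units */
+
+	/* skip the global header block, then index into the unit entries */
+	return bar_address + entry_bytes + ((U64)i * entry_bytes);
+}
+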
+/************************************************************/
+/*
+ * UNC common MSR based API
+ *
+ ************************************************************/
+
+/*!
+ * @fn VOID UNC_COMMON_MSR_Clean_Up(PVOID)
+ *
+ * @brief clear out the programming
+ *
+ * @param param - pointer to the device index
+ *
+ * @return None
+ */
+VOID
+UNC_COMMON_MSR_Clean_Up(VOID *param)
+{
+	U32 dev_idx;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+	dev_idx = *((U32 *)param);
+	FOR_EACH_REG_ENTRY_UNC (pecb, dev_idx, i) {
+		if (ECB_entries_clean_up_get(pecb, i)) {
+			SYS_Write_MSR(ECB_entries_reg_id(pecb, i), 0LL);
+		}
+	}
+	END_FOR_EACH_REG_ENTRY_UNC;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
diff --git a/drivers/platform/x86/sepdk/sep/unc_gt.c b/drivers/platform/x86/sepdk/sep/unc_gt.c
new file mode 100644
index 00000000000000..d116562462035c
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/unc_gt.c
@@ -0,0 +1,538 @@
+/****
+    Copyright (C) 2012 Intel Corporation. All Rights Reserved.
+
+    This file is part of SEP Development Kit.
+
+    SEP Development Kit is free software; you can redistribute it
+    and/or modify it under the terms of the GNU General Public License
+    version 2 as published by the Free Software Foundation.
+
+    SEP Development Kit is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    As a special exception, you may use this file as part of a free software
+    library without restriction. Specifically, if other files instantiate
+    templates or use macros or inline functions from this file, or you compile
+    this file and link it with other files to produce an executable, this
+    file does not by itself cause the resulting executable to be covered by
+    the GNU General Public License. This exception does not however
+    invalidate any other reasons why the executable file might be covered by
+    the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "inc/ecb_iterators.h"
+#include "inc/control.h"
+#include "inc/unc_common.h"
+#include "inc/utility.h"
+#include "inc/pci.h"
+#include "inc/unc_gt.h"
+
+extern U64 *read_counter_info;
+extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
+
+static U64 unc_gt_virtual_address = 0;
+static SEP_MMIO_NODE unc_gt_map;
+static U32 unc_gt_rc6_reg1;
+static U32 unc_gt_rc6_reg2;
+static U32 unc_gt_clk_gt_reg1;
+static U32 unc_gt_clk_gt_reg2;
+static U32 unc_gt_clk_gt_reg3;
+static U32 unc_gt_clk_gt_reg4;
+
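+/*
+ * Editor's note: an illustrative sketch (not used by the driver) of the BAR
+ * handling in unc_gt_Initialize() below -- the raw BAR value read from PCI
+ * config space carries type/flag bits in its low bits, which are masked off
+ * before the region is mapped. The mask below is hypothetical; the driver
+ * uses UNC_GT_BAR_MASK from unc_gt.h.
+ */
+static U64
+example_gt_bar_base(U64 raw_bar)
+{
+	return raw_bar & ~0xFULL; /* e.g. 0xdf000004 -> 0xdf000000 */
+}
+
+/*!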
+ * @fn static VOID unc_gt_Initialize(VOID*)
+ *
+ * @brief Read the GT MMIO BAR from PCI config space and map it
+ *
+ * @param param - pointer to the device index
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID
+unc_gt_Initialize(PVOID param)
+{
+	U64 bar;
+	U32 dev_idx = 0;
+	U32 cur_grp = 0;
+	ECB pecb = NULL;
+	MMIO_BAR_INFO mmio_bar_list = NULL;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+	if (param == NULL) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!param).");
+		return;
+	}
+
+	dev_idx = *((U32 *)param);
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0];
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+	if (!pecb) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+		return;
+	}
+
+	mmio_bar_list = &ECB_mmio_bar_list(pecb, 0);
+	if (!mmio_bar_list) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!mmio_bar_list).");
+		return;
+	}
+
+	bar = (U64)PCI_Read_U32(0, MMIO_BAR_INFO_bus_no(mmio_bar_list),
+				MMIO_BAR_INFO_dev_no(mmio_bar_list),
+				MMIO_BAR_INFO_func_no(mmio_bar_list),
+				MMIO_BAR_INFO_main_bar_offset(mmio_bar_list));
+
+	bar &= UNC_GT_BAR_MASK;
+
+	PCI_Map_Memory(&unc_gt_map, bar, GT_MMIO_SIZE);
+	unc_gt_virtual_address = SEP_MMIO_NODE_virtual_address(&unc_gt_map);
+
+	SEP_DRV_LOG_TRACE_OUT("");
+
+	return;
+}
+
+/*!
+ * @fn static VOID unc_gt_Write_PMU(VOID*)
+ *
+ * @brief Walk through the entries and write the value of the register accordingly.
+ *
+ * @param param - pointer to the device index
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID
+unc_gt_Write_PMU(VOID *param)
+{
+	U32 dev_idx;
+	ECB pecb_entry;
+	U32 offset_delta;
+	U32 tmp_value;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+	if (param == NULL) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!param).");
+		return;
+	}
+	if (!unc_gt_virtual_address) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!unc_gt_virtual_address).");
+		return;
+	}
+
+	dev_idx = *((U32 *)param);
+	pecb_entry = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[0];
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+
+	if (!CPU_STATE_system_master(pcpu)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master).");
+		return;
+	}
+
+	FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, i, PMU_OPERATION_READ) {
+		offset_delta = ECB_entries_reg_offset(pecb, i);
+		// this is needed for overflow detection of the accumulators.
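+		// (counter_mask caches the accumulator width so the read
+		//  paths can detect wrap-around: when a new reading is
+		//  smaller than the previous one, the delta is taken as
+		//  mask - prev + new.)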
+ if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { + LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = + (U64)ECB_entries_max_bits(pecb, i); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + //enable the global control to clear the counter first + SYS_Write_MSR(PERF_GLOBAL_CTRL, ECB_entries_reg_value(pecb_entry, 0)); + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, i, PMU_OPERATION_WRITE) { + offset_delta = ECB_entries_reg_offset(pecb, i); + if (offset_delta == PERF_GLOBAL_CTRL) { + continue; + } + PCI_MMIO_Write_U32(unc_gt_virtual_address, offset_delta, + GT_CLEAR_COUNTERS); + + SEP_DRV_LOG_TRACE("CCCR offset delta is 0x%x W is clear ctrs.", + offset_delta); + } + END_FOR_EACH_REG_UNC_OPERATION; + + //disable the counters + SYS_Write_MSR(PERF_GLOBAL_CTRL, 0LL); + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, i, PMU_OPERATION_WRITE) { + offset_delta = ECB_entries_reg_offset(pecb, i); + if (offset_delta == PERF_GLOBAL_CTRL) { + continue; + } + PCI_MMIO_Write_U32(unc_gt_virtual_address, offset_delta, + ((U32)ECB_entries_reg_value(pecb, i))); + tmp_value = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + // remove compiler warning on unused variables + if (tmp_value) { + } + SEP_DRV_LOG_TRACE("CCCR offset delta is 0x%x R is 0x%x W is 0x%llx.", + offset_delta, tmp_value, + ECB_entries_reg_value(pecb, i)); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_gt_Disable_RC6_Clock_Gating(VOID) + * + * @brief This snippet of code allows GT events to count by + * disabling settings related to clock gating/power + * @param none + * + * @return None + * + * Special Notes: + */ +static VOID +unc_gt_Disable_RC6_Clock_Gating(VOID) +{ + U32 tmp = 0; + + SEP_DRV_LOG_TRACE_IN(""); + + // Disable RC6 + unc_gt_rc6_reg1 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1); + tmp = unc_gt_rc6_reg1 | UNC_GT_RC6_REG1_OR_VALUE; + unc_gt_rc6_reg2 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2); + + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2, + UNC_GT_RC6_REG2_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1, tmp); + + SEP_DRV_LOG_TRACE("Original value of RC6 rc6_1 = 0x%x, rc6_2 = 0x%x.", + unc_gt_rc6_reg1, unc_gt_rc6_reg2); + + // Disable clock gating + // Save + unc_gt_clk_gt_reg1 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1); + unc_gt_clk_gt_reg2 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2); + unc_gt_clk_gt_reg3 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3); + unc_gt_clk_gt_reg4 = + PCI_MMIO_Read_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4); + + SEP_DRV_LOG_TRACE("Original value of RC6 ck_1 = 0x%x, ck_2 = 0x%x.", + unc_gt_clk_gt_reg1, unc_gt_clk_gt_reg2); + SEP_DRV_LOG_TRACE("Original value of RC6 ck_3 = 0x%x, ck_4 = 0x%x.", + unc_gt_clk_gt_reg3, unc_gt_clk_gt_reg4); + + // Disable + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1, + UNC_GT_GCPUNIT_REG1_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2, + UNC_GT_GCPUNIT_REG2_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3, + UNC_GT_GCPUNIT_REG3_VALUE); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4, + UNC_GT_GCPUNIT_REG4_VALUE); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! 
+ * @fn static VOID unc_gt_Restore_RC6_Clock_Gating(VOID) + * + * @brief This snippet of code restores the system settings + * for clock gating/power + * @param none + * + * @return None + * + * Special Notes: + */ +static VOID +unc_gt_Restore_RC6_Clock_Gating(VOID) +{ + SEP_DRV_LOG_TRACE_IN(""); + + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG2, + unc_gt_rc6_reg2); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_RC6_REG1, + unc_gt_rc6_reg1); + + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG1, + unc_gt_clk_gt_reg1); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG2, + unc_gt_clk_gt_reg2); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG3, + unc_gt_clk_gt_reg3); + PCI_MMIO_Write_U32(unc_gt_virtual_address, UNC_GT_GCPUNIT_REG4, + unc_gt_clk_gt_reg4); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_gt_Enable_PMU(PVOID) + * + * @brief Disable the clock gating and Set the global enable + * + * @param device_id + * + * @return None + * + * Special Notes: + */ +static VOID +unc_gt_Enable_PMU(PVOID param) +{ + U32 dev_idx; + ECB pecb; + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[0]; + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + + unc_gt_Disable_RC6_Clock_Gating(); + + if (pecb && GET_DRIVER_STATE() == DRV_STATE_RUNNING) { + SYS_Write_MSR(PERF_GLOBAL_CTRL, ECB_entries_reg_value(pecb, 0)); + SEP_DRV_LOG_TRACE("Enabling GT Global control = 0x%llx.", + ECB_entries_reg_value(pecb, 0)); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} +/*! + * @fn static VOID unc_gt_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when sampling/driver stops + * and restore system values for clock gating settings + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID +unc_gt_Disable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 cur_driver_state; + + SEP_DRV_LOG_TRACE_IN("Dummy param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + cur_driver_state = GET_DRIVER_STATE(); + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + unc_gt_Restore_RC6_Clock_Gating(); + + if (unc_gt_virtual_address && + (cur_driver_state == DRV_STATE_STOPPED || + cur_driver_state == DRV_STATE_PREPARE_STOP || + cur_driver_state == DRV_STATE_TERMINATING)) { + SYS_Write_MSR(PERF_GLOBAL_CTRL, 0LL); + PCI_Unmap_Memory(&unc_gt_map); + unc_gt_virtual_address = 0; + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! 
+ * @fn unc_gt_Trigger_Read(param, id, read_from_intr) + * + * @param param The read thread node to process + * @param id The id refers to the device index + * @param read_from_intr Trigger read coming from intr or timer + * + * @return None No return needed + * + * @brief Read the Uncore count data and store into the buffer param; + * + */ +static VOID +unc_gt_Trigger_Read(PVOID param, U32 id, U32 read_from_intr) +{ + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 offset_delta = 0; + U32 tmp_value_lo = 0; + U32 tmp_value_hi = 0; + U64 *data; + GT_CTR_NODE gt_ctr_value; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + package_num = core_to_package_map[this_cpu]; + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; + + GT_CTR_NODE_value_reset(gt_ctr_value); + + //Read in the counts into temporary buffer + FOR_EACH_REG_UNC_OPERATION (pecb, id, i, PMU_OPERATION_READ) { + // If the function is invoked from pmi, the event we are + // reading counts must be an unc intr event. + // If the function is invoked from timer, the event must not be + // an interrupt read event. + if ((read_from_intr && + !ECB_entries_unc_evt_intr_read_get(pecb, i)) || + (!read_from_intr && + ECB_entries_unc_evt_intr_read_get(pecb, i))) { + continue; + } + // Write GroupID based on interrupt read event or timer event into + // the respective groupd id offsets + if (read_from_intr) { + data = (U64 *)((S8 *)param + + ECB_group_id_offset_in_trigger_evt_desc( + pecb)); + } else { + data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); + } + *data = cur_grp + 1; + + offset_delta = ECB_entries_reg_offset(pecb, i); + tmp_value_lo = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + offset_delta = offset_delta + NEXT_ADDR_OFFSET; + tmp_value_hi = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, i)); + GT_CTR_NODE_low(gt_ctr_value) = tmp_value_lo; + GT_CTR_NODE_high(gt_ctr_value) = tmp_value_hi; + *data = GT_CTR_NODE_value(gt_ctr_value); + SEP_DRV_LOG_TRACE("DATA offset delta is 0x%x R is 0x%llx.", + offset_delta, + GT_CTR_NODE_value(gt_ctr_value)); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +static VOID +unc_gt_Read_PMU_Data(PVOID param, U32 dev_idx) +{ + U32 j = 0; + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 offset_delta = 0; + U32 tmp_value_lo = 0; + U32 tmp_value_hi = 0; + U64 *buffer = (U64 *)param; + CPU_STATE pcpu; + GT_CTR_NODE gt_ctr_value; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + + package_num = core_to_package_map[this_cpu]; + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + GT_CTR_NODE_value_reset(gt_ctr_value); + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, i, PMU_OPERATION_READ) { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package(pecb, i)); + offset_delta = ECB_entries_reg_offset(pecb, i); + tmp_value_lo = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + offset_delta = offset_delta + NEXT_ADDR_OFFSET; + tmp_value_hi = + PCI_MMIO_Read_U32(unc_gt_virtual_address, offset_delta); + GT_CTR_NODE_low(gt_ctr_value) = tmp_value_lo; + 
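+		// assemble the 64-bit GT count from the two adjacent 32-bit
+		// registers read above (low half first, high half at
+		// NEXT_ADDR_OFFSET)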
GT_CTR_NODE_high(gt_ctr_value) = tmp_value_hi; + buffer[j] = GT_CTR_NODE_value(gt_ctr_value); + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE unc_gt_dispatch = { + .init = unc_gt_Initialize, // initialize + .fini = NULL, // destroy + .write = unc_gt_Write_PMU, // write + .freeze = unc_gt_Disable_PMU, // freeze + .restart = unc_gt_Enable_PMU, // restart + .read_data = unc_gt_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap_group + .read_lbrs = NULL, // read_lbrs + .cleanup = NULL, // cleanup + .hw_errata = NULL, // hw_errata + .read_power = NULL, // read_power + .check_overflow_errata = NULL, // check_overflow_errata + .read_counts = NULL, // read_counts + .check_overflow_gp_errata = NULL, // check_overflow_gp_errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform_info + .trigger_read = unc_gt_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + diff --git a/drivers/platform/x86/sepdk/sep/unc_mmio.c b/drivers/platform/x86/sepdk/sep/unc_mmio.c new file mode 100644 index 00000000000000..33eed51e132b36 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_mmio.c @@ -0,0 +1,2807 @@ +/**** + Copyright (C) 2012 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/unc_common.h" +#include "inc/utility.h" +#include "inc/pci.h" + +extern U64 *read_counter_info; +extern U64 *prev_counter_data; +extern DRV_CONFIG drv_cfg; +extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; +U64 regbar_physical_address = 0; +#define MASK_32BIT 0xffffffff +#define MASK_64BIT 0xffffffff00000000ULL + +#define IS_MASTER(device_type, cpu) \ + (((device_type) == DRV_SINGLE_INSTANCE) ? \ + CPU_STATE_system_master(&pcb[cpu]) : \ + CPU_STATE_socket_master(&pcb[(cpu)])) +#define GET_PACKAGE_NUM(device_type, cpu) \ + (((device_type) == DRV_SINGLE_INSTANCE) ? 
0 : core_to_package_map[cpu]) +#define IS_64BIT(mask) (((mask) >> 32) != 0) + +#define EVENT_COUNTER_MAX_TRY 30 + +struct FPGA_CONTROL_NODE_S { + union { + struct { + U64 rst_ctrs : 1; + U64 rsvd1 : 7; + U64 frz : 1; + U64 rsvd2 : 7; + U64 event_select : 4; + U64 port_id : 2; + U64 rsvd3 : 1; + U64 port_enable : 1; + U64 rsvd4 : 40; + } bits; + U64 bit_field; + } u; +} control_node; + +/*! + * @fn static VOID unc_mmio_single_bar_Write_PMU(VOID*) + * + * @brief Initial write of PMU registers + * Walk through the enties and write the value of the register accordingly. + * When current_group = 0, then this is the first time this routine is called, + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID +unc_mmio_single_bar_Write_PMU(VOID *param) +{ + U32 dev_idx = 0; + U32 offset_delta = 0; + U32 event_id = 0; + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 entry = 0; + U32 dev_node = 0; + U64 tmp_value = 0; + U64 virtual_addr = 0; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + MMIO_BAR_INFO mmio_bar_info; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!is_master)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_WRITE) { + PCI_MMIO_Write_U32(virtual_addr, ECB_entries_reg_id(pecb, idx), + (U32)ECB_entries_reg_value(pecb, idx)); + SEP_DRV_LOG_TRACE("va=0x%llx, ri=%u, rv=0x%llx", virtual_addr, + ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_value(pecb, idx)); + } + END_FOR_EACH_REG_UNC_OPERATION; + + if (DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!event_based_counts)."); + return; + } + + idx_w = ECB_operations_register_start(pecb_entry, PMU_OPERATION_WRITE); + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + if (ECB_entries_counter_type(pecb, idx) == + PROGRAMMABLE_COUNTER) { + continue; + } + mmio_bar_info = &ECB_mmio_bar_list(pecb, 0); + if (!mmio_bar_info) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!mmio_bar_info)."); + break; + } + + if (ECB_entries_reg_offset(pecb, idx) >= + MMIO_BAR_INFO_base_offset_for_mmio(mmio_bar_info)) { + offset_delta = ECB_entries_reg_offset(pecb, idx) - + (U32)MMIO_BAR_INFO_base_offset_for_mmio( + mmio_bar_info); + } else { + offset_delta = ECB_entries_reg_offset(pecb, idx); + } + SEP_DRV_LOG_TRACE("od=0x%x", offset_delta); + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device, find virtual_addr per mapping. 
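+			// (each unit id indexes its own entry in the
+			//  driver's per-device virtual address table)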
+			entry = ECB_entries_unit_id(pecb, idx);
+			virtual_addr = virtual_address_table(dev_node, entry);
+		}
+
+		if ((ECB_entries_counter_type(pecb, idx) ==
+		     PROG_FREERUN_COUNTER) &&
+		    (ECB_entries_unit_id(pecb, idx) == 0)) {
+			//Write event code before reading
+			PCI_MMIO_Write_U64(virtual_addr,
+					   ECB_entries_reg_id(pecb, idx_w),
+					   ECB_entries_reg_value(pecb, idx_w));
+			control_node.u.bit_field =
+				ECB_entries_reg_value(pecb, idx_w);
+			event_code = (U32)control_node.u.bits.event_select;
+			idx_w++;
+		}
+
+		// this is needed for overflow detection of the accumulators.
+		if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) {
+			if (ECB_entries_counter_type(pecb, idx) ==
+			    PROG_FREERUN_COUNTER) {
+				do {
+					if (counter > EVENT_COUNTER_MAX_TRY) {
+						break;
+					}
+					tmp_value = PCI_MMIO_Read_U64(
+						virtual_addr, offset_delta);
+					counter++;
+				} while (event_code != (tmp_value >> 60));
+			}
+			tmp_value =
+				PCI_MMIO_Read_U64(virtual_addr, offset_delta);
+		} else {
+			tmp_value =
+				PCI_MMIO_Read_U32(virtual_addr, offset_delta);
+		}
+		tmp_value &= (U64)ECB_entries_max_bits(pecb, idx);
+
+		LWPMU_DEVICE_prev_value(
+			&devices[dev_idx])[package_num][event_id] = tmp_value;
+
+		SEP_DRV_LOG_TRACE("cpu=%u, device=%u, package=%u, entry=%u, event_id=%u, value=0x%llx",
+				  this_cpu, dev_idx, package_num, entry,
+				  event_id, tmp_value);
+		event_id++;
+
+		if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
+			LWPMU_DEVICE_counter_mask(&devices[dev_idx]) =
+				(U64)ECB_entries_max_bits(pecb, idx);
+		}
+	}
+	END_FOR_EACH_REG_UNC_OPERATION;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/*!
+ * @fn static VOID unc_mmio_mch_regbar_Write_PMU(VOID*)
+ *
+ * @brief Initial write of PMU registers
+ *        Walk through the entries and write the value of the register accordingly.
+ *
+ * @param param - pointer to the device index
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID
+unc_mmio_mch_regbar_Write_PMU (
+	VOID *param
+)
+{
+	U32 dev_idx = 0;
+	U32 this_cpu = 0;
+	DEV_UNC_CONFIG pcfg_unc;
+	U64 regbar_reg_id = 0;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+	dev_idx = *((U32*)param);
+	this_cpu = CONTROL_THIS_CPU();
+	pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]);
+	if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!is_master).");
+		return;
+	}
+
+	if (!regbar_physical_address) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!regbar_physical_address).");
+		return;
+	}
+
+	FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_WRITE) {
+
+		regbar_reg_id = regbar_physical_address + ECB_entries_reg_id(pecb, idx);
+		PCI_Write_To_Memory_Address_U64(regbar_reg_id, ECB_entries_reg_value(pecb, idx));
+		SEP_DRV_LOG_TRACE("va=0x%llx, ri=%u, rv=0x%llx", regbar_reg_id, ECB_entries_reg_id(pecb, idx), ECB_entries_reg_value(pecb, idx));
+	} END_FOR_EACH_REG_UNC_OPERATION;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/*!
+ * @fn static VOID unc_mmio_multiple_bar_Write_PMU(VOID*)
+ *
+ * @brief Initial write of PMU registers
+ *        Walk through the entries and write the value of the register accordingly.
+ * When current_group = 0, then this is the first time this routine is called, + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID +unc_mmio_multiple_bar_Write_PMU(VOID *param) +{ + U32 dev_idx = 0; + U32 event_id = 0; + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 dev_node = 0; + U32 entry = 0; + U32 num_mmio_secondary_bar = 0; + U32 offset = 0; + U64 reg_value = 0; + U64 virtual_addr = 0; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!is_master)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + FOR_EACH_SCHEDULED_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_WRITE) { + if (!IS_MMIO_MAP_VALID( + dev_node, + entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + continue; + } + + virtual_addr = virtual_address_table( + dev_node, entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx)); + SEP_DRV_LOG_TRACE("e=%u, bi=%u, va=0x%llx", entry, + ECB_entries_reg_bar_index(pecb, idx), + virtual_addr); + + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL && + ECB_entries_reg_prog_type(pecb, idx) == PMU_REG_PROG_MSR) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), 0LL); + continue; + } + + offset = ECB_entries_reg_offset(pecb, idx); + reg_value = ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_MASK_WRITE) { + reg_value = PCI_MMIO_Read_U32(virtual_addr, offset); + reg_value &= ECB_entries_aux_read_mask(pecb, idx); + reg_value |= (U32)ECB_entries_reg_value(pecb, idx); + } + + SEP_DRV_LOG_TRACE("va=0x%llx, ro=0x%x, rv=0x%x", virtual_addr, + offset, reg_value); + PCI_MMIO_Write_U32(virtual_addr, offset, (U32)reg_value); + } + END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION; + + if (DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!event_based_counts)."); + return; + } + + FOR_EACH_SCHEDULED_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_READ) { + if (!IS_MMIO_MAP_VALID( + dev_node, + entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + continue; + } + + virtual_addr = virtual_address_table( + dev_node, entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx)); + SEP_DRV_LOG_TRACE("e=%u, bi=%u, va=0x%llx", entry, + ECB_entries_reg_bar_index(pecb, idx), + virtual_addr); + + // this is needed for overflow detection of the accumulators. 
+ if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + reg_value = PCI_MMIO_Read_U64( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + } else { + reg_value = PCI_MMIO_Read_U32( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + } + reg_value &= (U64)ECB_entries_max_bits(pecb, idx); + + LWPMU_DEVICE_prev_value( + &devices[dev_idx])[package_num][event_id] = reg_value; + + SEP_DRV_LOG_TRACE("cpu=%u, device=%u, package=%u, event_id=%u, value=0x%llu", + this_cpu, dev_idx, package_num, event_id, + reg_value); + event_id++; + + if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { + LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = + (U64)ECB_entries_max_bits(pecb, idx); + } + } + END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_mmio_single_bar_Enable_PMU(PVOID) + * + * @brief Capture the previous values to calculate delta later. + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void +unc_mmio_single_bar_Enable_PMU(PVOID param) +{ + U32 j = 0; + U32 this_cpu = 0; + U32 dev_idx = 0; + U32 package_num = 0; + U32 offset_delta = 0; + U32 cur_grp = 0; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 num_events = 0; + U32 entry = 0; + U32 num_pkgs = num_packages; + U32 dev_node = 0; + U64 virtual_addr = 0; + U32 reg_val = 0; + U64 *buffer = prev_counter_data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + MMIO_BAR_INFO mmio_bar_info; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb) for group %u.", + cur_grp); + return; + } + SEP_DRV_LOG_TRACE("enable PMU for group %u", cur_grp); + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + if (DEV_UNC_CONFIG_device_type(pcfg_unc) == DRV_SINGLE_INSTANCE) { + num_pkgs = 1; + } + + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + // NOTE THAT the enable function currently captures previous values + // for EMON collection to avoid unnecessary memory copy. 
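+	// (a worked example of the delta later computed from this baseline:
+	//  for a 48-bit counter, mask = 0xFFFFFFFFFFFF; prev = 0xFFFFFFFFF000
+	//  and a new reading of 0x100 give delta = mask - prev + new = 0x10FF.)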
+ // capturing previous values enable freerunning counter delta computation + if (DRV_CONFIG_emon_mode(drv_cfg)) { + num_events = ECB_num_events(pecb_entry); + idx_w = ECB_operations_register_start(pecb_entry, + PMU_OPERATION_WRITE); + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_READ) { + if (ECB_entries_counter_type(pecb, idx) == + PROGRAMMABLE_COUNTER) { + continue; + } + mmio_bar_info = &ECB_mmio_bar_list(pecb, 0); + if (!mmio_bar_info) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!mmio_bar_info)."); + break; + } + + if (ECB_entries_reg_offset(pecb, idx) >= + MMIO_BAR_INFO_base_offset_for_mmio(mmio_bar_info)) { + offset_delta = + ECB_entries_reg_offset(pecb, idx) - + (U32)MMIO_BAR_INFO_base_offset_for_mmio( + mmio_bar_info); + } else { + offset_delta = + ECB_entries_reg_offset(pecb, idx); + } + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device, find virtual_addr per mapping. + entry = ECB_entries_unit_id(pecb, idx); + virtual_addr = + virtual_address_table(dev_node, entry); + } + + if ((ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) && + (ECB_entries_unit_id(pecb, idx) == 0)) { + PCI_MMIO_Write_U64( + virtual_addr, + ECB_entries_reg_id(pecb, idx_w), + ECB_entries_reg_value(pecb, idx_w)); + control_node.u.bit_field = + ECB_entries_reg_value(pecb, idx_w); + event_code = + (U32)control_node.u.bits.event_select; + idx_w++; + } + + if ((ECB_entries_event_scope(pecb, idx) == + PACKAGE_EVENT) || + (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT)) { + if (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system( + pecb, idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + if (IS_64BIT((U64)(ECB_entries_max_bits( + pecb, idx)))) { + if (ECB_entries_counter_type(pecb, + idx) == + PROG_FREERUN_COUNTER) { + do { + if (counter > + EVENT_COUNTER_MAX_TRY) { + break; + } + buffer[j] = PCI_MMIO_Read_U64( + virtual_addr, + offset_delta); + counter++; + } while (event_code != + (buffer[j] >> 60)); + } + buffer[j] = PCI_MMIO_Read_U64( + virtual_addr, offset_delta); + + } else { + buffer[j] = PCI_MMIO_Read_U32( + virtual_addr, offset_delta); + } + buffer[j] &= + (U64)ECB_entries_max_bits(pecb, idx); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + } + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_ENABLE) { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL && + ECB_entries_reg_prog_type(pecb, idx) == PMU_REG_PROG_MSR) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_value(pecb, idx)); + continue; + } + + // FIXME: EDRAM uncore + // 1. control register is GT_PMON_CNT_MISC_CTRL 32bit GTTMMADR+11_7268 + // 2. control register is not for write after read pattern + if (ECB_entries_reg_rw_type(pecb, idx) == PMU_REG_RW_WRITE) { + PCI_MMIO_Write_U32( + virtual_addr, ECB_entries_reg_id(pecb, idx), + (U32)ECB_entries_reg_value(pecb, idx)); + } + + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_MMIO_Read_U32( + virtual_addr, ECB_entries_reg_id(pecb, idx)); + reg_val &= ECB_entries_reg_value(pecb, idx); + PCI_MMIO_Write_U32(virtual_addr, + ECB_entries_reg_id(pecb, idx), + reg_val); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! 
+ * @fn static VOID unc_mmio_mch_regbar_Enable_PMU(PVOID) + * + * @brief Enable the PMU to start capturing sample + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void +unc_mmio_mch_regbar_Enable_PMU ( + PVOID param +) +{ + U32 this_cpu = 0; + U32 dev_idx = 0; + U32 package_num = 0; + U32 cur_grp = 0; + DEV_UNC_CONFIG pcfg_unc; + U64 regbar_reg_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32*)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + SEP_DRV_LOG_TRACE("enable PMU for group %u", cur_grp); + + if (!regbar_physical_address) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!regbar_physical_address)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_ENABLE) { + if (ECB_entries_reg_rw_type(pecb, idx) == PMU_REG_RW_WRITE) { + regbar_reg_id = regbar_physical_address + ECB_entries_reg_id(pecb,idx); + PCI_Write_To_Memory_Address_U64(regbar_reg_id, ECB_entries_reg_value(pecb,idx)); + } + + } END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_mmio_multiple_bar_Enable_PMU(PVOID) + * + * @brief Capture the previous values to calculate delta later. + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void +unc_mmio_multiple_bar_Enable_PMU(PVOID param) +{ + U32 j = 0; + U32 this_cpu = 0; + U32 dev_idx = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 dev_node = 0; + U32 entry = 0; + U32 num_mmio_secondary_bar = 0; + U64 virtual_addr = 0; + U64 reg_val = 0; + U64 *buffer = prev_counter_data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb) for group %u.", + cur_grp); + return; + } + SEP_DRV_LOG_TRACE("enable PMU for group %u", cur_grp); + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + // NOTE THAT the enable function currently captures previous values + // for EMON collection to avoid unnecessary memory copy. 
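+	// (the snapshot is taken on the package-master CPU, just before the
+	//  enable writes below take effect.)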
+ // capturing previous values enable freerunning counter delta computation + if (DRV_CONFIG_emon_mode(drv_cfg)) { + FOR_EACH_SCHEDULED_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_READ) { + if (!IS_MMIO_MAP_VALID( + dev_node, entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index( + pecb, idx))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + continue; + } + + virtual_addr = virtual_address_table( + dev_node, + entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx)); + SEP_DRV_LOG_TRACE("e=%u,bi=%u,va=0x%llx", entry, + ECB_entries_reg_bar_index(pecb, idx), + virtual_addr); + + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + buffer[j] = PCI_MMIO_Read_U64( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + } else { + buffer[j] = PCI_MMIO_Read_U32( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + } + buffer[j] &= (U64)ECB_entries_max_bits(pecb, idx); + } + END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION; + } + + FOR_EACH_SCHEDULED_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_ENABLE) { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL && + ECB_entries_reg_prog_type(pecb, idx) == PMU_REG_PROG_MSR) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_value(pecb, idx)); + continue; + } + + if (!IS_MMIO_MAP_VALID( + dev_node, + entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + continue; + } + + virtual_addr = virtual_address_table( + dev_node, entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx)); + SEP_DRV_LOG_TRACE("e=%u, bi=%u, va=0x%llx", entry, + ECB_entries_reg_bar_index(pecb, idx), + virtual_addr); + + if (ECB_entries_reg_rw_type(pecb, idx) == PMU_REG_RW_WRITE) { + PCI_MMIO_Write_U32( + virtual_addr, ECB_entries_reg_offset(pecb, idx), + (U32)ECB_entries_reg_value(pecb, idx)); + } + + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_MASK_WRITE) { + reg_val = PCI_MMIO_Read_U32( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + reg_val &= ECB_entries_aux_read_mask(pecb, idx); + reg_val |= (U32)ECB_entries_reg_value(pecb, idx); + SEP_DRV_LOG_TRACE("va=0x%llx, ri=0x%x, ro=0x%x, rv=0x%x", + virtual_addr, + ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_offset(pecb, idx), + (U32)reg_val); + PCI_MMIO_Write_U32(virtual_addr, + ECB_entries_reg_offset(pecb, idx), + (U32)reg_val); + } + + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_MMIO_Read_U32( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + reg_val &= ECB_entries_reg_value(pecb, idx); + SEP_DRV_LOG_TRACE("va=0x%llx, ri=0x%x, ro=0x%x, rv=0x%x", + virtual_addr, + ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_offset(pecb, idx), + (U32)reg_val); + PCI_MMIO_Write_U32(virtual_addr, + ECB_entries_reg_offset(pecb, idx), + (U32)reg_val); + } + } + END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_mmio_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when you stop sampling. 
+ * + * @param None + * + * @return None + * + * Special Notes: + */ +static void +unc_mmio_single_bar_Disable_PMU(PVOID param) +{ + U32 dev_idx = 0; + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 dev_node = 0; + U32 entry = 0; + U64 virtual_addr = 0; + U32 reg_val = 0; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = + GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[dev_idx])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_DISABLE) { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL && + ECB_entries_reg_prog_type(pecb, idx) == PMU_REG_PROG_MSR) { + continue; + } + + // FIXME: EDRAM uncore + // 1. control register is GT_PMON_CNT_MISC_CTRL 32bit GTTMMADR+11_7268 + // 2. control register is not for write after read pattern + if (ECB_entries_reg_rw_type(pecb, idx) == PMU_REG_RW_WRITE) { + PCI_MMIO_Write_U32( + virtual_addr, ECB_entries_reg_id(pecb, idx), + (U32)ECB_entries_reg_value(pecb, idx)); + } + + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_MMIO_Read_U32( + virtual_addr, ECB_entries_reg_id(pecb, idx)); + reg_val |= ECB_entries_reg_value(pecb, idx); + PCI_MMIO_Write_U32(virtual_addr, + ECB_entries_reg_id(pecb, idx), + reg_val); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_mmio_mch_regbar_Disable_PMU(PVOID) + * + * @brief Disable the PMU + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void +unc_mmio_mch_regbar_Disable_PMU ( + PVOID param +) +{ + U32 dev_idx = 0; + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + DEV_UNC_CONFIG pcfg_unc; + U64 regbar_reg_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32*)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[dev_idx])[package_num]; + + if (!regbar_physical_address) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!regbar_physical_address)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_DISABLE) { + if (ECB_entries_reg_rw_type(pecb, idx) == PMU_REG_RW_WRITE) { + regbar_reg_id = regbar_physical_address + ECB_entries_reg_id(pecb,idx); + PCI_Write_To_Memory_Address_U64(regbar_reg_id, ECB_entries_reg_value(pecb,idx)); + } + } END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! 
+ * @fn static VOID unc_mmio_multiple_bar_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when you stop sampling. + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void +unc_mmio_multiple_bar_Disable_PMU(PVOID param) +{ + U32 dev_idx = 0; + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 dev_node = 0; + U32 entry = 0; + U32 num_mmio_secondary_bar = 0; + U64 virtual_addr = 0; + U64 reg_val = 0; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = + GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[dev_idx])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + FOR_EACH_SCHEDULED_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_DISABLE) { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL && + ECB_entries_reg_prog_type(pecb, idx) == PMU_REG_PROG_MSR) { + continue; + } + + if (!IS_MMIO_MAP_VALID( + dev_node, + entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + continue; + } + + virtual_addr = virtual_address_table( + dev_node, entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx)); + SEP_DRV_LOG_TRACE("e=%u, bi=%u, va=0x%llx", entry, + ECB_entries_reg_bar_index(pecb, idx), + virtual_addr); + + if (ECB_entries_reg_rw_type(pecb, idx) == PMU_REG_RW_WRITE) { + PCI_MMIO_Write_U32( + virtual_addr, ECB_entries_reg_offset(pecb, idx), + (U32)ECB_entries_reg_value(pecb, idx)); + } + + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_MASK_WRITE) { + reg_val = PCI_MMIO_Read_U32( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + reg_val &= ECB_entries_aux_read_mask(pecb, idx); + reg_val |= (U32)ECB_entries_reg_value(pecb, idx); + PCI_MMIO_Write_U32(virtual_addr, + ECB_entries_reg_offset(pecb, idx), + (U32)reg_val); + } + + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_MMIO_Read_U32( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + reg_val |= ECB_entries_reg_value(pecb, idx); + PCI_MMIO_Write_U32(virtual_addr, + ECB_entries_reg_offset(pecb, idx), + (U32)reg_val); + } + } + END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void unc_mmio_single_bar_Trigger_Read(param, id, read_from_intr) + * + * @param param Pointer to populate read data + * @param id Device index + * @param read_from_intr Read data from interrupt or timer + * + * @return None No return needed + * + * @brief Read the Uncore data from counters and store into buffer + */ +static VOID +unc_mmio_single_bar_Trigger_Read(PVOID param, U32 id, U32 read_from_intr) +{ + U32 this_cpu = 0; + U32 cur_grp = 0; + U32 index = 0; + U32 offset_delta = 0; + U32 package_num = 0; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 entry = 0; + U32 dev_node = 0; + U64 diff = 0; + U64 value = 0ULL; + U64 virtual_addr = 0; + U64 *data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + MMIO_BAR_INFO mmio_bar_info; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[id]); + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; + pecb_entry = LWPMU_DEVICE_PMU_register_data(&devices[id])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + //Read in the counts into temporary buffer + idx_w = ECB_operations_register_start(pecb_entry, PMU_OPERATION_WRITE); + FOR_EACH_REG_UNC_OPERATION (pecb, id, idx, PMU_OPERATION_READ) { + // If the function is invoked from pmi, the event we are + // reading counts must be an unc intr event. + // If the function is invoked from timer, the event must not be + // an interrupt read event. 
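+		// (so each event is serviced by exactly one reader:
+		//  interrupt events are skipped on timer reads and timer
+		//  events on interrupt reads; index still advances for
+		//  skipped entries to keep the prev/acc bookkeeping aligned.)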
+ if ((read_from_intr && + !ECB_entries_unc_evt_intr_read_get(pecb, idx)) || + (!read_from_intr && + ECB_entries_unc_evt_intr_read_get(pecb, idx))) { + index++; + continue; + } + + // Write GroupID based on interrupt read event or timer event into + // the respective groupd id offsets + if (read_from_intr) { + data = (U64 *)((S8 *)param + + ECB_group_id_offset_in_trigger_evt_desc( + pecb)); + } else { + data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); + } + *data = cur_grp + 1; + + mmio_bar_info = &ECB_mmio_bar_list(pecb, 0); + + if (!mmio_bar_info) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!mmio_bar_info)."); + break; + } + + if (ECB_entries_reg_offset(pecb, idx) >= + MMIO_BAR_INFO_base_offset_for_mmio(mmio_bar_info)) { + offset_delta = (U32)(ECB_entries_reg_offset(pecb, idx) - + MMIO_BAR_INFO_base_offset_for_mmio( + mmio_bar_info)); + } else { + offset_delta = ECB_entries_reg_offset(pecb, idx); + } + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device + entry = ECB_entries_unit_id(pecb, idx); + virtual_addr = virtual_address_table(dev_node, entry); + } + + if ((ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) && + (ECB_entries_unit_id(pecb, idx) == 0)) { + PCI_MMIO_Write_U64(virtual_addr, + ECB_entries_reg_id(pecb, idx_w), + ECB_entries_reg_value(pecb, idx_w)); + control_node.u.bit_field = + ECB_entries_reg_value(pecb, idx_w); + event_code = (U32)control_node.u.bits.event_select; + idx_w++; + } + + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + if (ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) { + do { + if (counter > EVENT_COUNTER_MAX_TRY) { + break; + } + value = PCI_MMIO_Read_U64(virtual_addr, + offset_delta); + counter++; + } while (event_code != (value >> 60)); + } + value = PCI_MMIO_Read_U64(virtual_addr, offset_delta); + } else { + value = PCI_MMIO_Read_U32(virtual_addr, offset_delta); + } + value &= (U64)ECB_entries_max_bits(pecb, idx); + + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, idx)); + //check for overflow if not a static counter + if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { + *data = value; + } else { + if (value < LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]) { + diff = LWPMU_DEVICE_counter_mask(&devices[id]) - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + diff += value; + } else { + diff = value - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + } + LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index] += + diff; + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index] = value; + *data = LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index]; + } + index++; + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
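+ * MCH REGBAR counters are not read through a per-device MMIO mapping;
+ * each counter's address is simply the REGBAR physical base plus the
+ * register offset from the ECB. A sketch of the read done below, using
+ * the driver's own helpers ("addr" is an illustrative name):
+ *
+ *     U64 addr = regbar_physical_address + ECB_entries_reg_offset(pecb, idx);
+ *     PCI_Read_From_Memory_Address_U64(addr, &value);
+ *     value &= (U64)ECB_entries_max_bits(pecb, idx);
+ *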
+ * @fn void unc_mmio_mch_regbar_Trigger_Read(param, id, read_from_intr)
+ *
+ * @param param Pointer to populate read data
+ * @param id Device index
+ * @param read_from_intr Read data from interrupt or timer
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore data from counters and store into buffer
+ */
+static VOID
+unc_mmio_mch_regbar_Trigger_Read(PVOID param, U32 id, U32 read_from_intr)
+{
+ U32 this_cpu = 0;
+ U32 cur_grp = 0;
+ U32 index = 0;
+ U32 package_num = 0;
+ U64 diff = 0;
+ U64 value = 0ULL;
+ U64 *data;
+ DEV_UNC_CONFIG pcfg_unc;
+ U64 regbar_reg_id = 0;
+
+ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);
+
+ this_cpu = CONTROL_THIS_CPU();
+ pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[id]);
+
+ package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc),
+ this_cpu);
+ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
+
+ //Read in the counts into temporary buffer
+ FOR_EACH_REG_UNC_OPERATION (pecb, id, idx, PMU_OPERATION_READ) {
+ // If the function is invoked from pmi, the event we are
+ // reading counts must be an unc intr event.
+ // If the function is invoked from timer, the event must not be
+ // an interrupt read event.
+ if ((read_from_intr &&
+ !ECB_entries_unc_evt_intr_read_get(pecb, idx)) ||
+ (!read_from_intr &&
+ ECB_entries_unc_evt_intr_read_get(pecb, idx))) {
+ index++;
+ continue;
+ }
+
+ // Write GroupID based on interrupt read event or timer event into
+ // the respective group id offsets
+ if (read_from_intr) {
+ data = (U64 *)((S8 *)param +
+ ECB_group_id_offset_in_trigger_evt_desc(
+ pecb));
+ } else {
+ data = (U64 *)((S8 *)param + ECB_group_offset(pecb));
+ }
+ *data = cur_grp + 1;
+
+ regbar_reg_id = regbar_physical_address +
+ ECB_entries_reg_offset(pecb, idx);
+
+ PCI_Read_From_Memory_Address_U64(regbar_reg_id, &value);
+ value &= (U64)ECB_entries_max_bits(pecb, idx);
+
+ data = (U64 *)((S8 *)param +
+ ECB_entries_counter_event_offset(pecb, idx));
+ //check for overflow and accumulate the delta
+ if (value < LWPMU_DEVICE_prev_value(
+ &devices[id])[package_num][index]) {
+ diff = LWPMU_DEVICE_counter_mask(&devices[id]) -
+ LWPMU_DEVICE_prev_value(
+ &devices[id])[package_num][index];
+ diff += value;
+ } else {
+ diff = value -
+ LWPMU_DEVICE_prev_value(
+ &devices[id])[package_num][index];
+ }
+ LWPMU_DEVICE_acc_value(
+ &devices[id])[package_num][cur_grp][index] += diff;
+ LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] =
+ value;
+ *data = LWPMU_DEVICE_acc_value(
+ &devices[id])[package_num][cur_grp][index];
+ index++;
+ }
+ END_FOR_EACH_REG_UNC_OPERATION;
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
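+ * Devices with multiple secondary BARs keep their mappings in one flat
+ * table, indexed per package entry and per BAR. The slot computation the
+ * routine below relies on is (sketch; "slot" is an illustrative name):
+ *
+ *     U32 slot = entry * num_mmio_secondary_bar +
+ *                ECB_entries_reg_bar_index(pecb, idx);
+ *     if (IS_MMIO_MAP_VALID(dev_node, slot))
+ *             virtual_addr = virtual_address_table(dev_node, slot);
+ *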
+ * @fn void unc_mmio_multiple_bar_Trigger_Read(param, id, read_from_intr) + * + * @param param Pointer to populate read data + * @param id Device index + * @param read_from_intr Read data from interrupt or timer + * + * @return None No return needed + * + * @brief Read the Uncore data from counters and store into buffer + */ +static VOID +unc_mmio_multiple_bar_Trigger_Read(PVOID param, U32 id, U32 read_from_intr) +{ + U32 this_cpu = 0; + U32 cur_grp = 0; + U32 index = 0; + U32 package_num = 0; + U32 idx_w = 0; + U32 entry = 0; + U32 dev_node = 0; + U32 num_mmio_secondary_bar = 0; + U64 diff = 0; + U64 value = 0ULL; + U64 virtual_addr = 0; + U64 *data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[id]); + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; + pecb_entry = LWPMU_DEVICE_PMU_register_data(&devices[id])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + //Read in the counts into temporary buffer + idx_w = ECB_operations_register_start(pecb_entry, PMU_OPERATION_WRITE); + FOR_EACH_SCHEDULED_REG_UNC_OPERATION (pecb, id, idx, + PMU_OPERATION_READ) { + // If the function is invoked from pmi, the event we are + // reading counts must be an unc intr event. + // If the function is invoked from timer, the event must not be + // an interrupt read event. + if ((read_from_intr && + !ECB_entries_unc_evt_intr_read_get(pecb, idx)) || + (!read_from_intr && + ECB_entries_unc_evt_intr_read_get(pecb, idx))) { + index++; + continue; + } + // Write GroupID based on interrupt read event or timer event into + // the respective groupd id offsets + if (read_from_intr) { + data = (U64 *)((S8 *)param + + ECB_group_id_offset_in_trigger_evt_desc( + pecb)); + } else { + data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); + } + *data = cur_grp + 1; + + if (!IS_MMIO_MAP_VALID( + dev_node, + entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + continue; + } + + virtual_addr = virtual_address_table( + dev_node, entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx)); + + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + value = PCI_MMIO_Read_U64(virtual_addr, + ECB_entries_reg_offset(pecb, + idx)); + } else { + value = PCI_MMIO_Read_U32(virtual_addr, + ECB_entries_reg_offset(pecb, + idx)); + } + value &= (U64)ECB_entries_max_bits(pecb, idx); + + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, idx)); + if (value < + LWPMU_DEVICE_prev_value(&devices[id])[package_num][index]) { + diff = LWPMU_DEVICE_counter_mask(&devices[id]) - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + diff += value; + } else { + diff = value - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + } + LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index] += diff; + LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] = + value; + *data = LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index]; + index++; + } + END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* 
------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_single_bar_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. + * + */ +static VOID +unc_mmio_single_bar_Read_PMU_Data(PVOID param, U32 dev_idx) +{ + U32 j = 0; + U32 offset_delta = 0; + U32 cur_grp = 0; + U32 idx_w = 0; + U32 event_code = 0; + U32 counter = 0; + U32 num_events = 0; + U32 package_num = 0; + U32 entry = 0; + U32 dev_node = 0; + U32 num_pkgs = num_packages; + U32 this_cpu = 0; + U64 tmp_value = 0ULL; + U64 virtual_addr = 0; + U64 *buffer = (U64 *)param; + U64 *prev_buffer = prev_counter_data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + MMIO_BAR_INFO mmio_bar_info; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + if (DEV_UNC_CONFIG_device_type(pcfg_unc) == DRV_SINGLE_INSTANCE) { + num_pkgs = 1; + } + + num_events = ECB_num_events(pecb_entry); + + idx_w = ECB_operations_register_start(pecb_entry, PMU_OPERATION_WRITE); + + if (!IS_MMIO_MAP_VALID(dev_node, entry)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + return; + } + + virtual_addr = virtual_address_table(dev_node, entry); + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + mmio_bar_info = &ECB_mmio_bar_list(pecb, 0); + if (!mmio_bar_info) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!mmio_bar_info)."); + break; + } + + if (ECB_entries_reg_offset(pecb, idx) >= + MMIO_BAR_INFO_base_offset_for_mmio(mmio_bar_info)) { + offset_delta = ECB_entries_reg_offset(pecb, idx) - + (U32)MMIO_BAR_INFO_base_offset_for_mmio( + mmio_bar_info); + } else { + offset_delta = ECB_entries_reg_offset(pecb, idx); + } + + if ((DEV_UNC_CONFIG_device_type(pcfg_unc) == + DRV_SINGLE_INSTANCE) && + (GET_NUM_MAP_ENTRIES(dev_node) > 1)) { + // multiple MMIO mapping per device, find virtual_addr per mapping. 
+ entry = ECB_entries_unit_id(pecb, idx); + virtual_addr = virtual_address_table(dev_node, entry); + } + + if ((ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) && + (ECB_entries_unit_id(pecb, idx) == 0)) { + PCI_MMIO_Write_U64(virtual_addr, + ECB_entries_reg_id(pecb, idx_w), + ECB_entries_reg_value(pecb, idx_w)); + control_node.u.bit_field = + ECB_entries_reg_value(pecb, idx_w); + event_code = (U32)control_node.u.bits.event_select; + idx_w++; + } + + if ((ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) || + (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT)) { + if (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system( + pecb, idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + if (ECB_entries_counter_type(pecb, idx) == + PROG_FREERUN_COUNTER) { + do { + if (counter > + EVENT_COUNTER_MAX_TRY) { + break; + } + tmp_value = PCI_MMIO_Read_U64( + virtual_addr, + offset_delta); + counter++; + } while (event_code != + (tmp_value >> 60)); + } + tmp_value = PCI_MMIO_Read_U64(virtual_addr, + offset_delta); + } else { + tmp_value = PCI_MMIO_Read_U32(virtual_addr, + offset_delta); + } + tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); + if (ECB_entries_counter_type(pecb, idx) == + STATIC_COUNTER) { + buffer[j] = tmp_value; + } else { + if (tmp_value >= prev_buffer[j]) { + buffer[j] = tmp_value - prev_buffer[j]; + } else { + buffer[j] = tmp_value + + (ECB_entries_max_bits(pecb, + idx) - + prev_buffer[j]); + } + } + SEP_DRV_LOG_TRACE("j=%u, v=%llu", j, buffer[j]); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_mch_regbar_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. 
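+ *        Counts land at a per-package slot in the EMON buffer. Assuming
+ *        EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET simply linearizes
+ *        (package, offset) pairs, the index below works out to (sketch):
+ *
+ *            j = package_num * num_entries_per_package + offset_in_package;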
+ * + */ +static VOID +unc_mmio_mch_regbar_Read_PMU_Data ( + PVOID param, + U32 dev_idx +) +{ + U32 j = 0; + U32 cur_grp = 0; + U32 package_num = 0; + U32 this_cpu = 0; + U64 tmp_value = 0ULL; + U64 *buffer = (U64 *)param; + U64 *prev_buffer = prev_counter_data; + DEV_UNC_CONFIG pcfg_unc; + U64 regbar_reg_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + + if (!regbar_physical_address) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!regbar_physical_address)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION(pecb, dev_idx, idx, PMU_OPERATION_READ) { + + regbar_reg_id = regbar_physical_address + ECB_entries_reg_offset(pecb, idx); + + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET(package_num, EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package(pecb, idx)); + + PCI_Read_From_Memory_Address_U64(regbar_reg_id, &tmp_value); + tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); + if (tmp_value >= prev_buffer[j]) { + buffer[j] = tmp_value - prev_buffer[j]; + } + else { + buffer[j] = tmp_value + (ECB_entries_max_bits(pecb, idx) - prev_buffer[j]); + } + SEP_DRV_LOG_TRACE("j=%u, v=%llu", j, buffer[j]); + } END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_multiple_bar_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. 
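+ *        For these EMON reads the buffer holds deltas against the snapshot
+ *        taken at enable time (prev_counter_data). A sketch of the
+ *        wrap-safe subtraction used below, assuming max_bits is the
+ *        counter's maximum value:
+ *
+ *            delta = (v >= prev) ? (v - prev) : (v + (max_bits - prev));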
+ * + */ +static VOID +unc_mmio_multiple_bar_Read_PMU_Data(PVOID param, U32 dev_idx) +{ + U32 j = 0; + U32 this_cpu = 0; + U32 cur_grp = 0; + U32 package_num = 0; + U32 entry = 0; + U32 dev_node = 0; + U32 num_mmio_secondary_bar = 0; + U64 tmp_value = 0ULL; + U64 virtual_addr = 0; + U64 *buffer = (U64 *)param; + U64 *prev_buffer = prev_counter_data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + FOR_EACH_SCHEDULED_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_READ) { + if (!IS_MMIO_MAP_VALID( + dev_node, + entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MMIO_MAP_VALID)."); + continue; + } + + virtual_addr = virtual_address_table( + dev_node, entry * num_mmio_secondary_bar + + ECB_entries_reg_bar_index(pecb, idx)); + + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package(pecb, idx)); + + if (IS_64BIT((U64)(ECB_entries_max_bits(pecb, idx)))) { + tmp_value = PCI_MMIO_Read_U64( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + } else { + tmp_value = PCI_MMIO_Read_U32( + virtual_addr, + ECB_entries_reg_offset(pecb, idx)); + } + tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); + if (tmp_value >= prev_buffer[j]) { + buffer[j] = tmp_value - prev_buffer[j]; + } else { + buffer[j] = + tmp_value + (ECB_entries_max_bits(pecb, idx) - + prev_buffer[j]); + } + + SEP_DRV_LOG_TRACE("j=%u, v=%llu", j, buffer[j]); + } + END_FOR_EACH_SCHEDULED_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
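+ * Mapping follows the usual BAR sequence: read the BAR register from PCI
+ * config space, mask off its flag bits, add the device's fixed MMIO
+ * offset, then map the result. A condensed sketch of the loop below,
+ * using the driver's helpers (error handling omitted):
+ *
+ *     bar = PCI_Read_U64(domain_no, bus_no, dev_no, func_no, bar_offset);
+ *     bar &= MMIO_BAR_INFO_main_bar_mask(mmio_bar_info);
+ *     PCI_Map_Memory(&node, bar + base_offset_for_mmio, map_size);
+ *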
+ * @fn unc_mmio_single_bar_Initialize(param)
+ *
+ * @param param Pointer to the device index
+ *
+ * @return None No return needed
+ *
+ * @brief Do the mapping of the physical address (to do the invalidates in the TLB)
+ * NOTE: this should never be done with SMP call
+ *
+ */
+static VOID
+unc_mmio_single_bar_Initialize(PVOID param)
+{
+ U32 dev_idx = 0;
+ U32 cur_grp = 0;
+ U32 dev_node = 0;
+ U32 i = 0;
+ U32 j = 0;
+ U32 use_default_busno = 0;
+ U32 entries = 0;
+ U32 domain_no = 0;
+ U64 physical_address = 0;
+ U64 bar = 0;
+ ECB pecb = NULL;
+ MMIO_BAR_INFO mmio_bar_info;
+
+ SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+ dev_idx = *((U32 *)param);
+ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0];
+ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+
+ if (!pecb) {
+ for (j = 0;
+ j < (U32)LWPMU_DEVICE_em_groups_count(&devices[dev_idx]);
+ j++) {
+ pecb = LWPMU_DEVICE_PMU_register_data(
+ &devices[dev_idx])[j];
+ if (!pecb) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (!pecb) {
+ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+ return;
+ }
+ dev_node = ECB_dev_node(pecb);
+
+ if (IS_MMIO_MAP_VALID(dev_node, 0)) {
+ SEP_DRV_LOG_INIT_TRACE_OUT("Early exit (device[%d] node %d already mapped).",
+ dev_idx, dev_node);
+ return;
+ }
+
+ // use busno found from topology scan if available
+ // otherwise use the default one
+ if (dev_node) {
+ entries = GET_NUM_MAP_ENTRIES(dev_node);
+ SEP_DRV_LOG_TRACE("# of entries - %u", entries);
+ SEP_DRV_LOG_WARNING_TRACE_OUT("PCB node not available in group %u!",
+ cur_grp);
+ }
+ if (entries == 0) {
+ use_default_busno = 1;
+ entries = 1; // this could be a client system, which does not go through the scan
+ UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 1;
+ UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = 1;
+ }
+
+ if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) {
+ // it is better to allocate space in the beginning
+ UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) =
+ CONTROL_Allocate_Memory(entries *
+ sizeof(SEP_MMIO_NODE));
+ if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) {
+ SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory).");
+ return;
+ }
+ // clear the whole allocation, matching the size allocated above
+ SEP_DRV_MEMSET(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])),
+ 0, entries * sizeof(SEP_MMIO_NODE));
+ }
+
+ UNC_PCIDEV_num_mmio_main_bar_per_entry(&(unc_pcidev_map[dev_node])) = 1;
+ UNC_PCIDEV_num_mmio_secondary_bar_per_entry(
+ &(unc_pcidev_map[dev_node])) = 1;
+
+ for (i = 0; i < entries; i++) {
+ mmio_bar_info = &ECB_mmio_bar_list(pecb, 0);
+
+ if (!use_default_busno) {
+ if (IS_BUS_MAP_VALID(dev_node, i)) {
+ MMIO_BAR_INFO_bus_no(mmio_bar_info) =
+ GET_BUS_MAP(dev_node, i);
+ domain_no = GET_DOMAIN_MAP(dev_node, i);
+ } else {
+ SEP_DRV_LOG_TRACE_OUT("PCI device map not found. Early exit.");
+ return;
+ }
+ } else {
+ domain_no = 0;
+ }
+
+ SEP_DRV_LOG_TRACE("D=0x%x, b=0x%lx, d=0x%x, f=0x%x, o=0x%llx",
+ domain_no,
+ MMIO_BAR_INFO_bus_no(mmio_bar_info),
+ MMIO_BAR_INFO_dev_no(mmio_bar_info),
+ MMIO_BAR_INFO_func_no(mmio_bar_info),
+ MMIO_BAR_INFO_main_bar_offset(mmio_bar_info));
+ bar = PCI_Read_U64(
+ domain_no, MMIO_BAR_INFO_bus_no(mmio_bar_info),
+ MMIO_BAR_INFO_dev_no(mmio_bar_info),
+ MMIO_BAR_INFO_func_no(mmio_bar_info),
+ MMIO_BAR_INFO_main_bar_offset(mmio_bar_info));
+
+ bar &= MMIO_BAR_INFO_main_bar_mask(mmio_bar_info);
+
+ physical_address =
+ bar + MMIO_BAR_INFO_base_offset_for_mmio(mmio_bar_info);
+
+ SEP_DRV_LOG_TRACE("pa=0x%llx", physical_address);
+
+ PCI_Map_Memory(&UNC_PCIDEV_mmio_map_entry(
+ &(unc_pcidev_map[dev_node]), i),
+ physical_address,
+ MMIO_BAR_INFO_map_size_for_mmio(mmio_bar_info));
+
+ SEP_DRV_LOG_TRACE("va=0x%llx",
+ virtual_address_table(dev_node, i));
+ }
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn unc_mmio_mch_regbar_Initialize(param)
+ *
+ * @param param Pointer to the device index
+ *
+ * @return None No return needed
+ *
+ * @brief Obtain the base address of REGBAR
+ *
+ */
+static VOID
+unc_mmio_mch_regbar_Initialize(PVOID param)
+{
+ U32 dev_idx = 0;
+ U32 cur_grp = 0;
+ U32 dev_node = 0;
+ U32 i = 0;
+ U32 j = 0;
+ U32 use_default_busno = 0;
+ U32 entries = 0;
+ U32 domain_no = 0;
+ U64 physical_address = 0;
+ U64 bar = 0;
+ ECB pecb = NULL;
+ MMIO_BAR_INFO mmio_bar_info;
+
+ SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+ dev_idx = *((U32 *)param);
+ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0];
+ pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp];
+
+ if (!pecb) {
+ for (j = 0;
+ j < (U32)LWPMU_DEVICE_em_groups_count(&devices[dev_idx]);
+ j++) {
+ pecb = LWPMU_DEVICE_PMU_register_data(
+ &devices[dev_idx])[j];
+ if (!pecb) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (!pecb) {
+ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+ return;
+ }
+ dev_node = ECB_dev_node(pecb);
+
+ // use busno found from topology scan if available
+ // otherwise use the default one
+ if (dev_node) {
+ entries = GET_NUM_MAP_ENTRIES(dev_node);
+ SEP_DRV_LOG_TRACE("# of entries - %u", entries);
+ SEP_DRV_LOG_WARNING_TRACE_OUT("PCB node not available in group %u!",
+ cur_grp);
+ }
+ if (entries == 0) {
+ use_default_busno = 1;
+ entries = 1; // this could be a client system, which does not go through the scan
+ }
+
+ for (i = 0; i < entries; i++) {
+ mmio_bar_info = &ECB_mmio_bar_list(pecb, 0);
+
+ if (!use_default_busno) {
+ if (IS_BUS_MAP_VALID(dev_node, i)) {
+ MMIO_BAR_INFO_bus_no(mmio_bar_info) =
+ GET_BUS_MAP(dev_node, i);
+ domain_no = GET_DOMAIN_MAP(dev_node, i);
+ } else {
+ SEP_DRV_LOG_TRACE_OUT("PCI device map not found. 
Early exit."); + return; + } + } + else { + domain_no = 0; + } + + SEP_DRV_LOG_TRACE("D=0x%x, b=0x%lx, d=0x%x, f=0x%x, o=0x%llx", domain_no, MMIO_BAR_INFO_bus_no(mmio_bar_info), MMIO_BAR_INFO_dev_no(mmio_bar_info), MMIO_BAR_INFO_func_no(mmio_bar_info), MMIO_BAR_INFO_main_bar_offset(mmio_bar_info)); + bar = PCI_Read_U64(domain_no, MMIO_BAR_INFO_bus_no(mmio_bar_info), + MMIO_BAR_INFO_dev_no(mmio_bar_info), + MMIO_BAR_INFO_func_no(mmio_bar_info), + MMIO_BAR_INFO_main_bar_offset(mmio_bar_info)); + + bar &= MMIO_BAR_INFO_main_bar_mask(mmio_bar_info); + + physical_address = bar + MMIO_BAR_INFO_base_offset_for_mmio(mmio_bar_info); + + SEP_DRV_LOG_TRACE("pa=0x%llx", physical_address); + + PCI_Read_From_Memory_Address_U64(physical_address, ®bar_physical_address); + + regbar_physical_address &= MMIO_BAR_INFO_secondary_bar_mask(mmio_bar_info); + + SEP_DRV_LOG_TRACE("REGBAR pa=0x%llx", regbar_physical_address); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_fpga_Initialize(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Do the mapping of the physical address (to do the invalidates in the TLB) + * NOTE: this should never be done with SMP call + * + */ +static VOID +unc_mmio_fpga_Initialize(PVOID param) +{ +#if defined(DRV_EM64T) + U32 id = 0; + U32 j = 0; + U32 offset = 0; + U32 dev_idx = 0; + U32 cur_grp = 0; + U32 busno = 0; + U32 page_len = 4096; + U32 package_num = 0; + U32 dev_node = 0; + U32 entries = 0; + U32 bus_list[2] = { 0x5e, 0xbe }; + S32 next_offset = -1; + U64 phys_addr = 0; + U64 virt_addr = 0; + U64 dfh = 0; + ECB pecb = NULL; + SEP_MMIO_NODE tmp_map = { 0 }; + MMIO_BAR_INFO mmio_bar_info; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + for (j = 0; + j < (U32)LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); + j++) { + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[j]; + if (!pecb) { + continue; + } else { + break; + } + } + } + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb); + + entries = GET_NUM_MAP_ENTRIES(dev_node); + if (entries == 0) { + entries = num_packages; + } + + if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) { + // it is better to allocate space in the beginning + UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) = + CONTROL_Allocate_Memory(entries * + sizeof(SEP_MMIO_NODE)); + if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory)."); + return; + } + SEP_DRV_MEMSET(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])), + 0, (entries * sizeof(SEP_MMIO_NODE))); + UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = 0; + UNC_PCIDEV_max_entries(&(unc_pcidev_map[dev_node])) = entries; + } else { + if (virtual_address_table(dev_node, 0) != 0) { + SEP_DRV_LOG_INIT_TRACE_OUT("Early exit (device[%d] node %d already mapped).", + dev_idx, dev_node); + return; + } + } + + UNC_PCIDEV_num_mmio_main_bar_per_entry(&(unc_pcidev_map[dev_node])) = 1; + UNC_PCIDEV_num_mmio_secondary_bar_per_entry( + &(unc_pcidev_map[dev_node])) = 1; + + for (package_num = 0; package_num < num_packages; package_num++) { + if (package_num < 2) { + busno = bus_list[package_num]; + } else { + busno = 0; + } + + mmio_bar_info = 
&ECB_mmio_bar_list(pecb, 0); + + // For FPGA, use single domain for now. + phys_addr = PCI_Read_U64( + 0, busno, MMIO_BAR_INFO_dev_no(mmio_bar_info), + MMIO_BAR_INFO_func_no(mmio_bar_info), + MMIO_BAR_INFO_main_bar_offset(mmio_bar_info)); + phys_addr &= MMIO_BAR_INFO_main_bar_mask(mmio_bar_info); + if (package_num == 0) { + PCI_Map_Memory(&tmp_map, phys_addr, 8 * page_len); + virt_addr = SEP_MMIO_NODE_virtual_address(&tmp_map); + while (next_offset != 0) { + dfh = PCI_MMIO_Read_U64((U64)virt_addr, offset); + next_offset = (U32)((dfh >> 16) & 0xffffff); + id = (U32)(dfh & 0xfff); + if (offset && (id == MMIO_BAR_INFO_feature_id( + mmio_bar_info))) { + break; + } + offset += next_offset; + } + PCI_Unmap_Memory(&tmp_map); + } + phys_addr += offset; + PCI_Map_Memory( + &UNC_PCIDEV_mmio_map_entry(&(unc_pcidev_map[dev_node]), + package_num), + phys_addr, 8 * page_len); + UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node]))++; + } + + SEP_DRV_LOG_TRACE_OUT(""); +#endif + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_multiple_bar_Initialize(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Do the mapping of the physical address (to do the invalidates in the TLB) + * NOTE: this should never be done with SMP call + * + */ +static VOID +unc_mmio_multiple_bar_Initialize(PVOID param) +{ + U32 mmio_base = 0; + U32 mem_offset = 0; + U32 mem_bar = 0; + U32 dev_idx = 0; + U32 idx = 0; + U32 cur_grp = 0; + U32 dev_node = 0; + U32 i = 0; + U32 j = 0; + U32 entries = 0; + U32 domain_no = 0; + U32 num_mmio_secondary_bar = 0; + U64 virtual_address = 0; + U64 physical_address = 0; + ECB pecb = NULL; + MMIO_BAR_INFO mmio_bar_info; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + for (j = 0; + j < (U32)LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); + j++) { + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[j]; + if (!pecb) { + continue; + } else { + break; + } + } + } + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + dev_node = ECB_dev_node(pecb); + + if (IS_MMIO_MAP_VALID(dev_node, 0)) { + SEP_DRV_LOG_INIT_TRACE_OUT("Early exit (device[%d] node %d already mapped).", + dev_idx, dev_node); + return; + } + + // use busno found from topology scan if available + // otherwise use the default one + entries = GET_NUM_MAP_ENTRIES(dev_node); + if (entries == 0) { + entries = num_packages; + } + if (!GET_NUM_MAP_ENTRIES(dev_node)) { + UNC_PCIDEV_num_entries(&(unc_pcidev_map[dev_node])) = entries; + } + + if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) { + // it is better to allocate space in the beginning + UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) = + CONTROL_Allocate_Memory( + entries * ECB_num_mmio_secondary_bar(pecb) * + sizeof(SEP_MMIO_NODE)); + if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory)."); + return; + } + SEP_DRV_MEMSET(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])), + 0, entries * sizeof(U64)); + } + + UNC_PCIDEV_num_mmio_main_bar_per_entry(&(unc_pcidev_map[dev_node])) = 1; + UNC_PCIDEV_num_mmio_secondary_bar_per_entry( + &(unc_pcidev_map[dev_node])) = ECB_num_mmio_secondary_bar(pecb); + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + if 
(ECB_unc_discovery_mode_get(pecb)) { + for (idx = ECB_operations_register_start( + pecb, PMU_OPERATION_INITIALIZE); + idx < ECB_operations_register_start( + pecb, PMU_OPERATION_INITIALIZE) + + ECB_operations_register_len( + pecb, PMU_OPERATION_INITIALIZE); + idx++) { + if (ECB_entries_scheduled(pecb, idx) != TRUE) { + continue; + } + physical_address = ECB_entries_phys_addr(pecb, idx); + if (ECB_entries_reg_type(pecb, idx) == + PMU_REG_DISCOVERY_BASE && + physical_address) { + i = ECB_entries_reg_package_id(pecb, idx) * + num_mmio_secondary_bar + + ECB_entries_bar_index(pecb, idx); + PCI_Map_Memory( + &UNC_PCIDEV_mmio_map_entry( + &(unc_pcidev_map[dev_node]), i), + physical_address, + ECB_entries_reg_data_size(pecb, idx)); + virtual_address = SEP_MMIO_NODE_virtual_address( + &UNC_PCIDEV_mmio_map_entry( + &(unc_pcidev_map[dev_node]), + i)); + SEP_DRV_LOG_TRACE("s_id=%u, unit_id=%u, pa=0x%llx, va=0x%llx", + ECB_entries_reg_package_id( + pecb, idx), + ECB_entries_bar_index(pecb, + idx), + physical_address, + virtual_address); + } + } + SEP_DRV_LOG_TRACE_OUT(""); + return; + } + + for (i = 0; i < entries; i++) { + for (j = 0; j < num_mmio_secondary_bar; j++) { + mmio_bar_info = &ECB_mmio_bar_list(pecb, j); + + if (IS_BUS_MAP_VALID(dev_node, i)) { + MMIO_BAR_INFO_bus_no(mmio_bar_info) = + GET_BUS_MAP(dev_node, i); + domain_no = GET_DOMAIN_MAP(dev_node, i); + } else { + SEP_DRV_LOG_TRACE_OUT("PCI device map not found. Early exit."); + return; + } + + SEP_DRV_LOG_TRACE("D=0x%x,b=0x%lx,d=0x%x,f=0x%x,p.o=0x%llx,s.o=0x%llx", + domain_no, + MMIO_BAR_INFO_bus_no(mmio_bar_info), + MMIO_BAR_INFO_dev_no(mmio_bar_info), + MMIO_BAR_INFO_func_no(mmio_bar_info), + MMIO_BAR_INFO_main_bar_offset( + mmio_bar_info), + MMIO_BAR_INFO_secondary_bar_offset( + mmio_bar_info)); + mmio_base = PCI_Read_U32( + domain_no, MMIO_BAR_INFO_bus_no(mmio_bar_info), + MMIO_BAR_INFO_dev_no(mmio_bar_info), + MMIO_BAR_INFO_func_no(mmio_bar_info), + MMIO_BAR_INFO_main_bar_offset(mmio_bar_info)); + mem_offset = PCI_Read_U32( + domain_no, MMIO_BAR_INFO_bus_no(mmio_bar_info), + MMIO_BAR_INFO_dev_no(mmio_bar_info), + MMIO_BAR_INFO_func_no(mmio_bar_info), + MMIO_BAR_INFO_secondary_bar_offset( + mmio_bar_info)); + mem_bar = (U32)((mmio_base & + (U32)MMIO_BAR_INFO_main_bar_mask( + mmio_bar_info)) + << MMIO_BAR_INFO_main_bar_shift( + mmio_bar_info)) | + ((mem_offset & + (U32)MMIO_BAR_INFO_secondary_bar_mask( + mmio_bar_info)) + << MMIO_BAR_INFO_secondary_bar_shift( + mmio_bar_info)); + + physical_address = + mem_bar + MMIO_BAR_INFO_base_offset_for_mmio( + mmio_bar_info); + + PCI_Map_Memory( + &UNC_PCIDEV_mmio_map_entry( + &(unc_pcidev_map[dev_node]), + i * num_mmio_secondary_bar + j), + physical_address, + MMIO_BAR_INFO_map_size_for_mmio(mmio_bar_info)); + + virtual_address = SEP_MMIO_NODE_virtual_address( + &UNC_PCIDEV_mmio_map_entry( + &(unc_pcidev_map[dev_node]), + i * num_mmio_secondary_bar + j)); + + SEP_DRV_LOG_TRACE("i=%u, j=%u, pa=0x%llx, va=0x%llx", i, + j, physical_address, virtual_address); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
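+ * The PMM start address register reports the region in 64MB units, so the
+ * byte address is a mask-and-shift away. For example, assuming a shift of
+ * 26 (2^26 bytes = 64MB; an illustrative value, the real shift comes from
+ * the caller):
+ *
+ *     U64 field = value & pmm_spa_mask;  // start address in 64MB units
+ *     U64 start = field << 26;           // e.g. 0x4 -> 0x10000000 (256MB)
+ *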
+ * @fn unc_mmio_Get_PMM_Base (U32 domain, + * U32 bus, + * U32 device, + * U32 function, + * U32 offset, + * U64 pmm_spa_mask, + * U8 pmm_spa_shift) + * + * @param U32 domain - domain number + * U32 bus - bus number + * U32 device - device number + * U32 function - function number + * U32 offset - offset + * U64 pmm_spa_mask - mask to apply on reg value + * U8 pmm_spa_shift - shift to apply on reg value + * + * @return pmm_spa_start - pmem system start address for CSR region + * + * @brief Obtain pmem system start address for CSR region + * + */ +static U64 +unc_mmio_Get_PMM_Base(U32 domain, + U32 bus, + U32 device, + U32 function, + U32 offset, + U64 pmm_spa_mask, + U8 pmm_spa_shift) +{ + U64 value; + U64 pmm_spa_start_64mb_gran; + U64 pmm_spa_start; + + SEP_DRV_LOG_TRACE_IN(""); + + // obtain PMM SPA region details + value = PCI_Read_U64(domain, bus, device, function, offset); + // obtain PMM SPA start address in 64MB granularity + pmm_spa_start_64mb_gran = value & pmm_spa_mask; + // convert to bytes for exact address + pmm_spa_start = pmm_spa_start_64mb_gran << pmm_spa_shift; + + SEP_DRV_LOG_TRACE("DBDFO %x:%x:%x:%x:%x | value: %llx, pmm_spa_start_64mb_gran: %llx, pmm_spa_start: %llx", + domain, bus, device, function, offset, value, + pmm_spa_start_64mb_gran, pmm_spa_start); + + SEP_DRV_LOG_TRACE_OUT("PMM SPA region scan successful"); + + return pmm_spa_start; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_mmio_pmm_Initialize(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Do the mapping of the physical address (to do the invalidates in the TLB) + * NOTE: this should never be done with SMP call + * + */ +static VOID +unc_mmio_pmm_Initialize(PVOID param) +{ + U32 dev_idx = 0; + U32 cur_grp = 0; + U32 dev_node = 0; + U32 i = 0; + U32 j = 0; + U32 entries = 0; + U32 domain_no = 0; + U32 map_size = 0; + U32 num_mmio_secondary_bar = 0; + U64 physical_address_base = 0; + U64 physical_address = 0; + U64 base_offset_for_mmio = 0; + ECB pecb = NULL; + MMIO_BAR_INFO mmio_bar_info; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + for (j = 0; + j < (U32)LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); + j++) { + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[j]; + if (!pecb) { + continue; + } else { + break; + } + } + } + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + dev_node = ECB_dev_node(pecb); + + if (IS_MMIO_MAP_VALID(dev_node, 0)) { + SEP_DRV_LOG_INIT_TRACE_OUT("Early exit (device[%d] node %d already mapped).", + dev_idx, dev_node); + return; + } + + // use busno found from topology scan if available + // otherwise exit + if (dev_node) { + entries = GET_NUM_MAP_ENTRIES(dev_node); + SEP_DRV_LOG_TRACE("# if entries - %u", entries); + SEP_DRV_LOG_WARNING_TRACE_OUT("PCB node not available in group %u!", + cur_grp); + } + if (entries == 0) { + SEP_DRV_LOG_INIT_TRACE_OUT("Early exit (device[%d] node %d (pmm) not available).", + dev_idx, dev_node); + return; + } + + UNC_PCIDEV_num_mmio_main_bar_per_entry(&(unc_pcidev_map[dev_node])) = 1; + UNC_PCIDEV_num_mmio_secondary_bar_per_entry( + &(unc_pcidev_map[dev_node])) = ECB_num_mmio_secondary_bar(pecb); + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + if 
(!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) { + // it is better to allocate space in the beginning + UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) = + CONTROL_Allocate_Memory(entries * + num_mmio_secondary_bar * + sizeof(SEP_MMIO_NODE)); + if (UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])) == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (No Memory)."); + return; + } + SEP_DRV_MEMSET(UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node])), + 0, sizeof(U64)); + } + + if (!IS_BUS_MAP_VALID(dev_node, 0)) { + SEP_DRV_LOG_TRACE_OUT("PCI device map not found. Early exit."); + return; + } + + for (i = 0; i < entries; i++) { + for (j = 0; j < num_mmio_secondary_bar; j++) { + mmio_bar_info = &ECB_mmio_bar_list(pecb, i); + + // find the base address only for the 1st iteration + if (!i && !j) { + if (IS_BUS_MAP_VALID(dev_node, i)) { + MMIO_BAR_INFO_bus_no(mmio_bar_info) = + GET_BUS_MAP(dev_node, i); + domain_no = GET_DOMAIN_MAP(dev_node, i); + } else { + SEP_DRV_LOG_TRACE_OUT("PCI device map not found. Early exit."); + return; + } + SEP_DRV_LOG_TRACE("D=0x%x, b=0x%x, d=0x%x, f=0x%x, o=0x%x", + domain_no, + MMIO_BAR_INFO_bus_no( + mmio_bar_info), + MMIO_BAR_INFO_dev_no( + mmio_bar_info), + MMIO_BAR_INFO_func_no( + mmio_bar_info), + MMIO_BAR_INFO_main_bar_offset( + mmio_bar_info)); + physical_address_base = unc_mmio_Get_PMM_Base( + domain_no, + MMIO_BAR_INFO_bus_no(mmio_bar_info), + MMIO_BAR_INFO_dev_no(mmio_bar_info), + MMIO_BAR_INFO_func_no(mmio_bar_info), + MMIO_BAR_INFO_main_bar_offset( + mmio_bar_info), + MMIO_BAR_INFO_main_bar_mask( + mmio_bar_info), + MMIO_BAR_INFO_main_bar_shift( + mmio_bar_info)); + } + base_offset_for_mmio = + (j % 2 == 0) ? + MMIO_BAR_INFO_base_offset_for_mmio( + mmio_bar_info) : + MMIO_BAR_INFO_secondary_bar_offset( + mmio_bar_info); + physical_address = + physical_address_base + base_offset_for_mmio; + map_size = + MMIO_BAR_INFO_map_size_for_mmio(mmio_bar_info); + + SEP_DRV_LOG_TRACE("pa=0x%llx, ms=%x", physical_address, + map_size); + + PCI_Map_Memory(&UNC_PCIDEV_mmio_map_entry( + &(unc_pcidev_map[dev_node]), + i * num_mmio_secondary_bar + j), + physical_address, map_size); + + SEP_DRV_LOG_TRACE("va=0x%llx", + virtual_address_table( + dev_node, + i * num_mmio_secondary_bar + + j)); + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
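+ * Teardown walks the same flat (entry, secondary BAR) table that the
+ * Initialize routines filled in, unmapping only the slots whose mapping
+ * is valid (sketch of the nested loop below):
+ *
+ *     for (i = 0; i < entries; i++)
+ *             for (j = 0; j < num_mmio_secondary_bar; j++)
+ *                     if (IS_MMIO_MAP_VALID(dev_node, i * num_mmio_secondary_bar + j))
+ *                             PCI_Unmap_Memory(&UNC_PCIDEV_mmio_map_entry(
+ *                                     &(unc_pcidev_map[dev_node]),
+ *                                     i * num_mmio_secondary_bar + j));
+ *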
+ * @fn unc_mmio_Destroy(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Invalidate the entry in TLB of the physical address + * NOTE: this should never be done with SMP call + * + */ +static VOID +unc_mmio_Destroy(PVOID param) +{ + U32 dev_idx = 0; + U32 i = 0; + U32 j = 0; + U32 dev_node = 0; + U32 cur_grp = 0; + U32 entries = 0; + U32 num_mmio_secondary_bar = 0; + ECB pecb = NULL; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[0]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[dev_idx])[cur_grp]; + + if (!pecb) { + for (j = 0; + j < (U32)LWPMU_DEVICE_em_groups_count(&devices[dev_idx]); + j++) { + pecb = LWPMU_DEVICE_PMU_register_data( + &devices[dev_idx])[j]; + if (!pecb) { + continue; + } else { + break; + } + } + } + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + dev_node = ECB_dev_node(pecb); + + if (!UNC_PCIDEV_mmio_map(&(unc_pcidev_map[dev_node]))) { + SEP_DRV_LOG_TRACE_OUT("Early exit (no mapping)."); + return; + } + + entries = GET_NUM_MAP_ENTRIES(dev_node); + num_mmio_secondary_bar = GET_NUM_MMIO_SECONDARY_BAR(dev_node); + + for (i = 0; i < entries; i++) { + if (num_mmio_secondary_bar > 1) { + for (j = 0; j < num_mmio_secondary_bar; j++) { + if (IS_MMIO_MAP_VALID( + dev_node, + i * num_mmio_secondary_bar + j)) { + PCI_Unmap_Memory(&UNC_PCIDEV_mmio_map_entry( + &(unc_pcidev_map[dev_node]), + i * num_mmio_secondary_bar + j)); + } + } + } else { + if (IS_MMIO_MAP_VALID(dev_node, i)) { + PCI_Unmap_Memory(&UNC_PCIDEV_mmio_map_entry( + &(unc_pcidev_map[dev_node]), i)); + } + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE unc_mmio_single_bar_dispatch = { + .init = unc_mmio_single_bar_Initialize, // initialize + .fini = unc_mmio_Destroy, // destroy + .write = unc_mmio_single_bar_Write_PMU, // write + .freeze = unc_mmio_single_bar_Disable_PMU, // freeze + .restart = unc_mmio_single_bar_Enable_PMU, // restart + .read_data = unc_mmio_single_bar_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = UNC_COMMON_Dummy_Func, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform info + .trigger_read = unc_mmio_single_bar_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + +DISPATCH_NODE unc_mmio_mch_regbar_dispatch = +{ + .init = unc_mmio_mch_regbar_Initialize, // initialize + .fini = unc_mmio_Destroy, // destroy + .write = unc_mmio_mch_regbar_Write_PMU, // write + .freeze = unc_mmio_mch_regbar_Disable_PMU, // freeze + .restart = unc_mmio_mch_regbar_Enable_PMU, // restart + .read_data = unc_mmio_mch_regbar_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = UNC_COMMON_Dummy_Func, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, 
// read_ro
+ .platform_info = NULL, // platform info
+ .trigger_read = unc_mmio_mch_regbar_Trigger_Read, // trigger read
+ .scan_for_uncore = NULL, // scan for uncore
+ .read_metrics = NULL // read metrics
+};
+
+DISPATCH_NODE unc_mmio_fpga_dispatch =
+{
+ .init = unc_mmio_fpga_Initialize, // initialize
+ .fini = unc_mmio_Destroy, // destroy
+ .write = unc_mmio_single_bar_Write_PMU, // write
+ .freeze = unc_mmio_single_bar_Disable_PMU, // freeze
+ .restart = unc_mmio_single_bar_Enable_PMU, // restart
+ .read_data = unc_mmio_single_bar_Read_PMU_Data, // read
+ .check_overflow = NULL, // check for overflow
+ .swap_group = NULL, // swap group
+ .read_lbrs = NULL, // read lbrs
+ .cleanup = UNC_COMMON_Dummy_Func, // cleanup
+ .hw_errata = NULL, // hw errata
+ .read_power = NULL, // read power
+ .check_overflow_errata = NULL, // check overflow errata
+ .read_counts = NULL, // read counts
+ .check_overflow_gp_errata = NULL, // check overflow gp errata
+ .read_ro = NULL, // read_ro
+ .platform_info = NULL, // platform info
+ .trigger_read = unc_mmio_single_bar_Trigger_Read, // trigger read
+ .scan_for_uncore = NULL, // scan for uncore
+ .read_metrics = NULL // read metrics
+};
+
+DISPATCH_NODE unc_mmio_multiple_bar_dispatch = {
+ .init = unc_mmio_multiple_bar_Initialize, // initialize
+ .fini = unc_mmio_Destroy, // destroy
+ .write = unc_mmio_multiple_bar_Write_PMU, // write
+ .freeze = unc_mmio_multiple_bar_Disable_PMU, // freeze
+ .restart = unc_mmio_multiple_bar_Enable_PMU, // restart
+ .read_data = unc_mmio_multiple_bar_Read_PMU_Data, // read
+ .check_overflow = NULL, // check for overflow
+ .swap_group = NULL, // swap group
+ .read_lbrs = NULL, // read lbrs
+ .cleanup = UNC_COMMON_Dummy_Func, // cleanup
+ .hw_errata = NULL, // hw errata
+ .read_power = NULL, // read power
+ .check_overflow_errata = NULL, // check overflow errata
+ .read_counts = NULL, // read counts
+ .check_overflow_gp_errata = NULL, // check overflow gp errata
+ .read_ro = NULL, // read_ro
+ .platform_info = NULL, // platform info
+ .trigger_read = unc_mmio_multiple_bar_Trigger_Read, // trigger read
+ .scan_for_uncore = NULL, // scan for uncore
+ .read_metrics = NULL // read metrics
+};
+
+DISPATCH_NODE unc_mmio_pmm_dispatch = {
+ .init = unc_mmio_pmm_Initialize, // initialize
+ .fini = unc_mmio_Destroy, // destroy
+ .write = unc_mmio_multiple_bar_Write_PMU, // write
+ .freeze = unc_mmio_multiple_bar_Disable_PMU, // freeze
+ .restart = unc_mmio_multiple_bar_Enable_PMU, // restart
+ .read_data = unc_mmio_multiple_bar_Read_PMU_Data, // read
+ .check_overflow = NULL, // check for overflow
+ .swap_group = NULL, // swap group
+ .read_lbrs = NULL, // read lbrs
+ .cleanup = UNC_COMMON_Dummy_Func, // cleanup
+ .hw_errata = NULL, // hw errata
+ .read_power = NULL, // read power
+ .check_overflow_errata = NULL, // check overflow errata
+ .read_counts = NULL, // read counts
+ .check_overflow_gp_errata = NULL, // check overflow gp errata
+ .read_ro = NULL, // read_ro
+ .platform_info = NULL, // platform info
+ .trigger_read = unc_mmio_multiple_bar_Trigger_Read, // trigger read
+ .scan_for_uncore = NULL, // scan for uncore
+ .read_metrics = NULL // read metrics
+};
+
diff --git a/drivers/platform/x86/sepdk/sep/unc_msr.c b/drivers/platform/x86/sepdk/sep/unc_msr.c
new file mode 100644
index 00000000000000..bd25539c53b88c
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/unc_msr.c
@@ -0,0 +1,427 @@
+/****
+ Copyright (C) 2012 Intel Corporation. All Rights Reserved.
+
+ This file is part of SEP Development Kit.
+
+ SEP Development Kit is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation.
+
+ SEP Development Kit is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ As a special exception, you may use this file as part of a free software
+ library without restriction. Specifically, if other files instantiate
+ templates or use macros or inline functions from this file, or you compile
+ this file and link it with other files to produce an executable, this
+ file does not by itself cause the resulting executable to be covered by
+ the GNU General Public License. This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "inc/ecb_iterators.h"
+#include "inc/control.h"
+#include "inc/unc_common.h"
+#include "inc/utility.h"
+
+extern U64 *read_counter_info;
+extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
+extern DRV_CONFIG drv_cfg;
+extern U64 *prev_counter_data;
+
+/*!
+ * @fn static VOID UNC_MSR_Write_PMU(PVOID)
+ *
+ * @brief Initial write of PMU registers
+ * Walk through the entries and write the value of each register accordingly.
+ * When current_group is 0, this is the first time the routine is called.
+ *
+ * @param param Pointer to the device index
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID
+UNC_MSR_Write_PMU(PVOID param)
+{
+ U32 dev_idx;
+ U32 this_cpu;
+ CPU_STATE pcpu;
+
+ SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+ dev_idx = *((U32 *)param);
+ this_cpu = CONTROL_THIS_CPU();
+ pcpu = &pcb[this_cpu];
+
+ if (!CPU_STATE_socket_master(pcpu)) {
+ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master).");
+ return;
+ }
+
+ FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_WRITE) {
+ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx),
+ ECB_entries_reg_value(pecb, idx));
+ }
+ END_FOR_EACH_REG_UNC_OPERATION;
+
+ FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) {
+ SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), 0ULL);
+ if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) {
+ LWPMU_DEVICE_counter_mask(&devices[dev_idx]) =
+ (U64)ECB_entries_max_bits(pecb, idx);
+ }
+ }
+ END_FOR_EACH_REG_UNC_OPERATION;
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/*!
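+ * Enable is a read-modify-write for MSRs flagged PMU_REG_RW_READ_WRITE:
+ * unit-control registers AND the programmed value in (presumably to clear
+ * freeze/disable bits), all others OR it in (to set enable bits). A
+ * sketch of that update:
+ *
+ *     U64 v = SYS_Read_MSR(reg);
+ *     v = (reg_type == PMU_REG_UNIT_CTRL) ? (v & prog) : (v | prog);
+ *     SYS_Write_MSR(reg, v);
+ *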
+ * @fn static VOID UNC_MSR_Enable_PMU(PVOID) + * + * @brief Set the enable bit for all the evsel registers + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID +UNC_MSR_Enable_PMU(PVOID param) +{ + U32 j; + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U64 reg_val = 0; + U32 package_num = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + package_num = core_to_package_map[this_cpu]; + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_ENABLE) { + reg_val = ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_reg_type(pecb, idx) == + PMU_REG_UNIT_CTRL) { + reg_val &= ECB_entries_reg_value(pecb, idx); + } else { + reg_val |= ECB_entries_reg_value(pecb, idx); + } + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), reg_val); + } + END_FOR_EACH_REG_UNC_OPERATION; + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + if (ECB_entries_counter_type(pecb, idx) == FREERUN_COUNTER) { + U64 tmp_value = 0; + + if (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system( + pecb, idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + tmp_value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_max_bits(pecb, idx)) { + tmp_value &= ECB_entries_max_bits(pecb, idx); + } + prev_counter_data[j] = tmp_value; + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, + prev_counter_data[j], this_cpu); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID UNC_MSR_Disable_PMU(PVOID) + * + * @brief Set the enable bit for all the evsel registers + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID +UNC_MSR_Disable_PMU(PVOID param) +{ + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U64 reg_val = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_DISABLE) { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) { + continue; + } + reg_val = ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_reg_type(pecb, idx) == + PMU_REG_UNIT_CTRL) { + reg_val |= ECB_entries_reg_value(pecb, idx); + } else { + reg_val &= ECB_entries_reg_value(pecb, idx); + } + } + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), reg_val); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! 
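+ * Free-running MSR counters cannot be reset, so the enable path snapshots
+ * them into prev_counter_data and the EMON read below reports the
+ * wrap-safe difference from that snapshot (sketch, with max_bits as the
+ * counter's maximum value):
+ *
+ *     buffer[j] = (v >= prev[j]) ? (v - prev[j])
+ *                                : (v + (max_bits - prev[j]));
+ *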
+ * @fn static VOID UNC_MSR_Read_PMU_Data(param)
+ *
+ * @param param The read thread node to process
+ * @param dev_idx The device index
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore count data and store into the buffer
+ * For example, with 2 core events on a dual-socket JKTN system,
+ * the start_index will be 32, since there are 2 events on 16 CPUs
+ * per socket; the position of the first QPI event will be computed
+ * based on its event offset.
+ *
+ */
+static VOID
+UNC_MSR_Read_PMU_Data(PVOID param, U32 dev_idx)
+{
+ U32 j = 0;
+ U32 this_cpu;
+ U32 package_num = 0;
+ U64 *buffer;
+ CPU_STATE pcpu;
+ U32 cur_grp;
+ ECB pecb_entry;
+ U64 *prev_buffer = prev_counter_data;
+ U64 tmp_value = 0ULL;
+
+ SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+ this_cpu = CONTROL_THIS_CPU();
+ buffer = (U64 *)param;
+ pcpu = &pcb[this_cpu];
+ package_num = core_to_package_map[this_cpu];
+ cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
+ pecb_entry =
+ LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];
+
+ // NOTE: this read_pmu function is used only for EMON collection.
+ if (!DRV_CONFIG_emon_mode(drv_cfg)) {
+ SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode).");
+ return;
+ }
+ if (!CPU_STATE_socket_master(pcpu)) {
+ SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master).");
+ return;
+ }
+ if (!pecb_entry) {
+ SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+ return;
+ }
+
+ //Read in the counts into temporary buffer
+ FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) {
+ if (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT) {
+ j = ECB_entries_uncore_buffer_offset_in_system(pecb,
+ idx);
+ } else {
+ j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET(
+ package_num,
+ EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(
+ emon_buffer_driver_helper),
+ ECB_entries_uncore_buffer_offset_in_package(
+ pecb, idx));
+ }
+ tmp_value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx));
+ if (ECB_entries_counter_type(pecb, idx) == FREERUN_COUNTER) {
+ if (ECB_entries_max_bits(pecb, idx)) {
+ tmp_value &= ECB_entries_max_bits(pecb, idx);
+ }
+ if (tmp_value >= prev_buffer[j]) {
+ buffer[j] = tmp_value - prev_buffer[j];
+ } else {
+ buffer[j] = tmp_value +
+ (ECB_entries_max_bits(pecb, idx) -
+ prev_buffer[j]);
+ }
+ } else {
+ buffer[j] = tmp_value;
+ }
+ SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j],
+ this_cpu);
+ }
+ END_FOR_EACH_REG_UNC_OPERATION;
+
+ SEP_DRV_LOG_TRACE_OUT("");
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID UNC_MSR_Trigger_Read(param, id, read_from_intr)
+ *
+ * @param param Pointer to populate read data
+ * @param id Device index
+ * @param read_from_intr Read data from interrupt or timer
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore data from counters and store into buffer
+ */
+static VOID
+UNC_MSR_Trigger_Read(PVOID param, U32 id, U32 read_from_intr)
+{
+ U32 this_cpu;
+ U32 package_num;
+ U32 cur_grp;
+ U32 index = 0;
+ U64 diff = 0;
+ U64 value;
+ U64 *data;
+
+ SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u, intr mode: %u.", param, id,
+ read_from_intr);
+
+ this_cpu = CONTROL_THIS_CPU();
+ package_num = core_to_package_map[this_cpu];
+ cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
+
+ //Read in the counts into uncore buffer
+ FOR_EACH_REG_UNC_OPERATION (pecb, id, idx, PMU_OPERATION_READ) {
+ // If the function is invoked from pmi, the event we are
+ // reading counts must be an unc intr event.
+ // If the function is invoked from timer, the event must not be + // an interrupt read event. + if ((read_from_intr && + !ECB_entries_unc_evt_intr_read_get(pecb, idx)) || + (!read_from_intr && + ECB_entries_unc_evt_intr_read_get(pecb, idx))) { + index++; + continue; + } + // Write GroupID based on interrupt read event or timer event into + // the respective groupd id offsets + if (read_from_intr) { + data = (U64 *)((S8 *)param + + ECB_group_id_offset_in_trigger_evt_desc( + pecb)); + } else { + data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); + } + *data = cur_grp + 1; + value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + //check for overflow + if (value < + LWPMU_DEVICE_prev_value(&devices[id])[package_num][index]) { + diff = LWPMU_DEVICE_counter_mask(&devices[id]) - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + diff += value; + } else { + diff = value - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + } + LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index] += diff; + LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] = + value; + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, idx)); + *data = LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index]; + index++; + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE unc_msr_dispatch = { + .init = NULL, // initialize + .fini = NULL, // destroy + .write = UNC_MSR_Write_PMU, // write + .freeze = UNC_MSR_Disable_PMU, // freeze + .restart = UNC_MSR_Enable_PMU, // restart + .read_data = UNC_MSR_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = UNC_COMMON_MSR_Clean_Up, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform info + .trigger_read = UNC_MSR_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + diff --git a/drivers/platform/x86/sepdk/sep/unc_pci.c b/drivers/platform/x86/sepdk/sep/unc_pci.c new file mode 100644 index 00000000000000..caa8e9bc631bbd --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_pci.c @@ -0,0 +1,545 @@ +/**** + Copyright (C) 2012 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. 
This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "inc/ecb_iterators.h"
+#include "inc/control.h"
+#include "inc/unc_common.h"
+#include "inc/utility.h"
+#include "inc/pci.h"
+
+extern U64 *read_counter_info;
+extern DRV_CONFIG drv_cfg;
+extern UNCORE_TOPOLOGY_INFO_NODE uncore_topology;
+extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
+
+/*!
+ * @fn static VOID unc_pci_Write_PMU(VOID*)
+ *
+ * @brief Initial write of PMU registers
+ *        Walk through the entries and write the value of the register
+ *        accordingly. When current_group = 0, this is the first time
+ *        this routine is called.
+ *
+ * @param param Device index of this PMU unit
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID
+unc_pci_Write_PMU(PVOID param)
+{
+	U32 device_id;
+	U32 dev_idx;
+	U32 value;
+	U32 vendor_id;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 package_num = 0;
+	U32 dev_node = 0;
+	U32 cur_grp;
+	ECB pecb_entry;
+	U32 busno;
+	U32 domainno;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	dev_idx = *((U32 *)param);
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
+	pecb_entry =
+		LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];
+
+	if (!CPU_STATE_socket_master(pcpu)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master).");
+		return;
+	}
+	if (!pecb_entry) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+		return;
+	}
+
+	// first, figure out which package maps to which bus
+	dev_node = ECB_dev_node(pecb_entry);
+	if (!IS_BUS_MAP_VALID(dev_node, package_num)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!",
+					    dev_node);
+		return;
+	}
+
+	busno = GET_BUS_MAP(dev_node, package_num);
+	domainno = GET_DOMAIN_MAP(dev_node, package_num);
+	SEP_DRV_LOG_TRACE("package_num=%u, domainno=%u, busno=%u", package_num,
+			  domainno, busno);
+
+	LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]) = dev_node;
+
+	FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_WRITE) {
+		if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) {
+			//Check if we need to zero this MSR out
+			SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), 0LL);
+			continue;
+		}
+
+		// otherwise, we have a valid entry
+		// now we just need to find the corresponding bus #
+		ECB_entries_bus_no(pecb, idx) = busno;
+		value = PCI_Read_U32(domainno, busno,
+				     ECB_entries_dev_no(pecb, idx),
+				     ECB_entries_func_no(pecb, idx), 0);
+
+		CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id,
+						     device_id);
+
+		if (ECB_entries_reg_type(pecb, idx) == PMU_REG_UNIT_CTRL) {
+			// busno cannot be stored in ECB because different sockets have different bus no.
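+			// Illustrative sketch, not driver logic: the same ECB
+			// entry is programmed on every socket and only the bus
+			// differs, resolved at write time from the per-package
+			// map, e.g.
+			//   busno = GET_BUS_MAP(dev_node, package_num);
+			//   PCI_Write_U32(domainno, busno, dev_no, func_no,
+			//                 reg_id, reg_value);
+			// (dev_no/func_no/reg_id stand in for the ECB entry
+			// fields used below).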
+ PCI_Write_U32(domainno, busno, + ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx), + (U32)ECB_entries_reg_value(pecb, idx)); + continue; + } + + // now program at the corresponding offset + PCI_Write_U32(domainno, busno, ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx), + (U32)ECB_entries_reg_value(pecb, idx)); + + if ((ECB_entries_reg_value(pecb, idx) >> NEXT_ADDR_SHIFT) != + 0) { + PCI_Write_U32(domainno, busno, + ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx) + + NEXT_ADDR_OFFSET, + (U32)(ECB_entries_reg_value(pecb, idx) >> + NEXT_ADDR_SHIFT)); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + PCI_Write_U64(domainno, busno, ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx), 0); + + // this is needed for overflow detection of the accumulators. + if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { + LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = + (U64)ECB_entries_max_bits(pecb, idx); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_pci_Enable_PMU(PVOID) + * + * @brief Set the enable bit for all the EVSEL registers + * + * @param Device Index of this PMU unit + * + * @return None + * + * Special Notes: + */ +static VOID +unc_pci_Enable_PMU(PVOID param) +{ + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U32 package_num = 0; + U32 dev_node; + U32 reg_val = 0; + U32 busno; + U32 domainno; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]); + + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + + package_num = core_to_package_map[this_cpu]; + + if (!IS_BUS_MAP_VALID(dev_node, package_num)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", + dev_node); + return; + } + + busno = GET_BUS_MAP(dev_node, package_num); + domainno = GET_DOMAIN_MAP(dev_node, package_num); + SEP_DRV_LOG_TRACE("pakcage_num=%u, domainno=%u, busno=%u", package_num, + domainno, busno); + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_ENABLE) { + if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) { + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), + ECB_entries_reg_value(pecb, idx)); + continue; + } + reg_val = (U32)ECB_entries_reg_value(pecb, idx); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_WRITE) { + reg_val = PCI_Read_U32(domainno, busno, + ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx)); + reg_val &= ECB_entries_reg_value(pecb, idx); + } + PCI_Write_U32(domainno, busno, ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx), reg_val); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_pci_Disable_PMU(PVOID) + * + * @brief Disable the per unit global control to stop the PMU counters. 
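+ *        For read-write control registers the disable bits are OR-ed into
+ *        the value read back from the device rather than blindly
+ *        overwriting it (see the PMU_REG_RW_READ_WRITE path below).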
+ *
+ * @param param Device index of this PMU unit
+ *
+ * @return None
+ *
+ * Special Notes:
+ *     If the counter freeze bit does not work, the counter enable bit
+ *     is cleared instead to disable collection.
+ */
+static VOID
+unc_pci_Disable_PMU(PVOID param)
+{
+	U32 dev_idx;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 package_num = 0;
+	U32 dev_node;
+	U32 reg_val = 0;
+	U32 busno;
+	U32 domainno;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	dev_idx = *((U32 *)param);
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]);
+
+	if (!CPU_STATE_socket_master(pcpu)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master).");
+		return;
+	}
+
+	package_num = core_to_package_map[this_cpu];
+
+	if (!IS_BUS_MAP_VALID(dev_node, package_num)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!",
+					    dev_node);
+		return;
+	}
+
+	busno = GET_BUS_MAP(dev_node, package_num);
+	domainno = GET_DOMAIN_MAP(dev_node, package_num);
+	SEP_DRV_LOG_TRACE("package_num=%u, domainno=%u, busno=%u", package_num,
+			  domainno, busno);
+
+	FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_DISABLE) {
+		if (ECB_entries_reg_type(pecb, idx) == PMU_REG_GLOBAL_CTRL) {
+			continue;
+		}
+		reg_val = (U32)ECB_entries_reg_value(pecb, idx);
+		if (ECB_entries_reg_rw_type(pecb, idx) ==
+		    PMU_REG_RW_READ_WRITE) {
+			reg_val = PCI_Read_U32(domainno, busno,
+					       ECB_entries_dev_no(pecb, idx),
+					       ECB_entries_func_no(pecb, idx),
+					       ECB_entries_reg_id(pecb, idx));
+			reg_val |= ECB_entries_reg_value(pecb, idx);
+		}
+		PCI_Write_U32(domainno, busno, ECB_entries_dev_no(pecb, idx),
+			      ECB_entries_func_no(pecb, idx),
+			      ECB_entries_reg_id(pecb, idx), reg_val);
+	}
+	END_FOR_EACH_REG_UNC_OPERATION;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID unc_pci_Trigger_Read(param, id, read_from_intr)
+ *
+ * @param param          Pointer to populate read data
+ * @param id             Device index
+ * @param read_from_intr Read data from interrupt or timer
+ *
+ * @return None     No return needed
+ *
+ * @brief Read the Uncore data from counters and store into buffer
+ */
+static VOID
+unc_pci_Trigger_Read(PVOID param, U32 id, U32 read_from_intr)
+{
+	U32 this_cpu = 0;
+	U32 package_num = 0;
+	U32 dev_node = 0;
+	U32 cur_grp = 0;
+	U32 index = 0;
+	U64 value_low = 0;
+	U64 value_high = 0;
+	U64 diff = 0;
+	U64 value;
+	U64 *data;
+	U32 busno;
+	U32 domainno;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);
+
+	this_cpu = CONTROL_THIS_CPU();
+	package_num = core_to_package_map[this_cpu];
+	dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[id]);
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
+
+	if (!IS_BUS_MAP_VALID(dev_node, package_num)) {
+		SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!",
+					    dev_node);
+		return;
+	}
+
+	busno = GET_BUS_MAP(dev_node, package_num);
+	domainno = GET_DOMAIN_MAP(dev_node, package_num);
+	SEP_DRV_LOG_TRACE("package_num=%u, domainno=%u, busno=%u", package_num,
+			  domainno, busno);
+
+	// Read the counts into uncore buffer
+	FOR_EACH_REG_UNC_OPERATION (pecb, id, idx, PMU_OPERATION_READ) {
+		// If the function is invoked from pmi, the event we are
+		// reading counts must be an unc intr event.
+		// If the function is invoked from timer, the event must not be
+		// an interrupt read event.
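+		// The check below is effectively an XOR (sketch, illustration
+		// only):
+		//   is_intr_event = ECB_entries_unc_evt_intr_read_get(pecb, idx);
+		//   if (!read_from_intr != !is_intr_event) { /* skip, index++ */ }
+		// so PMI invocations consume only interrupt-read events and
+		// timer invocations consume only the rest.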
+		if ((read_from_intr &&
+		     !ECB_entries_unc_evt_intr_read_get(pecb, idx)) ||
+		    (!read_from_intr &&
+		     ECB_entries_unc_evt_intr_read_get(pecb, idx))) {
+			index++;
+			continue;
+		}
+		// Write GroupID based on interrupt read event or timer event into
+		// the respective group id offsets
+		if (read_from_intr) {
+			data = (U64 *)((S8 *)param +
+				       ECB_group_id_offset_in_trigger_evt_desc(
+					       pecb));
+		} else {
+			data = (U64 *)((S8 *)param + ECB_group_offset(pecb));
+		}
+		*data = cur_grp + 1;
+
+		// read lower 4 bytes
+		value_low = PCI_Read_U32(domainno, busno,
+					 ECB_entries_dev_no(pecb, idx),
+					 ECB_entries_func_no(pecb, idx),
+					 ECB_entries_reg_id(pecb, idx));
+		value = LOWER_4_BYTES_MASK & value_low;
+
+		// read upper 4 bytes
+		value_high = PCI_Read_U32(
+			domainno, busno, ECB_entries_dev_no(pecb, idx),
+			ECB_entries_func_no(pecb, idx),
+			(ECB_entries_reg_id(pecb, idx) + NEXT_ADDR_OFFSET));
+		value |= value_high << NEXT_ADDR_SHIFT;
+		//check for overflow
+		if (value <
+		    LWPMU_DEVICE_prev_value(&devices[id])[package_num][index]) {
+			diff = LWPMU_DEVICE_counter_mask(&devices[id]) -
+			       LWPMU_DEVICE_prev_value(
+				       &devices[id])[package_num][index];
+			diff += value;
+		} else {
+			diff = value -
+			       LWPMU_DEVICE_prev_value(
+				       &devices[id])[package_num][index];
+		}
+		LWPMU_DEVICE_acc_value(
+			&devices[id])[package_num][cur_grp][index] += diff;
+		LWPMU_DEVICE_prev_value(&devices[id])[package_num][index] =
+			value;
+		data = (U64 *)((S8 *)param +
+			       ECB_entries_counter_event_offset(pecb, idx));
+		*data = LWPMU_DEVICE_acc_value(
+			&devices[id])[package_num][cur_grp][index];
+		index++;
+	}
+	END_FOR_EACH_REG_UNC_OPERATION;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/*!
+ * @fn static unc_pci_Read_PMU_Data(param)
+ *
+ * @param param   Buffer to populate with the read counts
+ * @param dev_idx The device index
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore count data and store into the buffer
+ */
+static VOID
+unc_pci_Read_PMU_Data(PVOID param, U32 dev_idx)
+{
+	U32 j = 0;
+	U32 this_cpu;
+	U64 *buffer = (U64 *)param;
+	CPU_STATE pcpu;
+	U32 cur_grp;
+	ECB pecb_entry;
+	U32 dev_node;
+	U32 package_num = 0;
+	U32 busno;
+	U32 domainno;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
+	pecb_entry =
+		LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];
+	dev_node = LWPMU_DEVICE_pci_dev_node_index(&devices[dev_idx]);
+
+	// NOTE THAT the read_pmu function is only used for EMON collection.
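+	// Buffer indexing sketch (illustration only, assuming the offset
+	// macro expands to a linear layout): package-scoped counts land at
+	//   j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET(pkg, per_pkg, off)
+	//     ~ pkg * per_pkg + off
+	// while system-scoped counts use their absolute system offset.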
+ if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + if (!CPU_STATE_socket_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!CPU_STATE_socket_master)."); + return; + } + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + if (!IS_BUS_MAP_VALID(dev_node, package_num)) { + SEP_DRV_LOG_ERROR_TRACE_OUT("No UNC_PCIDEV bus map for %u!", + dev_node); + return; + } + + busno = GET_BUS_MAP(dev_node, package_num); + domainno = GET_DOMAIN_MAP(dev_node, package_num); + SEP_DRV_LOG_TRACE("pakcage_num=%u, domainno=%u, busno=%u", package_num, + domainno, busno); + + //Read in the counts into temporary buffer + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + if (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system(pecb, + idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + buffer[j] = PCI_Read_U64(domainno, busno, + ECB_entries_dev_no(pecb, idx), + ECB_entries_func_no(pecb, idx), + ECB_entries_reg_id(pecb, idx)); + + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE unc_pci_dispatch = { + .init = NULL, // initialize + .fini = NULL, // destroy + .write = unc_pci_Write_PMU, // write + .freeze = unc_pci_Disable_PMU, // freeze + .restart = unc_pci_Enable_PMU, // restart + .read_data = unc_pci_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = NULL, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform info + .trigger_read = unc_pci_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + diff --git a/drivers/platform/x86/sepdk/sep/unc_pmt.c b/drivers/platform/x86/sepdk/sep/unc_pmt.c new file mode 100644 index 00000000000000..88af682c135f03 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_pmt.c @@ -0,0 +1,481 @@ +/**** + Copyright (C) 2020 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. 
This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/unc_common.h" +#include "inc/utility.h" +#include "inc/pci.h" + +extern U64 *read_counter_info; +extern U64 *prev_counter_data; +extern DRV_CONFIG drv_cfg; +extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper; + +extern DRV_PMT_TELEM_DEV_NODE pmt_devices[MAX_PMT_DEVICES]; + +#define MASK_32BIT 0xffffffff +#define MASK_64BIT 0xffffffff00000000ULL + +#define IS_MASTER(device_type, cpu) \ + (((device_type) == DRV_SINGLE_INSTANCE) ? \ + CPU_STATE_system_master(&pcb[cpu]) : \ + CPU_STATE_socket_master(&pcb[(cpu)])) +#define GET_PACKAGE_NUM(device_type, cpu) \ + (((device_type) == DRV_SINGLE_INSTANCE) ? 0 : core_to_package_map[cpu]) +#define IS_64BIT(mask) (((mask) >> 32) != 0) + +#define EVENT_COUNTER_MAX_TRY 30 + +/*! + * @fn static VOID unc_pmt_Write_PMU(VOID*) + * + * @brief Initial write of PMU registers + * Walk through the enties and write the value of the register accordingly. + * When current_group = 0, then this is the first time this routine is called, + * + * @param None + * + * @return None + * + * Special Notes: + */ +static VOID +unc_pmt_Write_PMU(VOID *param) +{ + U32 dev_idx = 0; + U32 offset_delta = 0; + U32 event_id = 0; + U32 this_cpu = 0; + U32 package_num = 0; + U32 cur_grp = 0; + U32 entry = 0; + U32 dev_node = 0; + U64 tmp_value = 0; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + U32 pmt_index = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!is_master)."); + return; + } + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + if (DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!event_based_counts)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + offset_delta = ECB_entries_reg_offset(pecb, idx); + SEP_DRV_LOG_TRACE("od=0x%x", offset_delta); + + // this is needed for overflow detection of the accumulators. 
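+		// Field-extraction sketch (illustration only): each PMT
+		// telemetry counter is a bit field inside a 64-bit sample,
+		//   raw   = PCI_PMT_Read_U64(telem_ep, offset_delta);
+		//   value = (raw >> aux_shift_index) & max_bits;
+		// which is exactly what the reads below compute.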
+ pmt_index = ECB_entries_unit_id(pecb, idx); + if (pmt_index >= MAX_PMT_DEVICES || + !pmt_devices[pmt_index].telem_ep) { + continue; + } + tmp_value = PCI_PMT_Read_U64(pmt_devices[pmt_index].telem_ep, + offset_delta); + tmp_value = tmp_value >> ECB_entries_aux_shift_index(pecb, idx); + tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); + LWPMU_DEVICE_prev_value( + &devices[dev_idx])[package_num][event_id] = tmp_value; + SEP_DRV_LOG_TRACE("cpu=%u, device=%u, package=%u, entry=%u, event_id=%u, value=0x%llu", + this_cpu, dev_idx, package_num, entry, + event_id, tmp_value); + event_id++; + + if (LWPMU_DEVICE_counter_mask(&devices[dev_idx]) == 0) { + LWPMU_DEVICE_counter_mask(&devices[dev_idx]) = + (U64)ECB_entries_max_bits(pecb, idx); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID unc_pmt_Enable_PMU(PVOID) + * + * @brief Capture the previous values to calculate delta later. + * + * @param None + * + * @return None + * + * Special Notes: + */ +static void +unc_pmt_Enable_PMU(PVOID param) +{ + U32 j = 0; + U32 this_cpu = 0; + U32 dev_idx = 0; + U32 package_num = 0; + U32 offset_delta = 0; + U32 cur_grp = 0; + U32 num_events = 0; + U32 dev_node = 0; + U64 *buffer = prev_counter_data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + U32 pmt_index = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb) for group %u.", + cur_grp); + return; + } + SEP_DRV_LOG_TRACE("enable PMU for group %u", cur_grp); + dev_node = ECB_dev_node(pecb_entry); + + // NOTE THAT the enable function currently captures previous values + // for EMON collection to avoid unnecessary memory copy. + // capturing previous values enable freerunning counter delta computation + if (DRV_CONFIG_emon_mode(drv_cfg)) { + num_events = ECB_num_events(pecb_entry); + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, + PMU_OPERATION_READ) { + offset_delta = ECB_entries_reg_offset(pecb, idx); + + if ((ECB_entries_event_scope(pecb, idx) == + PACKAGE_EVENT) || + (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT)) { + if (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system( + pecb, idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + pmt_index = ECB_entries_unit_id(pecb, idx); + if (pmt_index >= MAX_PMT_DEVICES || + !pmt_devices[pmt_index].telem_ep) { + continue; + } + buffer[j] = PCI_PMT_Read_U64( + pmt_devices[pmt_index].telem_ep, + offset_delta); + buffer[j] = + buffer[j] >> + ECB_entries_aux_shift_index(pecb, idx); + buffer[j] &= + (U64)ECB_entries_max_bits(pecb, idx); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn void unc_pmt_Trigger_Read(id) + * + * @param id Device index + * + * @return None No return needed + * + * @brief Read the Uncore data from counters and store into buffer + */ +static VOID +unc_pmt_Trigger_Read(PVOID param, U32 id, U32 read_from_intr) +{ + U32 this_cpu = 0; + U32 cur_grp = 0; + U32 index = 0; + U32 offset_delta = 0; + U32 package_num = 0; + U32 entry = 0; + U32 dev_node = 0; + U64 diff = 0; + U64 value = 0ULL; + U64 *data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + U32 pmt_index = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[id]); +#if !defined(DRV_OS_MAC) + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } +#endif + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; + pecb_entry = LWPMU_DEVICE_PMU_register_data(&devices[id])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + // Write GroupID + data = (U64 *)((S8 *)param + ECB_group_offset(pecb_entry)); + *data = cur_grp + 1; + //Read in the counts into temporary buffer + FOR_EACH_REG_UNC_OPERATION (pecb, id, idx, PMU_OPERATION_READ) { + offset_delta = ECB_entries_reg_offset(pecb, idx); + pmt_index = ECB_entries_unit_id(pecb, idx); + if (pmt_index >= MAX_PMT_DEVICES || + !pmt_devices[pmt_index].telem_ep) { + continue; + } + value = PCI_PMT_Read_U64(pmt_devices[pmt_index].telem_ep, + offset_delta); + value = value >> ECB_entries_aux_shift_index(pecb, idx); + value &= (U64)ECB_entries_max_bits(pecb, idx); + + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, idx)); + //check for overflow if not a static counter + if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { + *data = value; + } else { + if (value < LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]) { + diff = LWPMU_DEVICE_counter_mask(&devices[id]) - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + diff += value; + } else { + diff = value - + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index]; + } + LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index] += + diff; + LWPMU_DEVICE_prev_value( + &devices[id])[package_num][index] = value; + *data = LWPMU_DEVICE_acc_value( + &devices[id])[package_num][cur_grp][index]; + } + index++; + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_pmt_Read_PMU_Data(param) + * + * @param param dummy parameter which is not used + * + * @return None No return needed + * + * @brief Read all the data MSR's into a buffer. Called by the interrupt handler. 
+ * + */ +static VOID +unc_pmt_Read_PMU_Data(PVOID param, U32 dev_idx) +{ + U32 j = 0; + U32 offset_delta = 0; + U32 cur_grp = 0; + U32 num_events = 0; + U32 package_num = 0; + U32 entry = 0; + U32 dev_node = 0; + U32 num_pkgs = num_packages; + U32 this_cpu = 0; + U64 tmp_value = 0ULL; + U64 *buffer = (U64 *)param; + U64 *prev_buffer = prev_counter_data; + ECB pecb_entry; + DEV_UNC_CONFIG pcfg_unc; + U32 pmt_index = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(&devices[dev_idx]); + if (!IS_MASTER(DEV_UNC_CONFIG_device_type(pcfg_unc), this_cpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!IS_MASTER)."); + return; + } + + package_num = GET_PACKAGE_NUM(DEV_UNC_CONFIG_device_type(pcfg_unc), + this_cpu); + cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num]; + pecb_entry = + LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[(cur_grp)]; + if (!pecb_entry) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + dev_node = ECB_dev_node(pecb_entry); + entry = package_num; + + if (DEV_UNC_CONFIG_device_type(pcfg_unc) == DRV_SINGLE_INSTANCE) { + num_pkgs = 1; + } + + num_events = ECB_num_events(pecb_entry); + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + offset_delta = ECB_entries_reg_offset(pecb, idx); + if ((ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) || + (ECB_entries_event_scope(pecb, idx) == SYSTEM_EVENT)) { + if (ECB_entries_event_scope(pecb, idx) == + SYSTEM_EVENT) { + j = ECB_entries_uncore_buffer_offset_in_system( + pecb, idx); + } else { + j = EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( + package_num, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + ECB_entries_uncore_buffer_offset_in_package( + pecb, idx)); + } + + pmt_index = ECB_entries_unit_id(pecb, idx); + if (pmt_index >= MAX_PMT_DEVICES || + !pmt_devices[pmt_index].telem_ep) { + continue; + } + tmp_value = PCI_PMT_Read_U64( + pmt_devices[pmt_index].telem_ep, offset_delta); + tmp_value = tmp_value >> + ECB_entries_aux_shift_index(pecb, idx); + tmp_value &= (U64)ECB_entries_max_bits(pecb, idx); + if (ECB_entries_counter_type(pecb, idx) == + STATIC_COUNTER) { + buffer[j] = tmp_value; + } else { + if (tmp_value >= prev_buffer[j]) { + buffer[j] = tmp_value - prev_buffer[j]; + } else { + buffer[j] = tmp_value + + (ECB_entries_max_bits(pecb, + idx) - + prev_buffer[j]); + } + } + SEP_DRV_LOG_TRACE("j=%u, v=%llu", j, buffer[j]); + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE unc_pmt_dispatch = { + .init = NULL, // initialize + .fini = NULL, // destroy + .write = unc_pmt_Write_PMU, // write + .freeze = NULL, // freeze + .restart = unc_pmt_Enable_PMU, // restart + .read_data = unc_pmt_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = UNC_COMMON_Dummy_Func, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform info + .trigger_read = unc_pmt_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + diff --git a/drivers/platform/x86/sepdk/sep/unc_power.c 
b/drivers/platform/x86/sepdk/sep/unc_power.c
new file mode 100644
index 00000000000000..08371314d011b8
--- /dev/null
+++ b/drivers/platform/x86/sepdk/sep/unc_power.c
@@ -0,0 +1,713 @@
+/****
+ Copyright (C) 2012 Intel Corporation. All Rights Reserved.
+
+ This file is part of SEP Development Kit.
+
+ SEP Development Kit is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public License
+ version 2 as published by the Free Software Foundation.
+
+ SEP Development Kit is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ As a special exception, you may use this file as part of a free software
+ library without restriction. Specifically, if other files instantiate
+ templates or use macros or inline functions from this file, or you compile
+ this file and link it with other files to produce an executable, this
+ file does not by itself cause the resulting executable to be covered by
+ the GNU General Public License. This exception does not however
+ invalidate any other reasons why the executable file might be covered by
+ the GNU General Public License.
+****/
+
+
+
+
+
+
+
+
+
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+
+#include "inc/ecb_iterators.h"
+#include "inc/control.h"
+#include "inc/unc_common.h"
+#include "inc/utility.h"
+
+extern U64 *read_counter_info;
+extern U64 *prev_counter_data;
+extern EMON_BUFFER_DRIVER_HELPER emon_buffer_driver_helper;
+static U64 **prev_val_per_thread;
+static U64 **acc_per_thread;
+extern DRV_CONFIG drv_cfg;
+extern U64 max_rmid;
+static U32 local_id = 0;
+
+/*!
+ * @fn static VOID unc_rdt_program_autormid(VOID*)
+ *
+ * @brief Program the RMID into the PQR_ASSOC register based on core ID
+ *        in auto-rmid mode
+ *
+ * @param param Flag to indicate if the RMID must be generated based on
+ *              core ID or re-used from the previous value in PQR_ASSOC
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID
+unc_rdt_program_autormid(PVOID param)
+{
+	U32 this_cpu;
+	U32 cur_grp;
+	U64 msr_value;
+	CPU_STATE pcpu;
+	U64 read_val;
+	U32 is_save_pqr_assoc = *((U32 *)param);
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[local_id])[0];
+
+	FOR_EACH_REG_UNC_OPERATION (pecb, local_id, idx, PMU_OPERATION_READ) {
+		if (ECB_entries_reg_rw_type(pecb, idx) ==
+		    PMU_REG_RW_READ_MASK_WRITE) {
+			if (is_save_pqr_assoc) {
+				msr_value = SYS_Read_MSR(
+					(U32)ECB_entries_aux_reg_id_to_read(
+						pecb, idx));
+				CPU_STATE_prev_rdt_pqr_assoc(pcpu) = msr_value;
+
+				read_val =
+					((this_cpu <= max_rmid) ?
this_cpu : 0); + msr_value &= + ~ECB_entries_aux_read_mask(pecb, idx); + msr_value |= + read_val & + ECB_entries_aux_read_mask(pecb, idx); + SYS_Write_MSR( + (U32)ECB_entries_aux_reg_id_to_read( + pecb, idx), + msr_value); + SEP_DRV_LOG_TRACE( + "cpu 0x%x wrote 0x%x with %llu", + this_cpu, + (U32)ECB_entries_aux_reg_id_to_read( + pecb, idx), + msr_value); + } else { + SYS_Write_MSR( + (U32)ECB_entries_aux_reg_id_to_read( + pecb, idx), + CPU_STATE_prev_rdt_pqr_assoc(pcpu)); + SEP_DRV_LOG_TRACE( + "cpu 0x%x wrote 0x%x with %llu", + this_cpu, + (U32)ECB_entries_aux_reg_id_to_read( + pecb, idx), + CPU_STATE_prev_rdt_pqr_assoc(pcpu)); + } + } + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn unc_power_Allocate(param) + * + * @param param device index + * + * @return None No return needed + * + * @brief Allocate arrays required for reading counts + */ +static VOID +unc_power_Allocate(PVOID param) +{ + U32 id; + U32 cur_grp; + ECB pecb; + U32 i; + U32 j; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + id = *((U32 *)param); + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[0]; + pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp]; + + if (!pecb) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb)."); + return; + } + + acc_per_thread = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64 *)); + if (acc_per_thread == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Unable to allocate memory for acc_per_thread!"); + return; + } + + prev_val_per_thread = CONTROL_Allocate_Memory( + GLOBAL_STATE_num_cpus(driver_state) * sizeof(U64 *)); + if (prev_val_per_thread == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Unable to allocate memory for prev_val_per_thread!"); + return; + } + + for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { + acc_per_thread[i] = CONTROL_Allocate_Memory( + ECB_num_events(pecb) * sizeof(U64)); + if (acc_per_thread[i] == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Unable to allocate memory for acc_per_thread[%u]!", + i); + return; + } + + prev_val_per_thread[i] = CONTROL_Allocate_Memory( + ECB_num_events(pecb) * sizeof(U64)); + if (prev_val_per_thread[i] == NULL) { + SEP_DRV_LOG_ERROR_TRACE_OUT( + "Unable to allocate memory for prev_val_per_thread[%u]!", + i); + return; + } + + // initialize all values to 0 + for (j = 0; j < ECB_num_events(pecb); j++) { + acc_per_thread[i][j] = 0LL; + prev_val_per_thread[i][j] = 0LL; + } + } + + if (DRV_CONFIG_rdt_auto_rmid(drv_cfg)) { + U32 is_save_pqr_assoc = TRUE; + local_id = id; + CONTROL_Invoke_Parallel(unc_rdt_program_autormid, + (VOID *)&is_save_pqr_assoc); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! 
+ * @fn unc_power_Free(param) + * + * @param param device index + * + * @return None No return needed + * + * @brief Free arrays required for reading counts + */ +static VOID +unc_power_Free(PVOID param) +{ + U32 i; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + if (acc_per_thread) { + for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { + acc_per_thread[i] = + CONTROL_Free_Memory(acc_per_thread[i]); + } + acc_per_thread = CONTROL_Free_Memory(acc_per_thread); + } + + if (prev_val_per_thread) { + for (i = 0; i < (U32)GLOBAL_STATE_num_cpus(driver_state); i++) { + prev_val_per_thread[i] = + CONTROL_Free_Memory(prev_val_per_thread[i]); + } + prev_val_per_thread = CONTROL_Free_Memory(prev_val_per_thread); + } + + if (DRV_CONFIG_rdt_auto_rmid(drv_cfg)) { + U32 is_save_pqr_assoc = FALSE; + CONTROL_Invoke_Parallel(unc_rdt_program_autormid, + (VOID *)&is_save_pqr_assoc); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID rdt_Write_PMU(ECB pecb, U32 idx) + * + * @brief Program event selection register with eventID + * and RMID + * RMID is fetched from PQR_ASSOC register. + * + * @param pecb Pointer to ECB structure + * idx Event register array index in pecb + * + * @return None + * + * Special Notes: + */ +static VOID +rdt_Write_PMU(ECB pecb, U32 idx) +{ + U64 msr_value = 0; + U64 read_val = 0; + U64 masked_val = 0; + U32 read_shift_index = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, %u.", pecb, idx); + + msr_value = ECB_entries_reg_value(pecb, idx); + read_val = 1; + while (!(ECB_entries_aux_read_mask(pecb, idx) & read_val)) { + read_val = read_val << 1; + read_shift_index++; + } + read_val = SYS_Read_MSR((U32)ECB_entries_aux_reg_id_to_read(pecb, idx)); + masked_val = (read_val & ECB_entries_aux_read_mask(pecb, idx)) >> + read_shift_index; + masked_val <<= ECB_entries_aux_shift_index(pecb, idx); + msr_value |= (U64)masked_val; + SYS_Write_MSR(ECB_entries_reg_id(pecb, idx), msr_value); + SEP_DRV_LOG_TRACE("Wrote 0x%x with 0x%x.", + ECB_entries_reg_id(pecb, idx), msr_value); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static U64 common_Read_Data_Counter(ECB pecb, U32 idx) + * + * @brief Read data counter. + * + * @param pecb Pointer to ECB structure + * idx Event register array index in pecb + * + * @return Counter value + * + * Special Notes: + */ +static U64 +common_Read_Data_Counter(ECB pecb, U32 idx) +{ + U64 offset = 0; + U32 read_shift_index = 0; + U64 mask_value = 0; + U64 value = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p, %u.", pecb, idx); + + if (ECB_entries_reg_rw_type(pecb, idx) == PMU_REG_RW_READ_MERGE_READ) { + mask_value = 1; + while (!(ECB_entries_aux_read_mask(pecb, idx) & mask_value)) { + mask_value = mask_value << 1; + read_shift_index++; + } + offset = (SYS_Read_MSR((U32)ECB_entries_aux_reg_id_to_read( + pecb, idx)) & + ECB_entries_aux_read_mask(pecb, idx)) >> + read_shift_index; + } + // The SYS_Read_MSR call below is common for RDT and Power event reads. + // Offset is 0 except for RDT CAT Mask events. + value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx) + (U32)offset); + + // Verify error bit (CTR[63]) and unavailable bit (CTR[62]) + // and discard data if they are set in CTR. 
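+	// Validity-check sketch (illustration only): for IA32_QM_CTR-style
+	// registers the top bits flag bad data,
+	//   if ((value >> bit_position) & aux_read_mask) value = 0;
+	// i.e. a set Error or Unavailable bit zeroes the returned count,
+	// which is what the PMU_REG_RW_READ_VALIDATE_MASK branch below does.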
+ if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_VALIDATE_MASK) { + if ((value >> ECB_entries_bit_position(pecb, idx)) & + ECB_entries_aux_read_mask(pecb, idx)) { + value = 0; + } + } + if (ECB_entries_max_bits(pecb, idx)) { + value &= ECB_entries_max_bits(pecb, idx); + } + SEP_DRV_LOG_TRACE_OUT("val read: %llu: ", value); + + return value; +} + +/*! + * @fn unc_power_Trigger_Read(param, id, read_from_intr) + * + * @param param pointer to sample buffer + * @param id device index + * @param read_from_intr Read data from interrupt or timer + * + * @return None No return needed + * + * @brief Read the Uncore count data and store into the buffer param + */ +static VOID +unc_power_Trigger_Read(PVOID param, U32 id, U32 read_from_intr) +{ + U64 *data = (U64 *)param; + U32 cur_grp; + U32 this_cpu; + U32 package_num; + U32 index = 0; + U64 diff = 0; + U64 value; + + SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u, intr mode: %u.", param, id, + read_from_intr); + + this_cpu = CONTROL_THIS_CPU(); + package_num = core_to_package_map[this_cpu]; + cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num]; + + FOR_EACH_REG_UNC_OPERATION (pecb, id, idx, PMU_OPERATION_READ) { + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_MASK_WRITE) { + rdt_Write_PMU(pecb, idx); + continue; + } + // If the function is invoked from pmi, the event we are + // reading counts must be an unc intr event. + // If the function is invoked from timer, the event must not be + // an interrupt read event. + if ((read_from_intr && + !ECB_entries_unc_evt_intr_read_get(pecb, idx)) || + (!read_from_intr && + ECB_entries_unc_evt_intr_read_get(pecb, idx))) { + index++; + continue; + } + // Write GroupID based on interrupt read event or timer event into + // the respective groupd id offsets + if (read_from_intr) { + data = (U64 *)((S8 *)param + + ECB_group_id_offset_in_trigger_evt_desc( + pecb)); + } else { + data = (U64 *)((S8 *)param + ECB_group_offset(pecb)); + } + *data = cur_grp + 1; + + data = (U64 *)((S8 *)param + + ECB_entries_counter_event_offset(pecb, idx)); + + value = common_Read_Data_Counter(pecb, idx); + + //check for overflow if not a static counter + if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { + *data = value; + } else { + if (value < prev_val_per_thread[this_cpu][index]) { + diff = ECB_entries_max_bits(pecb, idx) - + prev_val_per_thread[this_cpu][index]; + diff += value; + } else { + diff = value - + prev_val_per_thread[this_cpu][index]; + } + acc_per_thread[this_cpu][index] += diff; + prev_val_per_thread[this_cpu][index] = value; + *data = acc_per_thread[this_cpu][index]; + } + index++; + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn unc_power_Enable_PMU(param) + * + * @param None + * + * @return None + * + * @brief Capture the previous values to calculate delta later. 
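+ *        (EMON mode only: the values snapshotted here become the
+ *        prev_counter_data baseline that unc_power_Read_PMU_Data
+ *        subtracts from the next read of each freerunning counter.)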
+ */ +static VOID +unc_power_Enable_PMU(PVOID param) +{ + U32 j; + U64 *buffer = prev_counter_data; + U32 dev_idx; + U32 this_cpu; + CPU_STATE pcpu; + U32 package_event_count = 0; + U32 thread_event_count = 0; + U32 module_event_count = 0; + U64 tmp_value = 0; + U32 package_id = 0; + U32 module_id = 0; + U32 core_id = 0; + U32 thread_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + dev_idx = *((U32 *)param); + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + package_id = core_to_package_map[this_cpu]; + module_id = core_to_module_map[this_cpu]; + core_id = core_to_phys_core_map[this_cpu]; + thread_id = core_to_thread_map[this_cpu]; + + // NOTE THAT the enable function currently captures previous values + // for EMON collection to avoid unnecessary memory copy. + if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_MASK_WRITE) { + rdt_Write_PMU(pecb, idx); + continue; + } + if (ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) { + j = EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + package_event_count); + package_event_count++; + } else if (ECB_entries_event_scope(pecb, idx) == MODULE_EVENT) { + j = EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + CPU_STATE_cpu_module_master(pcpu), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + module_event_count); + module_event_count++; + } else { + j = EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + GLOBAL_STATE_num_modules(driver_state), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + module_enum_supported, module_id, + max_cores_per_module*max_threads_per_core, core_id, + max_threads_per_core, thread_id, + EMON_BUFFER_DRIVER_HELPER_power_num_thread_events( + emon_buffer_driver_helper), + thread_event_count); + thread_event_count++; + } + + tmp_value = SYS_Read_MSR(ECB_entries_reg_id(pecb, idx)); + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_VALIDATE_MASK) { + if ((tmp_value >> ECB_entries_bit_position(pecb, idx)) & + ECB_entries_aux_read_mask(pecb, idx)) { + tmp_value = 0; + } + } + if (ECB_entries_max_bits(pecb, idx)) { + tmp_value &= ECB_entries_max_bits(pecb, idx); + } + buffer[j] = tmp_value; + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u, th_evt_ct=%u", j, + buffer[j], this_cpu, thread_event_count); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn unc_power_Read_PMU_Data(param) + * + * @param param The read thread node to process + * + * @return None No return needed + * + * @brief Read the Uncore count data and store into the buffer param; + * Uncore PMU does not support sampling, i.e. ignore the id parameter. + */ +static VOID +unc_power_Read_PMU_Data(PVOID param, U32 dev_idx) +{ + U32 j; + U64 *buffer = (U64 *)param; + U64 *prev_buffer = prev_counter_data; + U32 this_cpu; + CPU_STATE pcpu; + U32 package_event_count = 0; + U32 thread_event_count = 0; + U32 module_event_count = 0; + U64 tmp_value; + U32 package_id = 0; + U32 module_id = 0; + U32 core_id = 0; + U32 thread_id = 0; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + package_id = core_to_package_map[this_cpu]; + module_id = core_to_module_map[this_cpu]; + core_id = core_to_phys_core_map[this_cpu]; + thread_id = core_to_thread_map[this_cpu]; + + // NOTE THAT the read_pmu function on for EMON collection. + if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + + FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) { + if (ECB_entries_reg_rw_type(pecb, idx) == + PMU_REG_RW_READ_MASK_WRITE) { + rdt_Write_PMU(pecb, idx); + continue; + } + + if (ECB_entries_event_scope(pecb, idx) == PACKAGE_EVENT) { + j = EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + package_event_count); + package_event_count++; + } else if (ECB_entries_event_scope(pecb, idx) == MODULE_EVENT) { + j = EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + CPU_STATE_cpu_module_master(pcpu), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + module_event_count); + module_event_count++; + } else { + j = EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( + package_id, + EMON_BUFFER_DRIVER_HELPER_num_entries_per_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package( + emon_buffer_driver_helper), + EMON_BUFFER_DRIVER_HELPER_power_num_package_events( + emon_buffer_driver_helper), + GLOBAL_STATE_num_modules(driver_state), + EMON_BUFFER_DRIVER_HELPER_power_num_module_events( + emon_buffer_driver_helper), + module_enum_supported, module_id, + max_cores_per_module*max_threads_per_core, core_id, + max_threads_per_core, thread_id, + EMON_BUFFER_DRIVER_HELPER_power_num_thread_events( + emon_buffer_driver_helper), + thread_event_count); + thread_event_count++; + } + + tmp_value = common_Read_Data_Counter(pecb, idx); + + if (ECB_entries_counter_type(pecb, idx) == STATIC_COUNTER) { + buffer[j] = tmp_value; + } else { + if (tmp_value >= prev_buffer[j]) { + buffer[j] = tmp_value - prev_buffer[j]; + } else { + buffer[j] = tmp_value + + (ECB_entries_max_bits(pecb, idx) - + prev_buffer[j]); + } + } + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_REG_UNC_OPERATION; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE unc_power_dispatch = { + .init = unc_power_Allocate, // initialize + 
.fini = unc_power_Free, // destroy + .write = UNC_COMMON_Dummy_Func, // write + .freeze = NULL, // freeze + .restart = unc_power_Enable_PMU, // restart + .read_data = unc_power_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = NULL, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform info + .trigger_read = unc_power_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + +DISPATCH_NODE unc_rdt_dispatch = { + .init = unc_power_Allocate, // initialize + .fini = unc_power_Free, // destroy + .write = NULL, // write + .freeze = NULL, // freeze + .restart = unc_power_Enable_PMU, // restart + .read_data = unc_power_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = NULL, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform info + .trigger_read = unc_power_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + diff --git a/drivers/platform/x86/sepdk/sep/unc_sa.c b/drivers/platform/x86/sepdk/sep/unc_sa.c new file mode 100644 index 00000000000000..b4627d5c6c7d2b --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/unc_sa.c @@ -0,0 +1,188 @@ +/**** + Copyright (C) 2011 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/haswellunc_sa.h" +#include "inc/pci.h" +#include "inc/utility.h" + +extern U64 *read_counter_info; +extern DRV_CONFIG drv_cfg; + +#if !defined(DISABLE_BUILD_SOCPERF) +extern VOID +SOCPERF_Read_Data3(PVOID data_buffer); +#endif + +/*! 
+ * @fn static VOID hswunc_sa_Initialize(PVOID)
+ *
+ * @brief Initialize any registers or addresses
+ *
+ * @param param Device index (ignored; nothing to initialize)
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+static VOID
+hswunc_sa_Initialize(VOID *param)
+{
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+	SEP_DRV_LOG_TRACE_OUT("Empty function.");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn hswunc_sa_Trigger_Read(param, id, read_from_intr)
+ *
+ * @param param          Pointer to populate read data
+ * @param id             Device index
+ * @param read_from_intr Read data from interrupt or timer
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore count data and store into the buffer param
+ *
+ */
+static VOID
+hswunc_sa_Trigger_Read(PVOID param, U32 id, U32 read_from_intr)
+{
+	U64 *data = (U64 *)param;
+	U32 cur_grp;
+	ECB pecb;
+	U32 this_cpu;
+	U32 package_num;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);
+
+	this_cpu = CONTROL_THIS_CPU();
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];
+
+	// group id
+	data = (U64 *)((S8 *)data + ECB_group_offset(pecb));
+#if !defined(DISABLE_BUILD_SOCPERF)
+	SOCPERF_Read_Data3((void *)data);
+#endif
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn hswunc_sa_Read_PMU_Data(param)
+ *
+ * @param param   Buffer to populate with the read counts
+ * @param dev_idx The device index
+ *
+ * @return None No return needed
+ *
+ * @brief Read the Uncore count data and store into the buffer param
+ *
+ */
+static VOID
+hswunc_sa_Read_PMU_Data(PVOID param, U32 dev_idx)
+{
+	U32 j;
+	U64 *buffer = (U64 *)param;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 event_index = 0;
+	U64 counter_buffer[HSWUNC_SA_MAX_COUNTERS + 1];
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+
+	// NOTE THAT the read_pmu function is only used for EMON collection.
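+	// Data-flow sketch (illustration only): SOCPERF_Read_Data3() below
+	// fills counter_buffer[], whose entry 0 is skipped and entries
+	// 1..N map to events in ECB order,
+	//   buffer[j] = counter_buffer[event_index + 1];
+	// with j taken from the uncore system-buffer offset.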
+ if (!DRV_CONFIG_emon_mode(drv_cfg)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode)."); + return; + } + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + +#if !defined(DISABLE_BUILD_SOCPERF) + SOCPERF_Read_Data3((void *)counter_buffer); +#endif + + FOR_EACH_PCI_DATA_REG_RAW (pecb, i, dev_idx) { + j = ECB_entries_uncore_buffer_offset_in_system(pecb, i); + buffer[j] = counter_buffer[event_index + 1]; + event_index++; + SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j], + this_cpu); + } + END_FOR_EACH_PCI_DATA_REG_RAW; + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE hswunc_sa_dispatch = { + .init = hswunc_sa_Initialize, // initialize + .fini = NULL, // destroy + .write = NULL, // write + .freeze = NULL, // freeze + .restart = NULL, // restart + .read_data = hswunc_sa_Read_PMU_Data, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, // swap group + .read_lbrs = NULL, // read lbrs + .cleanup = NULL, // cleanup + .hw_errata = NULL, // hw errata + .read_power = NULL, // read power + .check_overflow_errata = NULL, // check overflow errata + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, // check overflow gp errata + .read_ro = NULL, // read_ro + .platform_info = NULL, // platform info + .trigger_read = hswunc_sa_Trigger_Read, // trigger read + .scan_for_uncore = NULL, // scan for uncore + .read_metrics = NULL // read metrics +}; + diff --git a/drivers/platform/x86/sepdk/sep/utility.c b/drivers/platform/x86/sepdk/sep/utility.c new file mode 100644 index 00000000000000..d080c71ec1a5f7 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/utility.c @@ -0,0 +1,1280 @@ +/**** + Copyright (C) 2005 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. 
+****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv.h" +#include "core2.h" +#include "silvermont.h" +#include "perfver4.h" +#include "valleyview_sochap.h" +#include "unc_gt.h" +#include "haswellunc_sa.h" +#include "utility.h" + +#include "control.h" + +volatile int config_done; +extern DISPATCH_NODE unc_msr_dispatch; +extern DISPATCH_NODE unc_pci_dispatch; +extern DISPATCH_NODE unc_mmio_single_bar_dispatch; +extern DISPATCH_NODE unc_mmio_multiple_bar_dispatch; +extern DISPATCH_NODE unc_mmio_fpga_dispatch; +extern DISPATCH_NODE unc_mmio_pmm_dispatch; +extern DISPATCH_NODE unc_mmio_mch_regbar_dispatch; +extern DISPATCH_NODE unc_power_dispatch; +extern DISPATCH_NODE unc_rdt_dispatch; +extern DISPATCH_NODE hswunc_sa_dispatch; + +#if defined(DRV_PMT_ENABLE) +extern DISPATCH_NODE unc_pmt_dispatch; +#endif +extern U32 drv_type; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) +extern char sym_lookup_func_addr[17]; +#else +extern char *sym_lookup_func_addr; +#endif +typedef unsigned long (*sym_lookup_func)(char const *); +static unsigned long (*kallsyms_lookup_name_local)(char const *); +#endif + +extern VOID +UTILITY_down_read_mm(struct mm_struct *mm) +{ + SEP_DRV_LOG_TRACE_IN("Mm: %p.", mm); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) + down_read((struct rw_semaphore *)&mm->mmap_sem); +#else + down_read((struct rw_semaphore *)&mm->mmap_lock); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +extern VOID +UTILITY_up_read_mm(struct mm_struct *mm) +{ + SEP_DRV_LOG_TRACE_IN("Mm: %p.", mm); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) + up_read((struct rw_semaphore *)&mm->mmap_sem); +#else + up_read((struct rw_semaphore *)&mm->mmap_lock); +#endif + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +// NOT to be instrumented, used inside DRV_LOG! +extern VOID +UTILITY_Read_TSC(U64 *pTsc) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + *pTsc = rdtsc_ordered(); +#else + rdtscll(*(pTsc)); +#endif + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID UTILITY_Read_Cpuid + * + * @brief executes the cpuid_function of cpuid and returns values + * + * @param IN cpuid_function + * OUT rax - results of the cpuid instruction in the + * OUT rbx - corresponding registers + * OUT rcx + * OUT rdx + * + * @return none + * + * Special Notes: + * + * + */ +extern VOID +UTILITY_Read_Cpuid(U64 cpuid_function, + U64 *rax_value, + U64 *rbx_value, + U64 *rcx_value, + U64 *rdx_value) +{ + U32 function = (U32)cpuid_function; + U32 *eax = (U32 *)rax_value; + U32 *ebx = (U32 *)rbx_value; + U32 *ecx = (U32 *)rcx_value; + U32 *edx = (U32 *)rdx_value; + + SEP_DRV_LOG_TRACE_IN( + "Fn: %llu, rax_p: %p, rbx_p: %p, rcx_p: %p, rdx_p: %p.", + cpuid_function, rax_value, rbx_value, rcx_value, rdx_value); + + *eax = function; + + __asm__("cpuid" + : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) + : "a"(function), "b"(*ebx), "c"(*ecx), "d"(*edx)); + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID UTILITY_Configure_CPU + * + * @brief Reads the CPU information from the hardware + * + * @param param dispatch_id - The id of the dispatch table. 
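+ *              (e.g. 100 selects the MSR uncore table, 110 the PCI
+ *              uncore table, 130 the uncore power table; see the
+ *              switch below)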
+ * + * @return Pointer to the correct dispatch table for the CPU architecture + * + * Special Notes: + * + */ +extern DISPATCH +UTILITY_Configure_CPU(U32 dispatch_id) +{ + DISPATCH dispatch = NULL; + + SEP_DRV_LOG_TRACE_IN("Dispatch_id: %u.", dispatch_id); + + switch (dispatch_id) { + case 6: + SEP_DRV_LOG_INIT("Set up the Silvermont dispatch table."); + dispatch = &silvermont_dispatch; + break; + case 7: + SEP_DRV_LOG_INIT( + "Set up the perfver4 HTON dispatch table such as Skylake."); + dispatch = &perfver4_dispatch; + break; + case 8: + SEP_DRV_LOG_INIT( + "Set up the perfver4 HTOFF dispatch table such as Skylake."); + dispatch = &perfver4_dispatch_htoff_mode; + break; + case 11: + SEP_DRV_LOG_INIT( + "Set up the perfver4 NONHT dispatch table such as Icelake."); + dispatch = &perfver4_dispatch_nonht_mode; + break; + case 700: + case 701: + case 1100: + SEP_DRV_LOG_INIT("Set up the Valleyview SA dispatch table."); + dispatch = &valleyview_visa_dispatch; + break; + case 2: + SEP_DRV_LOG_INIT( + "Set up the Core i7(TM) processor dispatch table."); + dispatch = &corei7_dispatch; + break; + case 3: + SEP_DRV_LOG_INIT("Set up the Core i7(TM) dispatch table."); + dispatch = &corei7_dispatch_htoff_mode; + break; + case 10: + SEP_DRV_LOG_INIT("Set up the Knights family dispatch table."); + dispatch = &knights_dispatch; + break; + case 100: + SEP_DRV_LOG_INIT("Set up the MSR based uncore dispatch table."); + dispatch = &unc_msr_dispatch; + break; + case 110: + SEP_DRV_LOG_INIT("Set up the PCI Based Uncore dispatch table."); + dispatch = &unc_pci_dispatch; + break; + case 120: + SEP_DRV_LOG_INIT( + "Set up the MMIO based single bar uncore dispatch table."); + dispatch = &unc_mmio_single_bar_dispatch; + break; + case 121: + SEP_DRV_LOG_INIT( + "Set up the MMIO based uncore dispatch table for FPGA."); + dispatch = &unc_mmio_fpga_dispatch; + break; + case 122: + SEP_DRV_LOG_INIT( + "Set up the MMIO based multiple bar uncore dispatch table."); + dispatch = &unc_mmio_multiple_bar_dispatch; + break; + case 123: + SEP_DRV_LOG_INIT( + "Set up the MMIO based uncore dispatch table for PMM."); + dispatch = &unc_mmio_pmm_dispatch; + break; + case 130: + SEP_DRV_LOG_INIT("Set up the Uncore Power dispatch table."); + dispatch = &unc_power_dispatch; + break; + case 131: + SEP_DRV_LOG_INIT("Set up the Uncore RDT dispatch table."); + dispatch = &unc_rdt_dispatch; + break; + case 132: + SEP_DRV_LOG_INIT( + "Set up the MMIO based REGBAR uncore dispatch table."); + dispatch = &unc_mmio_mch_regbar_dispatch; + break; + case 230: + SEP_DRV_LOG_INIT("Set up the Haswell SA dispatch table."); + dispatch = &hswunc_sa_dispatch; + break; + case 400: + SEP_DRV_LOG_INIT("Set up the GT dispatch table."); + dispatch = &unc_gt_dispatch; + break; + +#if defined(DRV_PMT_ENABLE) + case 500: + SEP_DRV_LOG_INIT("Set up the PMT UNC dispatch table."); + dispatch = &unc_pmt_dispatch; + break; +#endif + default: + dispatch = NULL; + SEP_DRV_LOG_ERROR( + "Architecture not supported (dispatch_id: %d).", + dispatch_id); + break; + } + + SEP_DRV_LOG_TRACE_OUT("Res: %p.", dispatch); + return dispatch; +} + +#if defined(DRV_USE_RDPMC) +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn U64 SYS_Read_PMC_opt (IN U32 ctr_addr, IN U32 is_fixed_ctr)
+ * @brief  Wrapper function for reading perfmon counters
+ *
+ * @param  ctr_addr     - counter address
+ *         is_fixed_ctr - flag to indicate whether the counter is fixed or GP
+ *
+ * @return Counter value
+ *
+ * Special Notes:
+ *     The counter's relative index from its base is specified in bits [29:0].
+ *     If a fixed register is requested, bit 30 of the input operand must additionally be set.
+ *
+ */
+extern U64
+SYS_Read_PMC_opt(U32 ctr_addr, U32 is_fixed_ctr)
+{
+#if !defined(rdpmcl)
+	U32 low = 0;
+	U32 high = 0;
+#endif
+	U64 val = 0;
+	int counter = 0;
+
+	if (is_fixed_ctr) {
+		counter = (1 << RDPMC_COUNTER_TYPE_BIT_SHIFT);
+		counter |= ctr_addr - IA32_FIXED_CTR0;
+	} else {
+		counter |= ctr_addr - IA32_PMC0;
+	}
+	SEP_DRV_LOG_REGISTER_IN("Will read counter 0x%x, rdpmc ctr_index %d.",
+				ctr_addr, counter);
+#if defined(rdpmcl)
+	rdpmcl(counter, val);
+#else
+	rdpmc(counter, low, high);
+	val = ((U64)high << 32) | low;
+#endif
+	SEP_DRV_LOG_REGISTER_OUT("Has read counter 0x%x: %llu.", ctr_addr, val);
+
+	return val;
+}
+#endif
+
+extern U64
+SYS_Read_MSR_With_Status(U32 msr, S32 *status)
+{
+	U64 val = 0;
+#if defined(DRV_SAFE_MSR)
+	int error;
+#endif
+
+	if (status) {
+		*status = 0;
+	}
+
+#if defined(DRV_SAFE_MSR)
+	SEP_DRV_LOG_REGISTER_IN("Will safely read MSR 0x%x.", msr);
+#else
+	SEP_DRV_LOG_REGISTER_IN("Will read MSR 0x%x.", msr);
+#endif
+
+	if (!msr) {
+		SEP_DRV_LOG_WARNING("Ignoring read: MSR address is 0.");
+		return 0ULL;
+	}
+
+#if defined(DRV_SAFE_MSR)
+	error = rdmsrl_safe(msr, &val);
+	if (error) {
+		if (status) {
+			*status = error;
+		}
+		SEP_DRV_LOG_ERROR("Failed to read MSR 0x%x.", msr);
+	}
+	SEP_DRV_LOG_REGISTER_OUT("Has read MSR 0x%x: 0x%llx (error: %d).", msr,
+				 val, error);
+#else
+	rdmsrl(msr, val);
+	SEP_DRV_LOG_REGISTER_OUT("Has read MSR 0x%x: 0x%llx.", msr, val);
+#endif
+
+	return val;
+}
+
+extern void
+SYS_Write_MSR_With_Status(U32 msr, U64 val, S32 *status)
+{
+#if defined(DRV_SAFE_MSR)
+	int error;
+#endif
+
+	if (status) {
+		*status = 0;
+	}
+
+#if defined(DRV_SAFE_MSR)
+	SEP_DRV_LOG_REGISTER_IN("Will safely write MSR 0x%x: 0x%llx.", msr,
+				val);
+#else
+	SEP_DRV_LOG_REGISTER_IN("Will write MSR 0x%x: 0x%llx.", msr, val);
+#endif
+
+	if (!msr) {
+		SEP_DRV_LOG_WARNING("Ignoring write: MSR address is 0.");
+		return;
+	}
+
+#if defined(DRV_SAFE_MSR)
+	error = wrmsr_safe(msr, (U32)val, (U32)(val >> 32));
+	if (error) {
+		if (status) {
+			*status = error;
+		}
+		SEP_DRV_LOG_ERROR("Failed to write MSR 0x%x: 0x%llx.", msr,
+				  val);
+	}
+	SEP_DRV_LOG_REGISTER_OUT("Wrote MSR 0x%x: 0x%llx (error: %d).", msr,
+				 val, error);
+
+#else // !DRV_SAFE_MSR
+#if defined(DRV_IA32)
+	wrmsr(msr, (U32)val, (U32)(val >> 32));
+#endif
+#if defined(DRV_EM64T)
+	wrmsrl(msr, val);
+#endif
+	SEP_DRV_LOG_REGISTER_OUT("Wrote MSR 0x%x: 0x%llx.", msr, val);
+
+#endif // !DRV_SAFE_MSR
+}
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32)
+static unsigned long utility_Compare_Symbol_Names_Return_Value = 0;
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static int utility_Compare_Symbol_Names (void* ref_name, const char* symbol_name, struct module* dummy, unsigned long symbol_address)
+ *
+ * @brief   Comparator for kallsyms_on_each_symbol.
+ *
+ * @param   void *         ref_name       : Symbol we are looking for
+ *          const char *   symbol_name    : Name of the current symbol being evaluated
+ *          struct module* dummy          : Pointer to the module structure. Not needed.
+ *          unsigned long  symbol_address : Address of the current symbol being evaluated
+ *
+ * @return  1 if ref_name matches symbol_name, 0 otherwise.
+ *          Fills utility_Compare_Symbol_Names_Return_Value with the symbol's address on success.
+ *
+ * Special Notes:
+ *          Only used as a callback comparator for kallsyms_on_each_symbol.
+ */
+static int
+utility_Compare_Symbol_Names(void *ref_name,
+			     char const *symbol_name,
+			     struct module *dummy,
+			     unsigned long symbol_address)
+{
+	int res = 0;
+
+	SEP_DRV_LOG_TRACE_IN(
+		"Ref_name: %p, symbol_name: %p, dummy: %p, symbol_address: 0x%lx.",
+		ref_name, symbol_name, dummy, symbol_address);
+
+	if (strcmp((char *)ref_name, symbol_name) == 0) {
+		utility_Compare_Symbol_Names_Return_Value = symbol_address;
+		res = 1;
+	}
+
+	SEP_DRV_LOG_TRACE_OUT("Res: %u.", res);
+	return res;
+}
+#endif
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern unsigned long UTILITY_Find_Symbol (const char* name)
+ *
+ * @brief   Finds the address of the specified kernel symbol.
+ *
+ * @param   const char* name - name of the symbol to look for
+ *
+ * @return  Symbol address (0 if it could not be found)
+ *
+ * Special Notes:
+ *          This wrapper is needed because kallsyms_lookup_name is not exported
+ *          in kernel version 2.6.32.*.
+ *          Careful! This code is *NOT* multithread-safe or reentrant! It should only
+ *          be called from 1 context at a time!
+ */
+extern unsigned long
+UTILITY_Find_Symbol(char const *name)
+{
+	unsigned long res = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
+	int ret;
+	unsigned long addr = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+	char buf[MAXNAMELEN];
+#endif
+#endif
+
+	SEP_DRV_LOG_TRACE_IN(
+		"Name: %p.",
+		name); // Not printing the name to follow the log convention: *must not* dereference any pointer in an 'IN' message
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
+	if (!kallsyms_lookup_name_local) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+		if (!sym_lookup_func_addr) {
+			SEP_DRV_LOG_TRACE_OUT("sym_lookup_func_addr is NULL");
+			return res;
+		}
+#endif
+		ret = kstrtoul(sym_lookup_func_addr, 16, &addr);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+#if defined(CONFIG_KALLSYMS)
+		sprint_symbol(buf, addr);
+
+		if (strstr(buf, "kallsyms_lookup_name")) {
+			SEP_DRV_LOG_TRACE(
+				"Found the address of kallsyms_lookup_name: 0x%lx",
+				addr);
+		} else {
+			SEP_DRV_LOG_WARNING(
+				"Failed to verify that the param address 0x%lx is for the kallsyms_lookup_name function",
+				addr);
+		}
+#else
+		SEP_DRV_LOG_WARNING(
+			"Could not verify the param address 0x%lx because the KALLSYMS kernel config is not enabled",
+			addr);
+#endif
+#endif
+
+		if (!ret) {
+			kallsyms_lookup_name_local = ((sym_lookup_func)addr);
+		}
+	}
+#endif
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32)
+	if (kallsyms_on_each_symbol(utility_Compare_Symbol_Names,
+				    (void *)name)) {
+		res = utility_Compare_Symbol_Names_Return_Value;
+	}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
+	if (kallsyms_lookup_name_local) {
+		SEP_DRV_LOG_TRACE("kallsyms_lookup_name 0x%lx",
+				  (unsigned long)kallsyms_lookup_name_local);
+		res = kallsyms_lookup_name_local(name);
+	} else {
+		SEP_DRV_LOG_WARNING(
+			"Failed to locate the kallsyms_lookup_name address");
+	}
+#else
+	res = kallsyms_lookup_name(name);
+#endif
+
+	SEP_DRV_LOG_INIT(
+		"Name: '%s': 0x%llx.", name ? name : "NULL",
+		(unsigned long long)
+			res); // Printing here instead. (Paranoia in case of corrupt pointer.)
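+
+	/*
+	 * Descriptive summary of the resolution strategy above: 2.6.32
+	 * walks kallsyms_on_each_symbol with the comparator defined earlier;
+	 * 5.7+ calls through the address passed in via the
+	 * sym_lookup_func_addr module parameter (kallsyms_lookup_name is no
+	 * longer exported there); everything in between uses
+	 * kallsyms_lookup_name() directly.
+	 */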
+
+	SEP_DRV_LOG_TRACE_OUT("Res: 0x%llx.", (unsigned long long)res);
+	return res;
+}
+
+/*
+ ************************************
+ *  DRIVER LOG BUFFER DECLARATIONS  *
+ ************************************
+ */
+
+volatile U8 active_ioctl;
+
+DRV_LOG_BUFFER driver_log_buffer;
+
+static char const *drv_log_categories[DRV_NB_LOG_CATEGORIES] = {
+	"load",  "init",     "detection",    "error",   "state change",
+	"mark",  "debug",    "flow",         "alloc",   "interrupt",
+	"trace", "register", "notification", "warning"
+};
+
+#define DRV_LOG_NB_DRIVER_STATES 9
+static char const *drv_log_states[DRV_LOG_NB_DRIVER_STATES] = {
+	"Uninitialized", "Reserved", "Idle",         "Paused",  "Stopped",
+	"Running",       "Pausing",  "Prepare_Stop", "Terminating"
+};
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID utility_Driver_Log_Kprint_Helper (U8 category, char** category_string,
+ *              U8 secondary, char** secondary_string_1,
+ *              char** secondary_string_2, char** secondary_string_3,
+ *              char** secondary_string_4)
+ *
+ * @brief   Helper function for printing log messages to the system log.
+ *
+ * @param   IN     category           - message category
+ *          IN/OUT category_string    - location where to place a pointer to the category's name
+ *          IN     secondary          - secondary field value for the message
+ *          IN/OUT secondary_string_1 - location where to place a pointer to the 1st part of the secondary info's decoded information
+ *          IN/OUT secondary_string_2 - location where to place a pointer to the 2nd part of the secondary info's decoded information
+ *          IN/OUT secondary_string_3 - location where to place a pointer to the 3rd part of the secondary info's decoded information
+ *          IN/OUT secondary_string_4 - location where to place a pointer to the 4th part of the secondary info's decoded information
+ *
+ * @return  none
+ *
+ * Special Notes:
+ *          Allows a single format string to be used for all categories (instead of category-specific format
+ *          strings) when calling printk, simplifying the print routine and reducing potential errors.
+ *          There is a performance cost to this approach (forcing printk to process empty strings), but it
+ *          should be dwarfed by the cost of calling printk in the first place.
+ *          NB: none of the input string pointers may be NULL!
+ */
+static VOID
+utility_Driver_Log_Kprint_Helper(U8 category,
+				 char **category_string,
+				 U8 secondary,
+				 char **secondary_string_1,
+				 char **secondary_string_2,
+				 char **secondary_string_3,
+				 char **secondary_string_4)
+{
+	if (category >= DRV_NB_LOG_CATEGORIES) {
+		*category_string = "Unknown category";
+	} else {
+		*category_string = (char *)drv_log_categories[category];
+	}
+
+	*secondary_string_1 = "";
+	*secondary_string_2 = "";
+	*secondary_string_3 = "";
+	*secondary_string_4 = "";
+
+	switch (category) {
+	case DRV_LOG_CATEGORY_FLOW:
+	case DRV_LOG_CATEGORY_TRACE:
+	case DRV_LOG_CATEGORY_INTERRUPT: // we should *never* be kprinting from an interrupt context...
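+		/*
+		 * Hedged illustration of the decoding below: a FLOW message
+		 * with secondary == DRV_LOG_FLOW_IN produces the kprint
+		 * suffix ", Entering"; DRV_LOG_FLOW_OUT produces ", Leaving".
+		 */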
+		if (secondary != DRV_LOG_NOTHING) {
+			*secondary_string_1 = ", ";
+			if (secondary == DRV_LOG_FLOW_IN) {
+				*secondary_string_2 = "Entering";
+			} else if (secondary == DRV_LOG_FLOW_OUT) {
+				*secondary_string_2 = "Leaving";
+			}
+		}
+		break;
+	case DRV_LOG_CATEGORY_STATE_CHANGE: {
+		U8 orig_state, dest_state;
+
+		orig_state = (secondary & 0xF0) >> 4;
+		dest_state = secondary & 0x0F;
+
+		*secondary_string_1 = ", ";
+
+		if (orig_state < DRV_LOG_NB_DRIVER_STATES) {
+			*secondary_string_2 =
+				(char *)drv_log_states[orig_state];
+		} else {
+			*secondary_string_2 = "Unknown_state";
+		}
+
+		*secondary_string_3 = " -> ";
+
+		if (dest_state < DRV_LOG_NB_DRIVER_STATES) {
+			*secondary_string_4 =
+				(char *)drv_log_states[dest_state];
+		} else {
+			*secondary_string_4 = "Unknown_state";
+		}
+	} break;
+
+	default:
+		break;
+	}
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static inline VOID utility_Log_Write (
+ *                 U8 destination, U8 category, U8 secondary,
+ *                 const char* function_name, U32 func_name_len,
+ *                 U32 line_number, U64 tsc, U8 ioctl, U16 processor_id,
+ *                 U8 driver_state, U16 nb_active_interrupts,
+ *                 U16 nb_active_notifications,
+ *                 const char* format_string, ...)
+ *
+ * @brief   Writes the message to the specified log buffer.
+ *
+ * @param   U8 destination              - whether to write to the primary (0) or the auxiliary log buffer (1)
+ *          U8 category                 - message category
+ *          U8 secondary                - secondary information field for the message
+ *          const char* function_name   - name of the calling function
+ *          U32 func_name_len           - length of the name of the calling function (more efficient
+ *                                        to pass it as a parameter than finding it back at runtime)
+ *          U32 line_number             - line number of the call site
+ *          U64 tsc                     - time stamp value to use
+ *          U8 ioctl                    - current active ioctl
+ *          U16 processor_id            - id of the active core/thread
+ *          U8 driver_state             - current driver state
+ *          U16 nb_active_interrupts    - number of interrupts currently being processed
+ *          U16 nb_active_notifications - number of notifications currently being processed
+ *          const char* format_string   - classical format string for printf-like functions
+ *          ...                         - elements to print
+ *
+ * @return  none
+ *
+ * Special Notes:
+ *          Writes the specified message to the specified log buffer.
+ *          The order of writes (integrity tag at the beginning, overflow tag at the very end) matters
+ *          to ensure the logged information can be detected as only partially written, when applicable.
+ *          Much of the needed information (active core, driver state, tsc..) is passed through the
+ *          stack (instead of obtained inside utility_Log_Write) to guarantee entries representing the
+ *          same message (or log call) in different channels use consistent information, letting the
+ *          decoder reliably identify duplicates.
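+ *
+ *          Consistency-check sketch (assumed decoder-side logic, not code
+ *          from this driver): an entry is complete iff the two tags match.
+ *              if (DRV_LOG_ENTRY_integrity_tag(e) == DRV_LOG_ENTRY_temporal_tag(e))
+ *                  the entry was fully written for that pass of the ring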
+ */ +static inline VOID +utility_Log_Write(U8 destination, + U8 category, + U8 secondary, + char const *function_name, + U32 function_name_length, + U32 line_number, + U64 tsc, + U8 ioctl, + U16 processor_id, + U8 driver_state, + U16 nb_active_interrupts, + U16 nb_active_notifications, + char const *format_string, + va_list args) +{ + U32 entry_id; + U16 overflow_tag; + DRV_LOG_ENTRY entry; + char *target_func_buffer; + U32 local_func_name_length; + U32 i; + + if (destination == 0) { // primary buffer + entry_id = __sync_add_and_fetch( + &DRV_LOG_BUFFER_pri_entry_index(DRV_LOG()), 1); + overflow_tag = (U16)(entry_id / DRV_LOG_MAX_NB_PRI_ENTRIES); + entry = DRV_LOG_BUFFER_entries(DRV_LOG()) + + entry_id % DRV_LOG_MAX_NB_PRI_ENTRIES; + } else { + entry_id = __sync_add_and_fetch( + &DRV_LOG_BUFFER_aux_entry_index(DRV_LOG()), 1); + overflow_tag = (U16)(entry_id / DRV_LOG_MAX_NB_AUX_ENTRIES); + entry = DRV_LOG_BUFFER_entries(DRV_LOG()) + + DRV_LOG_MAX_NB_PRI_ENTRIES + + entry_id % DRV_LOG_MAX_NB_AUX_ENTRIES; + } + + DRV_LOG_COMPILER_MEM_BARRIER(); + DRV_LOG_ENTRY_integrity_tag(entry) = overflow_tag; + DRV_LOG_COMPILER_MEM_BARRIER(); + + if (format_string && + *format_string) { // setting this one first to try to increase MLP + vsnprintf(DRV_LOG_ENTRY_message(entry), DRV_LOG_MESSAGE_LENGTH, + format_string, args); + } else { + DRV_LOG_ENTRY_message(entry)[0] = 0; + } + + target_func_buffer = DRV_LOG_ENTRY_function_name(entry); + local_func_name_length = + function_name_length < DRV_LOG_FUNCTION_NAME_LENGTH ? + function_name_length : + DRV_LOG_FUNCTION_NAME_LENGTH; + for (i = 0; i < local_func_name_length - 1; i++) { + target_func_buffer[i] = function_name[i]; + } + target_func_buffer[i] = 0; + + DRV_LOG_ENTRY_category(entry) = category; + DRV_LOG_ENTRY_secondary_info(entry) = secondary; + DRV_LOG_ENTRY_line_number(entry) = line_number; + DRV_LOG_ENTRY_active_drv_operation(entry) = ioctl; + DRV_LOG_ENTRY_processor_id(entry) = processor_id; + DRV_LOG_ENTRY_driver_state(entry) = driver_state; + DRV_LOG_ENTRY_nb_active_interrupts(entry) = nb_active_interrupts; + DRV_LOG_ENTRY_nb_active_notifications(entry) = nb_active_notifications; + DRV_LOG_ENTRY_tsc(entry) = tsc; + + DRV_LOG_COMPILER_MEM_BARRIER(); + DRV_LOG_ENTRY_temporal_tag(entry) = overflow_tag; + DRV_LOG_COMPILER_MEM_BARRIER(); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void UTILITY_Log (U8 category, U8 in_notification, U8 secondary, + * const char* function_name, U32 func_name_len, + * U32 line_number, const char* format_string, ...) + * + * @brief Checks whether and where the message should be logged, and logs it as appropriate. + * + * @param U8 category - message category + * U8 in_notification - whether or not we are in a notification/OS callback context + * (this information cannot be reliably obtained without passing + * it through the stack) + * U8 secondary - secondary information field for the message + * const char* function_name - name of the calling function + * U32 func_name_len - length of the name of the calling function (more efficient + * to pass it as parameter than finding it back at runtime) + * U32 line_number - line number of the call site + * const char* format_string - classical format string for printf-like functions + * ... - elements to print + * + * @return none + * + * Special Notes: + * Takes a snapshot of various elements (TSC, driver state, etc.) 
to ensure a single log call
+ *          writes consistent information to all applicable channels (i.e. favoring consistency over
+ *          instantaneous accuracy). See utility_Log_Write for details.
+ */
+extern VOID
+UTILITY_Log(U8 category,
+	    U8 in_notification,
+	    U8 secondary,
+	    char const *function_name,
+	    U32 func_name_len,
+	    U32 line_number,
+	    char const *format_string,
+	    ...)
+{
+	U64 tsc_snapshot;
+	U8 ioctl_snapshot;
+	U8 driver_state_snapshot;
+	U16 processor_id_snapshot;
+	U16 nb_active_interrupts_snapshot;
+	U16 nb_active_notifications_snapshot;
+	U8 category_verbosity;
+	U8 in_interrupt;
+	U8 is_enabled;
+	U8 is_logging;
+
+	category_verbosity = DRV_LOG_VERBOSITY(category);
+	processor_id_snapshot = raw_smp_processor_id();
+	in_interrupt = ((pcb && atomic_read(&CPU_STATE_in_interrupt(
+				       &pcb[processor_id_snapshot]))) +
+			(category == DRV_LOG_CATEGORY_INTERRUPT));
+	is_enabled =
+		in_interrupt * !!(category_verbosity & LOG_CONTEXT_INTERRUPT) +
+		in_notification *
+			!!(category_verbosity & LOG_CONTEXT_NOTIFICATION) +
+		(!in_interrupt * !in_notification) *
+			!!(category_verbosity & LOG_CONTEXT_REGULAR);
+
+	if (is_enabled) {
+		va_list args;
+		U32 i;
+
+		ioctl_snapshot = active_ioctl;
+		driver_state_snapshot = GET_DRIVER_STATE();
+		nb_active_interrupts_snapshot =
+			DRV_LOG_BUFFER_nb_active_interrupts(DRV_LOG());
+		nb_active_notifications_snapshot =
+			DRV_LOG_BUFFER_nb_active_notifications(DRV_LOG());
+		UTILITY_Read_TSC(&tsc_snapshot);
+
+		va_start(args, format_string);
+
+		for (i = 0; i < 2; i++) {
+			if (category_verbosity & (1 << i)) {
+				va_list args_copy;
+				va_copy(args_copy, args);
+				utility_Log_Write(
+					i, // 0 for primary log, 1 for auxiliary log
+					category, secondary, function_name,
+					func_name_len, line_number,
+					tsc_snapshot, ioctl_snapshot,
+					processor_id_snapshot,
+					driver_state_snapshot,
+					nb_active_interrupts_snapshot,
+					nb_active_notifications_snapshot,
+					format_string, args_copy);
+				va_end(args_copy);
+			}
+		}
+		if (category_verbosity & LOG_CHANNEL_PRINTK ||
+		    category_verbosity & LOG_CHANNEL_TRACEK) {
+#define DRV_LOG_DEBUG_ARRAY_SIZE 512
+			char tmp_array[DRV_LOG_DEBUG_ARRAY_SIZE];
+			S32 nb_written_characters;
+			char *category_s, *sec1_s, *sec2_s, *sec3_s, *sec4_s;
+			va_list args_copy;
+			utility_Driver_Log_Kprint_Helper(category, &category_s,
+							 secondary, &sec1_s,
+							 &sec2_s, &sec3_s,
+							 &sec4_s);
+
+			nb_written_characters = snprintf(
+				tmp_array, DRV_LOG_DEBUG_ARRAY_SIZE - 1,
+				SEP_MSG_PREFIX " [%s%s%s%s%s] [%s@%d]: ",
+				category_s, sec1_s, sec2_s, sec3_s, sec4_s,
+				function_name, line_number);
+
+			if (nb_written_characters > 0) {
+				// snprintf/vsnprintf return the length the
+				// full string *would* have had: clamp so the
+				// '\n' and NUL appended below stay in bounds
+				if (nb_written_characters >
+				    DRV_LOG_DEBUG_ARRAY_SIZE - 2) {
+					nb_written_characters =
+						DRV_LOG_DEBUG_ARRAY_SIZE - 2;
+				}
+				va_copy(args_copy, args);
+				nb_written_characters += vsnprintf(
+					tmp_array + nb_written_characters,
+					DRV_LOG_DEBUG_ARRAY_SIZE -
+						nb_written_characters - 1,
+					format_string, args_copy);
+				va_end(args_copy);
+				if (nb_written_characters >
+				    DRV_LOG_DEBUG_ARRAY_SIZE - 2) {
+					nb_written_characters =
+						DRV_LOG_DEBUG_ARRAY_SIZE - 2;
+				}
+#undef DRV_LOG_DEBUG_ARRAY_SIZE
+
+				tmp_array[nb_written_characters++] = '\n';
+				tmp_array[nb_written_characters++] = 0;
+
+				is_logging = (category_verbosity &
+					      LOG_CHANNEL_PRINTK) *
+					     !in_interrupt * !in_notification;
+				if (is_logging) {
+					if (!in_atomic()) {
+						switch (category) {
+						case DRV_LOG_CATEGORY_ERROR:
+							printk(KERN_ERR "%s",
+							       tmp_array);
+							break;
+						case DRV_LOG_CATEGORY_WARNING:
+							printk(KERN_WARNING
+							       "%s",
+							       tmp_array);
+							break;
+						default:
+							printk(KERN_INFO "%s",
+							       tmp_array);
+							break;
+						}
+					}
+				}
+
+				/*
+				//trace_printk is allowed only in debug kernel
+#if defined(CONFIG_DYNAMIC_FTRACE)
+				if (category_verbosity & LOG_CHANNEL_TRACEK) {
+					trace_printk("%s", tmp_array);
+				}
+#endif
+				*/
+			}
+		}
+
+		va_end(args);
+	}
+
+	return;
+}
+
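+/*
+ * Illustrative call (hedged sketch): the SEP_DRV_LOG_* wrappers are assumed
+ * to expand to something along these lines; the actual macro definitions
+ * live in the driver headers, not in this file.
+ *
+ *	UTILITY_Log(DRV_LOG_CATEGORY_TRACE, 0, DRV_LOG_NOTHING,
+ *		    __func__, sizeof(__func__), __LINE__,
+ *		    "j=%u, value=%llu", j, value);
+ */
+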
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern DRV_STATUS UTILITY_Driver_Log_Init (void)
+ *
+ * @brief   Allocates and initializes the driver log buffer.
+ *
+ * @param   none
+ *
+ * @return  OS_SUCCESS on success, OS_NO_MEM on error.
+ *
+ * Special Notes:
+ *          Should be (successfully) run before any non-LOAD log calls.
+ *          Allocates memory without going through CONTROL_Allocate (to avoid
+ *          complicating the instrumentation of CONTROL_* functions): calling
+ *          UTILITY_Driver_Log_Free is necessary to free the log structure.
+ *          Falls back to vmalloc when contiguous physical memory cannot be
+ *          allocated. This does not impact runtime behavior, but may impact
+ *          the ease of retrieving the log from a core dump if the system
+ *          crashes.
+ */
+extern DRV_STATUS
+UTILITY_Driver_Log_Init(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
+	struct timespec64 cur_time;
+#else
+	struct timespec cur_time;
+#endif
+	U32 size = sizeof(*driver_log_buffer);
+	U8 using_contiguous_physical_memory;
+	U32 bitness;
+
+	if (size <
+	    MAX_KMALLOC_SIZE) { // allocated directly (not via CONTROL_Allocate) to narrow
+		driver_log_buffer = (PVOID)kmalloc(
+			size,
+			GFP_KERNEL); // the window where the log is not yet initialized
+	} else {
+		driver_log_buffer =
+			(PVOID)__get_free_pages(GFP_KERNEL, get_order(size));
+	}
+
+	if (driver_log_buffer) {
+		using_contiguous_physical_memory = 1;
+	} else {
+		driver_log_buffer = vmalloc(size);
+
+		if (!driver_log_buffer) {
+			return OS_NO_MEM;
+		}
+
+		using_contiguous_physical_memory = 0;
+	}
+
+	memset(driver_log_buffer, DRV_LOG_FILLER_BYTE,
+	       sizeof(*driver_log_buffer)); // we don't want zero-filled pages (so that the buffer's pages don't get omitted in some crash dumps)
+
+	DRV_LOG_COMPILER_MEM_BARRIER();
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[0] =
+		DRV_LOG_SIGNATURE_0;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[0] =
+		DRV_LOG_SIGNATURE_6;
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[3] =
+		DRV_LOG_SIGNATURE_3;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[3] =
+		DRV_LOG_SIGNATURE_3;
+
+	DRV_LOG_COMPILER_MEM_BARRIER();
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[2] =
+		DRV_LOG_SIGNATURE_2;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[2] =
+		DRV_LOG_SIGNATURE_4;
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[1] =
+		DRV_LOG_SIGNATURE_1;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[1] =
+		DRV_LOG_SIGNATURE_5;
+
+	DRV_LOG_COMPILER_MEM_BARRIER();
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[7] =
+		DRV_LOG_SIGNATURE_7;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[7] =
+		DRV_LOG_SIGNATURE_7;
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[5] =
+		DRV_LOG_SIGNATURE_5;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[5] =
+		DRV_LOG_SIGNATURE_1;
+
+	DRV_LOG_COMPILER_MEM_BARRIER();
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[6] =
+		DRV_LOG_SIGNATURE_6;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[6] =
+		DRV_LOG_SIGNATURE_0;
+	DRV_LOG_BUFFER_header_signature(driver_log_buffer)[4] =
+		DRV_LOG_SIGNATURE_4;
+	DRV_LOG_BUFFER_footer_signature(driver_log_buffer)[4] =
+		DRV_LOG_SIGNATURE_2;
+
+	DRV_LOG_BUFFER_log_size(driver_log_buffer) = sizeof(*driver_log_buffer);
+	DRV_LOG_BUFFER_max_nb_pri_entries(driver_log_buffer) =
+		DRV_LOG_MAX_NB_PRI_ENTRIES;
+	DRV_LOG_BUFFER_max_nb_aux_entries(driver_log_buffer) =
+		DRV_LOG_MAX_NB_AUX_ENTRIES;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
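+	// Descriptive note: the 64-bit time API below is the y2038-safe
+	// replacement for the getnstimeofday() call used on pre-5.6 kernels.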
+	ktime_get_real_ts64(&cur_time);
+#else
+	getnstimeofday(&cur_time);
+#endif
+
+	DRV_LOG_BUFFER_init_time(driver_log_buffer) = cur_time.tv_sec;
+	DRV_LOG_BUFFER_disambiguator(driver_log_buffer) = 0;
+	DRV_LOG_BUFFER_log_version(driver_log_buffer) = DRV_LOG_VERSION;
+	DRV_LOG_BUFFER_pri_entry_index(driver_log_buffer) = (U32)((S32)-1);
+	DRV_LOG_BUFFER_aux_entry_index(driver_log_buffer) = (U32)((S32)-1);
+
+#if defined(DRV_EM64T)
+	bitness = 64;
+#else
+	bitness = 32;
+#endif
+
+	snprintf(DRV_LOG_BUFFER_driver_version(driver_log_buffer),
+		 DRV_LOG_DRIVER_VERSION_SIZE,
+		 "[%u-bit Linux] SEP v%d.%d. API %d. type %u.", bitness,
+		 SEP_MAJOR_VERSION, SEP_MINOR_VERSION, SEP_API_VERSION,
+		 drv_type);
+
+	DRV_LOG_BUFFER_driver_state(driver_log_buffer) = GET_DRIVER_STATE();
+	DRV_LOG_BUFFER_active_drv_operation(driver_log_buffer) = active_ioctl;
+	DRV_LOG_BUFFER_nb_drv_operations(driver_log_buffer) = 0;
+	DRV_LOG_BUFFER_nb_interrupts(driver_log_buffer) = 0;
+	DRV_LOG_BUFFER_nb_active_interrupts(driver_log_buffer) = 0;
+	DRV_LOG_BUFFER_nb_notifications(driver_log_buffer) = 0;
+	DRV_LOG_BUFFER_nb_active_notifications(driver_log_buffer) = 0;
+	DRV_LOG_BUFFER_nb_driver_state_transitions(driver_log_buffer) = 0;
+
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_LOAD) =
+		DRV_LOG_DEFAULT_LOAD_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_INIT) =
+		DRV_LOG_DEFAULT_INIT_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_DETECTION) =
+		DRV_LOG_DEFAULT_DETECTION_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_ERROR) =
+		DRV_LOG_DEFAULT_ERROR_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_STATE_CHANGE) =
+		DRV_LOG_DEFAULT_STATE_CHANGE_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_MARK) =
+		DRV_LOG_DEFAULT_MARK_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_DEBUG) =
+		DRV_LOG_DEFAULT_DEBUG_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_FLOW) =
+		DRV_LOG_DEFAULT_FLOW_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_ALLOC) =
+		DRV_LOG_DEFAULT_ALLOC_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_INTERRUPT) =
+		DRV_LOG_DEFAULT_INTERRUPT_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_TRACE) =
+		DRV_LOG_DEFAULT_TRACE_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_REGISTER) =
+		DRV_LOG_DEFAULT_REGISTER_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_NOTIFICATION) =
+		DRV_LOG_DEFAULT_NOTIFICATION_VERBOSITY;
+	DRV_LOG_VERBOSITY(DRV_LOG_CATEGORY_WARNING) =
+		DRV_LOG_DEFAULT_WARNING_VERBOSITY;
+
+	DRV_LOG_BUFFER_contiguous_physical_memory(driver_log_buffer) =
+		using_contiguous_physical_memory;
+
+	SEP_DRV_LOG_LOAD(
+		"Initialized driver log using %scontiguous physical memory.",
+		DRV_LOG_BUFFER_contiguous_physical_memory(driver_log_buffer) ?
+			"" :
+			"non-");
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void UTILITY_Driver_Log_Free (void)
+ *
+ * @brief   Frees the driver log buffer.
+ *
+ * @param   none
+ *
+ * @return  none
+ *
+ * Special Notes:
+ *          Should be done before unloading the driver.
+ *          See UTILITY_Driver_Log_Init for details.
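+ *
+ *          Teardown sketch (mirrors the allocation paths taken in
+ *          UTILITY_Driver_Log_Init): kfree/free_pages when the buffer is
+ *          physically contiguous, vfree otherwise; the
+ *          contiguous_physical_memory flag recorded at init time selects
+ *          the branch below.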
+ */
+extern void
+UTILITY_Driver_Log_Free(VOID)
+{
+	U32 size = sizeof(*driver_log_buffer);
+
+	if (driver_log_buffer) {
+		if (DRV_LOG_BUFFER_contiguous_physical_memory(
+			    driver_log_buffer)) {
+			if (size < MAX_KMALLOC_SIZE) {
+				kfree(driver_log_buffer);
+			} else {
+				free_pages((unsigned long)driver_log_buffer,
+					   get_order(size));
+			}
+		} else {
+			vfree(driver_log_buffer);
+		}
+
+		driver_log_buffer = NULL;
+	}
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern void UTILITY_Driver_Set_Active_Ioctl (U32 ioctl)
+ *
+ * @brief   Sets the 'active_ioctl' global to the specified value.
+ *
+ * @param   U32 ioctl - ioctl/drvop code to use
+ *
+ * @return  none
+ *
+ * Special Notes:
+ *          Used to keep track of the IOCTL operation currently being processed.
+ *          This information is saved in the log buffer (globally), as well as
+ *          in every log entry.
+ *          NB: only IOCTLs for which grabbing the ioctl mutex is necessary
+ *          should be kept track of this way.
+ */
+extern void
+UTILITY_Driver_Set_Active_Ioctl(U32 ioctl)
+{
+	active_ioctl = ioctl;
+	if (ioctl) {
+		DRV_LOG_BUFFER_nb_drv_operations(driver_log_buffer)++;
+	}
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern const char** UTILITY_Log_Category_Strings (void)
+ *
+ * @brief   Accessor function for the log category string array
+ *
+ * @param   none
+ *
+ * @return  Pointer to the log category string array
+ *
+ * Special Notes:
+ *          Only needed for cosmetic purposes when adjusting category verbosities.
+ */
+extern char const **
+UTILITY_Log_Category_Strings(void)
+{
+	return drv_log_categories;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern U32 UTILITY_Change_Driver_State (U32 allowed_prior_states, U32 state, const char* func, U32 line_number)
+ *
+ * @brief   Updates the driver state (if the transition is legal).
+ * + * @param U32 allowed_prior_states - the bitmask representing the states from which the transition is allowed to occur + * U32 state - the destination state + * const char* func - the callsite's function's name + * U32 line_number - the callsite's line number + * + * @return 1 in case of success, 0 otherwise + * + * Special Notes: + * + */ +extern U32 +UTILITY_Change_Driver_State(U32 allowed_prior_states, + U32 state, + char const *func, + U32 line_number) +{ + U32 res = 1; + U32 previous_state; + U32 current_state = GET_DRIVER_STATE(); + U32 nb_attempts = 0; + + SEP_DRV_LOG_TRACE_IN( + "Prior states: 0x%x, state: %u, func: %p, line: %u.", + allowed_prior_states, state, func, line_number); + + if (state >= DRV_LOG_NB_DRIVER_STATES) { + SEP_DRV_LOG_ERROR("Illegal destination state %d (%s@%u)!", + state, func, line_number); + res = 0; + goto clean_return; + } + + do { + previous_state = current_state; + nb_attempts++; + SEP_DRV_LOG_TRACE("Attempt #%d to transition to state %s.", + nb_attempts, drv_log_states[state]); + + if (DRIVER_STATE_IN(current_state, allowed_prior_states)) { + current_state = cmpxchg(&GET_DRIVER_STATE(), + previous_state, state); + } else { + SEP_DRV_LOG_ERROR( + "Invalid transition [%s -> %s] (%s@%u)!", + drv_log_states[previous_state], + drv_log_states[state], func, line_number); + res = 0; + goto clean_return; + } + + } while (previous_state != current_state); + + SEP_DRV_LOG_STATE_TRANSITION(previous_state, state, "From %s@%u.", func, + line_number); + +clean_return: + SEP_DRV_LOG_TRACE_OUT("Res: %u.", res); + return res; +} + diff --git a/drivers/platform/x86/sepdk/sep/valleyview_sochap.c b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c new file mode 100644 index 00000000000000..5fb2e134f6d118 --- /dev/null +++ b/drivers/platform/x86/sepdk/sep/valleyview_sochap.c @@ -0,0 +1,319 @@ +/**** + Copyright (C) 2012 Intel Corporation. All Rights Reserved. + + This file is part of SEP Development Kit. + + SEP Development Kit is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public License + version 2 as published by the Free Software Foundation. + + SEP Development Kit is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + As a special exception, you may use this file as part of a free software + library without restriction. Specifically, if other files instantiate + templates or use macros or inline functions from this file, or you compile + this file and link it with other files to produce an executable, this + file does not by itself cause the resulting executable to be covered by + the GNU General Public License. This exception does not however + invalidate any other reasons why the executable file might be covered by + the GNU General Public License. +****/ + + + + + + + + + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/ecb_iterators.h" +#include "inc/control.h" +#include "inc/utility.h" +#include "inc/valleyview_sochap.h" + +extern U64 *read_counter_info; +static U64 *uncore_current_data = NULL; +static U64 *uncore_to_read_data = NULL; +extern DRV_CONFIG drv_cfg; + +#if !defined(DISABLE_BUILD_SOCPERF) +extern VOID +SOCPERF_Read_Data3(PVOID data_buffer); +#endif + +/*! 
+ * @fn static VOID valleyview_VISA_Initialize(PVOID) + * + * @brief Initialize any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID +valleyview_VISA_Initialize(VOID *param) +{ + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + // Allocate memory for reading GMCH counter values + the group id + if (!uncore_current_data) { + uncore_current_data = CONTROL_Allocate_Memory( + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + if (!uncore_current_data) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (uncore_current_data is NULL!)."); + return; + } + } + if (!uncore_to_read_data) { + uncore_to_read_data = CONTROL_Allocate_Memory( + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + if (!uncore_to_read_data) { + SEP_DRV_LOG_ERROR_TRACE_OUT("Early exit (uncore_to_read_data is NULL!)."); + return; + } + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID valleyview_VISA_Enable_PMU(PVOID) + * + * @brief Start counting + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID +valleyview_VISA_Enable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + + SEP_DRV_LOG_TRACE("Starting the counters..."); + if (uncore_current_data) { + memset(uncore_current_data, 0, + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + } + if (uncore_to_read_data) { + memset(uncore_to_read_data, 0, + (VLV_CHAP_MAX_COUNTERS + 1) * sizeof(U64)); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID valleyview_VISA_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when sampling/driver stops + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID +valleyview_VISA_Disable_PMU(PVOID param) +{ + U32 this_cpu; + CPU_STATE pcpu; + U32 cur_driver_state; + + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + + this_cpu = CONTROL_THIS_CPU(); + pcpu = &pcb[this_cpu]; + cur_driver_state = GET_DRIVER_STATE(); + + if (!CPU_STATE_system_master(pcpu)) { + SEP_DRV_LOG_TRACE_OUT("Early exit (!system_master)."); + return; + } + SEP_DRV_LOG_TRACE("Stopping the counters..."); + if (cur_driver_state == DRV_STATE_PREPARE_STOP || + cur_driver_state == DRV_STATE_STOPPED || + cur_driver_state == DRV_STATE_TERMINATING) { + uncore_current_data = CONTROL_Free_Memory(uncore_current_data); + uncore_to_read_data = CONTROL_Free_Memory(uncore_to_read_data); + } + + SEP_DRV_LOG_TRACE_OUT(""); + return; +} + +/*! + * @fn static VOID valleyview_VISA_Clean_Up(PVOID) + * + * @brief Reset any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID +valleyview_VISA_Clean_Up(VOID *param) +{ + SEP_DRV_LOG_TRACE_IN("Param: %p.", param); + SEP_DRV_LOG_TRACE_OUT("Empty function."); + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn valleyview_VISA_Read_PMU_Data(param, dev_idx)
+ *
+ * @param    param     Buffer in which to store the read counter data
+ * @param    dev_idx   The device index
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the Uncore count data and store it into the buffer param.
+ *
+ */
+static VOID
+valleyview_VISA_Read_PMU_Data(PVOID param, U32 dev_idx)
+{
+	U32 j;
+	U64 *buffer = (U64 *)param;
+	U32 this_cpu;
+	CPU_STATE pcpu;
+	U32 package_num;
+	U32 event_index = 0;
+	U32 cur_grp;
+	ECB pecb_entry;
+	U64 counter_buffer[VLV_CHAP_MAX_COUNTERS + 1];
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p.", param);
+
+	this_cpu = CONTROL_THIS_CPU();
+	pcpu = &pcb[this_cpu];
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[(dev_idx)])[package_num];
+	pecb_entry =
+		LWPMU_DEVICE_PMU_register_data(&devices[(dev_idx)])[cur_grp];
+
+	// NOTE: this read_pmu path is only exercised for EMON collection.
+	if (!DRV_CONFIG_emon_mode(drv_cfg)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!emon_mode).");
+		return;
+	}
+	if (!CPU_STATE_socket_master(pcpu)) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!socket_master).");
+		return;
+	}
+	if (!pecb_entry) {
+		SEP_DRV_LOG_TRACE_OUT("Early exit (!pecb).");
+		return;
+	}
+
+#if !defined(DISABLE_BUILD_SOCPERF)
+	SOCPERF_Read_Data3((void *)counter_buffer);
+#endif
+
+	FOR_EACH_REG_UNC_OPERATION (pecb, dev_idx, idx, PMU_OPERATION_READ) {
+		// the buffer index for this PMU needs to account for each event
+		j = ECB_entries_uncore_buffer_offset_in_system(pecb, idx);
+		buffer[j] = counter_buffer[event_index + 1];
+		event_index++;
+		SEP_DRV_LOG_TRACE("j=%u, value=%llu, cpu=%u", j, buffer[j],
+				  this_cpu);
+	}
+	END_FOR_EACH_REG_UNC_OPERATION;
+
+	SEP_DRV_LOG_TRACE_OUT("");
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn valleyview_Trigger_Read(param, id, read_from_intr)
+ *
+ * @param    param           Pointer to populate read data
+ * @param    id              Device index
+ * @param    read_from_intr  Read data from interrupt or timer
+ *
+ * @return   None     No return needed
+ *
+ * @brief    Read the SoCHAP counters when the timer is triggered
+ *
+ */
+static VOID
+valleyview_Trigger_Read(PVOID param, U32 id, U32 read_from_intr)
+{
+	U64 *data = (U64 *)param;
+	U32 cur_grp;
+	ECB pecb;
+	U32 this_cpu;
+	U32 package_num;
+
+	SEP_DRV_LOG_TRACE_IN("Param: %p, id: %u.", param, id);
+
+	this_cpu = CONTROL_THIS_CPU();
+	package_num = core_to_package_map[this_cpu];
+	cur_grp = LWPMU_DEVICE_cur_group(&devices[id])[package_num];
+	pecb = LWPMU_DEVICE_PMU_register_data(&devices[id])[cur_grp];
+
+	// group id
+	data = (U64 *)((S8 *)data + ECB_group_offset(pecb));
+#if !defined(DISABLE_BUILD_SOCPERF)
+	SOCPERF_Read_Data3((void *)data);
+#endif
+
+	SEP_DRV_LOG_TRACE_OUT("");
+	return;
+}
+
+/*
+ * Initialize the dispatch table
+ */
+DISPATCH_NODE valleyview_visa_dispatch = {
+	.init = valleyview_VISA_Initialize, // initialize
+	.fini = NULL, // destroy
+	.write = NULL, // write
+	.freeze = valleyview_VISA_Disable_PMU, // freeze
+	.restart = valleyview_VISA_Enable_PMU, // restart
+	.read_data = valleyview_VISA_Read_PMU_Data, // read
+	.check_overflow = NULL, // check for overflow
+	.swap_group = NULL, // swap group
+	.read_lbrs = NULL, // read lbrs
+	.cleanup = valleyview_VISA_Clean_Up, // cleanup
+	.hw_errata = NULL, // hw errata
+	.read_power = NULL, // read power
+	.check_overflow_errata = NULL, // check overflow errata
+	.read_counts = NULL, // read counts
+	.check_overflow_gp_errata = NULL, // check overflow gp errata
+	.read_ro = NULL, // read_ro
+	.platform_info = NULL, // platform info
+	.trigger_read = valleyview_Trigger_Read, // trigger read
+	.scan_for_uncore = NULL, // scan for uncore
+	.read_metrics = NULL // read metrics
+};
+
diff --git a/drivers/platform/x86/socperf/Makefile b/drivers/platform/x86/socperf/Makefile
new file mode 100644
index 00000000000000..953f4d7fdf19b5
--- /dev/null
+++ b/drivers/platform/x86/socperf/Makefile
@@ -0,0 +1,68 @@
+#
+# Version: 1.0
+#
+# This file is provided under a dual BSD/GPLv2 license.  When using or
+# redistributing this file, you may do so under either license.
+#
+# GPL LICENSE SUMMARY
+#
+# Copyright (C) 2008-2021 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# -------------------- user configurable options ------------------------
+
+# base name of socperf driver
+DRIVER_NAME = socperf3
+
+ccflags-y := -I$(srctree)/drivers/platform/x86/socperf/include -I$(srctree)/drivers/platform/x86/socperf/inc
+
+obj-m := $(DRIVER_NAME).o
+
+arch-objs := pci.o \
+	     soc_uncore.o \
+	     haswellunc_sa.o \
+	     npk_uncore.o
+
+$(DRIVER_NAME)-objs := \
+	socperfdrv.o \
+	control.o \
+	utility.o \
+	pmu_list.o \
+	$(arch-objs)
+
diff --git a/drivers/platform/x86/socperf/control.c b/drivers/platform/x86/socperf/control.c
new file mode 100644
index 00000000000000..8aaa59086eadd7
--- /dev/null
+++ b/drivers/platform/x86/socperf/control.c
@@ -0,0 +1,762 @@
+/* ***********************************************************************************************
+
+   This file is provided under a dual BSD/GPLv2 license.  When using or
+   redistributing this file, you may do so under either license.
+
+   GPL LICENSE SUMMARY
+
+   Copyright (C) 2005-2021 Intel Corporation.
All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of version 2 of the GNU General Public License as
+   published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+   The full GNU General Public License is included in this distribution
+   in the file called LICENSE.GPL.
+
+   BSD LICENSE
+
+   Copyright (C) 2005-2021 Intel Corporation. All rights reserved.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in
+       the documentation and/or other materials provided with the
+       distribution.
+     * Neither the name of Intel Corporation nor the names of its
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ***********************************************************************************************
+*/
+
+#include "lwpmudrv_defines.h"
+#include <linux/version.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+
+#include "lwpmudrv_types.h"
+#include "rise_errors.h"
+#include "lwpmudrv_ecb.h"
+#include "socperfdrv.h"
+#include "control.h"
+#include <asm/page.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \
+	smp_call_function((func), (ctx), (wait))
+#else
+#define SMP_CALL_FUNCTION(func, ctx, retry, wait) \
+	smp_call_function((func), (ctx), (retry), (wait))
+#endif
+
+/*
+ * Global State Nodes - keep here for now.  Abstract out when necessary.
+ */
+GLOBAL_STATE_NODE socperf_driver_state;
+static MEM_TRACKER mem_tr_head; // start of the mem tracker list
+static MEM_TRACKER mem_tr_tail; // end of mem tracker list
+static spinlock_t mem_tr_lock; // spinlock for mem tracker list
+static unsigned long flags;
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn VOID SOCPERF_Invoke_Cpu (func, ctx, arg) + * + * @brief Set up a DPC call and insert it into the queue + * + * @param IN cpu_idx - the core id to dispatch this function to + * IN func - function to be invoked by the specified core(s) + * IN ctx - pointer to the parameter block for each function + * invocation + * + * @return None + * + * Special Notes: + * + */ +extern VOID +SOCPERF_Invoke_Cpu(int cpu_idx, VOID (*func)(PVOID), PVOID ctx) +{ + SOCPERF_Invoke_Parallel(func, ctx); + + return; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID SOCPERF_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns None + * + * @brief Service routine to handle all kinds of parallel invoke on all CPU calls + * + * Special Notes: + * Invoke the function provided in parallel in either a blocking or + * non-blocking mode. The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. + * Use the aliases SOCPERF_Invoke_Parallel(), SOCPERF_Invoke_Parallel_NB(), + * or SOCPERF_Invoke_Parallel_XS(). + * + */ +extern VOID +SOCPERF_Invoke_Parallel_Service(VOID (*func)(PVOID), + PVOID ctx, + int blocking, + int exclude) +{ + GLOBAL_STATE_cpu_count(socperf_driver_state) = 0; + GLOBAL_STATE_dpc_count(socperf_driver_state) = 0; + + preempt_disable(); + SMP_CALL_FUNCTION(func, ctx, 0, blocking); + + if (!exclude) { + func(ctx); + } + preempt_enable(); + + return; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID control_Memory_Tracker_Delete_Node(mem_tr) + * + * @param IN mem_tr - memory tracker node to delete + * + * @returns None + * + * @brief Delete specified node in the memory tracker + * + * Special Notes: + * Assumes mem_tr_lock is already held while calling this function! + */ +static VOID +control_Memory_Tracker_Delete_Node(MEM_TRACKER mem_tr) +{ + MEM_TRACKER prev_tr = NULL; + MEM_TRACKER next_tr = NULL; + U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE); + + if (!mem_tr) { + return; + } + + // free the allocated mem_el array (if any) + if (MEM_TRACKER_mem(mem_tr)) { + if (size < MAX_KMALLOC_SIZE) { + kfree(MEM_TRACKER_mem(mem_tr)); + } else { + free_pages((unsigned long)MEM_TRACKER_mem(mem_tr), + get_order(size)); + } + } + + // update the linked list + prev_tr = MEM_TRACKER_prev(mem_tr); + next_tr = MEM_TRACKER_next(mem_tr); + if (prev_tr) { + MEM_TRACKER_next(prev_tr) = next_tr; + } + if (next_tr) { + MEM_TRACKER_prev(next_tr) = prev_tr; + } + + // free the mem_tracker node + kfree(mem_tr); + + return; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn VOID control_Memory_Tracker_Create_Node(void) + * + * @param None - size of the memory to allocate + * + * @returns OS_SUCCESS if successful, otherwise error + * + * @brief Initialize the memory tracker + * + * Special Notes: + * Assumes mem_tr_lock is already held while calling this function! + * + * Since this function can be called within either GFP_KERNEL or + * GFP_ATOMIC contexts, the most restrictive allocation is used + * (viz., GFP_ATOMIC). 
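+ *
+ *           Illustrative call path (taken from control_Memory_Tracker_Add
+ *           below) showing why a non-sleeping allocation is required:
+ *               spin_lock_irqsave(&mem_tr_lock, flags);    // IRQs off
+ *               control_Memory_Tracker_Create_Node();      // must not sleep
+ *               spin_unlock_irqrestore(&mem_tr_lock, flags);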
+ */
+static U32 control_Memory_Tracker_Create_Node(void)
+{
+	U32 size = MEM_EL_MAX_ARRAY_SIZE * sizeof(MEM_EL_NODE);
+	PVOID location = NULL;
+	MEM_TRACKER mem_tr = NULL;
+
+	// create a mem tracker node
+	mem_tr = (MEM_TRACKER)kmalloc(sizeof(MEM_TRACKER_NODE), GFP_ATOMIC);
+	if (!mem_tr) {
+		SOCPERF_PRINT_ERROR(
+			"control_Memory_Tracker_Create_Node: failed to allocate mem tracker node\n");
+		return OS_FAULT;
+	}
+
+	// create an initial array of mem_el's inside the mem tracker node
+	if (size < MAX_KMALLOC_SIZE) {
+		location = (PVOID)kmalloc(size, GFP_ATOMIC);
+		SOCPERF_PRINT_DEBUG(
+			"control_Memory_Tracker_Create_Node: allocated small memory (0x%p, %d)\n",
+			location, (S32)size);
+	} else {
+		location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size));
+		SOCPERF_PRINT_DEBUG(
+			"control_Memory_Tracker_Create_Node: allocated large memory (0x%p, %d)\n",
+			location, (S32)size);
+	}
+
+	// initialize new mem tracker node
+	MEM_TRACKER_mem(mem_tr) = location;
+	MEM_TRACKER_prev(mem_tr) = NULL;
+	MEM_TRACKER_next(mem_tr) = NULL;
+
+	// if mem_el array allocation failed, then remove node
+	if (!MEM_TRACKER_mem(mem_tr)) {
+		control_Memory_Tracker_Delete_Node(mem_tr);
+		SOCPERF_PRINT_ERROR(
+			"control_Memory_Tracker_Create_Node: failed to allocate mem_el array in tracker node ... deleting node\n");
+		return OS_FAULT;
+	}
+
+	// initialize mem_tracker's mem_el array
+	MEM_TRACKER_max_size(mem_tr) = MEM_EL_MAX_ARRAY_SIZE;
+	memset(MEM_TRACKER_mem(mem_tr), 0, size);
+
+	// update the linked list
+	if (!mem_tr_head) {
+		mem_tr_head = mem_tr;
+	} else {
+		MEM_TRACKER_prev(mem_tr) = mem_tr_tail;
+		MEM_TRACKER_next(mem_tr_tail) = mem_tr;
+	}
+	mem_tr_tail = mem_tr;
+	SOCPERF_PRINT_DEBUG(
+		"control_Memory_Tracker_Create_Node: allocating new node=0x%p, max_elements=%d, size=%d\n",
+		MEM_TRACKER_mem(mem_tr_tail), MEM_EL_MAX_ARRAY_SIZE, size);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn U32 control_Memory_Tracker_Add(location, size, vmalloc_flag)
+ *
+ * @param IN location     - memory location
+ * @param IN size         - size of the memory to allocate
+ * @param IN vmalloc_flag - flag that indicates if the allocation was done with vmalloc
+ *
+ * @returns OS_SUCCESS if the allocation was tracked, otherwise an error code
+ *
+ * @brief Keep track of allocated memory with the memory tracker
+ *
+ * Special Notes:
+ *           Starting from the first mem_tracker node, the algorithm
+ *           finds the first "hole" in the mem_tracker list and
+ *           tracks the memory allocation there.
+ */
+static U32
+control_Memory_Tracker_Add(PVOID location, ssize_t size, DRV_BOOL vmalloc_flag)
+{
+	S32 i, n;
+	U32 status;
+	DRV_BOOL found;
+	MEM_TRACKER mem_tr;
+
+	spin_lock_irqsave(&mem_tr_lock, flags);
+
+	// check if there is space in ANY of mem_tracker's nodes for the memory item
+	mem_tr = mem_tr_head;
+	found = FALSE;
+	status = OS_SUCCESS;
+	i = n = 0;
+	while (mem_tr && (!found)) {
+		for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) {
+			if (!MEM_TRACKER_mem_address(mem_tr, i)) {
+				SOCPERF_PRINT_DEBUG(
+					"control_Memory_Tracker_Add: found index %d of %d available\n",
+					i, MEM_TRACKER_max_size(mem_tr) - 1);
+				n = i;
+				found = TRUE;
+				break; // use the first available hole, as documented above
+			}
+		}
+		if (!found) {
+			mem_tr = MEM_TRACKER_next(mem_tr);
+		}
+	}
+
+	if (!found) {
+		// extend into (i.e., create new) mem_tracker node ...
+		status = control_Memory_Tracker_Create_Node();
+		if (status != OS_SUCCESS) {
+			SOCPERF_PRINT_ERROR(
+				"Unable to create mem tracker node\n");
+			goto finish_add;
+		}
+		// use mem tracker tail node and first available entry in mem_el array
+		mem_tr = mem_tr_tail;
+		n = 0;
+	}
+
+	// we now have a location in mem tracker to keep track of the memory item
+	MEM_TRACKER_mem_address(mem_tr, n) = location;
+	MEM_TRACKER_mem_size(mem_tr, n) = size;
+	MEM_TRACKER_mem_vmalloc(mem_tr, n) = vmalloc_flag;
+	SOCPERF_PRINT_DEBUG(
+		"control_Memory_Tracker_Add: tracking (0x%p, %d) in node %d of %d\n",
+		location, (S32)size, n, MEM_TRACKER_max_size(mem_tr) - 1);
+
+finish_add:
+	spin_unlock_irqrestore(&mem_tr_lock, flags);
+
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID SOCPERF_Memory_Tracker_Init(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Initializes Memory Tracker
+ *
+ * Special Notes:
+ *           This should only be called when the driver is being loaded.
+ */
+extern VOID SOCPERF_Memory_Tracker_Init(VOID)
+{
+	SOCPERF_PRINT_DEBUG(
+		"SOCPERF_Memory_Tracker_Init: initializing mem tracker\n");
+
+	mem_tr_head = NULL;
+	mem_tr_tail = NULL;
+
+	spin_lock_init(&mem_tr_lock);
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID SOCPERF_Memory_Tracker_Free(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Frees memory used by Memory Tracker
+ *
+ * Special Notes:
+ *           This should only be called when the driver is being unloaded.
+ */
+extern VOID SOCPERF_Memory_Tracker_Free(VOID)
+{
+	S32 i;
+	MEM_TRACKER temp;
+
+	SOCPERF_PRINT_DEBUG(
+		"SOCPERF_Memory_Tracker_Free: destroying mem tracker\n");
+
+	spin_lock_irqsave(&mem_tr_lock, flags);
+
+	// check for any memory that was not freed, and free it
+	while (mem_tr_head) {
+		for (i = 0; i < MEM_TRACKER_max_size(mem_tr_head); i++) {
+			if (MEM_TRACKER_mem_address(mem_tr_head, i)) {
+				SOCPERF_PRINT_WARNING(
+					"SOCPERF_Memory_Tracker_Free: index %d of %d, not freed (0x%p, %d) ... freeing now\n",
+					i,
+					MEM_TRACKER_max_size(mem_tr_head) - 1,
+					MEM_TRACKER_mem_address(mem_tr_head, i),
+					MEM_TRACKER_mem_size(mem_tr_head, i));
+				if (MEM_TRACKER_mem_vmalloc(mem_tr_head, i)) {
+					// vmalloc'ed blocks must be released
+					// with vfree, not free_pages
+					vfree(MEM_TRACKER_mem_address(
+						mem_tr_head, i));
+				} else {
+					free_pages(
+						(unsigned long)MEM_TRACKER_mem_address(
+							mem_tr_head, i),
+						get_order(MEM_TRACKER_mem_size(
+							mem_tr_head, i)));
+				}
+				MEM_TRACKER_mem_address(mem_tr_head, i) = NULL;
+				MEM_TRACKER_mem_size(mem_tr_head, i) = 0;
+				MEM_TRACKER_mem_vmalloc(mem_tr_head, i) = FALSE;
+			}
+		}
+		temp = MEM_TRACKER_next(mem_tr_head);
+		control_Memory_Tracker_Delete_Node(mem_tr_head);
+		mem_tr_head = temp;
+	}
+
+	spin_unlock_irqrestore(&mem_tr_lock, flags);
+
+	SOCPERF_PRINT_DEBUG(
+		"SOCPERF_Memory_Tracker_Free: mem tracker destruction complete\n");
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn VOID SOCPERF_Memory_Tracker_Compaction(void)
+ *
+ * @param    None
+ *
+ * @returns  None
+ *
+ * @brief    Compacts the memory allocator if holes are detected
+ *
+ * Special Notes:
+ *           The algorithm compacts mem_tracker nodes such that
+ *           node entries are full starting from mem_tr_head
+ *           up until the first empty node is detected, after
+ *           which nodes up to mem_tr_tail will be empty.
+ *           At end of collection (or at other safe sync point),
+ *           we reclaim/compact space used by mem tracker.
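+ *
+ *           Worked example (hypothetical two-node list; letters are live
+ *           allocations, '-' is an empty slot):
+ *               before: head [A - B] -> tail [- C -]
+ *               after:  head [A C B] -> tail emptied and reclaimed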
+ */
+extern VOID SOCPERF_Memory_Tracker_Compaction(void)
+{
+	S32 i, j, n, m, c, d;
+	DRV_BOOL found, overlap;
+	MEM_TRACKER mem_tr1, mem_tr2;
+
+	spin_lock_irqsave(&mem_tr_lock, flags);
+
+	mem_tr1 = mem_tr_head;
+	mem_tr2 = mem_tr_tail;
+
+	// if memory tracker was never used, then no need to compact
+	if (!mem_tr1 || !mem_tr2) {
+		goto finish_compact;
+	}
+
+	i = j = n = c = d = 0;
+	m = MEM_TRACKER_max_size(mem_tr2) - 1;
+	overlap = FALSE;
+	while (!overlap) {
+		// find an empty entry, scanning forward
+		found = FALSE;
+		while (!found && !overlap && mem_tr1) {
+			SOCPERF_PRINT_DEBUG(
+				"SOCPERF_Memory_Tracker_Compaction: looking at mem_tr1 0x%p, index=%d\n",
+				mem_tr1, n);
+			for (i = n; i < MEM_TRACKER_max_size(mem_tr1); i++) {
+				if (!MEM_TRACKER_mem_address(mem_tr1, i)) {
+					SOCPERF_PRINT_DEBUG(
+						"SOCPERF_Memory_Tracker_Compaction: found index %d of %d empty\n",
+						i, MEM_TRACKER_max_size(mem_tr1) - 1);
+					found = TRUE;
+					break; // i now indexes the first hole
+				}
+			}
+			// check for overlap
+			overlap = (mem_tr1 == mem_tr2) && (i >= m);
+
+			// if no overlap and an empty entry was not found, then advance to next node
+			if (!found && !overlap) {
+				mem_tr1 = MEM_TRACKER_next(mem_tr1);
+				n = 0;
+			}
+		}
+		// all entries going in the forward direction are full, so exit
+		if (!found || overlap) {
+			goto finish_compact;
+		}
+
+		// find a non-empty entry, scanning backward
+		found = FALSE;
+		while (!found && !overlap && mem_tr2) {
+			SOCPERF_PRINT_DEBUG(
+				"SOCPERF_Memory_Tracker_Compaction: looking at mem_tr2 0x%p, index=%d\n",
+				mem_tr2, m);
+			for (j = m; j >= 0; j--) {
+				if (MEM_TRACKER_mem_address(mem_tr2, j)) {
+					SOCPERF_PRINT_DEBUG(
+						"SOCPERF_Memory_Tracker_Compaction: found index %d of %d non-empty\n",
+						j, MEM_TRACKER_max_size(mem_tr2) - 1);
+					found = TRUE;
+					break; // j now indexes the last occupied slot
+				}
+			}
+			// check for overlap
+			overlap = (mem_tr1 == mem_tr2) && (j <= i);
+
+			// if no overlap and no non-empty entry was found, then retreat to prev node
+			if (!found && !overlap) {
+				// keep track of empty node
+				MEM_TRACKER empty_tr = mem_tr2;
+				mem_tr2 = MEM_TRACKER_prev(mem_tr2);
+				m = MEM_TRACKER_max_size(mem_tr2) - 1;
+				mem_tr_tail = mem_tr2; // keep track of new tail
+				// reclaim empty mem_tracker node
+				control_Memory_Tracker_Delete_Node(empty_tr);
+				// keep track of number of node deletions performed
+				d++;
+			}
+		}
+		// all entries going in the reverse direction are empty, so exit
+		if (!found || overlap) {
+			goto finish_compact;
+		}
+
+		// swap the empty entry with the non-empty entry so that "holes" get bubbled towards the end of the list
+		MEM_TRACKER_mem_address(mem_tr1, i) =
+			MEM_TRACKER_mem_address(mem_tr2, j);
+		MEM_TRACKER_mem_size(mem_tr1, i) =
+			MEM_TRACKER_mem_size(mem_tr2, j);
+		MEM_TRACKER_mem_vmalloc(mem_tr1, i) =
+			MEM_TRACKER_mem_vmalloc(mem_tr2, j);
+
+		MEM_TRACKER_mem_address(mem_tr2, j) = NULL;
+		MEM_TRACKER_mem_size(mem_tr2, j) = 0;
+		MEM_TRACKER_mem_vmalloc(mem_tr2, j) = FALSE;
+
+		// keep track of number of memory compactions performed
+		c++;
+
+		// start new search from the next element in mem_tr1
+		n = i + 1;
+
+		// start new search from the prev element in mem_tr2
+		m = j - 1;
+	}
+
+finish_compact:
+	spin_unlock_irqrestore(&mem_tr_lock, flags);
+
+	SOCPERF_PRINT_DEBUG(
+		"SOCPERF_Memory_Tracker_Compaction: number of elements compacted = %d, nodes deleted = %d\n",
+		c, d);
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID SOCPERF_Allocate_Memory(size)
+ *
+ * @param IN size - size of the memory to allocate
+ *
+ * @returns char* - pointer to the allocated memory block
+ *
+ * @brief Allocate and zero memory
+ *
+ * Special
Notes: + * Allocate memory in the GFP_KERNEL pool. + * + * Use this if memory is to be allocated within a context where + * the allocator can block the allocation (e.g., by putting + * the caller to sleep) while it tries to free up memory to + * satisfy the request. Otherwise, if the allocation must + * occur atomically (e.g., caller cannot sleep), then use + * SOCPERF_Allocate_KMemory instead. + */ +extern PVOID SOCPERF_Allocate_Memory(size_t size) +{ + U32 status; + PVOID location; + + if (size <= 0) { + return NULL; + } + + // determine whether to use mem_tracker or not + if (size < MAX_KMALLOC_SIZE) { + location = (PVOID)kmalloc(size, GFP_KERNEL); + SOCPERF_PRINT_DEBUG( + "SOCPERF_Allocate_Memory: allocated small memory (0x%p, %d)\n", + location, (S32)size); + } else { + location = (PVOID)vmalloc(size); + if (location) { + status = control_Memory_Tracker_Add(location, size, + TRUE); + SOCPERF_PRINT_DEBUG( + "SOCPERF_Allocate_Memory: - allocated *large* memory (0x%p, %d)\n", + location, (S32)size); + if (status != OS_SUCCESS) { + // failed to track in mem_tracker, so free up memory and return NULL + vfree(location); + SOCPERF_PRINT_ERROR( + "SOCPERF_Allocate_Memory: - able to allocate, but failed to track via MEM_TRACKER ... freeing\n"); + return NULL; + } + } + } + + if (!location) { + SOCPERF_PRINT_ERROR( + "SOCPERF_Allocate_Memory: failed for size %d bytes\n", + (S32)size); + return NULL; + } + + memset(location, 0, size); + + return location; +} + +/* ------------------------------------------------------------------------- */ +/* + * @fn PVOID SOCPERF_Allocate_KMemory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_ATOMIC pool. + * + * Use this if memory is to be allocated within a context where + * the allocator cannot block the allocation (e.g., by putting + * the caller to sleep) as it tries to free up memory to + * satisfy the request. Examples include interrupt handlers, + * process context code holding locks, etc. + */ +extern PVOID SOCPERF_Allocate_KMemory(size_t size) +{ + U32 status; + PVOID location; + + if (size <= 0) { + return NULL; + } + + if (size < MAX_KMALLOC_SIZE) { + location = (PVOID)kmalloc(size, GFP_ATOMIC); + SOCPERF_PRINT_DEBUG( + "SOCPERF_Allocate_KMemory: allocated small memory (0x%p, %d)\n", + location, (S32)size); + } else { + location = (PVOID)__get_free_pages(GFP_ATOMIC, get_order(size)); + status = control_Memory_Tracker_Add(location, size, FALSE); + SOCPERF_PRINT_DEBUG( + "SOCPERF_Allocate_KMemory: allocated large memory (0x%p, %d)\n", + location, (S32)size); + if (status != OS_SUCCESS) { + // failed to track in mem_tracker, so free up memory and return NULL + free_pages((unsigned long)location, get_order(size)); + SOCPERF_PRINT_ERROR( + "SOCPERF_Allocate_KMemory: - able to allocate, but failed to track via MEM_TRACKER ... 
freeing\n");
+			return NULL;
+		}
+	}
+
+	if (!location) {
+		SOCPERF_PRINT_ERROR(
+			"SOCPERF_Allocate_KMemory: failed for size %d bytes\n",
+			(S32)size);
+		return NULL;
+	}
+
+	memset(location, 0, size);
+
+	return location;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * @fn PVOID SOCPERF_Free_Memory(location)
+ *
+ * @param IN location - pointer to the memory block to free
+ *
+ * @returns NULL
+ *
+ * @brief Frees the memory block
+ *
+ * Special Notes:
+ *        Does not try to free memory if fed with a NULL pointer
+ *        Expected usage:
+ *            ptr = SOCPERF_Free_Memory(ptr);
+ *        Does not do compaction ... can have "holes" in
+ *        mem_tracker list after this operation.
+ */
+extern PVOID SOCPERF_Free_Memory(PVOID location)
+{
+	S32 i;
+	DRV_BOOL found;
+	MEM_TRACKER mem_tr;
+
+	if (!location) {
+		return NULL;
+	}
+
+	spin_lock_irqsave(&mem_tr_lock, flags);
+
+	// scan through mem_tracker nodes for matching entry (if any)
+	mem_tr = mem_tr_head;
+	found = FALSE;
+	while (mem_tr) {
+		for (i = 0; i < MEM_TRACKER_max_size(mem_tr); i++) {
+			if (location == MEM_TRACKER_mem_address(mem_tr, i)) {
+				SOCPERF_PRINT_DEBUG(
+					"SOCPERF_Free_Memory: freeing large memory location 0x%p\n",
+					location);
+				found = TRUE;
+				if (MEM_TRACKER_mem_vmalloc(mem_tr, i)) {
+					vfree(location);
+				} else {
+					free_pages(
+						(unsigned long)location,
+						get_order(MEM_TRACKER_mem_size(
+							mem_tr, i)));
+				}
+				MEM_TRACKER_mem_address(mem_tr, i) = NULL;
+				MEM_TRACKER_mem_size(mem_tr, i) = 0;
+				MEM_TRACKER_mem_vmalloc(mem_tr, i) = FALSE;
+				goto finish_free;
+			}
+		}
+		mem_tr = MEM_TRACKER_next(mem_tr);
+	}
+
+finish_free:
+	spin_unlock_irqrestore(&mem_tr_lock, flags);
+
+	// the allocation must have been smaller than the mem tracker size
+	// limit, so it was done with kmalloc
+	if (!found) {
+		SOCPERF_PRINT_DEBUG(
+			"SOCPERF_Free_Memory: freeing small memory location 0x%p\n",
+			location);
+		kfree(location);
+	}
+
+	return NULL;
+}
diff --git a/drivers/platform/x86/socperf/haswellunc_sa.c b/drivers/platform/x86/socperf/haswellunc_sa.c
new file mode 100644
index 00000000000000..c07412d4e42a8f
--- /dev/null
+++ b/drivers/platform/x86/socperf/haswellunc_sa.c
@@ -0,0 +1,419 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright (C) 2011-2021 Intel Corporation. All rights reserved.
+  All rights reserved.
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#include "lwpmudrv_defines.h" +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "socperfdrv.h" +#include "control.h" +#include "haswellunc_sa.h" +#include "ecb_iterators.h" +#include "inc/pci.h" + +static U64 counter_virtual_address; +static U32 counter_overflow[HSWUNC_SA_MAX_COUNTERS]; +extern LWPMU_DEVICE device_uncore; +static U32 device_id; + +/*! + * @fn static VOID hswunc_sa_Write_PMU(VOID*) + * + * @brief Initial write of PMU registers + * Walk through the entries and write the value of the register accordingly. 
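+ *        For MMIO entries, the routine composes each 64-bit BAR from the
+ *        two 32-bit halves read out of PCI config space before mapping it,
+ *        in outline (matching the shift and mask used in the code below):
+ *
+ *            final_bar = (bar_hi << HSWUNC_SA_BAR_ADDR_SHIFT) | bar_lo;
+ *            final_bar &= HSWUNC_SA_BAR_ADDR_MASK;
+ *
+ *        and then ioremap()s final_bar for the writel()/readl() accesses.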
+ * When current_group = 0, then this is the first time this routine is called, + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID +hswunc_sa_Write_PMU(VOID *param) +{ + U32 dev_idx = *((U32 *)param); + U32 cur_grp = LWPMU_DEVICE_cur_group(device_uncore); + ECB pecb = LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; + DRV_PCI_DEVICE_ENTRY dpden; + U32 pci_address; + U32 bar_lo; + U64 bar_hi; + U64 final_bar; + U64 physical_address; + U32 dev_index = 0; + S32 bar_list[HSWUNC_SA_MAX_PCI_DEVICES]; + U32 bar_index = 0; + U64 gdxc_bar = 0; + U32 map_size = 0; + U64 virtual_address = 0; + U64 mmio_offset = 0; + U32 bar_name = 0; + DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; + U32 next_bar_offset = 0; + U32 i = 0; + + for (dev_index = 0; dev_index < HSWUNC_SA_MAX_PCI_DEVICES; + dev_index++) { + bar_list[dev_index] = -1; + } + + device_id = dev_idx; + // initialize the CHAP per-counter overflow numbers + for (i = 0; i < HSWUNC_SA_MAX_COUNTERS; i++) { + counter_overflow[i] = 0; + socperf_pcb[0].last_uncore_count[i] = 0; + } + + ECB_pcidev_entry_list(pecb) = + (DRV_PCI_DEVICE_ENTRY)((S8 *)pecb + + ECB_pcidev_list_offset(pecb)); + dpden = ECB_pcidev_entry_list(pecb); + + if (counter_virtual_address) { + for (i = 0; i < ECB_num_entries(pecb); i++) { + writel(HSWUNC_SA_CHAP_STOP, + (U32 *)(((char *)(UIOP)counter_virtual_address) + + HSWUNC_SA_CHAP_CTRL_REG_OFFSET + + i * 0x10)); + } + } + + for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); + dev_index++) { + curr_pci_entry = &dpden[dev_index]; + mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + curr_pci_entry); + bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); + if (DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == + UNC_PCICFG) { + pci_address = FORM_PCI_ADDR( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + mmio_offset); + SOCPERF_PCI_Write_Ulong( + pci_address, + DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry)); + continue; + } + // UNC_MMIO programming + if (bar_list[bar_name] != -1) { + bar_index = bar_list[bar_name]; + virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( + &dpden[bar_index]); + DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = + DRV_PCI_DEVICE_ENTRY_virtual_address( + &dpden[bar_index]); + writel(DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry), + (U32 *)(((char *)(UIOP)virtual_address) + + mmio_offset)); + continue; + } + if (bar_name == UNC_GDXCBAR) { + DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = + gdxc_bar; + } else { + pci_address = FORM_PCI_ADDR( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_bar_offset( + curr_pci_entry)); + bar_lo = SOCPERF_PCI_Read_Ulong(pci_address); + next_bar_offset = DRV_PCI_DEVICE_ENTRY_bar_offset( + curr_pci_entry) + + HSWUNC_SA_NEXT_ADDR_OFFSET; + pci_address = FORM_PCI_ADDR( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + next_bar_offset); + bar_hi = SOCPERF_PCI_Read_Ulong(pci_address); + final_bar = + (bar_hi << HSWUNC_SA_BAR_ADDR_SHIFT) | bar_lo; + final_bar &= HSWUNC_SA_BAR_ADDR_MASK; + + DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = + final_bar; + } + physical_address = + DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry); + + if (physical_address) { + if (bar_name == UNC_MCHBAR) { + map_size = 
HSWUNC_SA_MCHBAR_MMIO_PAGE_SIZE; + } else if (bar_name == UNC_PCIEXBAR) { + map_size = HSWUNC_SA_PCIEXBAR_MMIO_PAGE_SIZE; + } else { + map_size = HSWUNC_SA_OTHER_BAR_MMIO_PAGE_SIZE; + } + DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = + (U64)(UIOP)ioremap(physical_address, map_size); + virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( + curr_pci_entry); + + if (!gdxc_bar && bar_name == UNC_MCHBAR) { + bar_lo = readl( + (U32 *)((char *)(UIOP)virtual_address + + HSWUNC_SA_GDXCBAR_OFFSET_LO)); + bar_hi = readl( + (U32 *)((char *)(UIOP)virtual_address + + HSWUNC_SA_GDXCBAR_OFFSET_HI)); + gdxc_bar = + (bar_hi << HSWUNC_SA_BAR_ADDR_SHIFT) | + bar_lo; + gdxc_bar = gdxc_bar & HSWUNC_SA_GDXCBAR_MASK; + } + writel((U32)DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry), + (U32 *)(((char *)(UIOP)virtual_address) + + mmio_offset)); + bar_list[bar_name] = dev_index; + if (counter_virtual_address == 0 && + bar_name == UNC_CHAPADR) { + counter_virtual_address = virtual_address; + } + } + } + + return; +} + +/*! + * @fn static VOID hswunc_sa_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when sampling/driver stops + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID hswunc_sa_Disable_PMU(PVOID param) +{ + DRV_PCI_DEVICE_ENTRY dpden; + U32 dev_index = 0; + U32 cur_grp = LWPMU_DEVICE_cur_group(device_uncore); + ECB pecb = LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; + U32 i = 0; + + if (GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_PREPARE_STOP) { + if (counter_virtual_address) { + for (i = 0; i < ECB_num_entries(pecb); i++) { + writel(HSWUNC_SA_CHAP_STOP, + (U32 *)(((char *)(UIOP) counter_virtual_address) + + HSWUNC_SA_CHAP_CTRL_REG_OFFSET + i * 0x10)); + } + } + + dpden = ECB_pcidev_entry_list(pecb); + for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); + dev_index++) { + if (DRV_PCI_DEVICE_ENTRY_config_type( + &dpden[dev_index]) == UNC_MMIO && + DRV_PCI_DEVICE_ENTRY_bar_address( + &dpden[dev_index]) != 0) { + iounmap(( + void *)(UIOP)(DRV_PCI_DEVICE_ENTRY_virtual_address( + &dpden[dev_index]))); + } + } + counter_virtual_address = 0; + } + + return; +} + +/*! + * @fn static VOID hswunc_sa_Initialize(PVOID) + * + * @brief Initialize any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID hswunc_sa_Initialize(VOID *param) +{ + counter_virtual_address = 0; + return; +} + +/*! + * @fn static VOID hswunc_sa_Clean_Up(PVOID) + * + * @brief Reset any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID hswunc_sa_Clean_Up(VOID *param) +{ + counter_virtual_address = 0; + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
+ * @fn hswunc_sa_Read_Data(data_buffer)
+ *
+ * @param data_buffer - data buffer to read data into
+ *
+ * @return None
+ *
+ * @brief Read the Uncore count data and store it into the buffer data_buffer
+ *
+ */
+static VOID hswunc_sa_Read_Data(PVOID data_buffer)
+{
+	U32 event_id = 0;
+	U64 *data;
+	int data_index;
+	U32 data_val = 0;
+	U64 total_count = 0;
+	U32 cur_grp = LWPMU_DEVICE_cur_group(device_uncore);
+
+	if (GLOBAL_STATE_current_phase(socperf_driver_state) ==
+		    DRV_STATE_UNINITIALIZED ||
+	    GLOBAL_STATE_current_phase(socperf_driver_state) ==
+		    DRV_STATE_IDLE ||
+	    GLOBAL_STATE_current_phase(socperf_driver_state) ==
+		    DRV_STATE_RESERVED ||
+	    GLOBAL_STATE_current_phase(socperf_driver_state) ==
+		    DRV_STATE_PREPARE_STOP ||
+	    GLOBAL_STATE_current_phase(socperf_driver_state) ==
+		    DRV_STATE_STOPPED) {
+		SOCPERF_PRINT_ERROR("ERROR: RETURNING EARLY from Read_Data\n");
+		return;
+	}
+	if (data_buffer == NULL) {
+		return;
+	}
+	data = (U64 *)data_buffer;
+	data_index = 0;
+	// group id
+	data[data_index] = cur_grp + 1;
+	data_index++;
+
+	FOR_EACH_PCI_DATA_REG_RAW (pecb, i, dev_idx) {
+		//event_id = ECB_entries_event_id_index_local(pecb, i);
+		if (counter_virtual_address) {
+			writel(HSWUNC_SA_CHAP_SAMPLE_DATA,
+			       (U32 *)(((char *)(UIOP)counter_virtual_address) +
+				       HSWUNC_SA_CHAP_CTRL_REG_OFFSET +
+				       i * 0x10));
+			data_val = readl((
+				U32 *)((char *)(UIOP)(counter_virtual_address) +
+				       ECB_entries_reg_offset(pecb, i)));
+		}
+
+		// detect 32-bit counter wraparound
+		if (data_val < socperf_pcb[0].last_uncore_count[i]) {
+			counter_overflow[i]++;
+		}
+		socperf_pcb[0].last_uncore_count[i] = data_val;
+
+		total_count =
+			data_val + counter_overflow[i] * HSWUNC_SA_MAX_COUNT;
+		data[data_index + event_id] = total_count;
+		SOCPERF_PRINT_DEBUG("DATA=%u\n", data_val);
+		event_id++;
+	}
+	END_FOR_EACH_PCI_DATA_REG_RAW;
+
+	return;
+}
+
+/*
+ * Initialize the dispatch table
+ */
+DISPATCH_NODE socperf_hswunc_sa_dispatch = {
+	.init = hswunc_sa_Initialize, // initialize
+	.fini = NULL, // destroy
+	.write = hswunc_sa_Write_PMU, // write
+	.freeze = hswunc_sa_Disable_PMU, // freeze
+	.restart = NULL, // restart
+	.read_data = NULL, // read
+	.check_overflow = NULL, // check for overflow
+	.swap_group = NULL,
+	.read_lbrs = NULL,
+	.clean_up = hswunc_sa_Clean_Up,
+	.hw_errata = NULL,
+	.read_power = NULL,
+	.check_overflow_errata = NULL,
+	.read_counts = NULL, // read_counts
+	.check_overflow_gp_errata = NULL,
+	.read_ro = NULL,
+	.platform_info = NULL,
+	.trigger_read = NULL,
+	.read_current_data = hswunc_sa_Read_Data,
+	.create_mem = NULL,
+	.check_status = NULL,
+	.read_mem = NULL,
+	.stop_mem = NULL
+};
diff --git a/drivers/platform/x86/socperf/inc/control.h b/drivers/platform/x86/socperf/inc/control.h
new file mode 100644
index 00000000000000..fdd11193197a7f
--- /dev/null
+++ b/drivers/platform/x86/socperf/inc/control.h
@@ -0,0 +1,473 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright (C) 2005-2021 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *********************************************************************************************** +*/ + +#ifndef _CONTROL_H_ +#define _CONTROL_H_ + +#include +#include +#if defined(DRV_IA32) +#include +#endif +#include +#if defined(DRV_IA32) +#include +#endif +#include + +#include "lwpmudrv_defines.h" +#include "socperfdrv.h" +#include "lwpmudrv_types.h" + +// large memory allocation will be used if the requested size (in bytes) is +// above this threshold +#define MAX_KMALLOC_SIZE ((1 << 17) - 1) +#define SOCPERF_DRV_MEMSET memset + +// check whether Linux driver should use unlocked ioctls (not protected by BKL) +// Kernel 5.9 removed the HAVE_UNLOCKED_IOCTL and HAVE_COMPAT_IOCTL definitions +// Changing the kernel version check to 5.0.0 as SLES15SP3 has backported the above change +#if defined(HAVE_UNLOCKED_IOCTL) || \ + LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define DRV_USE_UNLOCKED_IOCTL +#endif +#if defined(DRV_USE_UNLOCKED_IOCTL) +#define IOCTL_OP .unlocked_ioctl +#define IOCTL_OP_TYPE long +#define IOCTL_USE_INODE +#else +#define IOCTL_OP .ioctl +#define IOCTL_OP_TYPE S32 +#define IOCTL_USE_INODE struct inode *inode, +#endif + +// Information about the state of the driver +typedef struct GLOBAL_STATE_NODE_S GLOBAL_STATE_NODE; +typedef GLOBAL_STATE_NODE *GLOBAL_STATE; +struct GLOBAL_STATE_NODE_S { + volatile S32 cpu_count; + volatile S32 dpc_count; + + S32 num_cpus; // Number of CPUs in the system + // Number of active CPUs - some cores can be deactivated by the user / admin + S32 active_cpus; + S32 num_em_groups; + S32 num_descriptors; + volatile S32 current_phase; +}; + +// Access Macros +#define GLOBAL_STATE_num_cpus(x) ((x).num_cpus) +#define GLOBAL_STATE_active_cpus(x) ((x).active_cpus) +#define GLOBAL_STATE_cpu_count(x) ((x).cpu_count) +#define GLOBAL_STATE_dpc_count(x) ((x).dpc_count) +#define GLOBAL_STATE_num_em_groups(x) ((x).num_em_groups) +#define GLOBAL_STATE_num_descriptors(x) ((x).num_descriptors) +#define GLOBAL_STATE_current_phase(x) ((x).current_phase) +#define GLOBAL_STATE_sampler_id(x) ((x).sampler_id) + +/* + * + * + * CPU State data structure and access macros + * + */ +typedef struct CPU_STATE_NODE_S CPU_STATE_NODE; +typedef CPU_STATE_NODE *CPU_STATE; +struct CPU_STATE_NODE_S { + S32 apic_id; // Processor ID on the system bus + PVOID apic_linear_addr; // linear address of local apic + PVOID apic_physical_addr; // physical address of local apic + + PVOID idt_base; // local IDT base address + atomic_t in_interrupt; + +#if defined(DRV_IA32) + U64 saved_ih; // saved perfvector to restore +#endif +#if defined(DRV_EM64T) + PVOID saved_ih; // saved perfvector to restore +#endif + + S64 *em_tables; // holds the data that is saved/restored + // during event multiplexing + + struct timer_list *em_timer; + U32 current_group; + S32 trigger_count; + S32 trigger_event_num; + + DISPATCH dispatch; + PVOID lbr_area; + PVOID old_dts_buffer; + PVOID dts_buffer; + U32 initial_mask; + U32 accept_interrupt; + + U64 *pmu_state; // holds PMU state (e.g., MSRs) that will be + // saved before and restored after collection + S32 socket_master; + S32 core_master; + S32 thr_master; + U64 num_samples; + U64 reset_mask; + U64 group_swap; + U64 last_uncore_count[16]; +}; + +#define CPU_STATE_apic_id(cpu) ((cpu)->apic_id) +#define CPU_STATE_apic_linear_addr(cpu) ((cpu)->apic_linear_addr) +#define CPU_STATE_apic_physical_addr(cpu) ((cpu)->apic_physical_addr) +#define CPU_STATE_idt_base(cpu) ((cpu)->idt_base) +#define CPU_STATE_in_interrupt(cpu) ((cpu)->in_interrupt) +#define 
CPU_STATE_saved_ih(cpu) ((cpu)->saved_ih) +#define CPU_STATE_saved_ih_hi(cpu) ((cpu)->saved_ih_hi) +#define CPU_STATE_dpc(cpu) ((cpu)->dpc) +#define CPU_STATE_em_tables(cpu) ((cpu)->em_tables) +#define CPU_STATE_pmu_state(cpu) ((cpu)->pmu_state) +#define CPU_STATE_em_dpc(cpu) ((cpu)->em_dpc) +#define CPU_STATE_em_timer(cpu) ((cpu)->em_timer) +#define CPU_STATE_current_group(cpu) ((cpu)->current_group) +#define CPU_STATE_trigger_count(cpu) ((cpu)->trigger_count) +#define CPU_STATE_trigger_event_num(cpu) ((cpu)->trigger_event_num) +#define CPU_STATE_dispatch(cpu) ((cpu)->dispatch) +#define CPU_STATE_lbr(cpu) ((cpu)->lbr) +#define CPU_STATE_old_dts_buffer(cpu) ((cpu)->old_dts_buffer) +#define CPU_STATE_dts_buffer(cpu) ((cpu)->dts_buffer) +#define CPU_STATE_initial_mask(cpu) ((cpu)->initial_mask) +#define CPU_STATE_accept_interrupt(cpu) ((cpu)->accept_interrupt) +#define CPU_STATE_msr_value(cpu) ((cpu)->msr_value) +#define CPU_STATE_msr_addr(cpu) ((cpu)->msr_addr) +#define CPU_STATE_socket_master(cpu) ((cpu)->socket_master) +#define CPU_STATE_core_master(cpu) ((cpu)->core_master) +#define CPU_STATE_thr_master(cpu) ((cpu)->thr_master) +#define CPU_STATE_num_samples(cpu) ((cpu)->num_samples) +#define CPU_STATE_reset_mask(cpu) ((cpu)->reset_mask) +#define CPU_STATE_group_swap(cpu) ((cpu)->group_swap) + +/* + * For storing data for --read/--write-msr command line options + */ +typedef struct MSR_DATA_NODE_S MSR_DATA_NODE; +typedef MSR_DATA_NODE *MSR_DATA; +struct MSR_DATA_NODE_S { + U64 value; // Used for emon, for read/write-msr value + U64 addr; +}; + +#define MSR_DATA_value(md) ((md)->value) +#define MSR_DATA_addr(md) ((md)->addr) + +/* + * Memory Allocation tracker + * + * Currently used to track large memory allocations + */ + +typedef struct MEM_EL_NODE_S MEM_EL_NODE; +typedef MEM_EL_NODE *MEM_EL; +struct MEM_EL_NODE_S { + char *address; // pointer to piece of memory we're tracking + S32 size; // size (bytes) of the piece of memory + // flag to check if the memory is allocated using vmalloc + DRV_BOOL is_addr_vmalloc; +}; + +// accessors for MEM_EL defined in terms of MEM_TRACKER below + +#define MEM_EL_MAX_ARRAY_SIZE 32 // minimum is 1, nominal is 64 + +typedef struct MEM_TRACKER_NODE_S MEM_TRACKER_NODE; +typedef MEM_TRACKER_NODE *MEM_TRACKER; +struct MEM_TRACKER_NODE_S { + // number of elements in the array (default: MEM_EL_MAX_ARRAY_SIZE) + S32 max_size; + MEM_EL mem; // array of large memory items we're tracking + // enables bi-directional scanning of linked list + MEM_TRACKER prev, next; +}; + +#define MEM_TRACKER_max_size(mt) ((mt)->max_size) +#define MEM_TRACKER_mem(mt) ((mt)->mem) +#define MEM_TRACKER_prev(mt) ((mt)->prev) +#define MEM_TRACKER_next(mt) ((mt)->next) +#define MEM_TRACKER_mem_address(mt, i) (MEM_TRACKER_mem(mt)[(i)].address) +#define MEM_TRACKER_mem_size(mt, i) (MEM_TRACKER_mem(mt)[(i)].size) +#define MEM_TRACKER_mem_vmalloc(mt, i) \ + (MEM_TRACKER_mem(mt)[(i)].is_addr_vmalloc) + +/**************************************************************************** + ** Global State variables exported + ***************************************************************************/ +extern CPU_STATE socperf_pcb; +extern U64 *tsc_info; +extern GLOBAL_STATE_NODE socperf_driver_state; +extern MSR_DATA msr_data; +extern U32 *core_to_package_map; +extern U32 num_packages; +extern U64 *restore_bl_bypass; +extern U32 **restore_ha_direct2core; +extern U32 **restore_qpi_direct2core; +/**************************************************************************** + ** Handy Short cuts + 
***************************************************************************/ + +/* + * SOCPERF_THIS_CPU() + * Parameters + * None + * Returns + * CPU number of the processor being executed on + * + */ +#define SOCPERF_THIS_CPU() smp_processor_id() + +/**************************************************************************** + ** Interface definitions + ***************************************************************************/ + +/* + * Execution Control Functions + */ + +extern VOID SOCPERF_Invoke_Cpu(S32 cpuid, VOID (*func)(PVOID), PVOID ctx); + +/* + * @fn VOID SOCPERF_Invoke_Parallel_Service(func, ctx, blocking, exclude) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * @param blocking - Wait for invoked function to complete + * @param exclude - exclude the current core from executing the code + * + * @returns none + * + * @brief Service routine to handle all kinds of parallel invoke on all CPU calls + * + * Special Notes: + * Invoke the function provided in parallel in either a blocking/non-blocking mode. + * The current core may be excluded if desired. + * NOTE - Do not call this function directly from source code. Use the aliases + * SOCPERF_Invoke_Parallel(), SOCPERF_Invoke_Parallel_NB(), SOCPERF_Invoke_Parallel_XS(). + * + */ +extern VOID +SOCPERF_Invoke_Parallel_Service(VOID (*func)(PVOID), + PVOID ctx, + S32 blocking, + S32 exclude); + +/* + * @fn VOID SOCPERF_Invoke_Parallel(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. Wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, including the CPU that the control is + * being invoked on + * Macro built on the service routine + * + */ +#define SOCPERF_Invoke_Parallel(a, b) \ + SOCPERF_Invoke_Parallel_Service((a), (b), TRUE, FALSE) + +/* + * @fn VOID SOCPERF_Invoke_Parallel_NB(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. DO NOT Wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, including the CPU that the control is + * being invoked on + * Macro built on the service routine + * + */ +#define SOCPERF_Invoke_Parallel_NB(a, b) \ + SOCPERF_Invoke_Parallel_Service((a), (b), FALSE, FALSE) + +/* + * @fn VOID SOCPERF_Invoke_Parallel_XS(func, ctx) + * + * @param func - function to be invoked by each core in the system + * @param ctx - pointer to the parameter block for each function invocation + * + * @returns none + * + * @brief Invoke the named function in parallel. Wait for all the functions to complete. + * + * Special Notes: + * Invoke the function named in parallel, excluding the CPU that the control is + * being invoked on + * Macro built on the service routine + * + */ +#define SOCPERF_Invoke_Parallel_XS(a, b) \ + SOCPERF_Invoke_Parallel_Service((a), (b), TRUE, TRUE) + +/* + * @fn VOID SOCPERF_Memory_Tracker_Init(void) + * + * @param None + * + * @returns None + * + * @brief Initializes Memory Tracker + * + * Special Notes: + * This should only be called when the + * the driver is being loaded. 
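+ *
+ *             An illustrative call sequence (hypothetical buffer and
+ *             length names, shown only to relate the APIs below):
+ *
+ *                 SOCPERF_Memory_Tracker_Init();         // driver load
+ *                 buf = SOCPERF_Allocate_Memory(len);    // during collection
+ *                 buf = SOCPERF_Free_Memory(buf);        // always returns NULL
+ *                 SOCPERF_Memory_Tracker_Compaction();   // at a safe sync point
+ *                 SOCPERF_Memory_Tracker_Free();         // driver unload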
+ */ +extern VOID SOCPERF_Memory_Tracker_Init(VOID); + +/* + * @fn VOID SOCPERF_Memory_Tracker_Free(void) + * + * @param None + * + * @returns None + * + * @brief Frees memory used by Memory Tracker + * + * Special Notes: + * This should only be called when the + * driver is being unloaded. + */ +extern VOID SOCPERF_Memory_Tracker_Free(VOID); + +/* + * @fn VOID SOCPERF_Memory_Tracker_Compaction(void) + * + * @param None + * + * @returns None + * + * @brief Compacts the memory allocator if holes are detected + * + * Special Notes: + * At end of collection (or at other safe sync point), + * reclaim/compact space used by mem tracker + */ +extern VOID SOCPERF_Memory_Tracker_Compaction(void); + +/* + * @fn PVOID SOCPERF_Allocate_Memory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_KERNEL pool. + * + * Use this if memory is to be allocated within a context where + * the allocator can block the allocation (e.g., by putting + * the caller to sleep) while it tries to free up memory to + * satisfy the request. Otherwise, if the allocation must + * occur atomically (e.g., caller cannot sleep), then use + * SOCPERF_Allocate_KMemory instead. + */ +extern PVOID SOCPERF_Allocate_Memory(size_t size); + +/* + * @fn PVOID SOCPERF_Allocate_KMemory(size) + * + * @param IN size - size of the memory to allocate + * + * @returns char* - pointer to the allocated memory block + * + * @brief Allocate and zero memory + * + * Special Notes: + * Allocate memory in the GFP_ATOMIC pool. + * + * Use this if memory is to be allocated within a context where + * the allocator cannot block the allocation (e.g., by putting + * the caller to sleep) as it tries to free up memory to + * satisfy the request. Examples include interrupt handlers, + * process context code holding locks, etc. + */ +extern PVOID SOCPERF_Allocate_KMemory(size_t size); + +/* + * @fn PVOID SOCPERF_Free_Memory(location) + * + * @param IN location - size of the memory to allocate + * + * @returns pointer to the allocated memory block + * + * @brief Frees the memory block + * + * Special Notes: + * Does not try to free memory if fed with a NULL pointer + * Expected usage: + * ptr = SOCPERF_Free_Memory(ptr); + */ +extern PVOID SOCPERF_Free_Memory(PVOID location); + +#endif diff --git a/drivers/platform/x86/socperf/inc/ecb_iterators.h b/drivers/platform/x86/socperf/inc/ecb_iterators.h new file mode 100644 index 00000000000000..686057d1a92334 --- /dev/null +++ b/drivers/platform/x86/socperf/inc/ecb_iterators.h @@ -0,0 +1,136 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *********************************************************************************************** +*/ + +#ifndef _ECB_ITERATORS_H_ +#define _ECB_ITERATORS_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +// +// Loop macros to walk through the event control block +// Use for access only in the kernel mode +// To Do - Control access from kernel mode by a macro +// + +#define FOR_EACH_PCI_DATA_REG_RAW(pecb, i, device_idx) \ + { \ + U32 (i) = 0; \ + U32 (cur_grp) = LWPMU_DEVICE_cur_group(device_uncore); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + device_uncore)[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = ECB_operations_register_start( \ + pecb, PMU_OPERATION_READ); \ + (i) < ECB_operations_register_start( \ + pecb, PMU_OPERATION_READ) + \ + ECB_operations_register_len( \ + pecb, PMU_OPERATION_READ); \ + (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_DATA_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_PCI_REG_RAW(pecb, i, device_idx) \ + { \ + U32 (i) = 0; \ + U32 (cur_grp) = LWPMU_DEVICE_cur_group(device_uncore); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + device_uncore)[(cur_grp)]; \ + if ((pecb)) { \ + for ((i) = 0; (i) < ECB_num_entries(pecb); (i)++) { \ + if (ECB_entries_reg_offset((pecb), (i)) == \ + 0) { \ + continue; \ + } + +#define END_FOR_EACH_PCI_REG_RAW \ + } \ + } \ + } + +#define FOR_EACH_REG_ENTRY_UNC(pecb, device_idx, idx) \ + { \ + U32 (idx); \ + U32 (cur_grp) = LWPMU_DEVICE_cur_group(device_uncore); \ + ECB (pecb) = LWPMU_DEVICE_PMU_register_data( \ + device_uncore)[(cur_grp)]; \ + if ((pecb)) { \ + for ((idx) = 0; (idx) < ECB_num_entries(pecb); \ + (idx)++) { \ + if (ECB_entries_bus_no((pecb), (idx)) == 0 && \ + ECB_entries_reg_id((pecb), (idx)) == 0) { \ + continue; \ + } + +#define END_FOR_EACH_REG_ENTRY_UNC \ + } \ + } \ + } + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/platform/x86/socperf/inc/haswellunc_sa.h b/drivers/platform/x86/socperf/inc/haswellunc_sa.h new file mode 100644 index 00000000000000..87e1843f1fe6b5 --- /dev/null +++ b/drivers/platform/x86/socperf/inc/haswellunc_sa.h @@ -0,0 +1,85 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2011-2021 Intel Corporation. All rights reserved. + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#ifndef _HSWUNC_SA_H_INC_ +#define _HSWUNC_SA_H_INC_ + +/* + * Local to this architecture: Haswell uncore SA unit + * + */ +#define HSWUNC_SA_DESKTOP_DID 0x000C04 +#define HSWUNC_SA_NEXT_ADDR_OFFSET 4 +#define HSWUNC_SA_BAR_ADDR_SHIFT 32 +#define HSWUNC_SA_BAR_ADDR_MASK 0x0007FFFFFF000LL +#define HSWUNC_SA_MAX_PCI_DEVICES 16 +#define HSWUNC_SA_MAX_COUNT 0x00000000FFFFFFFFLL +#define HSWUNC_SA_MAX_COUNTERS 8 + +#define HSWUNC_SA_MCHBAR_MMIO_PAGE_SIZE (8 * 4096) +#define HSWUNC_SA_PCIEXBAR_MMIO_PAGE_SIZE (57 * 4096) +#define HSWUNC_SA_OTHER_BAR_MMIO_PAGE_SIZE 4096 +#define HSWUNC_SA_GDXCBAR_OFFSET_LO 0x5420 +#define HSWUNC_SA_GDXCBAR_OFFSET_HI 0x5424 +#define HSWUNC_SA_GDXCBAR_MASK 0x7FFFFFF000LL +#define HSWUNC_SA_CHAP_SAMPLE_DATA 0x00020000 +#define HSWUNC_SA_CHAP_STOP 0x00040000 +#define HSWUNC_SA_CHAP_CTRL_REG_OFFSET 0x0 + +extern DISPATCH_NODE socperf_hswunc_sa_dispatch; + +#endif diff --git a/drivers/platform/x86/socperf/inc/npk_uncore.h b/drivers/platform/x86/socperf/inc/npk_uncore.h new file mode 100644 index 00000000000000..03f2c10bdd634b --- /dev/null +++ b/drivers/platform/x86/socperf/inc/npk_uncore.h @@ -0,0 +1,82 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#ifndef _NPK_UNCORE_H_INC_ +#define _NPK_UNCORE_H_INC_ + +/* + * Local to this architecture: uncore SA unit + * + */ +#define SOC_NPK_UNCORE_NEXT_ADDR_OFFSET 4 +#define SOC_NPK_UNCORE_BAR_ADDR_SHIFT 32 +#define SOC_NPK_UNCORE_BAR_ADDR_MASK 0x00FFFFF00000LL +#define SOC_NPK_UNCORE_MAX_PCI_DEVICES 16 +#define SOC_NPK_COUNTER_MAX_COUNTERS 16 +#define SOC_NPK_COUNTER_MAX_COUNT 0x00000000FFFFFFFFLL +#define SOC_NPK_UNCORE_MCHBAR_ADDR_MASK 0x7FFFFF8000LL + +#define SOC_NPK_UNCORE_NPK_BAR_MMIO_PAGE_SIZE 0x100000 +#define SOC_NPK_UNCORE_MCHBAR_MMIO_PAGE_SIZE (8 * 4096) +#define SOC_NPK_UNCORE_SAMPLE_DATA 0x00120000 +#define SOC_NPK_UNCORE_STOP 0x00040000 +#define SOC_NPK_UNCORE_CHAP_START 0x00110000 +#define SOC_NPK_UNCORE_CHAP_CTRL_REG_OFFSET 0x0 + +extern DISPATCH_NODE npk_dispatch; + +#endif diff --git a/drivers/platform/x86/socperf/inc/pci.h b/drivers/platform/x86/socperf/inc/pci.h new file mode 100644 index 00000000000000..7ecf2d6187ed34 --- /dev/null +++ b/drivers/platform/x86/socperf/inc/pci.h @@ -0,0 +1,110 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *********************************************************************************************** +*/ + +#ifndef _PCI_H_ +#define _PCI_H_ + +#include "lwpmudrv_defines.h" + +/* + * PCI Config Address macros + */ +#define PCI_ENABLE 0x80000000 + +#define PCI_ADDR_IO 0xCF8 +#define PCI_DATA_IO 0xCFC + +#define BIT0 0x1 +#define BIT1 0x2 + +/* + * Macro for forming a PCI configuration address + */ +#define FORM_PCI_ADDR(bus, dev, fun, off) \ + (((PCI_ENABLE)) | ((bus & 0xFF) << 16) | ((dev & 0x1F) << 11) | \ + ((fun & 0x07) << 8) | ((off & 0xFF) << 0)) + +#define VENDOR_ID_MASK 0x0000FFFF +#define DEVICE_ID_MASK 0xFFFF0000 +#define DEVICE_ID_BITSHIFT 16 +#define LOWER_4_BYTES_MASK 0x00000000FFFFFFFF +#define MAX_BUSNO 256 +#define NEXT_ADDR_OFFSET 4 +#define NEXT_ADDR_SHIFT 32 +#define DRV_IS_PCI_VENDOR_ID_INTEL 0x8086 + +#define CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id, device_id) \ + { \ + vendor_id = value & VENDOR_ID_MASK; \ + device_id = (value & DEVICE_ID_MASK) >> DEVICE_ID_BITSHIFT; \ + \ + if (vendor_id != DRV_IS_PCI_VENDOR_ID_INTEL) { \ + continue; \ + } \ + } + +#if defined(DRV_IA32) || defined(DRV_EM64T) +extern int SOCPERF_PCI_Read_From_Memory_Address(U32 addr, U32 *val); + +extern int SOCPERF_PCI_Write_To_Memory_Address(U32 addr, U32 val); + +extern int SOCPERF_PCI_Read_Ulong(U32 pci_address); + +extern void SOCPERF_PCI_Write_Ulong(U32 pci_address, U32 value); +#endif + +#endif diff --git a/drivers/platform/x86/socperf/inc/pmu_info.h b/drivers/platform/x86/socperf/inc/pmu_info.h new file mode 100644 index 00000000000000..5d7738ef325267 --- /dev/null +++ b/drivers/platform/x86/socperf/inc/pmu_info.h @@ -0,0 +1,93 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#ifndef _PMU_INFO_H_INC_ +#define _PMU_INFO_H_INC_ + +static const PMU_INFO_NODE pmu_info_list[] = { + // CTI_Avoton = 0x406D0 + {0x6, 0x4D, 0x0, 0xF, NULL, NULL, atm_mmio_info_list}, + + // CTI_Anniedale = 0x506A0 + { 0x6, 0x5A, 0x0, 0xF, NULL, NULL, atm_mmio_info_list}, + + // CTI_Broxton = 0x506C0 + { 0x6, 0x5C, 0x0, 0xF, NULL, NULL, bxt_mmio_info_list}, + + // CTI_Cherryview = 0x406C0 + { 0x6, 0x4C, 0x0, 0xF, NULL, NULL, atm_mmio_info_list}, + + // CTI_CougarMountain = 0x60E0 + { 0x6, 0x6E, 0x0, 0xF, NULL, NULL, atm_mmio_info_list}, + + // CTI_Denverton = 0x506F0 + { 0x6, 0x5F, 0x0, 0xF, NULL, NULL, dnv_mmio_info_list}, + + // CTI_Geminilake = 0x706A0 + { 0x6, 0x7A, 0x0, 0xF, NULL, NULL, bxt_mmio_info_list}, + + // CTI_Tangier = 0x406A0 + { 0x6, 0x4A, 0x0, 0xF, NULL, NULL, atm_mmio_info_list}, + + // CTI_Silvermont = 0x30670 + { 0x6, 0x37, 0x0, 0xF, NULL, NULL, atm_mmio_info_list}, + + // Last + { 0x0, 0x0, 0x0, 0x0, NULL, NULL, NULL } +}; + +#endif diff --git a/drivers/platform/x86/socperf/inc/pmu_info_mmio.h b/drivers/platform/x86/socperf/inc/pmu_info_mmio.h new file mode 100644 index 00000000000000..81cec0d9dd33f6 --- /dev/null +++ b/drivers/platform/x86/socperf/inc/pmu_info_mmio.h @@ -0,0 +1,1118 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved. + All rights reserved. 
diff --git a/drivers/platform/x86/socperf/inc/pmu_info_mmio.h b/drivers/platform/x86/socperf/inc/pmu_info_mmio.h
new file mode 100644
index 00000000000000..81cec0d9dd33f6
--- /dev/null
+++ b/drivers/platform/x86/socperf/inc/pmu_info_mmio.h
@@ -0,0 +1,1118 @@
+/* ***********************************************************************************************
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright (C) 2019-2021 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright (C) 2019-2021 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ***********************************************************************************************
+*/
+
+#ifndef _PMU_INFO_MMIO_H_INC_
+#define _PMU_INFO_MMIO_H_INC_
+
+static U32 mmio_atm_visa_offset[] = {
+	0x28, 0x34, 0x38, 0x44, 0x48, 0x54, 0x58, 0x64, 0x68, 0x74, 0x78, 0x84,
+	0x88, 0x94, 0x98, 0xA4, 0x0 };
+
+static U32 mmio_bxt_visa_offset[] = {
+	0x5028, 0x5034, 0x5038, 0x5044, 0x5048, 0x5054, 0x5058, 0x5064, 0x5068,
+	0x5074, 0x5078, 0x5084, 0x5088, 0x5094, 0x0 };
+
+static U32 mmio_dnv_visa_offset[] = {
+	0x5048, 0x5054, 0x5058, 0x5064, 0x5068, 0x5074, 0x5078, 0x5084, 0x5088,
+	0x5094, 0x5098, 0x50A4, 0x50A8, 0x50B4, 0x50B8, 0x50C4, 0x50C8, 0x50D4,
+	0x0 };
+
+static PMU_MMIO_UNIT_INFO_NODE atm_mmio_info_list[] = {
+	{ { { { 0, 0, 0, 0x0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0x8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0xC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0x10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0x14 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0x18 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0x1C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0x20 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
+	  mmio_atm_visa_offset},
+	{ { { { 0, 0, 0, 0x24 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
+	  { { {
0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x28 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x2C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x30 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x34 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x38 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x40 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x44 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x4C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x50 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x54 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x58 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x5C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x60 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x64 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x68 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x6C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x70 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x74 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x78 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x7C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { 
{ 0, 0, 0, 0x80 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x84 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x88 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x8C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x90 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x94 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x98 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xA0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xA4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xA8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xAC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xB0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3E80 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3E84 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3E8C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3E88 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3E90 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3E94 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x3E98 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9680 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9684 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9688 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 
0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x968C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9880 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9884 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9888 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9900 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9904 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9908 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x990C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9910 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9914 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9918 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9980 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9984 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x9988 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0x998C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xD800 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xD804 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xD808 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xD880 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xD884 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xD888 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE600 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 
0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE604 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE608 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE644 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE648 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE64C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE658 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE65C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0xE660 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_atm_visa_offset}, + { { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL } +}; + +static PMU_MMIO_UNIT_INFO_NODE bxt_mmio_info_list[] = { + { { { { 0, 0, 2, 0x80 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x84 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x88 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x8C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x90 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xAC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xB0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xB4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xB8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xBC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xC0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5000 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5008 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x500C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5010 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5014 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5018 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x501C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5020 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5024 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5028 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x502C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5030 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5034 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5038 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x503C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5040 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5044 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5048 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x504C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5050 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5054 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5058 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x505C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 
0, 0, 2, 0x5060 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5064 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5068 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x506C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5070 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5074 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5078 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x507C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5080 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5084 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5088 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x508C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5090 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5094 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x5098 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x509C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x50A0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x50A4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x50A8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x6C00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x6C04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x6C80 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x6C84 } }, 0, 
MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 0, 0x6D48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x7080 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x7084 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x7100 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0x7104 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA180 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA188 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA184 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA200 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA204 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA280 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA284 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xA288 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE400 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE404 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE408 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE600 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE604 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE608 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE60C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xE610 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEB00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 
0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEB04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEB08 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEC00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEC04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEC08 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEC0C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEC10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEE00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEE04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEE08 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEE0C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xEE10 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xF480 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xF484 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xF488 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xF48C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 2, 0xF490 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_bxt_visa_offset}, + { { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL } +}; + +static PMU_MMIO_UNIT_INFO_NODE dnv_mmio_info_list[] = { + { { { { 0, 31, 7, 0x80 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x84 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x88 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x8C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, 
MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x90 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x94 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x98 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x9C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xA0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xAC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xBC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xE0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x180 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x184 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4200 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4204 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4208 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4380 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4384 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4400 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4404 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4408 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4880 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4884 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4888 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4900 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4904 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4908 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4F00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4F04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x4F08 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5000 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5008 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x500C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5010 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5014 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5018 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x501C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5020 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5024 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5028 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x502C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5030 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5034 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5038 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x503C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5040 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5044 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5048 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x504C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5050 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5054 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5058 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x505C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5060 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5064 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5068 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x506C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5070 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5074 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5078 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x507C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5080 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5084 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5088 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x508C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5090 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5094 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5098 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x509C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50A0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50A4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50A8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50AC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50B0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50B4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50B8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50BC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50C0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50C4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50C8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50CC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50D0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50D4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50D8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50DC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50E0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50E4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50E8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50EC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50F0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50F4 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50F8 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x50FC } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5100 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5104 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5108 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x510C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5110 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5114 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5118 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x511C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5120 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5124 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5128 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x512C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5130 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5134 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5138 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x513C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5140 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5144 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x5148 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x6C00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x6C04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x6C80 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x6C84 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 0, 0, 0x6D48 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x7080 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x7084 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x7088 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x7100 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0x7104 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xA180 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xA184 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xA200 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xA204 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xA280 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xA284 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xAE00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xAE04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xAE08 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB400 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB404 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB408 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB580 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB584 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB588 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB800 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB804 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB880 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB884 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB888 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB900 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB904 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB908 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB90C } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB910 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB914 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB918 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB980 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xB984 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xBA00 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xBA04 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xBA08 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + 
mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xBA80 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xBA84 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 31, 7, 0xBA88 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL }, + { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 }, + mmio_dnv_visa_offset}, + { { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 }, + NULL } +}; + +#endif diff --git a/drivers/platform/x86/socperf/inc/pmu_info_struct.h b/drivers/platform/x86/socperf/inc/pmu_info_struct.h new file mode 100644 index 00000000000000..c98608635fcc23 --- /dev/null +++ b/drivers/platform/x86/socperf/inc/pmu_info_struct.h @@ -0,0 +1,139 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ***********************************************************************************************
+*/
+
+#ifndef _PMU_INFO_STRUCT_H_INC_
+#define _PMU_INFO_STRUCT_H_INC_
+
+// Data Structures for storing entire PMU list
+typedef struct PMU_MSR_INFO_NODE_S PMU_MSR_INFO_NODE;
+struct PMU_MSR_INFO_NODE_S {
+	U64 msr_id;
+	U64 mask;
+	U16 range;
+	U16 dynamic; // to be updated
+};
+
+typedef struct PMU_PCI_INFO_NODE_S PMU_PCI_INFO_NODE;
+struct PMU_PCI_INFO_NODE_S {
+	union {
+		struct {
+			U64 bus : 8;
+			U64 dev : 5;
+			U64 func : 3;
+			U64 offset : 16;
+			U64 rsvd : 32;
+		} s;
+		U64 reg;
+	} u;
+};
+
+typedef struct PMU_PCI_UNIT_INFO_NODE_S PMU_PCI_UNIT_INFO_NODE;
+struct PMU_PCI_UNIT_INFO_NODE_S {
+	U32 dev;
+	U32 func;
+	U16 *reg_offset_list;
+};
+
+typedef struct PMU_MMIO_BAR_INFO_NODE_S PMU_MMIO_BAR_INFO_NODE;
+struct PMU_MMIO_BAR_INFO_NODE_S {
+	union {
+		struct {
+			U32 bus : 8;
+			U32 dev : 5;
+			U32 func : 3;
+			U32 offset : 16;
+		} s;
+		U32 reg;
+	} u;
+	U8 shift;
+	U8 bar_prog_type;
+	U16 reserved;
+	U64 mask;
+};
+
+enum { MMIO_SINGLE_BAR_TYPE = 1, MMIO_DUAL_BAR_TYPE, MMIO_DIRECT_BAR_TYPE };
+
+typedef struct PMU_MMIO_UNIT_INFO_NODE_S PMU_MMIO_UNIT_INFO_NODE;
+struct PMU_MMIO_UNIT_INFO_NODE_S {
+	PMU_MMIO_BAR_INFO_NODE primary;
+	PMU_MMIO_BAR_INFO_NODE secondary;
+	U32 *reg_offset_list;
+};
+
+typedef struct PMU_INFO_NODE_S PMU_INFO_NODE;
+struct PMU_INFO_NODE_S {
+	U16 family;
+	U16 model;
+	U16 stepping_from;
+	U16 stepping_to;
+	PMU_MSR_INFO_NODE **msr_info_list;
+	PMU_PCI_UNIT_INFO_NODE *pci_info_list;
+	PMU_MMIO_UNIT_INFO_NODE *mmio_info_list;
+};
+
+// Data Structure for search operation
+typedef struct PMU_SEARCH_NODE_S PMU_SEARCH_NODE;
+struct PMU_SEARCH_NODE_S {
+	U64 key;    // common for MSR/PCI/MMIO
+	void *addr; // copy address of static node
+	PMU_SEARCH_NODE *left;
+	PMU_SEARCH_NODE *right;
+	U16 height; // For balancing the search tree
+	U16 range;  // For MSR
+};
+
+#endif
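Taken together, these types describe one uncore unit per table row: each entry in the MMIO tables earlier in this patch (the rows referencing mmio_dnv_visa_offset, for instance) is a PMU_MMIO_UNIT_INFO_NODE holding a primary BAR descriptor, a secondary BAR descriptor, and a counter-offset list, with an all-zero row terminating the table. A minimal sketch of the same shape; the names example_offsets and example_units, and the offset values in the list, are illustrative only and not part of the patch:

    /* One unit behind bus 0, device 31, function 7: BAR register at config
     * offset 0xB404, single-BAR programming, 32-bit address mask; the
     * secondary descriptor is unused. */
    static U32 example_offsets[] = { 0x0, 0x8 };

    static PMU_MMIO_UNIT_INFO_NODE example_units[] = {
        { { { { 0, 31, 7, 0xB404 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0xFFFFFFFFULL },
          { { { 0, 0, 0, 0 } }, 0, MMIO_SINGLE_BAR_TYPE, 0, 0x0 },
          example_offsets },
        { { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 },
          { { { 0, 0, 0, 0 } }, 0, 0, 0, 0x0 },
          NULL } /* all-zero terminator, as in the tables above */
    };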
diff --git a/drivers/platform/x86/socperf/inc/pmu_list.h b/drivers/platform/x86/socperf/inc/pmu_list.h
new file mode 100644
index 00000000000000..212cbb47cf2260
--- /dev/null
+++ b/drivers/platform/x86/socperf/inc/pmu_list.h
@@ -0,0 +1,116 @@
+/* ***********************************************************************************************
+
+   This file is provided under a dual BSD/GPLv2 license. When using or
+   redistributing this file, you may do so under either license.
+
+   GPL LICENSE SUMMARY
+
+   Copyright (C) 2019-2021 Intel Corporation. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of version 2 of the GNU General Public License as
+   published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+   The full GNU General Public License is included in this distribution
+   in the file called LICENSE.GPL.
+
+   BSD LICENSE
+
+   Copyright (C) 2019-2021 Intel Corporation. All rights reserved.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in
+       the documentation and/or other materials provided with the
+       distribution.
+     * Neither the name of Intel Corporation nor the names of its
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ***********************************************************************************************
+*/
+
+#ifndef _PMU_LIST_H_INC_
+#define _PMU_LIST_H_INC_
+
+/************************************************************/
+/*
+ * PMU list API for checking valid PMU list
+ *
+ ************************************************************/
+
+/*!
+ * @fn DRV_BOOL PMU_LIST_Check_MMIO (PMU_MMIO_BAR_INFO_NODE,
+ *                                   PMU_MMIO_BAR_INFO_NODE,
+ *                                   U32)
+ *
+ * @brief Search the MMIO programming info in the list
+ *
+ * @param primary   - primary MMIO BAR programming info
+ *        secondary - secondary MMIO BAR programming info
+ *        offset    - MMIO offset
+ *
+ * @return TRUE if the MMIO information is found in the list,
+ *         FALSE otherwise
+ */
+extern DRV_BOOL
+PMU_LIST_Check_MMIO(PMU_MMIO_BAR_INFO_NODE primary,
+		    PMU_MMIO_BAR_INFO_NODE secondary,
+		    U32 offset);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Initialize (void)
+ * @brief Detect the CPU id and locate the applicable PMU list
+ *
+ * @param None
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Initialize(void);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Build_MMIO_List (void)
+ * @brief Build the MMIO search tree
+ *
+ * @param None
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Build_MMIO_List(void);
+
+/*!
+ * @fn OS_STATUS PMU_LIST_Clean_Up (void)
+ * @brief Clean up all the search trees
+ *
+ * @param None
+ *
+ * @return OS_SUCCESS
+ */
+extern OS_STATUS PMU_LIST_Clean_Up(void);
+
+#endif
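The call order implied by the comments above is PMU_LIST_Initialize, then PMU_LIST_Build_MMIO_List, then any number of PMU_LIST_Check_MMIO lookups, then PMU_LIST_Clean_Up. A minimal caller sketch, assuming the usual SOCPERF type and status definitions are in scope; the wrapper name example_validate_mmio is hypothetical, not driver code:

    /* Illustrative only: validate one MMIO programming request. */
    static DRV_BOOL
    example_validate_mmio(PMU_MMIO_BAR_INFO_NODE primary,
                          PMU_MMIO_BAR_INFO_NODE secondary, U32 offset)
    {
        DRV_BOOL valid = FALSE;

        if (PMU_LIST_Initialize() == OS_SUCCESS &&
            PMU_LIST_Build_MMIO_List() == OS_SUCCESS) {
            valid = PMU_LIST_Check_MMIO(primary, secondary, offset);
        }
        PMU_LIST_Clean_Up(); /* tear down the search trees */
        return valid;
    }

In the driver proper, the initialize/build steps would run once at load time rather than per lookup; the sketch only shows the dependency order.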
diff --git a/drivers/platform/x86/socperf/inc/soc_uncore.h b/drivers/platform/x86/socperf/inc/soc_uncore.h
new file mode 100644
index 00000000000000..102fd475b35d39
--- /dev/null
+++ b/drivers/platform/x86/socperf/inc/soc_uncore.h
@@ -0,0 +1,91 @@
+/* ***********************************************************************************************
+
+   This file is provided under a dual BSD/GPLv2 license. When using or
+   redistributing this file, you may do so under either license.
+
+   GPL LICENSE SUMMARY
+
+   Copyright (C) 2013-2021 Intel Corporation. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of version 2 of the GNU General Public License as
+   published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+   The full GNU General Public License is included in this distribution
+   in the file called LICENSE.GPL.
+
+   BSD LICENSE
+
+   Copyright (C) 2013-2021 Intel Corporation. All rights reserved.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in
+       the documentation and/or other materials provided with the
+       distribution.
+     * Neither the name of Intel Corporation nor the names of its
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *********************************************************************************************** +*/ + +#ifndef _SOC_UNCORE_H_INC_ +#define _SOC_UNCORE_H_INC_ + +/* + * Local to this architecture: SoC uncore unit + * + */ +#define SOC_UNCORE_DESKTOP_DID 0x000C04 +#define SOC_UNCORE_NEXT_ADDR_OFFSET 4 +#define SOC_UNCORE_BAR_ADDR_SHIFT 32 +#define SOC_UNCORE_BAR_ADDR_MASK 0x000FFFC00000LL +#define SOC_UNCORE_MAX_PCI_DEVICES 16 +#define SOC_UNCORE_MCR_REG_OFFSET 0xD0 +#define SOC_UNCORE_MDR_REG_OFFSET 0xD4 +#define SOC_UNCORE_MCRX_REG_OFFSET 0xD8 +#define SOC_UNCORE_BYTE_ENABLES 0xF +#define SOC_UNCORE_OP_CODE_SHIFT 24 +#define SOC_UNCORE_PORT_ID_SHIFT 16 +#define SOC_UNCORE_OFFSET_HI_MASK 0xFFFFFF00 +#define SOC_UNCORE_OFFSET_LO_MASK 0xFF +#define SOC_COUNTER_PORT_ID 23 +#define SOC_COUNTER_WRITE_OP_CODE 1 +#define SOC_COUNTER_READ_OP_CODE 0 +#define UNCORE_MAX_COUNTERS 8 +#define UNCORE_MAX_COUNT 0x00000000FFFFFFFFLL + +#define SOC_UNCORE_OTHER_BAR_MMIO_PAGE_SIZE 4096 +#define SOC_UNCORE_SAMPLE_DATA 0x00020000 +#define SOC_UNCORE_STOP 0x00040000 +#define SOC_UNCORE_CTRL_REG_OFFSET 0x0 + +extern DISPATCH_NODE soc_uncore_dispatch; + +#endif diff --git a/drivers/platform/x86/socperf/inc/socperfdrv.h b/drivers/platform/x86/socperf/inc/socperfdrv.h new file mode 100644 index 00000000000000..5b520f8c3b79a6 --- /dev/null +++ b/drivers/platform/x86/socperf/inc/socperfdrv.h @@ -0,0 +1,203 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ***********************************************************************************************
+*/
+
+#ifndef _SOCPERFDRV_H_
+#define _SOCPERFDRV_H_
+
+#include <linux/version.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+#include <asm/uaccess.h>
+#else
+#include <linux/uaccess.h>
+#endif
+#include "lwpmudrv_defines.h"
+#include "lwpmudrv_ecb.h"
+#include "lwpmudrv_struct.h"
+#include "lwpmudrv_types.h"
+#include "lwpmudrv_version.h"
+
+/*
+ * Print macros for driver messages
+ */
+
+#if defined(MYDEBUG)
+#define SOCPERF_PRINT_DEBUG(fmt, args...) \
+	{ \
+		printk(KERN_INFO SOCPERF_MSG_PREFIX " [DEBUG] " fmt, ##args); \
+	}
+#else
+#define SOCPERF_PRINT_DEBUG(fmt, args...) \
+	{ \
+		; \
+	}
+#endif
+
+#define SOCPERF_PRINT(fmt, args...) \
+	{ \
+		printk(KERN_INFO SOCPERF_MSG_PREFIX " " fmt, ##args); \
+	}
+
+#define SOCPERF_PRINT_WARNING(fmt, args...) \
+	{ \
+		printk(KERN_ALERT SOCPERF_MSG_PREFIX " [Warning] " fmt, \
+		       ##args); \
+	}
+
+#define SOCPERF_PRINT_ERROR(fmt, args...) \
+	{ \
+		printk(KERN_CRIT SOCPERF_MSG_PREFIX " [ERROR] " fmt, ##args); \
+	}
+
+// Macro to return the thread group id
+#define GET_CURRENT_TGID() (current->tgid)
+
+#if defined(DRV_IA32) || defined(DRV_EM64T)
+#define OVERFLOW_ARGS U64 *, U64 *
+#elif defined(DRV_IA64)
+#define OVERFLOW_ARGS U64 *, U64 *, U64 *, U64 *, U64 *, U64 *
+#endif
+
+/*
+ * Dispatch table for virtualized functions.
+ * Used to enable common functionality for different
+ * processor microarchitectures
+ */
+typedef struct DISPATCH_NODE_S DISPATCH_NODE;
+typedef DISPATCH_NODE *DISPATCH;
+
+struct DISPATCH_NODE_S {
+	VOID (*init)(PVOID);
+	VOID (*fini)(PVOID);
+	VOID (*write)(PVOID);
+	VOID (*freeze)(PVOID);
+	VOID (*restart)(PVOID);
+	VOID (*read_data)(PVOID);
+	VOID (*check_overflow)(VOID);
+	VOID (*swap_group)(DRV_BOOL);
+	VOID (*read_lbrs)(PVOID);
+	VOID (*clean_up)(PVOID);
+	VOID (*hw_errata)(VOID);
+	VOID (*read_power)(PVOID);
+	U64 (*check_overflow_errata)(ECB, U32, U64);
+	VOID (*read_counts)(PVOID, U32);
+	U64 (*check_overflow_gp_errata)(ECB, U64 *);
+	VOID (*read_ro)(PVOID, U32, U32);
+	U64 (*platform_info)(VOID);
+	// Counter reads triggered/initiated by User mode timer
+	VOID (*trigger_read)(VOID);
+	VOID (*read_current_data)(PVOID);
+	VOID (*create_mem)(U32, U64 *);
+	VOID (*check_status)(U64 *, U32 *);
+	VOID (*read_mem)(U64, U64 *, U32);
+	VOID (*stop_mem)(VOID);
+};
+
+extern DISPATCH dispatch;
+
+extern VOID **PMU_register_data;
+extern VOID **desc_data;
+extern U64 *prev_counter_data;
+extern U64 *cur_counter_data;
+
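+/*
+ * Illustrative sketch (editorial annotation, not part of the driver): a
+ * microarchitecture-specific module fills in only the callbacks it supports
+ * and leaves the rest NULL.  The my_unit_* names below are hypothetical
+ * placeholders.
+ *
+ *     static VOID my_unit_init(PVOID param);   // program control registers
+ *     static VOID my_unit_fini(PVOID param);   // reset control registers
+ *     static VOID my_unit_read(PVOID buffer);  // snapshot the counters
+ *
+ *     DISPATCH_NODE my_unit_dispatch = {
+ *             .init      = my_unit_init,
+ *             .fini      = my_unit_fini,
+ *             .read_data = my_unit_read,
+ *     };
+ */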
+/*!
+ * @struct LWPMU_DEVICE_NODE_S
+ * @brief  Struct to hold fields per device
+ *         PMU_register_data_unc - MSR info
+ *         dispatch_unc          - dispatch table
+ *         em_groups_count_unc   - # groups
+ *         pcfg_unc              - config struct
+ */
+typedef struct LWPMU_DEVICE_NODE_S LWPMU_DEVICE_NODE;
+typedef LWPMU_DEVICE_NODE *LWPMU_DEVICE;
+
+struct LWPMU_DEVICE_NODE_S {
+	VOID **PMU_register_data_unc;
+	DISPATCH dispatch_unc;
+	S32 em_groups_count_unc;
+	VOID *pcfg_unc;
+	U64 **acc_per_thread;
+	U64 **prev_val_per_thread;
+	U64 counter_mask;
+	U64 num_events;
+	U32 num_units;
+	VOID *ec;
+	S32 cur_group;
+};
+
+#define LWPMU_DEVICE_PMU_register_data(dev) ((dev)->PMU_register_data_unc)
+#define LWPMU_DEVICE_dispatch(dev) ((dev)->dispatch_unc)
+#define LWPMU_DEVICE_em_groups_count(dev) ((dev)->em_groups_count_unc)
+#define LWPMU_DEVICE_pcfg(dev) ((dev)->pcfg_unc)
+#define LWPMU_DEVICE_acc_per_thread(dev) ((dev)->acc_per_thread)
+#define LWPMU_DEVICE_prev_val_per_thread(dev) ((dev)->prev_val_per_thread)
+#define LWPMU_DEVICE_counter_mask(dev) ((dev)->counter_mask)
+#define LWPMU_DEVICE_num_events(dev) ((dev)->num_events)
+#define LWPMU_DEVICE_num_units(dev) ((dev)->num_units)
+#define LWPMU_DEVICE_ec(dev) ((dev)->ec)
+#define LWPMU_DEVICE_cur_group(dev) ((dev)->cur_group)
+
+extern U32 num_devices;
+extern U32 cur_devices;
+extern LWPMU_DEVICE device_uncore;
+extern U64 *pmu_state;
+
+// Handy macro
+#define TSC_SKEW(this_cpu) (tsc_info[this_cpu] - tsc_info[0])
+
+#endif
diff --git a/drivers/platform/x86/socperf/inc/utility.h b/drivers/platform/x86/socperf/inc/utility.h
new file mode 100644
index 00000000000000..26f756bd209950
--- /dev/null
+++ b/drivers/platform/x86/socperf/inc/utility.h
@@ -0,0 +1,73 @@
+/* ***********************************************************************************************
+
+   This file is provided under a dual BSD/GPLv2 license. When using or
+   redistributing this file, you may do so under either license.
+
+   GPL LICENSE SUMMARY
+
+   Copyright (C) 2005-2021 Intel Corporation. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of version 2 of the GNU General Public License as
+   published by the Free Software Foundation.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+   The full GNU General Public License is included in this distribution
+   in the file called LICENSE.GPL.
+
+   BSD LICENSE
+
+   Copyright (C) 2005-2021 Intel Corporation. All rights reserved.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in
+       the documentation and/or other materials provided with the
+       distribution.
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#ifndef _UTILITY_H_ +#define _UTILITY_H_ + +extern void +SOCPERF_UTILITY_Read_TSC(U64 *pTsc); + +extern void +SOCPERF_UTILITY_Read_Cpuid(U64 cpuid_function, + U64 *rax_value, + U64 *rbx_value, + U64 *rcx_value, + U64 *rdx_value); + +extern DISPATCH SOCPERF_UTILITY_Configure_CPU(U32); + +#endif diff --git a/drivers/platform/x86/socperf/include/error_reporting_utils.h b/drivers/platform/x86/socperf/include/error_reporting_utils.h new file mode 100644 index 00000000000000..20aacbd7acac9e --- /dev/null +++ b/drivers/platform/x86/socperf/include/error_reporting_utils.h @@ -0,0 +1,331 @@ +/* + * Copyright (C) 2002 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. 
+*/ + +#ifndef __ERROR_REPORTING_UTILS_H__ +#define __ERROR_REPORTING_UTILS_H__ + +#define DRV_ASSERT_N_RET_VAL(ret_val) \ + { \ + DRV_ASSERT((ret_val) == VT_SUCCESS); \ + DRV_CHECK_N_RETURN_N_FAIL(ret_val); \ + } + +#define DRV_ASSERT_N_CONTINUE(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + } + +#ifndef DRV_CHECK_N_RETURN_N_FAIL +#define DRV_CHECK_N_RETURN_N_FAIL(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return ret_val; \ + } +#endif + +#define DRV_CHECK_N_RETURN_N_NULL(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return NULL; \ + } + +#ifndef DRV_CHECK_N_RETURN_NO_RETVAL +#define DRV_CHECK_N_RETURN_NO_RETVAL(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return; \ + } +#endif +#define DRV_CHECK_N_RETURN_N_USERDEFINED(ret_val, user_defined_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + return user_defined_val; \ + } + +#define DRV_CHECK_N_CONTINUE(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + continue; \ + } + +#define DRV_CHECK_N_SET_RET_N_GOTO_LABEL(ret_val, ret_var, ret_status, \ + goto_label) \ + if ((ret_val) != VT_SUCCESS) { \ + ret_var = ret_status; \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + goto goto_label; \ + } + +#ifndef DRV_CHECK_PTR_N_RET_VAL +#define DRV_CHECK_PTR_N_RET_VAL(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return VT_SAM_ERROR; \ + } +#endif + +#define DRV_CHECK_PTR_N_RET_GIVEN_VAL(ptr, ret_val) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return ret_val; \ + } + +#define DRV_CHECK_PTR_N_RET_NULL(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return NULL; \ + } + +#define DRV_CHECK_PTR_N_RET_FALSE(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return FALSE; \ + } + +#define DRV_CHECK_PTR_N_RET(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + return; \ + } + +#define DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + } + +#define DRV_CHECK_PTR_N_CLEANUP(ptr, gotolabel, ret_val) \ + if ((ptr) == NULL) { \ + LOG_ERR0(VTSA_T("Encountered null pointer")); \ + ret_val = VT_SAM_ERROR; \ + goto gotolabel; \ + } + +#define DRV_CHECK_PTR_ON_NULL_CLEANUP_N_RETURN(ptr, gotolabel) \ + if ((ptr) == NULL) { \ + DRV_CHECK_PTR_N_LOG_NO_RETURN(ptr); \ + goto gotolabel; \ + } + +#define DRV_CHECK_PTR_N_RET_ASSIGNED_VAL(ptr, return_val) \ + if (!(ptr)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return return_val; \ + } + +#define DRV_CHECK_PTR_N_SET_RET_N_GOTO_CLEANUP(status, ret_var, ret_status, \ + goto_label) \ + if (status == NULL) { \ + ret_var = ret_status; \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \ + __FUNCTION__, __LINE__, status)); \ + goto goto_label; \ + } + +#define DRV_CHECK_N_LOG_NO_RETURN(ret_val) \ + if ((ret_val) != VT_SUCCESS) { \ + LOG_ERR1(VTSA_T("Operation failed with error code "), \ + (ret_val)); \ + } + +#define 
DRV_CHECK_N_RET_NEG_ONE(ret_val) \
+	if ((ret_val) == -1) { \
+		LOG_ERR0(VTSA_T("Operation failed with error code = -1")); \
+		return VT_SAM_ERROR; \
+	}
+
+#define DRV_CHECK_N_RET_NEG_ONE_N_CLEANUP(ret_val, gotolabel) \
+	if ((ret_val) == -1) { \
+		LOG_ERR0(VTSA_T("Operation failed with error code = -1")); \
+		goto gotolabel; \
+	}
+
+#define DRV_REQUIRES_TRUE_COND_RET_N_FAIL(cond) \
+	if (!(cond)) { \
+		LOG_ERR0(VTSA_T("Condition check failed")); \
+		return VT_SAM_ERROR; \
+	}
+
+#define DRV_REQUIRES_TRUE_COND_RET_N_GOTO_CLEANUP(cond, gotolabel) \
+	if (!(cond)) { \
+		LOG_ERR0(VTSA_T("Condition check failed")); \
+		goto gotolabel; \
+	}
+
+#define DRV_REQUIRES_TRUE_COND_RET_ASSIGNED_VAL(cond, ret_val) \
+	if (!(cond)) { \
+		LOG_ERR0(VTSA_T("Condition check failed")); \
+		return ret_val; \
+	}
+
+#define DRV_CHECK_N_ERR_LOG_ERR_STRNG_N_RET(rise_err) \
+	if (rise_err != VT_SUCCESS) { \
+		PVOID rise_ptr = NULL; \
+		const VTSA_CHAR *error_str = NULL; \
+		RISE_open(&rise_ptr); \
+		RISE_translate_err_code(rise_ptr, rise_err, &error_str); \
+		LogItW(LOG_LEVEL_ERROR | LOG_AREA_GENERAL, \
+		       L"Operation failed with error [ %d ] = %s\n", rise_err, \
+		       error_str); \
+		RISE_close(rise_ptr); \
+		return rise_err; \
+	}
+
+#define DRV_CHECK_ON_FAIL_CLEANUP_N_RETURN(ret_val, gotolabel) \
+	if ((ret_val) != VT_SUCCESS) { \
+		DRV_CHECK_N_LOG_NO_RETURN(ret_val); \
+		goto gotolabel; \
+	}
+
+#define DRV_CHECK_RET_N_SET_RET_N_GOTO_CLEANUP(status, ret_var, ret_status, \
+					       goto_label) \
+	if (status != VT_SUCCESS) { \
+		ret_var = ret_status; \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \
+		       __FUNCTION__, __LINE__, status)); \
+		goto goto_label; \
+	}
+
+#define DRV_CHECK_N_CLEANUP_N_RETURN_RET_NEG_ONE(ret_val, gotolabel) \
+	if ((ret_val) == -1) { \
+		DRV_CHECK_N_LOG_NO_RETURN(ret_val); \
+		goto gotolabel; \
+	}
+
+#define DRV_CHECK_IF_NEG_RET_N_RETURN_GIVEN_VAL(inp_val, ret_val) \
+	if ((inp_val) < 0) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "Operation failed with error code %d\n", ret_val)); \
+		return ret_val; \
+	}
+
+#define DRV_CHECK_IF_NEG_RET_N_RETURN_NO_VAL(inp_val) \
+	if ((inp_val) < 0) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "Operation failed with error code %d\n", inp_val)); \
+		return; \
+	}
+
+#define DRV_CHECK_IF_NEG_RET_N_GOTO_CLEANUP(status, goto_label) \
+	if (status < 0) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \
+		       __FUNCTION__, __LINE__, status)); \
+		goto goto_label; \
+	}
+
+#define DRV_CHECK_IF_NEG_RET_N_SET_RET_N_GOTO_CLEANUP(status, ret_var, \
+						      ret_status, goto_label) \
+	if (status < 0) { \
+		ret_var = ret_status; \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, "%s:%d error %d\n", \
+		       __FUNCTION__, __LINE__, status)); \
+		goto goto_label; \
+	}
+
+#define FREE_N_SET_NULL(ptr) \
+	if (ptr != NULL) { \
+		free(ptr); \
+		ptr = NULL; \
+	}
+
+#define DELETE_N_SET_NULL(ptr) \
+	delete ptr; \
+	ptr = NULL;
+
+/*
+ * Memory management error handling macros
+ */
+// Check for NULL ptr and return VT_NO_MEMORY
+#define SEP_CHECK_ALLOCATION_N_RET_VAL(loc) \
+	if (!(loc)) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Encountered null pointer\n", __FUNCTION__, \
+		       __LINE__)); \
+		return VT_NO_MEMORY; \
+	}
+
+// Check for NULL ptr and exit with -1 status
+#define SEP_CHECK_ALLOCATION_N_EXIT_WITH_FAILURE(loc) \
+	if (!(loc)) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Encountered null pointer\n", __FUNCTION__, \
+		       __LINE__)); \
+		exit(-1); \
+	}
+
+// Check for NULL ptr and return void
+#define 
SEP_CHECK_ALLOCATION_N_RET_NOVAL(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return; \ + } + +// Check for NULL ptr and return False +#define SEP_CHECK_ALLOCATION_N_RET_BOOL(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return FALSE; \ + } + +// Check for NULL ptr and return NULL +#define SEP_CHECK_ALLOCATION_N_RET_NULL(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + return NULL; \ + } + +// Check for NULL ptr and goto provided label +#define SEP_CHECK_ALLOCATION_N_GOTO_CLEANUP(loc, goto_label) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + goto goto_label; \ + } + +// Check for NULL ptr and continue the loop +#define SEP_CHECK_ALLOCATION_N_CONTINUE(loc) \ + if (!(loc)) { \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + continue; \ + } + +// Check for NULL ptr, set return var with provided status and goto provided label +#define SEP_CHECK_ALLOCATION_SET_RETURN_N_GOTO_CLEANUP(loc, ret_var, \ + ret_status, goto_label) \ + if (!(loc)) { \ + ret_var = ret_status; \ + LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \ + "%s:%d Encountered null pointer\n", __FUNCTION__, \ + __LINE__)); \ + goto goto_label; \ + } + +#define SEP_CHECK_ALLOCATION_N_RET_ASSIGNED_VAL DRV_CHECK_PTR_N_RET_ASSIGNED_VAL + +#endif diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_defines.h b/drivers/platform/x86/socperf/include/lwpmudrv_defines.h new file mode 100644 index 00000000000000..620b88159fbc5a --- /dev/null +++ b/drivers/platform/x86/socperf/include/lwpmudrv_defines.h @@ -0,0 +1,607 @@ +/* + * Copyright (C) 2007 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _LWPMUDRV_DEFINES_H_ +#define _LWPMUDRV_DEFINES_H_ + +#if defined(__cplusplus) +extern "C" { +#endif +// +// Start off with none of the OS'es are defined +// +#undef DRV_OS_WINDOWS +#undef DRV_OS_LINUX +#undef DRV_OS_SOLARIS +#undef DRV_OS_MAC +#undef DRV_OS_ANDROID +#undef DRV_OS_UNIX + +// +// Make sure none of the architectures is defined here +// +#undef DRV_IA32 +#undef DRV_EM64T + +// +// Make sure one (and only one) of the OS'es gets defined here +// +// Unfortunately entirex defines _WIN32 so we need to check for linux +// first. The definition of these flags is one and only one +// _OS_xxx is allowed to be defined. 
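+// (For example, a typical x86_64 Linux build takes the __linux__ branch
+// below and compiles with DRV_OS_LINUX plus the DRV_OS_UNIX umbrella flag;
+// all of the other DRV_OS_* symbols stay undefined.)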
+//
+#if defined(__ANDROID__)
+#define DRV_OS_ANDROID
+#define DRV_OS_UNIX
+#elif defined(__linux__)
+#define DRV_OS_LINUX
+#define DRV_OS_UNIX
+#elif defined(sun)
+#define DRV_OS_SOLARIS
+#define DRV_OS_UNIX
+#elif defined(_WIN32)
+#define DRV_OS_WINDOWS
+#elif defined(__APPLE__)
+#define DRV_OS_MAC
+#define DRV_OS_UNIX
+#elif defined(__FreeBSD__)
+#define DRV_OS_FREEBSD
+#define DRV_OS_UNIX
+#else
+#error "Compiling for an unknown OS"
+#endif
+
+//
+// Make sure one (and only one) architecture is defined here
+// as well as one (and only one) pointer size
+//
+#if defined(_M_IX86) || defined(__i386__)
+#define DRV_IA32
+#elif defined(_M_AMD64) || defined(__x86_64__)
+#define DRV_EM64T
+#else
+#error "Unknown architecture for compilation"
+#endif
+
+//
+// Add a well defined definition of compiling for release (free) vs.
+// debug (checked). Once again, don't assume these are the only two values,
+// always have an else clause in case we want to expand this.
+//
+#if defined(DRV_OS_UNIX)
+#define WINAPI
+#endif
+
+/*
+ * Add OS neutral defines for file processing. This is needed in both
+ * the user code and the kernel code for cleanliness
+ */
+#undef DRV_FILE_DESC
+#undef DRV_INVALID_FILE_DESC_VALUE
+#define DRV_ASSERT assert
+
+#if defined(DRV_OS_WINDOWS)
+
+#define DRV_FILE_DESC HANDLE
+#define DRV_INVALID_FILE_DESC_VALUE INVALID_HANDLE_VALUE
+
+#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \
+	defined(DRV_OS_ANDROID)
+
+#define DRV_IOCTL_FILE_DESC SIOP
+#define DRV_FILE_DESC SIOP
+#define DRV_INVALID_FILE_DESC_VALUE -1
+
+#elif defined(DRV_OS_FREEBSD)
+
+#define DRV_IOCTL_FILE_DESC S64
+#define DRV_FILE_DESC S64
+#define DRV_INVALID_FILE_DESC_VALUE -1
+
+#elif defined(DRV_OS_MAC)
+#if defined __LP64__
+#define DRV_IOCTL_FILE_DESC S64
+#define DRV_FILE_DESC S64
+#define DRV_INVALID_FILE_DESC_VALUE (S64)(-1)
+#else
+#define DRV_IOCTL_FILE_DESC S32
+#define DRV_FILE_DESC S32
+#define DRV_INVALID_FILE_DESC_VALUE (S32)(-1)
+#endif
+
+#else
+
+#error "Compiling for an unknown OS"
+
+#endif
+
+#define OUT
+#define IN
+#define INOUT
+
+//
+// VERIFY_SIZEOF lets you insert a compile-time check that the size of a data
+// type (e.g. a struct) is what you think it should be. Usually it is
+// important to know what the actual size of your struct is, and to make sure
+// it is the same across all platforms. So this will prevent the code from
+// compiling if something happens that you didn't expect, whether it's because
+// you counted wrong, or more often because the compiler inserted padding that
+// you don't want.
+//
+// NOTE: 'type' and 'size' must both be identifier safe, e.g. matching the
+// regular expression /^[0-9a-zA-Z_]+$/.
+//
+// Example:
+//     typedef struct { void *ptr; int data; } mytype;
+//     VERIFY_SIZEOF(mytype, 8);
+//     ^-- this is correct on 32-bit platforms, but fails
+//         on 64-bit platforms, indicating a possible
+//         portability issue. (On a typical LP64 target the struct is
+//         padded to 16 bytes, so VERIFY_SIZEOF(mytype, 16) would be the
+//         correct assertion there.)
+//
+#define VERIFY_SIZEOF(type, size) \
+	enum { sizeof_##type##_eq_##size = 1 / (int)(sizeof(type) == size) }
+
+#if defined(DRV_OS_WINDOWS)
+#define DRV_DLLIMPORT __declspec(dllimport)
+#define DRV_DLLEXPORT __declspec(dllexport)
+#endif
+#if defined(DRV_OS_UNIX)
+#define DRV_DLLIMPORT
+#define DRV_DLLEXPORT
+#endif
+
+#define DRV_STRING_FORMAT_WIDTH2(str) #str
+#define DRV_STRING_FORMAT_WIDTH(str) DRV_STRING_FORMAT_WIDTH2(str)
+#if defined(DRV_OS_WINDOWS)
+#define FSI64RAW "I64"
+#define DRV_PATH_SEPARATOR "\\"
+#define L_DRV_PATH_SEPARATOR L"\\"
+#endif
+
+#if defined(DRV_OS_UNIX)
+#define FSI64RAW "ll"
+#define DRV_PATH_SEPARATOR "/"
+#define L_DRV_PATH_SEPARATOR L"/"
+#endif
+
+#define FSS64 "%" FSI64RAW "d"
+#define FSU64 "%" FSI64RAW "u"
+#define FSX64 "%" FSI64RAW "x"
+
+#if defined(DRV_OS_WINDOWS)
+#define DRV_RTLD_NOW 0
+#endif
+#if defined(DRV_OS_UNIX)
+#if defined(DRV_OS_FREEBSD)
+#define DRV_RTLD_NOW 0
+#else
+#define DRV_RTLD_NOW RTLD_NOW
+#endif
+#endif
+
+#define DRV_STRLEN (U32) strlen
+#define DRV_WCSLEN (U32) wcslen
+#define DRV_STRCSPN strcspn
+#define DRV_STRCHR strchr
+#define DRV_STRRCHR strrchr
+#define DRV_WCSRCHR wcsrchr
+#if !defined(STATIC_PROG_API)
+#define DRV_STRCPY strcpy_safe
+#define DRV_STRNCPY strncpy_safe
+#define DRV_STRCAT strcat_safe
+#define DRV_STRNCAT strncat_safe
+#define DRV_WCSCPY wcscpy_safe
+#define DRV_WCSNCPY wcsncpy_safe
+#define DRV_WCSCAT wcscat_safe
+#define DRV_WCSNCAT wcsncat_safe
+#define DRV_SPRINTF sprintf_safe
+#define DRV_SNPRINTF snprintf_safe
+#define DRV_SNWPRINTF snwprintf_safe
+#define DRV_MEMCPY memcpy_safe
+#define DRV_FOPEN(fp, name, mode) fopen_safe(&(fp), (name), (mode))
+#if defined(DRV_OS_WINDOWS)
+#define DRV_WFOPEN(fp, name, mode) wfopen_safe(&(fp), (name), (mode))
+#endif
+#endif
+#if defined(DRV_OS_WINDOWS)
+#define DRV_STCHARLEN DRV_WCSLEN
+#else
+#define DRV_STCHARLEN DRV_STRLEN
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+// To minimize dependencies on other files in sampling_utils
+// confining the below MACRO definitions to this
+// file alone for static PROG_API case
+#if defined(STATIC_PROG_API)
+#define DRV_STRCPY(dst, dst_size, src) \
+	(strcpy_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCPY(dst, dst_size, src, n) \
+	(strncpy_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRCAT(dst, dst_size, src) \
+	(strcat_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCAT(dst, dst_size, src, n) \
+	(strncat_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCPY(dst, dst_size, src) \
+	(wcscpy_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCPY(dst, dst_size, src, n) \
+	(wcsncpy_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCAT(dst, dst_size, src) \
+	(wcscat_s(dst, dst_size, src) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCAT(dst, dst_size, src, n) \
+	(wcsncat_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_SPRINTF(dst, dst_size, args...) \
+	(sprintf_s((dst), dst_size, ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_MEMCPY(dst, dst_size, src, n) \
+	(memcpy_s(dst, dst_size, src, n) == 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_SNPRINTF(buf, buf_size, length, args...) \
+	(_snprintf_s(buf, buf_size, length, ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_SNWPRINTF(buf, buf_size, length, args...) \
+	(_snwprintf_s(buf, buf_size, length, ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_FOPEN(fp, name, mode) fopen_s(&(fp), (name), (mode))
+#define DRV_WFOPEN(fp, name, mode) wfopen_s(&(fp), (name), (mode))
+#endif
+#define DRV_STRICMP _stricmp
+#define DRV_STRNCMP strncmp
+#define DRV_STRNICMP _strnicmp
+#define DRV_STRDUP _strdup
+#define DRV_WCSDUP _wcsdup
+#define DRV_STRCMP strcmp
+#define DRV_WCSCMP wcscmp
+#define DRV_VSNPRINTF(buf, buf_size, length, format, args) \
+	_vsnprintf_s((buf), (buf_size), (_TRUNCATE), (format), (args))
+#define DRV_VSNWPRINTF(buf, buf_size, length, format, args) \
+	_vsnwprintf_s((buf), (buf_size), (_TRUNCATE), (format), (args))
+#define DRV_SSCANF sscanf_s
+#define DRV_WMEMCPY wmemcpy_s
+#define DRV_STRTOK strtok_s
+#define DRV_STRTOUL strtoul
+#define DRV_STRTOL strtol
+#define DRV_STRTOULL _strtoui64
+#define DRV_STRTOQ _strtoui64
+#define DRV_FCLOSE(fp) \
+	if ((fp) != NULL) { \
+		fclose((fp)); \
+	}
+#define DRV_WCSTOK wcstok_s
+#define DRV_WCSSTR wcsstr
+#define DRV_STRERROR strerror_s
+#define DRV_VSPRINTF vsprintf_s
+#define DRV_VSWPRINTF vswprintf_s
+#define DRV_GETENV_S getenv_s
+#define DRV_WGETENV_S wgetenv_s
+#define DRV_PUTENV(name) _putenv(name)
+#define DRV_USTRCMP(X, Y) DRV_WCSCMP(X, Y)
+#define DRV_USTRDUP(X) DRV_WCSDUP(X)
+#define DRV_ACCESS(X) _access_s(X, 4)
+#define DRV_STRSTR strstr
+
+#define DRV_STCHAR_COPY DRV_WCSNCPY
+
+#define DRV_GETENV(buf, buf_size, name) _dupenv_s(&(buf), &(buf_size), (name))
+#define DRV_WGETENV(buf, buf_size, name) _wdupenv_s(&(buf), &(buf_size), (name))
+#define DRV_STRTOK_R(tok, delim, context) strtok_s((tok), (delim), (context))
+#define DRV_SCLOSE(fp) _close(fp)
+#define DRV_WRITE(fp, buf, buf_size) _write(fp, buf, buf_size);
+#define DRV_SOPEN_S(fp, name, oflag, shflag, pmode) \
+	_sopen_s((fp), (name), (oflag), (shflag), (pmode))
+#endif
+
+#if defined(DRV_OS_UNIX)
+/*
+   Note: Many of the following macros have a "size" as the second argument. Generally
+   speaking, this is for compatibility with the _s versions available on Windows.
+   On Linux/Solaris/Mac, it is ignored. On Windows, it is the size of the destination
+   buffer and is used wrt memory checking features available in the C runtime in debug
+   mode. Do not confuse it with the number of bytes to be copied, or such.
+
+   On Windows, this size should correspond to the number of allocated characters
+   (char or wchar_t) pointed to by the first argument. See MSDN for more details.
+*/
+// To minimize dependencies on other files in sampling_utils
+// confining the below MACRO definitions to this
+// file alone for static PROG_API case
+#if defined(STATIC_PROG_API)
+#define DRV_STRCPY(dst, dst_size, src) \
+	(strcpy((dst), (src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCPY(dst, dst_size, src, n) \
+	(strncpy((dst), (src), (n)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRCAT(dst, dst_size, src) \
+	(strcat((dst), (src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_STRNCAT(dst, dst_size, src, n) \
+	(strncat((dst), (src), (n)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCPY(dst, dst_size, src) \
+	(wcscpy((dst), (const wchar_t *)(src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCPY(dst, dst_size, src, count) \
+	(wcsncpy((dst), (const wchar_t *)(src), (count)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSCAT(dst, dst_size, src) \
+	(wcscat((dst), (const wchar_t *)(src)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR
+#define DRV_WCSNCAT(dst, dst_size, src, n) \
+	(wcsncat((dst), (const wchar_t *)(src), (n)) != NULL) ?
VT_SUCCESS : VT_SAM_ERROR +#define DRV_SPRINTF(dst, dst_size, args...) \ + (sprintf((dst), ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_SNPRINTF(buf, buf_size, length, args...) \ + (snprintf((buf), (length), ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_SNWPRINTF(buf, buf_size, length, args...) \ + (snwprintf((buf), (length), ##args) >= 0) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_MEMCPY(dst, dst_size, src, n) \ + (memcpy((dst), (src), (n)) != NULL) ? VT_SUCCESS : VT_SAM_ERROR +#define DRV_FOPEN(fp, name, mode) ((fp) = fopen((name), (mode))) +#endif + +#define DRV_STRICMP strcasecmp +#define DRV_STRDUP strdup +#define DRV_STRNDUP strndup +#define DRV_STRCMP strcmp +#define DRV_STRNCMP strncmp +#define DRV_STRSTR strstr +#define DRV_VSNPRINTF(buf, buf_size, length, args...) \ + vsnprintf((buf), (length), ##args) +#define DRV_SSCANF sscanf +#define DRV_STRTOK(tok, delim, context) strtok((tok), (delim)) +#define DRV_STRTOUL strtoul +#define DRV_STRTOULL strtoull +#define DRV_STRTOL strtol +#define DRV_FCLOSE(fp) \ + if ((fp) != NULL) { \ + fclose((fp)); \ + } +#define DRV_WCSTOK(tok, delim, context) \ + wcstok((tok), (const wchar_t *)(delim), (context)) +#define DRV_STRERROR strerror +#define DRV_VSPRINTF(dst, dst_size, length, args...) \ + vsprintf((dst), (length), ##args) +#define DRV_VSWPRINTF(dst, dst_size, length, args...) \ + vswprintf((dst), (length), ##args) +#define DRV_GETENV_S(dst, dst_size) getenv(dst) +#define DRV_WGETENV_S(dst, dst_size) wgetenv(dst) +#define DRV_PUTENV(name) putenv(name) +#define DRV_GETENV(buf, buf_size, name) ((buf) = getenv((name))) +#define DRV_USTRCMP(X, Y) DRV_STRCMP(X, Y) +#define DRV_USTRDUP(X) DRV_STRDUP(X) +#define DRV_ACCESS(X) access(X, X_OK) +#define DRV_STRTOK_R(tok, delim, context) strtok_r((tok), (delim), (context)) + +#define DRV_STCHAR_COPY DRV_STRNCPY +#endif + +#if defined(DRV_OS_WINDOWS) +#define DRV_STRTOK_R(tok, delim, context) strtok_s((tok), (delim), (context)) +#else +#define DRV_STRTOK_R(tok, delim, context) strtok_r((tok), (delim), (context)) +#endif + +#if defined(DRV_OS_OPENWRT) || defined(DRV_OS_ANDROID) +#define DRV_STRTOQ strtol +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) +#define DRV_STRTOQ strtoq +#endif + +#if defined(DRV_OS_SOLARIS) +#define DRV_STRTOQ strtoll +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC) +#define DRV_WCSDUP wcsdup +#endif + +#if defined(DRV_OS_SOLARIS) +#define DRV_WCSDUP solaris_wcsdup +#endif + +#if defined(DRV_OS_ANDROID) +#define DRV_WCSDUP android_wcsdup +#endif + +/* + * Windows uses wchar_t and linux uses char for strings. + * Need an extra level of abstraction to standardize it. + */ +#if defined(DRV_OS_WINDOWS) +#define DRV_STDUP DRV_WCSDUP +#define DRV_FORMAT_STRING(x) L##x +#define DRV_PRINT_STRING(stream, format, ...) \ + fwprintf((stream), (format), __VA_ARGS__) +#define DRV_STRING_FORMAT_SPECIFIER "%ls" +#else +#define DRV_STDUP DRV_STRDUP +#define DRV_FORMAT_STRING(x) x +#define DRV_PRINT_STRING(stream, format, ...) 
\
+	fprintf((stream), (format), __VA_ARGS__)
+#define DRV_STRING_FORMAT_SPECIFIER "%s"
+#endif
+
+/*
+ * OS return types
+ */
+#if defined(DRV_OS_UNIX)
+#define OS_STATUS int
+#define OS_SUCCESS 0
+#if defined(BUILD_DRV_ESX)
+#define OS_ILLEGAL_IOCTL -1
+#define OS_NO_MEM -2
+#define OS_FAULT -3
+#define OS_INVALID -4
+#define OS_NO_SYSCALL -5
+#define OS_RESTART_SYSCALL -6
+#define OS_IN_PROGRESS -7
+#else
+#define OS_ILLEGAL_IOCTL -ENOTTY
+#define OS_NO_MEM -ENOMEM
+#define OS_FAULT -EFAULT
+#define OS_INVALID -EINVAL
+#define OS_NO_SYSCALL -ENOSYS
+#define OS_RESTART_SYSCALL -ERESTARTSYS
+#define OS_IN_PROGRESS -EALREADY
+#endif
+#endif
+#if defined(DRV_OS_WINDOWS)
+#define OS_STATUS NTSTATUS
+#define OS_SUCCESS STATUS_SUCCESS
+#define OS_ILLEGAL_IOCTL STATUS_UNSUCCESSFUL
+#define OS_NO_MEM STATUS_UNSUCCESSFUL
+#define OS_FAULT STATUS_UNSUCCESSFUL
+#define OS_INVALID STATUS_UNSUCCESSFUL
+#define OS_NO_SYSCALL STATUS_UNSUCCESSFUL
+#define OS_RESTART_SYSCALL STATUS_UNSUCCESSFUL
+#define OS_IN_PROGRESS STATUS_UNSUCCESSFUL
+#endif
+
+/****************************************************************************
+ ** Driver State definitions
+ ***************************************************************************/
+#define DRV_STATE_UNINITIALIZED 0
+#define DRV_STATE_RESERVED 1
+#define DRV_STATE_IDLE 2
+#define DRV_STATE_PAUSED 3
+#define DRV_STATE_STOPPED 4
+#define DRV_STATE_RUNNING 5
+#define DRV_STATE_PAUSING 6
+#define DRV_STATE_PREPARE_STOP 7
+#define DRV_STATE_TERMINATING 8
+
+#define MATCHING_STATE_BIT(state) ((U32)1 << state)
+#define STATE_BIT_UNINITIALIZED MATCHING_STATE_BIT(DRV_STATE_UNINITIALIZED)
+#define STATE_BIT_RESERVED MATCHING_STATE_BIT(DRV_STATE_RESERVED)
+#define STATE_BIT_IDLE MATCHING_STATE_BIT(DRV_STATE_IDLE)
+#define STATE_BIT_PAUSED MATCHING_STATE_BIT(DRV_STATE_PAUSED)
+#define STATE_BIT_STOPPED MATCHING_STATE_BIT(DRV_STATE_STOPPED)
+#define STATE_BIT_RUNNING MATCHING_STATE_BIT(DRV_STATE_RUNNING)
+#define STATE_BIT_PAUSING MATCHING_STATE_BIT(DRV_STATE_PAUSING)
+#define STATE_BIT_PREPARE_STOP MATCHING_STATE_BIT(DRV_STATE_PREPARE_STOP)
+#define STATE_BIT_TERMINATING MATCHING_STATE_BIT(DRV_STATE_TERMINATING)
+#define STATE_BIT_ANY ((U32)-1)
+
+#define IS_COLLECTING_STATE(state) \
+	(!!(MATCHING_STATE_BIT(state) & \
+	    (STATE_BIT_RUNNING | STATE_BIT_PAUSING | STATE_BIT_PAUSED)))
+
+/*
+ * Stop codes
+ */
+#define DRV_STOP_BASE 0
+#define DRV_STOP_NORMAL 1
+#define DRV_STOP_ASYNC 2
+#define DRV_STOP_CANCEL 3
+
+#define MAX_EVENTS 256 // Limiting maximum multiplexing events to 256.
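+/*
+ * Illustrative sketch (editorial annotation, not part of this header):
+ * MATCHING_STATE_BIT turns a state number into a bit, so a state-set test
+ * is a single mask operation.  A hypothetical interrupt handler could bail
+ * out early when no collection is in flight; GET_DRIVER_STATE() below is a
+ * made-up accessor name:
+ *
+ *     U32 state = GET_DRIVER_STATE();
+ *
+ *     if (!IS_COLLECTING_STATE(state)) {
+ *             return;   // only RUNNING, PAUSING and PAUSED pass the test
+ *     }
+ */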
+#if defined(DRV_OS_UNIX)
+#define UNREFERENCED_PARAMETER(p) ((p) = (p))
+#endif
+
+/*
+ * Global marker names
+ */
+#define START_MARKER_NAME "SEP_START_MARKER"
+#define PAUSE_MARKER_NAME "SEP_PAUSE_MARKER"
+#define RESUME_MARKER_NAME "SEP_RESUME_MARKER"
+
+#define DRV_SOC_STRING_LEN (100 + MAX_MARKER_LENGTH)
+
+/*
+ * Temp path
+ */
+#define SEP_TMPDIR "SEP_TMP_DIR"
+#if defined(DRV_OS_WINDOWS)
+#define OS_TMPDIR "TEMP"
+#define GET_DEFAULT_TMPDIR(dir, size) \
+	{ \
+		GetTempPath((U32)size, dir); \
+	}
+#else
+#define OS_TMPDIR "TMPDIR"
+/*
+ * Unix has default tmp dir
+ */
+#if defined(DRV_OS_ANDROID)
+#define TEMP_PATH "/data"
+#else
+#define TEMP_PATH "/tmp"
+#endif
+#define GET_DEFAULT_TMPDIR(dir, size) \
+	{ \
+		DRV_STRCPY((STCHAR *)dir, (U32)size, (STCHAR *)TEMP_PATH); \
+	}
+#endif
+
+#define OS_ID_UNKNOWN -1
+#define OS_ID_NATIVE 0
+#define OS_ID_VMM 0
+#define OS_ID_MODEM 1
+#define OS_ID_ANDROID 2
+#define OS_ID_SECVM 3
+#define OS_ID_ACRN 0xFFFF
+
+#define PERF_HW_VER4 (5)
+
+#define INITIAL_BASE_NUM_EVENTS 2000
+#define INITIAL_BASE_NUM_MATRIX_EVENTS 100
+#define NUM_EVENTS_MULTIPLY_FACTOR 2
+
+/*
+ * Memory allocation and deallocation macros
+ */
+
+/* Checks if ptr is not NULL and deallocates it; logs in verbose mode */
+#ifdef SEP_FREE
+#undef SEP_FREE
+#endif
+
+#define SEP_FREE(loc) \
+	if ( (loc) ) \
+	{ \
+		free(loc); \
+		LOGIT((LOG_AREA_GENERAL|LOG_LEVEL_VERBOSE, "%s:%d Memory free: %d bytes\n", \
+		       __FUNCTION__, __LINE__, sizeof(loc))); \
+		loc = NULL; \
+	}
+
+/*
+ * All allocation macros log the number of bytes requested when in verbose
+ * mode, allocate the requested size, and log an error message if the return
+ * value is NULL. They don't do any error handling themselves; please use the
+ * "Memory management error handling macros" in error_reporting_utils.h
+ */
+#define SEP_MALLOC(loc, size, type) \
+	if (!((loc) = (type *)malloc(size))) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Malloc failed\n", __FUNCTION__, __LINE__)); \
+	} else { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_VERBOSE, \
+		       "%s:%d Memory allocation: %d bytes\n", __FUNCTION__, \
+		       __LINE__, size)); \
+	}
+
+#define SEP_CALLOC(loc, num, size, type) \
+	if (!((loc) = (type *)calloc(num, size))) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Calloc failed\n", __FUNCTION__, __LINE__)); \
+	} else { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_VERBOSE, \
+		       "%s:%d Memory allocation: %d bytes\n", __FUNCTION__, \
+		       __LINE__, size)); \
+	}
+
+#define SEP_REALLOC(new_loc, old_loc, size, type) \
+	if (!((new_loc) = (type *)realloc(old_loc, size))) { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_ERROR, \
+		       "%s:%d Realloc failed\n", __FUNCTION__, __LINE__)); \
+	} else { \
+		LOGIT((LOG_AREA_GENERAL | LOG_LEVEL_VERBOSE, \
+		       "%s:%d Memory reallocation: %d -> %d bytes\n", \
+		       __FUNCTION__, __LINE__, sizeof(old_loc), size)); \
+	}
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_ecb.h b/drivers/platform/x86/socperf/include/lwpmudrv_ecb.h
new file mode 100644
index 00000000000000..efefffab4d0ba0
--- /dev/null
+++ b/drivers/platform/x86/socperf/include/lwpmudrv_ecb.h
@@ -0,0 +1,1317 @@
+/*
+ * Copyright (C) 2007 Intel Corporation
+ *
+ * This software and the related documents are Intel copyrighted materials, and your use of them
+ * is governed by the express license under which they were provided to you ("License").
Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _LWPMUDRV_ECB_UTILS_H_ +#define _LWPMUDRV_ECB_UTILS_H_ + +#if defined(DRV_OS_WINDOWS) +#pragma warning(disable : 4200) +#pragma warning(disable : 4214) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +// control register types +#define CCCR 1 // counter configuration control register +#define ESCR 2 // event selection control register +#define DATA 4 // collected as snapshot of current value +#define DATA_RO_DELTA 8 // read-only counter collected as current-previous +#define DATA_RO_SS \ + 16 // read-only counter collected as snapshot of current value +#define METRICS 32 // hardware metrics + +// event multiplexing modes +#define EM_DISABLED -1 +#define EM_TIMER_BASED 0 +#define EM_EVENT_BASED_PROFILING 1 +#define EM_TRIGGER_BASED 2 + +// *************************************************************************** + +/*!\struct EVENT_DESC_NODE + * \var sample_size - size of buffer in bytes to hold the sample + extras + * \var max_gp_events - max number of General Purpose events per EM group + * \var pebs_offset - offset in the sample to locate the pebs capture information + * \var lbr_offset - offset in the sample to locate the lbr information + * \var lbr_num_regs - offset in the sample to locate the number of lbr register information + * \var latency_offset_in_sample - offset in the sample to locate the latency information + * \var latency_size_in_sample - size of latency records in the sample + * \var latency_size_from_pebs_record - size of the latency data from pebs record in the sample + * \var latency_offset_in_pebs_record - offset in the sample to locate the latency information + * in pebs record + * \var power_offset_in_sample - offset in the sample to locate the power information + * \var ebc_offset - offset in the sample to locate the ebc count information + * \var uncore_ebc_offset - offset in the sample to locate the uncore ebc count information + * + * \var ro_offset - offset of RO data in the sample + * \var ro_count - total number of RO entries (including all of IEAR/DEAR/BTB/IPEAR) + * \var iear_offset - offset into RO data at which IEAR entries begin + * \var dear_offset - offset into RO data at which DEAR entries begin + * \var btb_offset - offset into RO data at which BTB entries begin (these use the same PMDs) + * \var ipear_offset - offset into RO data at which IPEAR entries begin (these use the same PMDs) + * \var iear_count - number of IEAR entries + * \var dear_count - number of DEAR entries + * \var btb_count - number of BTB entries + * \var ipear_count - number of IPEAR entries + * + * \var pwr_offset - offset in the sample to locate the pwr count information + * \var p_state_offset - offset in the sample to locate the p_state information (APERF/MPERF) + * + * \brief Data structure to describe the events and the mode + * + */ + +typedef struct EVENT_DESC_NODE_S EVENT_DESC_NODE; +typedef EVENT_DESC_NODE *EVENT_DESC; + +struct EVENT_DESC_NODE_S { + U32 sample_size; + U32 pebs_offset; + U32 pebs_size; + U32 lbr_offset; + U32 lbr_num_regs; + U32 latency_offset_in_sample; + U32 latency_size_in_sample; + U32 latency_size_from_pebs_record; + U32 latency_offset_in_pebs_record; + U32 
power_offset_in_sample; + U32 ebc_offset; + U32 uncore_ebc_offset; + U32 eventing_ip_offset; + U32 hle_offset; + U32 pwr_offset; + U32 callstack_offset; + U32 callstack_size; + U32 p_state_offset; + U32 pebs_tsc_offset; + U32 perfmetrics_offset; + U32 perfmetrics_size; + /* ----------ADAPTIVE PEBS FIELDS --------- */ + U16 applicable_counters_offset; + U16 gpr_info_offset; + U16 gpr_info_size; + U16 xmm_info_offset; + U16 xmm_info_size; + U16 lbr_info_size; + /*------------------------------------------*/ + U32 reserved2; + U64 reserved3; +}; + +// +// Accessor macros for EVENT_DESC node +// +#define EVENT_DESC_sample_size(ec) ((ec)->sample_size) +#define EVENT_DESC_pebs_offset(ec) ((ec)->pebs_offset) +#define EVENT_DESC_pebs_size(ec) ((ec)->pebs_size) +#define EVENT_DESC_lbr_offset(ec) ((ec)->lbr_offset) +#define EVENT_DESC_lbr_num_regs(ec) ((ec)->lbr_num_regs) +#define EVENT_DESC_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) +#define EVENT_DESC_latency_size_from_pebs_record(ec) \ + ((ec)->latency_size_from_pebs_record) +#define EVENT_DESC_latency_offset_in_pebs_record(ec) \ + ((ec)->latency_offset_in_pebs_record) +#define EVENT_DESC_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) +#define EVENT_DESC_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) +#define EVENT_DESC_ebc_offset(ec) ((ec)->ebc_offset) +#define EVENT_DESC_uncore_ebc_offset(ec) ((ec)->uncore_ebc_offset) +#define EVENT_DESC_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) +#define EVENT_DESC_hle_offset(ec) ((ec)->hle_offset) +#define EVENT_DESC_pwr_offset(ec) ((ec)->pwr_offset) +#define EVENT_DESC_callstack_offset(ec) ((ec)->callstack_offset) +#define EVENT_DESC_callstack_size(ec) ((ec)->callstack_size) +#define EVENT_DESC_perfmetrics_offset(ec) ((ec)->perfmetrics_offset) +#define EVENT_DESC_perfmetrics_size(ec) ((ec)->perfmetrics_size) +#define EVENT_DESC_p_state_offset(ec) ((ec)->p_state_offset) +#define EVENT_DESC_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) +#define EVENT_DESC_applicable_counters_offset(ec) \ + ((ec)->applicable_counters_offset) +#define EVENT_DESC_gpr_info_offset(ec) ((ec)->gpr_info_offset) +#define EVENT_DESC_gpr_info_size(ec) ((ec)->gpr_info_size) +#define EVENT_DESC_xmm_info_offset(ec) ((ec)->xmm_info_offset) +#define EVENT_DESC_xmm_info_size(ec) ((ec)->xmm_info_size) +#define EVENT_DESC_lbr_info_size(ec) ((ec)->lbr_info_size) + +// *************************************************************************** + +/*!\struct EVENT_CONFIG_NODE + * \var num_groups - The number of groups being programmed + * \var em_mode - Is EM valid? If so how? + * \var em_time_slice - EM valid? 
time slice in milliseconds + * \var sample_size - size of buffer in bytes to hold the sample + extras + * \var max_gp_events - Max number of General Purpose events per EM group + * \var pebs_offset - offset in the sample to locate the pebs capture information + * \var lbr_offset - offset in the sample to locate the lbr information + * \var lbr_num_regs - offset in the sample to locate the lbr information + * \var latency_offset_in_sample - offset in the sample to locate the latency information + * \var latency_size_in_sample - size of latency records in the sample + * \var latency_size_from_pebs_record - offset in the sample to locate the latency + * size from pebs record + * \var latency_offset_in_pebs_record - offset in the sample to locate the latency information + * in pebs record + * \var power_offset_in_sample - offset in the sample to locate the power information + * \var ebc_offset - offset in the sample to locate the ebc count information + * + * \var pwr_offset - offset in the sample to locate the pwr count information + * \var p_state_offset - offset in the sample to locate the p_state information (APERF/MPERF) + * + * \brief Data structure to describe the events and the mode + * + */ + +typedef struct EVENT_CONFIG_NODE_S EVENT_CONFIG_NODE; +typedef EVENT_CONFIG_NODE *EVENT_CONFIG; + +struct EVENT_CONFIG_NODE_S { + U32 num_groups; + S32 em_mode; + S32 em_factor; + S32 em_event_num; + U32 sample_size; + U32 max_gp_events; + U32 max_fixed_counters; + U32 max_ro_counters; // maximum read-only counters + U32 pebs_offset; + U32 pebs_size; + U32 lbr_offset; + U32 lbr_num_regs; + U32 latency_offset_in_sample; + U32 latency_size_in_sample; + U32 latency_size_from_pebs_record; + U32 latency_offset_in_pebs_record; + U32 power_offset_in_sample; + U32 ebc_offset; + U32 num_groups_unc; + U32 ebc_offset_unc; + U32 sample_size_unc; + U32 eventing_ip_offset; + U32 hle_offset; + U32 pwr_offset; + U32 callstack_offset; + U32 callstack_size; + U32 p_state_offset; + U32 pebs_tsc_offset; + U64 reserved1; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +// +// Accessor macros for EVENT_CONFIG node +// +#define EVENT_CONFIG_num_groups(ec) ((ec)->num_groups) +#define EVENT_CONFIG_mode(ec) ((ec)->em_mode) +#define EVENT_CONFIG_em_factor(ec) ((ec)->em_factor) +#define EVENT_CONFIG_em_event_num(ec) ((ec)->em_event_num) +#define EVENT_CONFIG_sample_size(ec) ((ec)->sample_size) +#define EVENT_CONFIG_max_gp_events(ec) ((ec)->max_gp_events) +#define EVENT_CONFIG_max_fixed_counters(ec) ((ec)->max_fixed_counters) +#define EVENT_CONFIG_max_ro_counters(ec) ((ec)->max_ro_counters) +#define EVENT_CONFIG_pebs_offset(ec) ((ec)->pebs_offset) +#define EVENT_CONFIG_pebs_size(ec) ((ec)->pebs_size) +#define EVENT_CONFIG_lbr_offset(ec) ((ec)->lbr_offset) +#define EVENT_CONFIG_lbr_num_regs(ec) ((ec)->lbr_num_regs) +#define EVENT_CONFIG_latency_offset_in_sample(ec) ((ec)->latency_offset_in_sample) +#define EVENT_CONFIG_latency_size_from_pebs_record(ec) \ + ((ec)->latency_size_from_pebs_record) +#define EVENT_CONFIG_latency_offset_in_pebs_record(ec) \ + ((ec)->latency_offset_in_pebs_record) +#define EVENT_CONFIG_latency_size_in_sample(ec) ((ec)->latency_size_in_sample) +#define EVENT_CONFIG_power_offset_in_sample(ec) ((ec)->power_offset_in_sample) +#define EVENT_CONFIG_ebc_offset(ec) ((ec)->ebc_offset) +#define EVENT_CONFIG_num_groups_unc(ec) ((ec)->num_groups_unc) +#define EVENT_CONFIG_ebc_offset_unc(ec) ((ec)->ebc_offset_unc) +#define EVENT_CONFIG_sample_size_unc(ec) ((ec)->sample_size_unc) +#define 
EVENT_CONFIG_eventing_ip_offset(ec) ((ec)->eventing_ip_offset) +#define EVENT_CONFIG_hle_offset(ec) ((ec)->hle_offset) +#define EVENT_CONFIG_pwr_offset(ec) ((ec)->pwr_offset) +#define EVENT_CONFIG_callstack_offset(ec) ((ec)->callstack_offset) +#define EVENT_CONFIG_callstack_size(ec) ((ec)->callstack_size) +#define EVENT_CONFIG_p_state_offset(ec) ((ec)->p_state_offset) +#define EVENT_CONFIG_pebs_tsc_offset(ec) ((ec)->pebs_tsc_offset) + +typedef enum { UNC_MUX = 1, UNC_COUNTER } UNC_SA_PROG_TYPE; + +typedef enum { + UNC_PCICFG = 1, + UNC_MMIO, + UNC_STOP, + UNC_MEMORY, + UNC_STATUS +} UNC_SA_CONFIG_TYPE; + +typedef enum { + UNC_MCHBAR = 1, + UNC_DMIBAR, + UNC_PCIEXBAR, + UNC_GTTMMADR, + UNC_GDXCBAR, + UNC_CHAPADR, + UNC_SOCPCI, + UNC_NPKBAR +} UNC_SA_BAR_TYPE; + +typedef enum { UNC_OP_READ = 1, UNC_OP_WRITE, UNC_OP_RMW } UNC_SA_OPERATION; + +typedef enum { + STATIC_COUNTER = 1, + FREERUN_COUNTER, + PROG_FREERUN_COUNTER, + PROGRAMMABLE_COUNTER +} COUNTER_TYPES; + +typedef enum { + PACKAGE_EVENT = 1, + MODULE_EVENT, + THREAD_EVENT, + SYSTEM_EVENT +} EVENT_SCOPE_TYPES; + +typedef enum { + DEVICE_CORE = 1, // CORE DEVICE + DEVICE_HETERO, + DEVICE_UNC_CBO = 10, // UNCORE DEVICES START + DEVICE_UNC_HA, + DEVICE_UNC_IMC, + DEVICE_UNC_IRP, + DEVICE_UNC_NCU, + DEVICE_UNC_PCU, + DEVICE_UNC_POWER, + DEVICE_UNC_QPI, + DEVICE_UNC_R2PCIE, + DEVICE_UNC_R3QPI, + DEVICE_UNC_SBOX, + DEVICE_UNC_GT, + DEVICE_UNC_UBOX, + DEVICE_UNC_WBOX, + DEVICE_UNC_COREI7, + DEVICE_UNC_CHA, + DEVICE_UNC_EDC, + DEVICE_UNC_IIO, + DEVICE_UNC_M2M, + DEVICE_UNC_EDRAM, + DEVICE_UNC_FPGA_CACHE, + DEVICE_UNC_FPGA_FAB, + DEVICE_UNC_FPGA_THERMAL, + DEVICE_UNC_FPGA_POWER, + DEVICE_UNC_FPGA_GB, + DEVICE_UNC_MDF, + DEVICE_UNC_RDT, + DEVICE_UNC_PMT, // Core Telemetry + DEVICE_UNC_M2HBM, + DEVICE_UNC_MCHBM, + DEVICE_UNC_CXLCM, + DEVICE_UNC_CXLDP, + DEVICE_UNC_NOC, + DEVICE_UNC_TELEMETRY = 150, // TELEMETRY DEVICE + DEVICE_UNC_CHAP = 200, // CHIPSET DEVICES START + DEVICE_UNC_GMCH, + DEVICE_UNC_GFX, + DEVICE_UNC_SOCPERF = 300, // UNCORE VISA DEVICES START + DEVICE_UNC_HFI_RXE = 400, // STL HFI + DEVICE_UNC_HFI_TXE, + DEVICE_UNC_1 = 450, + DEVICE_UNC_2, + DEVICE_UNC_GFX_PMT = 500, // Graphics Cards + DEVICE_UNC_CSTATE_PKG = 600, + DEVICE_UNC_CSTATE_CORE, + DEVICE_PMEM_FC, + DEVICE_PMEM_MC +} DEVICE_TYPES; + +typedef enum { + LBR_ENTRY_TOS = 0, + LBR_ENTRY_FROM_IP, + LBR_ENTRY_TO_IP, + LBR_ENTRY_INFO, + LER_ENTRY_FROM_IP, + LER_ENTRY_TO_IP, + LER_ENTRY_INFO +} LBR_ENTRY_TYPE; + +// *************************************************************************** + +/*!\struct EVENT_REG_ID_NODE + * \var reg_id - MSR index to r/w + * \var pci_id - PCI based register and its details to operate on + */ +typedef struct EVENT_REG_ID_NODE_S EVENT_REG_ID_NODE; +typedef EVENT_REG_ID_NODE *EVENT_REG_ID; + +struct EVENT_REG_ID_NODE_S { + U32 reg_id; + U32 pci_bus_no; + U32 pci_dev_no; + U32 pci_func_no; + U32 data_size; + U32 bar_index; // Points to the index (MMIO_INDEX_LIST) + // of bar memory map list to be used in mmio_bar_list of ECB + U32 reserved1; + U32 reserved2; + U64 phys_addr; // 64 bit register address +}; + +// *************************************************************************** + +typedef enum { + PMU_REG_RW_READ = 1, + PMU_REG_RW_WRITE, + PMU_REG_RW_READ_WRITE, + PMU_REG_RW_READ_MASK_WRITE, + PMU_REG_RW_READ_VALIDATE_MASK, + PMU_REG_RW_READ_MERGE_READ, +} PMU_REG_RW_TYPES; + +typedef enum { + PMU_REG_PROG_MSR = 1, + PMU_REG_PROG_PCI, + PMU_REG_PROG_MMIO, +} PMU_REG_PROG_TYPES; + +typedef enum { + PMU_REG_GLOBAL_CTRL = 1, + 
PMU_REG_UNIT_CTRL, + PMU_REG_UNIT_STATUS, + PMU_REG_DATA, + PMU_REG_EVENT_SELECT, + PMU_REG_FILTER, + PMU_REG_FIXED_CTRL, + PMU_REG_DISCOVERY_BASE, +} PMU_REG_TYPES; + +/*!\struct EVENT_REG_NODE + * \var reg_type - register type + * \var event_id_index - event ID index + * \var event_reg_id - register ID/pci register details + * \var desc_id - desc ID + * \var flags - flags + * \var reg_value - register value + * \var max_bits - max bits + * \var scheduled - boolean to specify if this event node has been scheduled already + * \var bus_no - PCI bus number + * \var dev_no - PCI device number + * \var func_no - PCI function number + * \var counter_type - Event counter type - static/freerun + * \var event_scope - Event scope - package/module/thread + * \var reg_prog_type - Register Programming type + * \var reg_rw_type - Register Read/Write type + * \var reg_order - Register order in the programming sequence + * \var + * \brief Data structure to describe the event registers + * + */ + +typedef struct EVENT_REG_NODE_S EVENT_REG_NODE; +typedef EVENT_REG_NODE *EVENT_REG; + +struct EVENT_REG_NODE_S { + U8 reg_type; + U8 unit_id; + U16 event_id_index; + U16 counter_event_offset; + U8 driverless_dup_event; // identify event as duplicate + U8 reg_package_id; + EVENT_REG_ID_NODE event_reg_id; + U64 reg_value; + U16 desc_id; + U16 flags; + U32 reserved2; + U64 max_bits; + U8 scheduled; + S8 secondary_pci_offset_shift; + U16 secondary_pci_offset_offset; // offset of the offset + U32 counter_type; + U32 event_scope; + U8 reg_prog_type; + U8 reg_rw_type; + U8 reg_order; + U8 bit_position; + U64 secondary_pci_offset_mask; + U32 core_event_id; + U32 uncore_buffer_offset_in_package; + U32 uncore_buffer_offset_in_system; + U32 aux_reg_id_to_read; + U64 aux_read_mask; + U32 device_subtype; + U32 reserved3; + U64 reserved4; +}; + +// +// Accessor macros for EVENT_REG node +// Note: the flags field is not directly addressible to prevent hackery +// +#define EVENT_REG_reg_type(x, i) ((x)[(i)].reg_type) +#define EVENT_REG_event_id_index(x, i) ((x)[(i)].event_id_index) +#define EVENT_REG_unit_id(x, i) ((x)[(i)].unit_id) +#define EVENT_REG_reg_package_id(x, i) ((x)[(i)].reg_package_id) +#define EVENT_REG_counter_event_offset(x, i) ((x)[(i)].counter_event_offset) +#define EVENT_REG_reg_id(x, i) ((x)[(i)].event_reg_id.reg_id) +#define EVENT_REG_bus_no(x, i) ((x)[(i)].event_reg_id.pci_bus_no) +#define EVENT_REG_dev_no(x, i) ((x)[(i)].event_reg_id.pci_dev_no) +#define EVENT_REG_func_no(x, i) ((x)[(i)].event_reg_id.pci_func_no) + +// points to the reg_id +#define EVENT_REG_offset(x, i) ((x)[(i)].event_reg_id.reg_id) +#define EVENT_REG_data_size(x, i) ((x)[(i)].event_reg_id.data_size) +#define EVENT_REG_bar_index(x, i) ((x)[(i)].event_reg_id.bar_index) +#define EVENT_REG_phys_addr(x, i) ((x)[(i)].event_reg_id.phys_addr) +#define EVENT_REG_desc_id(x, i) ((x)[(i)].desc_id) +#define EVENT_REG_flags(x, i) ((x)[(i)].flags) +#define EVENT_REG_reg_value(x, i) ((x)[(i)].reg_value) +#define EVENT_REG_max_bits(x, i) ((x)[(i)].max_bits) +#define EVENT_REG_scheduled(x, i) ((x)[(i)].scheduled) +#define EVENT_REG_secondary_pci_offset_shift(x, i) \ + ((x)[(i)].secondary_pci_offset_shift) +#define EVENT_REG_secondary_pci_offset_offset(x, i) \ + ((x)[(i)].secondary_pci_offset_offset) +#define EVENT_REG_secondary_pci_offset_mask(x, i) \ + ((x)[(i)].secondary_pci_offset_mask) + +#define EVENT_REG_counter_type(x, i) ((x)[(i)].counter_type) +#define EVENT_REG_event_scope(x, i) ((x)[(i)].event_scope) +#define EVENT_REG_reg_prog_type(x, i) 
((x)[(i)].reg_prog_type) +#define EVENT_REG_reg_rw_type(x, i) ((x)[(i)].reg_rw_type) +#define EVENT_REG_reg_order(x, i) ((x)[(i)].reg_order) +#define EVENT_REG_bit_position(x, i) ((x)[(i)].bit_position) + +#define EVENT_REG_core_event_id(x, i) ((x)[(i)].core_event_id) +#define EVENT_REG_uncore_buffer_offset_in_package(x, i) \ + ((x)[(i)].uncore_buffer_offset_in_package) +#define EVENT_REG_uncore_buffer_offset_in_system(x, i) \ + ((x)[(i)].uncore_buffer_offset_in_system) +#define EVENT_REG_aux_reg_id_to_read(x, i) ((x)[(i)].aux_reg_id_to_read) +#define EVENT_REG_aux_read_mask(x, i) ((x)[(i)].aux_read_mask) +#define EVENT_REG_aux_shift_index(x, i) ((x)[(i)].bit_position) // Alias +#define EVENT_REG_device_subtype(x, i) ((x)[(i)].device_subtype) +#define EVENT_REG_driverless_dup_event(x, i) ((x)[(i)].driverless_dup_event) + +// +// Config bits +// +#define EVENT_REG_precise_bit 0x00000001 +#define EVENT_REG_global_bit 0x00000002 +#define EVENT_REG_uncore_bit 0x00000004 +#define EVENT_REG_uncore_q_rst_bit 0x00000008 +#define EVENT_REG_latency_bit 0x00000010 +#define EVENT_REG_is_gp_reg_bit 0x00000020 +#define EVENT_REG_clean_up_bit 0x00000040 +#define EVENT_REG_em_trigger_bit 0x00000080 +#define EVENT_REG_lbr_value_bit 0x00000100 +#define EVENT_REG_fixed_reg_bit 0x00000200 +#define EVENT_REG_unc_evt_intr_read_bit 0x00000400 +#define EVENT_REG_generic_status_bit 0x00000800 +#define EVENT_REG_multi_pkg_evt_bit 0x00001000 +#define EVENT_REG_branch_evt_bit 0x00002000 +#define EVENT_REG_ebc_sampling_evt_bit 0x00004000 +#define EVENT_REG_collect_on_ctx_sw 0x00008000 +// +// Accessor macros for config bits +// +#define EVENT_REG_precise_get(x, i) ((x)[(i)].flags & EVENT_REG_precise_bit) +#define EVENT_REG_precise_set(x, i) ((x)[(i)].flags |= EVENT_REG_precise_bit) +#define EVENT_REG_precise_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_precise_bit) + +#define EVENT_REG_global_get(x, i) ((x)[(i)].flags & EVENT_REG_global_bit) +#define EVENT_REG_global_set(x, i) ((x)[(i)].flags |= EVENT_REG_global_bit) +#define EVENT_REG_global_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_global_bit) + +#define EVENT_REG_uncore_get(x, i) ((x)[(i)].flags & EVENT_REG_uncore_bit) +#define EVENT_REG_uncore_set(x, i) ((x)[(i)].flags |= EVENT_REG_uncore_bit) +#define EVENT_REG_uncore_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_uncore_bit) + +#define EVENT_REG_uncore_q_rst_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_uncore_q_rst_bit) +#define EVENT_REG_uncore_q_rst_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_uncore_q_rst_bit) +#define EVENT_REG_uncore_q_rst_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_uncore_q_rst_bit) + +#define EVENT_REG_latency_get(x, i) ((x)[(i)].flags & EVENT_REG_latency_bit) +#define EVENT_REG_latency_set(x, i) ((x)[(i)].flags |= EVENT_REG_latency_bit) +#define EVENT_REG_latency_clear(x, i) ((x)[(i)].flags &= ~EVENT_REG_latency_bit) + +#define EVENT_REG_is_gp_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_is_gp_reg_bit) +#define EVENT_REG_is_gp_reg_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_is_gp_reg_bit) +#define EVENT_REG_is_gp_reg_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_is_gp_reg_bit) + +#define EVENT_REG_lbr_value_get(x, i) ((x)[(i)].flags & EVENT_REG_lbr_value_bit) +#define EVENT_REG_lbr_value_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_lbr_value_bit) +#define EVENT_REG_lbr_value_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_lbr_value_bit) + +#define EVENT_REG_fixed_reg_get(x, i) ((x)[(i)].flags & EVENT_REG_fixed_reg_bit) +#define EVENT_REG_fixed_reg_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_fixed_reg_bit) 
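+// Illustrative usage only (a sketch, not driver code): every *_bit flag
+// above gets the same _get/_set/_clear triplet, e.g. with a hypothetical
+// ECB entry array `regs` and index `i`:
+//	if (EVENT_REG_precise_get(regs, i)) { /* entry is PEBS-capable */ }
+//	EVENT_REG_fixed_reg_set(regs, i);   /* mark as fixed-counter reg */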
+#define EVENT_REG_fixed_reg_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_fixed_reg_bit) + +#define EVENT_REG_multi_pkg_evt_bit_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_multi_pkg_evt_bit) +#define EVENT_REG_multi_pkg_evt_bit_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_multi_pkg_evt_bit) +#define EVENT_REG_multi_pkg_evt_bit_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_multi_pkg_evt_bit) + +#define EVENT_REG_clean_up_get(x, i) ((x)[(i)].flags & EVENT_REG_clean_up_bit) +#define EVENT_REG_clean_up_set(x, i) ((x)[(i)].flags |= EVENT_REG_clean_up_bit) +#define EVENT_REG_clean_up_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_clean_up_bit) + +#define EVENT_REG_em_trigger_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_em_trigger_bit) +#define EVENT_REG_em_trigger_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_em_trigger_bit) +#define EVENT_REG_em_trigger_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_em_trigger_bit) + +#define EVENT_REG_branch_evt_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_branch_evt_bit) +#define EVENT_REG_branch_evt_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_branch_evt_bit) +#define EVENT_REG_branch_evt_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_branch_evt_bit) + +#define EVENT_REG_ebc_sampling_evt_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_ebc_sampling_evt_bit) +#define EVENT_REG_ebc_sampling_evt_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_ebc_sampling_evt_bit) +#define EVENT_REG_ebc_sampling_evt_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_ebc_sampling_evt_bit) + +#define EVENT_REG_collect_on_ctx_sw_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_collect_on_ctx_sw) +#define EVENT_REG_collect_on_ctx_sw_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_collect_on_ctx_sw) +#define EVENT_REG_collect_on_ctx_sw_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_collect_on_ctx_sw) + +#define EVENT_REG_unc_evt_intr_read_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_unc_evt_intr_read_bit) +#define EVENT_REG_unc_evt_intr_read_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_unc_evt_intr_read_bit) +#define EVENT_REG_unc_evt_intr_read_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_unc_evt_intr_read_bit) + +#define EVENT_REG_generic_status_get(x, i) \ + ((x)[(i)].flags & EVENT_REG_generic_status_bit) +#define EVENT_REG_generic_status_set(x, i) \ + ((x)[(i)].flags |= EVENT_REG_generic_status_bit) +#define EVENT_REG_generic_status_clear(x, i) \ + ((x)[(i)].flags &= ~EVENT_REG_generic_status_bit) + +// *************************************************************************** + +/*!\struct DRV_PCI_DEVICE_ENTRY_NODE_S + * \var bus_no - PCI bus no to read + * \var dev_no - PCI device no to read + * \var func_no PCI device no to read + * \var bar_offset BASE Address Register offset of the PCI based PMU + * \var bit_offset Bit offset of the same + * \var size size of read/write + * \var bar_address the actual BAR present + * \var enable_offset Offset info to enable/disable + * \var enabled Status of enable/disable + * \brief Data structure to describe the PCI Device + * + */ + +typedef struct DRV_PCI_DEVICE_ENTRY_NODE_S DRV_PCI_DEVICE_ENTRY_NODE; +typedef DRV_PCI_DEVICE_ENTRY_NODE *DRV_PCI_DEVICE_ENTRY; + +struct DRV_PCI_DEVICE_ENTRY_NODE_S { + U32 bus_no; + U32 dev_no; + U32 func_no; + U32 bar_offset; + U64 bar_mask; + U32 bit_offset; + U32 size; + U64 bar_address; + U32 enable_offset; + U32 enabled; + U32 base_offset_for_mmio; + U32 operation; + U32 bar_name; + U32 prog_type; + U32 config_type; + S8 bar_shift; // positive shifts right, negative shifts left + U8 reserved0; + U16 reserved1; + U64 value; + U64 mask; + U64 
virtual_address; + U32 port_id; + U32 op_code; + U32 device_id; + U16 bar_num; + U16 feature_id; + U64 reserved2; + U64 reserved3; + U64 reserved4; +}; + +// +// Accessor macros for DRV_PCI_DEVICE_NODE node +// +#define DRV_PCI_DEVICE_ENTRY_bus_no(x) ((x)->bus_no) +#define DRV_PCI_DEVICE_ENTRY_dev_no(x) ((x)->dev_no) +#define DRV_PCI_DEVICE_ENTRY_func_no(x) ((x)->func_no) +#define DRV_PCI_DEVICE_ENTRY_bar_offset(x) ((x)->bar_offset) +#define DRV_PCI_DEVICE_ENTRY_bar_mask(x) ((x)->bar_mask) +#define DRV_PCI_DEVICE_ENTRY_bit_offset(x) ((x)->bit_offset) +#define DRV_PCI_DEVICE_ENTRY_size(x) ((x)->size) +#define DRV_PCI_DEVICE_ENTRY_bar_address(x) ((x)->bar_address) +#define DRV_PCI_DEVICE_ENTRY_enable_offset(x) ((x)->enable_offset) +#define DRV_PCI_DEVICE_ENTRY_enable(x) ((x)->enabled) +#define DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(x) ((x)->base_offset_for_mmio) +#define DRV_PCI_DEVICE_ENTRY_operation(x) ((x)->operation) +#define DRV_PCI_DEVICE_ENTRY_bar_name(x) ((x)->bar_name) +#define DRV_PCI_DEVICE_ENTRY_prog_type(x) ((x)->prog_type) +#define DRV_PCI_DEVICE_ENTRY_config_type(x) ((x)->config_type) +#define DRV_PCI_DEVICE_ENTRY_bar_shift(x) ((x)->bar_shift) +#define DRV_PCI_DEVICE_ENTRY_value(x) ((x)->value) +#define DRV_PCI_DEVICE_ENTRY_mask(x) ((x)->mask) +#define DRV_PCI_DEVICE_ENTRY_virtual_address(x) ((x)->virtual_address) +#define DRV_PCI_DEVICE_ENTRY_port_id(x) ((x)->port_id) +#define DRV_PCI_DEVICE_ENTRY_op_code(x) ((x)->op_code) +#define DRV_PCI_DEVICE_ENTRY_device_id(x) ((x)->device_id) +#define DRV_PCI_DEVICE_ENTRY_bar_num(x) ((x)->bar_num) +#define DRV_PCI_DEVICE_ENTRY_feature_id(x) ((x)->feature_id) + +// *************************************************************************** +typedef enum { + PMU_OPERATION_INITIALIZE = 0, + PMU_OPERATION_WRITE, + PMU_OPERATION_ENABLE, + PMU_OPERATION_DISABLE, + PMU_OPERATION_READ, + PMU_OPERATION_CLEANUP, + PMU_OPERATION_READ_LBRS, + PMU_OPERATION_GLOBAL_REGS, + PMU_OPERATION_CTRL_GP, + PMU_OPERATION_DATA_FIXED, + PMU_OPERATION_DATA_GP, + PMU_OPERATION_OCR, + PMU_OPERATION_HW_ERRATA, + PMU_OPERATION_CHECK_OVERFLOW_GP_ERRATA, + PMU_OPERATION_CHECK_OVERFLOW_ERRATA, + PMU_OPERATION_ALL_REG, + PMU_OPERATION_DATA_ALL, + PMU_OPERATION_GLOBAL_STATUS, + PMU_OPERATION_METRICS, +} PMU_OPERATION_TYPES; +#define MAX_OPERATION_TYPES 32 + +/*!\struct PMU_OPERATIONS_NODE + * \var operation_type - Type of operation from enumeration PMU_OPERATION_TYPES + * \var register_start - Start index of the registers for a specific operation + * \var register_len - Number of registers for a specific operation + * + * \brief + * Structure for defining start and end indices in the ECB entries array for + * each type of operation performed in the driver + * initialize, write, read, enable, disable, etc. 
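+ *
+ * Example (illustrative values only): if a group's enable registers occupy
+ * ECB entries[10..14], the corresponding node would carry
+ *   operation_type = PMU_OPERATION_ENABLE, register_start = 10,
+ *   register_len = 5,
+ * letting the driver walk entries from register_start up to
+ * register_start + register_len for that operation.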
+ */ +typedef struct PMU_OPERATIONS_NODE_S PMU_OPERATIONS_NODE; +typedef PMU_OPERATIONS_NODE *PMU_OPERATIONS; +struct PMU_OPERATIONS_NODE_S { + U32 operation_type; + U32 register_start; + U32 register_len; + U32 reserved1; + U32 reserved2; + U32 reserved3; +}; +#define PMU_OPERATIONS_operation_type(x) ((x)->operation_type) +#define PMU_OPERATIONS_register_start(x) ((x)->register_start) +#define PMU_OPERATIONS_register_len(x) ((x)->register_len) +#define PMU_OPER_operation_type(x, i) ((x)[(i)].operation_type) +#define PMU_OPER_register_start(x, i) ((x)[(i)].register_start) +#define PMU_OPER_register_len(x, i) ((x)[(i)].register_len) + +typedef enum { + ECB_MMIO_BAR1 = 1, + ECB_MMIO_BAR2 = 2, + ECB_MMIO_BAR3 = 3, + ECB_MMIO_BAR4 = 4, + ECB_MMIO_BAR5 = 5, + ECB_MMIO_BAR6 = 6, + ECB_MMIO_BAR7 = 7, + ECB_MMIO_BAR8 = 8, +} MMIO_INDEX_LIST; +#define MAX_MMIO_BARS 8 + +/*!\struct MMIO_BAR_INFO_NODE + */ +typedef struct MMIO_BAR_INFO_NODE_S MMIO_BAR_INFO_NODE; +typedef MMIO_BAR_INFO_NODE *MMIO_BAR_INFO; + +struct MMIO_BAR_INFO_NODE_S { + U32 bus_no; + U32 dev_no; + U32 func_no; + U32 addr_size; + U64 base_offset_for_mmio; + U32 map_size_for_mmio; + U32 main_bar_offset; + U64 main_bar_mask; + U8 main_bar_shift; + U8 reserved1; + U16 reserved2; + U32 reserved3; + U32 reserved4; + U32 secondary_bar_offset; + U64 secondary_bar_mask; + U8 secondary_bar_shift; + U8 reserved5; + U16 reserved6; + U32 reserved7; + U16 feature_id; + U16 reserved8; + U32 reserved9; + U64 reserved10; +}; + +#define MMIO_BAR_INFO_bus_no(x) ((x)->bus_no) +#define MMIO_BAR_INFO_dev_no(x) ((x)->dev_no) +#define MMIO_BAR_INFO_func_no(x) ((x)->func_no) +#define MMIO_BAR_INFO_addr_size(x) ((x)->addr_size) +#define MMIO_BAR_INFO_base_offset_for_mmio(x) ((x)->base_offset_for_mmio) +#define MMIO_BAR_INFO_map_size_for_mmio(x) ((x)->map_size_for_mmio) +#define MMIO_BAR_INFO_main_bar_offset(x) ((x)->main_bar_offset) +#define MMIO_BAR_INFO_main_bar_mask(x) ((x)->main_bar_mask) +#define MMIO_BAR_INFO_main_bar_shift(x) ((x)->main_bar_shift) +#define MMIO_BAR_INFO_secondary_bar_offset(x) ((x)->secondary_bar_offset) +#define MMIO_BAR_INFO_secondary_bar_mask(x) ((x)->secondary_bar_mask) +#define MMIO_BAR_INFO_secondary_bar_shift(x) ((x)->secondary_bar_shift) +#define MMIO_BAR_INFO_physical_address(x) ((x)->physical_address) +#define MMIO_BAR_INFO_virtual_address(x) ((x)->virtual_address) +#define MMIO_BAR_INFO_feature_id(x) ((x)->feature_id) + +/*!\struct ECB_NODE_S + * \var version - Version of ECB struct. + * \var num_entries - Total number of entries in "entries". + * \var group_id - Group ID. + * \var num_events - Number of events in this group. + * \var cccr_start - Starting index of counter configuration control registers in "entries". + * \var cccr_pop - Number of counter configuration control registers in "entries". + * \var escr_start - Starting index of event selection control registers in "entries". + * \var escr_pop - Number of event selection control registers in "entries". + * \var data_start - Starting index of data registers in "entries". + * \var data_pop - Number of data registers in "entries". + * \var pcidev_entry_node PCI device details for one device. + * \var metric_start Starting index of metric registers in "entries". + * \var metric_pop Number of metric registers in "entries". + * \var entries - All the register nodes required for programming. 
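+ * \var operations - Per-operation index ranges into "entries" (one slot per
+ *                   PMU_OPERATION_TYPES value).
+ * \var mmio_bar_list - MMIO BAR descriptions for MMIO-programmed units
+ *                      (up to MAX_MMIO_BARS).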
+ * + * \brief + */ + +typedef struct ECB_NODE_S ECB_NODE; +typedef ECB_NODE *ECB; + +struct ECB_NODE_S { + U8 version; + U8 reserved1; + U16 reserved2; + U32 num_entries; + U32 group_id; + U32 num_events; + U32 cccr_start; + U32 cccr_pop; + U32 escr_start; + U32 escr_pop; + U32 data_start; + U32 data_pop; + U16 flags; + U8 pmu_timer_interval; + U8 reserved3; + U32 size_of_allocation; + U32 group_offset; + U32 reserved4; + DRV_PCI_DEVICE_ENTRY_NODE pcidev_entry_node; + U32 num_pci_devices; + U32 pcidev_list_offset; + DRV_PCI_DEVICE_ENTRY pcidev_entry_list; +#if defined(DRV_IA32) + U32 pointer_padding; //add padding for the DRV_PCI_DEVICE_ENTRY pointer + //before this field for 32-bit mode +#endif + U32 device_type; + U32 dev_node; + PMU_OPERATIONS_NODE operations[MAX_OPERATION_TYPES]; + U32 descriptor_id; + U32 reserved5; + U32 metric_start; + U32 metric_pop; + MMIO_BAR_INFO_NODE mmio_bar_list[MAX_MMIO_BARS]; + U32 num_mmio_secondary_bar; + U32 group_id_offset_in_trigger_evt_desc; + U64 reserved7; + U64 reserved8; + EVENT_REG_NODE entries[]; +}; + +// +// Accessor macros for ECB node +// +#define ECB_version(x) ((x)->version) +#define ECB_num_entries(x) ((x)->num_entries) +#define ECB_group_id(x) ((x)->group_id) +#define ECB_num_events(x) ((x)->num_events) +#define ECB_cccr_start(x) ((x)->cccr_start) +#define ECB_cccr_pop(x) ((x)->cccr_pop) +#define ECB_escr_start(x) ((x)->escr_start) +#define ECB_escr_pop(x) ((x)->escr_pop) +#define ECB_data_start(x) ((x)->data_start) +#define ECB_data_pop(x) ((x)->data_pop) +#define ECB_metric_start(x) ((x)->metric_start) +#define ECB_metric_pop(x) ((x)->metric_pop) +#define ECB_pcidev_entry_node(x) ((x)->pcidev_entry_node) +#define ECB_num_pci_devices(x) ((x)->num_pci_devices) +#define ECB_pcidev_list_offset(x) ((x)->pcidev_list_offset) +#define ECB_pcidev_entry_list(x) ((x)->pcidev_entry_list) +#define ECB_flags(x) ((x)->flags) +#define ECB_pmu_timer_interval(x) ((x)->pmu_timer_interval) +#define ECB_size_of_allocation(x) ((x)->size_of_allocation) +#define ECB_group_offset(x) ((x)->group_offset) +#define ECB_device_type(x) ((x)->device_type) +#define ECB_dev_node(x) ((x)->dev_node) +#define ECB_operations(x) ((x)->operations) +#define ECB_descriptor_id(x) ((x)->descriptor_id) +#define ECB_entries(x) ((x)->entries) +#define ECB_num_mmio_secondary_bar(x) ((x)->num_mmio_secondary_bar) +#define ECB_mmio_bar_list(x, i) ((x)->mmio_bar_list[i]) +#define ECB_group_id_offset_in_trigger_evt_desc(x) \ + ((x)->group_id_offset_in_trigger_evt_desc) + +// for flag bit field +#define ECB_direct2core_bit 0x0001 +#define ECB_bl_bypass_bit 0x0002 +#define ECB_pci_id_offset_bit 0x0003 +#define ECB_pcu_ccst_debug 0x0004 +#define ECB_unc_evt_int_read_bit 0x0008 +#define ECB_unc_discovery_mode_bit 0x0010 + +#define ECB_unc_evt_intr_read_get(x) ((x)->flags & ECB_unc_evt_int_read_bit) +#define ECB_unc_evt_intr_read_set(x) ((x)->flags |= ECB_unc_evt_int_read_bit) +#define ECB_unc_evt_intr_read_clear(x) ((x)->flags &= ~ECB_unc_evt_int_read_bit) + +#define ECB_unc_discovery_mode_get(x) ((x)->flags & ECB_unc_discovery_mode_bit) +#define ECB_unc_discovery_mode_set(x) ((x)->flags |= ECB_unc_discovery_mode_bit) +#define ECB_unc_discovery_mode_clear(x) \ + ((x)->flags &= ~ECB_unc_discovery_mode_bit) + +#define ECB_VERSION 2 + +#define ECB_CONSTRUCT(x, num_entries, group_id, cccr_start, escr_start, \ + data_start, size_of_allocation) \ +{ \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_cccr_start((x)) = (cccr_start); \ + ECB_cccr_pop((x)) = 0; \ + 
ECB_escr_start((x)) = (escr_start); \ + ECB_escr_pop((x)) = 0; \ + ECB_data_start((x)) = (data_start); \ + ECB_data_pop((x)) = 0; \ + ECB_metric_start((x)) = 0; \ + ECB_metric_pop((x)) = 0; \ + ECB_num_pci_devices((x)) = 0; \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ +} + +#define ECB_CONSTRUCT2(x, num_entries, group_id, size_of_allocation) \ +{ \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_num_pci_devices((x)) = 0; \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ +} + +#define ECB_CONSTRUCT1(x, num_entries, group_id, cccr_start, escr_start, \ + data_start, num_pci_devices, size_of_allocation) \ +{ \ + ECB_num_entries((x)) = (num_entries); \ + ECB_group_id((x)) = (group_id); \ + ECB_cccr_start((x)) = (cccr_start); \ + ECB_cccr_pop((x)) = 0; \ + ECB_escr_start((x)) = (escr_start); \ + ECB_escr_pop((x)) = 0; \ + ECB_data_start((x)) = (data_start); \ + ECB_data_pop((x)) = 0; \ + ECB_metric_start((x)) = 0; \ + ECB_metric_pop((x)) = 0; \ + ECB_num_pci_devices((x)) = (num_pci_devices); \ + ECB_version((x)) = ECB_VERSION; \ + ECB_size_of_allocation((x)) = (size_of_allocation); \ +} + +// +// Accessor macros for ECB node entries +// +#define ECB_entries_reg_type(x, i) EVENT_REG_reg_type((ECB_entries(x)), (i)) +#define ECB_entries_event_id_index(x, i) \ + EVENT_REG_event_id_index((ECB_entries(x)), (i)) +#define ECB_entries_unit_id(x, i) EVENT_REG_unit_id((ECB_entries(x)), (i)) +#define ECB_entries_reg_package_id(x, i) \ + EVENT_REG_reg_package_id((ECB_entries(x)), (i)) +#define ECB_entries_counter_event_offset(x, i) \ + EVENT_REG_counter_event_offset((ECB_entries(x)), (i)) +#define ECB_entries_reg_id(x, i) EVENT_REG_reg_id((ECB_entries(x)), (i)) +#define ECB_entries_phys_addr(x, i) EVENT_REG_phys_addr((ECB_entries(x)), (i)) +#define ECB_entries_bar_index(x, i) EVENT_REG_bar_index((ECB_entries(x)), (i)) +#define ECB_entries_reg_prog_type(x, i) \ + EVENT_REG_reg_prog_type((ECB_entries(x)), (i)) +#define ECB_entries_reg_offset(x, i) EVENT_REG_offset((ECB_entries(x)), (i)) +#define ECB_entries_reg_data_size(x, i) \ + EVENT_REG_data_size((ECB_entries(x)), (i)) +#define ECB_entries_reg_bar_index(x, i) \ + EVENT_REG_bar_index((ECB_entries(x)), (i)) +#define ECB_entries_desc_id(x, i) EVENT_REG_desc_id((ECB_entries(x)), i) +#define ECB_entries_flags(x, i) EVENT_REG_flags((ECB_entries(x)), i) +#define ECB_entries_reg_order(x, i) EVENT_REG_reg_order((ECB_entries(x)), i) +#define ECB_entries_reg_value(x, i) EVENT_REG_reg_value((ECB_entries(x)), (i)) +#define ECB_entries_max_bits(x, i) EVENT_REG_max_bits((ECB_entries(x)), (i)) +#define ECB_entries_scheduled(x, i) EVENT_REG_scheduled((ECB_entries(x)), (i)) +#define ECB_entries_bit_position(x, i) \ + EVENT_REG_bit_position((ECB_entries(x)), (i)) +// PCI config-specific fields +#define ECB_entries_bus_no(x, i) EVENT_REG_bus_no((ECB_entries(x)), (i)) +#define ECB_entries_dev_no(x, i) EVENT_REG_dev_no((ECB_entries(x)), (i)) +#define ECB_entries_func_no(x, i) EVENT_REG_func_no((ECB_entries(x)), (i)) +#define ECB_entries_counter_type(x, i) \ + EVENT_REG_counter_type((ECB_entries(x)), (i)) +#define ECB_entries_event_scope(x, i) \ + EVENT_REG_event_scope((ECB_entries(x)), (i)) +#define ECB_entries_precise_get(x, i) \ + EVENT_REG_precise_get((ECB_entries(x)), (i)) +#define ECB_entries_global_get(x, i) EVENT_REG_global_get((ECB_entries(x)), (i)) +#define ECB_entries_uncore_get(x, i) EVENT_REG_uncore_get((ECB_entries(x)), (i)) +#define 
ECB_entries_uncore_q_rst_get(x, i) \ + EVENT_REG_uncore_q_rst_get((ECB_entries(x)), (i)) +#define ECB_entries_is_gp_reg_get(x, i) \ + EVENT_REG_is_gp_reg_get((ECB_entries(x)), (i)) +#define ECB_entries_lbr_value_get(x, i) \ + EVENT_REG_lbr_value_get((ECB_entries(x)), (i)) +#define ECB_entries_fixed_reg_get(x, i) \ + EVENT_REG_fixed_reg_get((ECB_entries(x)), (i)) +#define ECB_entries_is_multi_pkg_bit_set(x, i) \ + EVENT_REG_multi_pkg_evt_bit_get((ECB_entries(x)), (i)) +#define ECB_entries_clean_up_get(x, i) \ + EVENT_REG_clean_up_get((ECB_entries(x)), (i)) +#define ECB_entries_em_trigger_get(x, i) \ + EVENT_REG_em_trigger_get((ECB_entries(x)), (i)) +#define ECB_entries_branch_evt_get(x, i) \ + EVENT_REG_branch_evt_get((ECB_entries(x)), (i)) +#define ECB_entries_ebc_sampling_evt_get(x, i) \ + EVENT_REG_ebc_sampling_evt_get((ECB_entries(x)), (i)) +#define ECB_entries_unc_evt_intr_read_get(x, i) \ + EVENT_REG_unc_evt_intr_read_get((ECB_entries(x)), (i)) +#define ECB_entries_generic_status_set(x, i) \ + EVENT_REG_generic_status_set((ECB_entries(x)), (i)) +#define ECB_entries_generic_status_get(x, i) \ + EVENT_REG_generic_status_get((ECB_entries(x)), (i)) +#define ECB_entries_reg_rw_type(x, i) \ + EVENT_REG_reg_rw_type((ECB_entries(x)), (i)) +#define ECB_entries_collect_on_ctx_sw_get(x, i) \ + EVENT_REG_collect_on_ctx_sw_get((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_offset(x, i) \ + EVENT_REG_secondary_pci_offset_offset((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_shift(x, i) \ + EVENT_REG_secondary_pci_offset_shift((ECB_entries(x)), (i)) +#define ECB_entries_secondary_pci_offset_mask(x, i) \ + EVENT_REG_secondary_pci_offset_mask((ECB_entries(x)), (i)) +#define ECB_operations_operation_type(x, i) \ + PMU_OPER_operation_type((ECB_operations(x)), (i)) +#define ECB_operations_register_start(x, i) \ + PMU_OPER_register_start((ECB_operations(x)), (i)) +#define ECB_operations_register_len(x, i) \ + PMU_OPER_register_len((ECB_operations(x)), (i)) + +#define ECB_entries_core_event_id(x, i) \ + EVENT_REG_core_event_id((ECB_entries(x)), (i)) +#define ECB_entries_uncore_buffer_offset_in_package(x, i) \ + EVENT_REG_uncore_buffer_offset_in_package((ECB_entries(x)), (i)) +#define ECB_entries_uncore_buffer_offset_in_system(x, i) \ + EVENT_REG_uncore_buffer_offset_in_system((ECB_entries(x)), (i)) +#define ECB_entries_device_subtype(x, i) \ + EVENT_REG_device_subtype((ECB_entries(x)), (i)) +#define ECB_entries_driverless_dup_event(x, i) \ + EVENT_REG_driverless_dup_event((ECB_entries(x)), (i)) + +#define ECB_entries_aux_reg_id_to_read(x, i) \ + EVENT_REG_aux_reg_id_to_read((ECB_entries(x)), (i)) +#define ECB_entries_aux_read_mask(x, i) \ + EVENT_REG_aux_read_mask((ECB_entries(x)), (i)) +#define ECB_entries_aux_shift_index(x, i) \ + EVENT_REG_aux_shift_index((ECB_entries(x)), (i)) +#define ECB_SET_OPERATIONS(x, operation_type, start, len) \ + ECB_operations_operation_type(x, operation_type) = operation_type; \ + ECB_operations_register_start(x, operation_type) = start; \ + ECB_operations_register_len(x, operation_type) = len; + +#define ECB_mmio_bar_list_bus_no(x, i) \ + MMIO_BAR_bus_no(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_dev_no(x, i) \ + MMIO_BAR_dev_no(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_func_no(x, i) \ + MMIO_BAR_func_no(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_offset(x, i) \ + MMIO_BAR_offset(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_addr_size(x, i) \ + MMIO_BAR_addr_size(ECB_mmio_bar_list(x), (i)) +#define 
ECB_mmio_bar_list_map_size(x, i) \ + MMIO_BAR_map_size(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_bar_shift(x, i) \ + MMIO_BAR_bar_shift(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_bar_mask(x, i) \ + MMIO_BAR_bar_mask(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_base_mmio_offset(x, i) \ + MMIO_BAR_base_mmio_offset(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_physical_address(x, i) \ + MMIO_BAR_physical_address(ECB_mmio_bar_list(x), (i)) +#define ECB_mmio_bar_list_virtual_address(x, i) \ + MMIO_BAR_virtual_address(ECB_mmio_bar_list(x), (i)) + +// *************************************************************************** + +/*!\struct LBR_ENTRY_NODE_S + * \var etype TOS = 0; FROM = 1; TO = 2 + * \var type_index + * \var reg_id + */ + +typedef struct LBR_ENTRY_NODE_S LBR_ENTRY_NODE; +typedef LBR_ENTRY_NODE *LBR_ENTRY; + +struct LBR_ENTRY_NODE_S { + U16 etype; + U16 type_index; + U32 reg_id; +}; + +// +// Accessor macros for LBR entries +// +#define LBR_ENTRY_NODE_etype(lentry) ((lentry).etype) +#define LBR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) +#define LBR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct LBR_NODE_S + * \var num_entries - The number of entries + * \var entries - The entries in the list + * + * \brief Data structure to describe the LBR registers that need to be read + * + */ + +typedef struct LBR_NODE_S LBR_NODE; +typedef LBR_NODE *LBR; + +struct LBR_NODE_S { + U32 size; + U32 num_entries; + LBR_ENTRY_NODE entries[]; +}; + +// +// Accessor macros for LBR node +// +#define LBR_size(lbr) ((lbr)->size) +#define LBR_num_entries(lbr) ((lbr)->num_entries) +#define LBR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) +#define LBR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) +#define LBR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) + +// *************************************************************************** + +/*!\struct PWR_ENTRY_NODE_S + * \var etype none as yet + * \var type_index + * \var reg_id + */ + +typedef struct PWR_ENTRY_NODE_S PWR_ENTRY_NODE; +typedef PWR_ENTRY_NODE *PWR_ENTRY; + +struct PWR_ENTRY_NODE_S { + U16 etype; + U16 type_index; + U32 reg_id; +}; + +// +// Accessor macros for PWR entries +// +#define PWR_ENTRY_NODE_etype(lentry) ((lentry).etype) +#define PWR_ENTRY_NODE_type_index(lentry) ((lentry).type_index) +#define PWR_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id) + +// *************************************************************************** + +/*!\struct PWR_NODE_S + * \var num_entries - The number of entries + * \var entries - The entries in the list + * + * \brief Data structure to describe the PWR registers that need to be read + * + */ + +typedef struct PWR_NODE_S PWR_NODE; +typedef PWR_NODE * PWR; + +struct PWR_NODE_S { + U32 size; + U32 num_entries; + PWR_ENTRY_NODE entries[]; +}; + +// +// Accessor macros for PWR node +// +#define PWR_size(lbr) ((lbr)->size) +#define PWR_num_entries(lbr) ((lbr)->num_entries) +#define PWR_entries_etype(lbr, idx) ((lbr)->entries[idx].etype) +#define PWR_entries_type_index(lbr, idx) ((lbr)->entries[idx].type_index) +#define PWR_entries_reg_id(lbr, idx) ((lbr)->entries[idx].reg_id) + +// *************************************************************************** + +/*!\struct RO_ENTRY_NODE_S + * \var type - DEAR, IEAR, BTB. 
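+ *       (Note: RO_ENTRY_NODE_S below carries only a reg_id field; no
+ *       separate type member is defined in this version.)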
+ */
+
+typedef struct RO_ENTRY_NODE_S RO_ENTRY_NODE;
+typedef RO_ENTRY_NODE *RO_ENTRY;
+
+struct RO_ENTRY_NODE_S {
+	U32 reg_id;
+};
+
+//
+// Accessor macros for RO entries
+//
+#define RO_ENTRY_NODE_reg_id(lentry) ((lentry).reg_id)
+
+// ***************************************************************************
+
+/*!\struct RO_NODE_S
+ * \var size - The total size including header and entries.
+ * \var num_entries - The number of entries.
+ * \var entries - The entries in the list.
+ *
+ * \brief Data structure to describe the RO registers that need to be read.
+ *
+ */
+
+typedef struct RO_NODE_S RO_NODE;
+typedef RO_NODE *RO;
+
+struct RO_NODE_S {
+	U32 size;
+	U32 num_entries;
+	RO_ENTRY_NODE entries[];
+};
+
+typedef enum {
+	IPT_BUFFER_DEFAULT = 0,
+	IPT_BUFFER_SMALL = 1,
+	IPT_BUFFER_LARGE = 2,
+} IPT_BUFFER_SIZE_TYPE;
+
+//
+// Accessor macros for RO node
+//
+#define RO_size(ro) ((ro)->size)
+#define RO_num_entries(ro) ((ro)->num_entries)
+#define RO_entries_reg_id(ro, idx) ((ro)->entries[idx].reg_id)
+
+// ***************************************************************************
+
+/*!\struct IPT_CONFIG_NODE_S
+ * \var u1.switches - trace collection switches (branch, power, usr/os, cyc,
+ *                    mtc, tsc, psb, ptw and cr3 filtering controls)
+ * \var u2.buf_config - output buffer configuration (ring-buffer mode and
+ *                      IPT_BUFFER_SIZE_TYPE size class)
+ * \var cr3match - CR3 value to match when cr3filter is enabled
+ */
+
+typedef struct IPT_CONFIG_NODE_S IPT_CONFIG_NODE;
+typedef IPT_CONFIG_NODE *IPT_CONFIG;
+
+struct IPT_CONFIG_NODE_S {
+	union {
+		U64 switches;
+		struct {
+			U64 branch : 1;
+			U64 power : 1;
+			U64 usr : 1;
+			U64 os : 1;
+			U64 cyc : 1;
+			U64 cycthresh : 4;
+			U64 cr3filter : 1;
+			U64 mtc : 1;
+			U64 mtcfreq : 4;
+			U64 tsc : 1;
+			U64 psbfreq : 4;
+			U64 ptw : 1;
+			U64 reserved : 43;
+		} s1;
+	} u1;
+	union {
+		U64 buf_config;
+		struct {
+			U64 ringbuf : 1;
+			U64 bufsize : 2;
+			U64 reserved : 61;
+		} s2;
+	} u2;
+	U64 cr3match;
+	U64 reserved1;
+	U64 reserved2;
+	U64 reserved3;
+};
+
+//
+// Accessor macros for IPT config
+//
+#define IPT_CONFIG_branch(ptcfg) ((ptcfg)->u1.s1.branch)
+#define IPT_CONFIG_power(ptcfg) ((ptcfg)->u1.s1.power)
+#define IPT_CONFIG_usr(ptcfg) ((ptcfg)->u1.s1.usr)
+#define IPT_CONFIG_os(ptcfg) ((ptcfg)->u1.s1.os)
+#define IPT_CONFIG_cyc(ptcfg) ((ptcfg)->u1.s1.cyc)
+#define IPT_CONFIG_cycthresh(ptcfg) ((ptcfg)->u1.s1.cycthresh)
+#define IPT_CONFIG_cr3filter(ptcfg) ((ptcfg)->u1.s1.cr3filter)
+#define IPT_CONFIG_mtc(ptcfg) ((ptcfg)->u1.s1.mtc)
+#define IPT_CONFIG_mtcfreq(ptcfg) ((ptcfg)->u1.s1.mtcfreq)
+#define IPT_CONFIG_tsc(ptcfg) ((ptcfg)->u1.s1.tsc)
+#define IPT_CONFIG_psbfreq(ptcfg) ((ptcfg)->u1.s1.psbfreq)
+#define IPT_CONFIG_ptw(ptcfg) ((ptcfg)->u1.s1.ptw)
+#define IPT_CONFIG_ringbuf(ptcfg) ((ptcfg)->u2.s2.ringbuf)
+#define IPT_CONFIG_bufsize(ptcfg) ((ptcfg)->u2.s2.bufsize)
+#define IPT_CONFIG_cr3match(ptcfg) ((ptcfg)->cr3match)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h b/drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h
new file mode 100644
index 00000000000000..8d662b5171a987
--- /dev/null
+++ b/drivers/platform/x86/socperf/include/lwpmudrv_ioctl.h
@@ -0,0 +1,312 @@
+/* ***********************************************************************************************
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright (C) 2007-2021 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2007-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#ifndef _LWPMUDRV_IOCTL_H_ +#define _LWPMUDRV_IOCTL_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + + +//SEP Driver Operation defines +// +#define DRV_OPERATION_START 1 +#define DRV_OPERATION_STOP 2 +#define DRV_OPERATION_INIT_PMU 3 +#define DRV_OPERATION_GET_NORMALIZED_TSC 4 +#define DRV_OPERATION_TSC_SKEW_INFO 5 +#define DRV_OPERATION_PAUSE 6 +#define DRV_OPERATION_RESUME 7 +#define DRV_OPERATION_TERMINATE 8 +#define DRV_OPERATION_RESERVE 9 +#define DRV_OPERATION_VERSION 10 +#define DRV_OPERATION_SWITCH_GROUP 11 +#define DRV_OPERATION_GET_DRIVER_STATE 12 +#define DRV_OPERATION_INIT_UNCORE 13 +#define DRV_OPERATION_EM_GROUPS_UNCORE 14 +#define DRV_OPERATION_EM_CONFIG_NEXT_UNCORE 15 +#define DRV_OPERATION_READ_UNCORE_DATA 16 +#define DRV_OPERATION_STOP_MEM 17 +#define DRV_OPERATION_CREATE_MEM 18 +#define DRV_OPERATION_READ_MEM 19 +#define DRV_OPERATION_CHECK_STATUS 20 +#define DRV_OPERATION_TIMER_TRIGGER_READ 21 +#define DRV_OPERATION_INIT_DRIVER 22 +#define DRV_OPERATION_GET_UNCORE_TOPOLOGY 23 + +// IOCTL_SETUP +// + +#if defined(DRV_OS_WINDOWS) + +// +// NtDeviceIoControlFile IoControlCode values for this device. +// +// Warning: Remember that the low two bits of the code specify how the +// buffers are passed to the driver! +// +// 16 bit device type. 
12 bit function codes +#define LWPMUDRV_IOCTL_DEVICE_TYPE 0xA000 // values 0-32768 reserved for Microsoft +#define LWPMUDRV_IOCTL_FUNCTION 0x0A00 // values 0-2047 reserved for Microsoft + +// +// Basic CTL CODE macro to reduce typographical errors +// Use for FILE_READ_ACCESS +// +#define LWPMUDRV_CTL_READ_CODE(x) CTL_CODE(LWPMUDRV_IOCTL_DEVICE_TYPE, \ + LWPMUDRV_IOCTL_FUNCTION+(x), \ + METHOD_BUFFERED, \ + FILE_READ_ACCESS) + +#define LWPMUDRV_IOCTL_START LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_START) +#define LWPMUDRV_IOCTL_STOP LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_STOP) +#define LWPMUDRV_IOCTL_INIT_PMU LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_PMU) +#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_GET_NORMALIZED_TSC) +#define LWPMUDRV_IOCTL_TSC_SKEW_INFO LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TSC_SKEW_INFO) +#define LWPMUDRV_IOCTL_PAUSE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_PAUSE) +#define LWPMUDRV_IOCTL_RESUME LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_RESUME) +#define LWPMUDRV_IOCTL_TERMINATE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TERMINATE) +#define LWPMUDRV_IOCTL_RESERVE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_RESERVE) +#define LWPMUDRV_IOCTL_VERSION LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_VERSION) +#define LWPMUDRV_IOCTL_SWITCH_GROUP LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_SWITCH_GROUP) +#define LWPMUDRV_IOCTL_GET_DRIVER_STATE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_GET_DRIVER_STATE) +#define LWPMUDRV_IOCTL_INIT_UNCORE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_UNCORE) +#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_EM_GROUPS_UNCORE) +#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_EM_CONFIG_NEXT_UNCORE) +#define LWPMUDRV_IOCTL_READ_UNCORE_DATA LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_READ_UNCORE_DATA) +#define LWPMUDRV_IOCTL_STOP_MEM LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_STOP_MEM) +#define LWPMUDRV_IOCTL_CREATE_MEM LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_CREATE_MEM) +#define LWPMUDRV_IOCTL_READ_MEM LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_READ_MEM) +#define LWPMUDRV_IOCTL_CHECK_STATUS LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_CHECK_STATUS) +#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_TIMER_TRIGGER_READ) +#define LWPMUDRV_IOCTL_INIT_DRIVER LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_INIT_DRIVER) +#define LWPMUDRV_IOCTL_GET_UNCORE_TOPOLOGY LWPMUDRV_CTL_READ_CODE(DRV_OPERATION_GET_UNCORE_TOPOLOGY) + +#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined (DRV_OS_ANDROID) +// IOCTL_ARGS +typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; +typedef IOCTL_ARGS_NODE *IOCTL_ARGS; +struct IOCTL_ARGS_NODE_S { + U64 len_drv_to_usr; + U64 len_usr_to_drv; + char *buf_drv_to_usr; + char *buf_usr_to_drv; +}; + +// COMPAT IOCTL_ARGS +#if defined (CONFIG_COMPAT) && defined(DRV_EM64T) +typedef struct IOCTL_COMPAT_ARGS_NODE_S IOCTL_COMPAT_ARGS_NODE; +typedef IOCTL_COMPAT_ARGS_NODE *IOCTL_COMPAT_ARGS; +struct IOCTL_COMPAT_ARGS_NODE_S { + U64 len_drv_to_usr; + U64 len_usr_to_drv; + compat_uptr_t buf_drv_to_usr; + compat_uptr_t buf_usr_to_drv; +}; +#endif + +#define LWPMU_IOC_MAGIC 99 + +// IOCTL_SETUP +// +#define LWPMUDRV_IOCTL_START _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_START) +#define LWPMUDRV_IOCTL_STOP _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP) +#define LWPMUDRV_IOCTL_INIT_PMU _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_PMU, IOCTL_ARGS) +#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_NORMALIZED_TSC, int) +#define LWPMUDRV_IOCTL_TSC_SKEW_INFO _IOW(LWPMU_IOC_MAGIC, 
DRV_OPERATION_TSC_SKEW_INFO, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_PAUSE _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_PAUSE)
+#define LWPMUDRV_IOCTL_RESUME _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_RESUME)
+#define LWPMUDRV_IOCTL_TERMINATE _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TERMINATE)
+#define LWPMUDRV_IOCTL_RESERVE _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_RESERVE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_VERSION _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_VERSION, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_SWITCH_GROUP _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_SWITCH_GROUP)
+#define LWPMUDRV_IOCTL_GET_DRIVER_STATE _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_DRIVER_STATE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_INIT_UNCORE _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_GROUPS_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_EM_CONFIG_NEXT_UNCORE, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_READ_UNCORE_DATA _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_UNCORE_DATA, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_STOP_MEM _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP_MEM)
+#define LWPMUDRV_IOCTL_CREATE_MEM _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_CREATE_MEM, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_READ_MEM _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_MEM, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_CHECK_STATUS _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_CHECK_STATUS, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TIMER_TRIGGER_READ)
+#define LWPMUDRV_IOCTL_INIT_DRIVER _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_DRIVER, IOCTL_ARGS)
+#define LWPMUDRV_IOCTL_GET_UNCORE_TOPOLOGY _IOW (LWPMU_IOC_MAGIC, DRV_OPERATION_GET_UNCORE_TOPOLOGY, IOCTL_ARGS)
+
+#elif defined(DRV_OS_FREEBSD)
+
+// IOCTL_ARGS
+typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE;
+typedef IOCTL_ARGS_NODE *IOCTL_ARGS;
+struct IOCTL_ARGS_NODE_S {
+	U64 len_drv_to_usr;
+	char *buf_drv_to_usr;
+	U64 len_usr_to_drv;
+	char *buf_usr_to_drv;
+};
+
+// IOCTL_SETUP
+//
+#define LWPMU_IOC_MAGIC 99
+
+/* FreeBSD is very strict about IOR/IOW/IOWR specifications on IOCTLs.
+ * Since these IOCTLs all pass down the real read/write buffer lengths
+ * and addresses inside of an IOCTL_ARGS_NODE data structure, we
+ * need to specify all of these as _IOW so that the kernel will
+ * view it as userspace passing the data to the driver, rather than
+ * the reverse. There are also some cases where Linux passes
+ * a smaller type than IOCTL_ARGS_NODE, even though it's really
+ * passing an IOCTL_ARGS_NODE. These needed to be fixed for FreeBSD.
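+ *
+ * For example, LWPMUDRV_IOCTL_VERSION is _IOR(..., IOCTL_ARGS) on Linux
+ * but _IOW(..., IOCTL_ARGS_NODE) here, and GET_NORMALIZED_TSC passes a
+ * plain int on Linux versus the full IOCTL_ARGS_NODE on FreeBSD: the
+ * argument structure always travels user-to-kernel, even when the buffer
+ * it points at is filled in by the driver.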
+ */ +#define LWPMUDRV_IOCTL_START _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_START) +#define LWPMUDRV_IOCTL_STOP _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP) +#define LWPMUDRV_IOCTL_INIT_PMU _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_PMU) +#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_NORMALIZED_TSC, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_TSC_SKEW_INFO _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_TSC_SKEW_INFO, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_PAUSE _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_PAUSE) +#define LWPMUDRV_IOCTL_RESUME _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_RESUME) +#define LWPMUDRV_IOCTL_TERMINATE _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TERMINATE) +#define LWPMUDRV_IOCTL_RESERVE _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_RESERVE, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_VERSION _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_VERSION, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_SWITCH_GROUP _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_SWITCH_GROUP) +#define LWPMUDRV_IOCTL_GET_DRIVER_STATE _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_DRIVER_STATE, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_INIT_UNCORE _IOW (LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_UNCORE, IOCTL_ARGS) +#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE _IOW (LWPMU_IOC_MAGIC, DRV_OPERATION_EM_GROUPS_UNCORE, IOCTL_ARGS) +#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE _IOW (LWPMU_IOC_MAGIC, DRV_OPERATION_EM_CONFIG_NEXT_UNCORE, IOCTL_ARGS) +#define LWPMUDRV_IOCTL_READ_UNCORE_DATA _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_UNCORE_DATA, IOCTL_ARGS) +#define LWPMUDRV_IOCTL_STOP_MEM _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_STOP_MEM) +#define LWPMUDRV_IOCTL_CREATE_MEM _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_CREATE_MEM, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_READ_MEM _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_READ_MEM, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_CHECK_STATUS _IOR(LWPMU_IOC_MAGIC, DRV_OPERATION_CHECK_STATUS, IOCTL_ARGS_NODE) +#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ _IO (LWPMU_IOC_MAGIC, DRV_OPERATION_TIMER_TRIGGER_READ) +#define LWPMUDRV_IOCTL_INIT_DRIVER _IOW (LWPMU_IOC_MAGIC, DRV_OPERATION_INIT_DRIVER, IOCTL_ARGS) +#define LWPMUDRV_IOCTL_GET_UNCORE_TOPOLOGY _IOW(LWPMU_IOC_MAGIC, DRV_OPERATION_GET_UNCORE_TOPOLOGY, IOCTL_ARGS) + +#elif defined(DRV_OS_MAC) + +// IOCTL_ARGS +typedef struct IOCTL_ARGS_NODE_S IOCTL_ARGS_NODE; +typedef IOCTL_ARGS_NODE *IOCTL_ARGS; +struct IOCTL_ARGS_NODE_S { + U64 len_drv_to_usr; + char *buf_drv_to_usr; + U64 len_usr_to_drv; + char *buf_usr_to_drv; + U32 command; +}; + +typedef struct CPU_ARGS_NODE_S CPU_ARGS_NODE; +typedef CPU_ARGS_NODE *CPU_ARGS; +struct CPU_ARGS_NODE_S { + U64 len_drv_to_usr; + char *buf_drv_to_usr; + U32 command; + U32 CPU_ID; + U32 BUCKET_ID; +}; + +// IOCTL_SETUP +// +#define LWPMU_IOC_MAGIC 99 +#define OS_SUCCESS 0 +#define OS_STATUS int +#define OS_ILLEGAL_IOCTL -ENOTTY +#define OS_NO_MEM -ENOMEM +#define OS_FAULT -EFAULT + +// Task file Opcodes. +// keeping the definitions as IOCTL but in MAC OSX +// these are really OpCodes consumed by Execute command. 
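+// (Illustrative: on this platform a user-space client hands one of the
+// opcodes below, e.g. LWPMUDRV_IOCTL_START, to the driver's Execute
+// command path instead of issuing an ioctl(2) as on the other OSes.)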
+#define LWPMUDRV_IOCTL_START DRV_OPERATION_START +#define LWPMUDRV_IOCTL_STOP DRV_OPERATION_STOP +#define LWPMUDRV_IOCTL_INIT_PMU DRV_OPERATION_INIT_PMU +#define LWPMUDRV_IOCTL_GET_NORMALIZED_TSC DRV_OPERATION_GET_NORMALIZED_TSC +#define LWPMUDRV_IOCTL_TSC_SKEW_INFO DRV_OPERATION_TSC_SKEW_INFO +#define LWPMUDRV_IOCTL_PAUSE DRV_OPERATION_PAUSE +#define LWPMUDRV_IOCTL_RESUME DRV_OPERATION_RESUME +#define LWPMUDRV_IOCTL_TERMINATE DRV_OPERATION_TERMINATE +#define LWPMUDRV_IOCTL_RESERVE DRV_OPERATION_RESERVE +#define LWPMUDRV_IOCTL_VERSION DRV_OPERATION_VERSION +#define LWPMUDRV_IOCTL_SWITCH_GROUP DRV_OPERATION_SWITCH_GROUP +#define LWPMUDRV_IOCTL_GET_DRIVER_STATE DRV_OPERATION_GET_DRIVER_STATE +#define LWPMUDRV_IOCTL_INIT_UNCORE DRV_OPERATION_INIT_UNCORE +#define LWPMUDRV_IOCTL_EM_GROUPS_UNCORE DRV_OPERATION_EM_GROUPS_UNCORE +#define LWPMUDRV_IOCTL_EM_CONFIG_NEXT_UNCORE DRV_OPERATION_EM_CONFIG_NEXT_UNCORE +#define LWPMUDRV_IOCTL_READ_UNCORE_DATA DRV_OPERATION_READ_UNCORE_DATA +#define LWPMUDRV_IOCTL_STOP_MEM DRV_OPERATION_STOP_MEM +#define LWPMUDRV_IOCTL_CREATE_MEM DRV_OPERATION_CREATE_MEM +#define LWPMUDRV_IOCTL_READ_MEM DRV_OPERATION_READ_MEM +#define LWPMUDRV_IOCTL_CHECK_STATUS DRV_OPERATION_CHECK_STATUS +#define LWPMUDRV_IOCTL_TIMER_TRIGGER_READ DRV_OPERATION_TIMER_TRIGGER_READ +#define LWPMUDRV_IOCTL_INIT_DRIVER DRV_OPERATION_INIT_DRIVER +#define LWPMUDRV_IOCTL_GET_UNCORE_TOPOLOGY DRV_OPERATION_GET_UNCORE_TOPOLOGY + +// This is only for MAC OSX +#define LWPMUDRV_IOCTL_SET_OSX_VERSION 998 +#define LWPMUDRV_IOCTL_PROVIDE_FUNCTION_PTRS 999 + +#else +#error "unknown OS in lwpmudrv_ioctl.h" +#endif + +#if defined(__cplusplus) +} +#endif + +#endif + diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_struct.h b/drivers/platform/x86/socperf/include/lwpmudrv_struct.h new file mode 100644 index 00000000000000..0fe66bc955f070 --- /dev/null +++ b/drivers/platform/x86/socperf/include/lwpmudrv_struct.h @@ -0,0 +1,2768 @@ +/* + * Copyright (C) 2007 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. 
+*/ + +#ifndef _LWPMUDRV_STRUCT_UTILS_H_ +#define _LWPMUDRV_STRUCT_UTILS_H_ + +#if defined(DRV_OS_WINDOWS) +#pragma warning(disable : 4100) +#pragma warning(disable : 4127) +#pragma warning(disable : 4214) +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +// processor execution modes +#define MODE_UNKNOWN 99 +// the following defines must start at 0 +#define MODE_64BIT 3 +#define MODE_32BIT 2 +#define MODE_16BIT 1 +#define MODE_V86 0 + +// sampling methods +#define SM_RTC 2020 // real time clock +#define SM_VTD 2021 // OS Virtual Timer Device +#define SM_NMI 2022 // non-maskable interrupt time based +#define SM_EBS 2023 // event based sampling +#define SM_EBC 2024 // event based counting + +// sampling mechanism bitmap definitions +#define INTERRUPT_RTC 0x1 +#define INTERRUPT_VTD 0x2 +#define INTERRUPT_NMI 0x4 +#define INTERRUPT_EBS 0x8 + +// Device types +#define DEV_CORE 0x01 +#define DEV_UNC 0x02 + +// eflags defines +#define EFLAGS_VM 0x00020000 // V86 mode +#define EFLAGS_IOPL0 0 +#define EFLAGS_IOPL1 0x00001000 +#define EFLAGS_IOPL2 0x00002000 +#define EFLAGS_IOPL3 0x00003000 +#define MAX_EMON_GROUPS 1000 +#define MAX_PCI_BUSNO 256 +#define MAX_DEVICES 30 +#define MAX_REGS 64 +#define MAX_EMON_GROUPS 1000 +#define MAX_PCI_DEVNO 32 +#define MAX_PCI_FUNCNO 8 +#define MAX_PCI_DEVUNIT 16 +#define MAX_TURBO_VALUES 32 +#define REG_BIT_MASK 0xFFFFFFFFFFFFFFFFULL +#define MAX_DISCOVERY_UNCORE_UNITS 1024 // Total units across all uncore devices + +extern float freq_multiplier; + +// Enumeration for invoking dispatch on multiple cpus or not +typedef enum { DRV_MULTIPLE_INSTANCE = 0, DRV_SINGLE_INSTANCE } DRV_PROG_TYPE; + +typedef struct DRV_CONFIG_NODE_S DRV_CONFIG_NODE; +typedef DRV_CONFIG_NODE *DRV_CONFIG; + +struct DRV_CONFIG_NODE_S { + U32 size; + U16 version; + U16 ipt_mode; + U32 num_events; + U32 num_chipset_events; + U32 chipset_offset; + S32 seed_name_len; + union { + S8 *seed_name; + U64 dummy1; + } u1; + union { + S8 *cpu_mask; + U64 dummy2; + } u2; + union { + U64 collection_config; + struct { + U64 start_paused : 1; + U64 counting_mode : 1; + U64 enable_chipset : 1; + U64 enable_gfx : 1; + U64 enable_pwr : 1; + U64 emon_mode : 1; + U64 debug_inject : 1; + U64 virt_phys_translation : 1; + U64 enable_p_state : 1; + U64 enable_cp_mode : 1; + U64 read_pstate_msrs : 1; + U64 use_pcl : 1; + U64 enable_ebc : 1; + U64 enable_tbc : 1; + U64 ds_area_available : 1; + U64 per_cpu_tsc : 1; + U64 mixed_ebc_available : 1; + U64 hetero_supported : 1; + U64 rdt_auto_rmid : 1; + U64 enable_ipt : 1; + U64 enable_sideband : 1; + U64 reserved_field1 : 43; + } s1; + } u3; + U64 target_pid; + U32 os_of_interest; + U16 unc_timer_interval; + U16 unc_em_factor; + S32 p_state_trigger_index; + DRV_BOOL multi_pebs_enabled; + S32 emon_timer_interval; + DRV_BOOL unc_collect_in_intr_enabled; + U32 emon_driverless_mux_interval; + U32 reserved1; + U64 reserved2; + U64 reserved3; +}; + +#define DRV_CONFIG_size(cfg) ((cfg)->size) +#define DRV_CONFIG_version(cfg) ((cfg)->version) +#define DRV_CONFIG_num_events(cfg) ((cfg)->num_events) +#define DRV_CONFIG_num_chipset_events(cfg) ((cfg)->num_chipset_events) +#define DRV_CONFIG_chipset_offset(cfg) ((cfg)->chipset_offset) + +#define DRV_CONFIG_seed_name(cfg) ((cfg)->u1.seed_name) +#define DRV_CONFIG_seed_name_len(cfg) ((cfg)->seed_name_len) +#define DRV_CONFIG_cpu_mask(cfg) ((cfg)->u2.cpu_mask) +#define DRV_CONFIG_start_paused(cfg) ((cfg)->u3.s1.start_paused) +#define DRV_CONFIG_counting_mode(cfg) ((cfg)->u3.s1.counting_mode) +#define 
DRV_CONFIG_enable_chipset(cfg) ((cfg)->u3.s1.enable_chipset) +#define DRV_CONFIG_enable_gfx(cfg) ((cfg)->u3.s1.enable_gfx) +#define DRV_CONFIG_enable_pwr(cfg) ((cfg)->u3.s1.enable_pwr) +#define DRV_CONFIG_emon_mode(cfg) ((cfg)->u3.s1.emon_mode) +#define DRV_CONFIG_debug_inject(cfg) ((cfg)->u3.s1.debug_inject) +#define DRV_CONFIG_virt_phys_translation(cfg) ((cfg)->u3.s1.virt_phys_translation) +#define DRV_CONFIG_enable_p_state(cfg) ((cfg)->u3.s1.enable_p_state) +#define DRV_CONFIG_enable_cp_mode(cfg) ((cfg)->u3.s1.enable_cp_mode) +#define DRV_CONFIG_read_pstate_msrs(cfg) ((cfg)->u3.s1.read_pstate_msrs) +#define DRV_CONFIG_use_pcl(cfg) ((cfg)->u3.s1.use_pcl) +#define DRV_CONFIG_event_based_counts(cfg) ((cfg)->u3.s1.enable_ebc) +#define DRV_CONFIG_timer_based_counts(cfg) ((cfg)->u3.s1.enable_tbc) +#define DRV_CONFIG_ds_area_available(cfg) ((cfg)->u3.s1.ds_area_available) +#define DRV_CONFIG_per_cpu_tsc(cfg) ((cfg)->u3.s1.per_cpu_tsc) +#define DRV_CONFIG_mixed_ebc_available(cfg) ((cfg)->u3.s1.mixed_ebc_available) +#define DRV_CONFIG_hetero_supported(cfg) ((cfg)->u3.s1.hetero_supported) +#define DRV_CONFIG_rdt_auto_rmid(cfg) ((cfg)->u3.s1.rdt_auto_rmid) +#define DRV_CONFIG_enable_ipt(cfg) ((cfg)->u3.s1.enable_ipt) +#define DRV_CONFIG_enable_sideband(cfg) ((cfg)->u3.s1.enable_sideband) +#define DRV_CONFIG_target_pid(cfg) ((cfg)->target_pid) +#define DRV_CONFIG_os_of_interest(cfg) ((cfg)->os_of_interest) +#define DRV_CONFIG_unc_timer_interval(cfg) ((cfg)->unc_timer_interval) +#define DRV_CONFIG_unc_em_factor(cfg) ((cfg)->unc_em_factor) +#define DRV_CONFIG_p_state_trigger_index(cfg) ((cfg)->p_state_trigger_index) +#define DRV_CONFIG_multi_pebs_enabled(cfg) ((cfg)->multi_pebs_enabled) +#define DRV_CONFIG_emon_timer_interval(cfg) ((cfg)->emon_timer_interval) +#define DRV_CONFIG_unc_collect_in_intr_enabled(cfg) \ + ((cfg)->unc_collect_in_intr_enabled) +#define DRV_CONFIG_ipt_mode(cfg) ((cfg)->ipt_mode) +#define DRV_CONFIG_emon_driverless_mux_interval(cfg) \ + ((cfg)->emon_driverless_mux_interval) + +#define DRV_CONFIG_VERSION 1 + +typedef struct DEV_CONFIG_NODE_S DEV_CONFIG_NODE; +typedef DEV_CONFIG_NODE *DEV_CONFIG; + +struct DEV_CONFIG_NODE_S { + U16 size; + U16 version; + U32 dispatch_id; + U32 pebs_mode; + U32 pebs_record_num; + U32 results_offset; // this is to store the offset for this device's results + U32 max_gp_counters; + U32 device_type; + U32 core_type; + union { + U64 enable_bit_fields; + struct { + U64 pebs_capture : 1; + U64 collect_lbrs : 1; + U64 collect_callstacks : 1; + U64 collect_kernel_callstacks : 1; + U64 latency_capture : 1; + U64 power_capture : 1; + U64 htoff_mode : 1; + U64 eventing_ip_capture : 1; + U64 hle_capture : 1; + U64 precise_ip_lbrs : 1; + U64 store_lbrs : 1; + U64 tsc_capture : 1; + U64 enable_perf_metrics : 1; + U64 enable_adaptive_pebs : 1; + U64 apebs_collect_mem_info : 1; + U64 apebs_collect_gpr : 1; + U64 apebs_collect_xmm : 1; + U64 apebs_collect_lbrs : 1; + U64 collect_fixed_counter_pebs : 1; + U64 collect_os_callstacks : 1; + U64 enable_arch_lbrs : 1; + U64 reserved_field1 : 43; + } s1; + } u1; + U32 emon_unc_offset[MAX_EMON_GROUPS]; + U32 ebc_group_id_offset; + U8 num_perf_metrics; + U8 num_lbr_entries; + U16 emon_perf_metrics_offset; + U32 device_scope; + U32 num_events; + U16 device_index; + U16 reserved1; + U32 reserved2; + U64 reserved3; + U64 reserved4; +}; + +#define DEV_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id) +#define DEV_CONFIG_pebs_mode(cfg) ((cfg)->pebs_mode) +#define DEV_CONFIG_pebs_record_num(cfg) ((cfg)->pebs_record_num) +#define 
+
+typedef struct DEV_CONFIG_NODE_S DEV_CONFIG_NODE;
+typedef DEV_CONFIG_NODE *DEV_CONFIG;
+
+struct DEV_CONFIG_NODE_S {
+        U16 size;
+        U16 version;
+        U32 dispatch_id;
+        U32 pebs_mode;
+        U32 pebs_record_num;
+        U32 results_offset; // this is to store the offset for this device's results
+        U32 max_gp_counters;
+        U32 device_type;
+        U32 core_type;
+        union {
+                U64 enable_bit_fields;
+                struct {
+                        U64 pebs_capture : 1;
+                        U64 collect_lbrs : 1;
+                        U64 collect_callstacks : 1;
+                        U64 collect_kernel_callstacks : 1;
+                        U64 latency_capture : 1;
+                        U64 power_capture : 1;
+                        U64 htoff_mode : 1;
+                        U64 eventing_ip_capture : 1;
+                        U64 hle_capture : 1;
+                        U64 precise_ip_lbrs : 1;
+                        U64 store_lbrs : 1;
+                        U64 tsc_capture : 1;
+                        U64 enable_perf_metrics : 1;
+                        U64 enable_adaptive_pebs : 1;
+                        U64 apebs_collect_mem_info : 1;
+                        U64 apebs_collect_gpr : 1;
+                        U64 apebs_collect_xmm : 1;
+                        U64 apebs_collect_lbrs : 1;
+                        U64 collect_fixed_counter_pebs : 1;
+                        U64 collect_os_callstacks : 1;
+                        U64 enable_arch_lbrs : 1;
+                        U64 reserved_field1 : 43;
+                } s1;
+        } u1;
+        U32 emon_unc_offset[MAX_EMON_GROUPS];
+        U32 ebc_group_id_offset;
+        U8 num_perf_metrics;
+        U8 num_lbr_entries;
+        U16 emon_perf_metrics_offset;
+        U32 device_scope;
+        U32 num_events;
+        U16 device_index;
+        U16 reserved1;
+        U32 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+};
+
+#define DEV_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id)
+#define DEV_CONFIG_pebs_mode(cfg) ((cfg)->pebs_mode)
+#define DEV_CONFIG_pebs_record_num(cfg) ((cfg)->pebs_record_num)
+#define DEV_CONFIG_results_offset(cfg) ((cfg)->results_offset)
+#define DEV_CONFIG_max_gp_counters(cfg) ((cfg)->max_gp_counters)
+
+#define DEV_CONFIG_device_type(cfg) ((cfg)->device_type)
+#define DEV_CONFIG_core_type(cfg) ((cfg)->core_type)
+
+#define DEV_CONFIG_pebs_capture(cfg) ((cfg)->u1.s1.pebs_capture)
+#define DEV_CONFIG_collect_lbrs(cfg) ((cfg)->u1.s1.collect_lbrs)
+#define DEV_CONFIG_collect_callstacks(cfg) ((cfg)->u1.s1.collect_callstacks)
+#define DEV_CONFIG_collect_kernel_callstacks(cfg) \
+        ((cfg)->u1.s1.collect_kernel_callstacks)
+#define DEV_CONFIG_latency_capture(cfg) ((cfg)->u1.s1.latency_capture)
+#define DEV_CONFIG_power_capture(cfg) ((cfg)->u1.s1.power_capture)
+#define DEV_CONFIG_htoff_mode(cfg) ((cfg)->u1.s1.htoff_mode)
+#define DEV_CONFIG_eventing_ip_capture(cfg) ((cfg)->u1.s1.eventing_ip_capture)
+#define DEV_CONFIG_hle_capture(cfg) ((cfg)->u1.s1.hle_capture)
+#define DEV_CONFIG_precise_ip_lbrs(cfg) ((cfg)->u1.s1.precise_ip_lbrs)
+#define DEV_CONFIG_store_lbrs(cfg) ((cfg)->u1.s1.store_lbrs)
+#define DEV_CONFIG_tsc_capture(cfg) ((cfg)->u1.s1.tsc_capture)
+#define DEV_CONFIG_enable_perf_metrics(cfg) ((cfg)->u1.s1.enable_perf_metrics)
+#define DEV_CONFIG_enable_adaptive_pebs(cfg) ((cfg)->u1.s1.enable_adaptive_pebs)
+#define DEV_CONFIG_apebs_collect_mem_info(cfg) \
+        ((cfg)->u1.s1.apebs_collect_mem_info)
+#define DEV_CONFIG_apebs_collect_gpr(cfg) ((cfg)->u1.s1.apebs_collect_gpr)
+#define DEV_CONFIG_apebs_collect_xmm(cfg) ((cfg)->u1.s1.apebs_collect_xmm)
+#define DEV_CONFIG_apebs_collect_lbrs(cfg) ((cfg)->u1.s1.apebs_collect_lbrs)
+#define DEV_CONFIG_collect_fixed_counter_pebs(cfg) \
+        ((cfg)->u1.s1.collect_fixed_counter_pebs)
+#define DEV_CONFIG_collect_os_callstacks(cfg) ((cfg)->u1.s1.collect_os_callstacks)
+#define DEV_CONFIG_enable_arch_lbrs(cfg) ((cfg)->u1.s1.enable_arch_lbrs)
+#define DEV_CONFIG_enable_bit_fields(cfg) ((cfg)->u1.enable_bit_fields)
+#define DEV_CONFIG_emon_unc_offset(cfg, grp_num) ((cfg)->emon_unc_offset[grp_num])
+#define DEV_CONFIG_ebc_group_id_offset(cfg) ((cfg)->ebc_group_id_offset)
+#define DEV_CONFIG_num_perf_metrics(cfg) ((cfg)->num_perf_metrics)
+#define DEV_CONFIG_num_lbr_entries(cfg) ((cfg)->num_lbr_entries)
+#define DEV_CONFIG_emon_perf_metrics_offset(cfg) ((cfg)->emon_perf_metrics_offset)
+#define DEV_CONFIG_device_scope(cfg) ((cfg)->device_scope)
+#define DEV_CONFIG_num_events(cfg) ((cfg)->num_events)
+#define DEV_CONFIG_device_index(cfg) ((cfg)->device_index)
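+
+/*
+ * Illustrative sketch (editorial, not part of the SEP API): the adaptive
+ * PEBS bits above only make sense together, so a consumer might validate a
+ * DEV_CONFIG like this. The helper is hypothetical.
+ *
+ *     static DRV_BOOL
+ *     dev_config_apebs_is_consistent(DEV_CONFIG pcfg)
+ *     {
+ *             if (DEV_CONFIG_enable_adaptive_pebs(pcfg)) {
+ *                     return TRUE;
+ *             }
+ *             // No apebs_collect_* flag may be set without adaptive PEBS
+ *             return !DEV_CONFIG_apebs_collect_mem_info(pcfg) &&
+ *                    !DEV_CONFIG_apebs_collect_gpr(pcfg) &&
+ *                    !DEV_CONFIG_apebs_collect_xmm(pcfg) &&
+ *                    !DEV_CONFIG_apebs_collect_lbrs(pcfg);
+ *     }
+ */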
+
+typedef struct DEV_UNC_CONFIG_NODE_S DEV_UNC_CONFIG_NODE;
+typedef DEV_UNC_CONFIG_NODE *DEV_UNC_CONFIG;
+
+struct DEV_UNC_CONFIG_NODE_S {
+        U16 size;
+        U16 version;
+        U32 dispatch_id;
+        U32 results_offset;
+        U32 device_type;
+        U32 device_scope;
+        U32 num_events;
+        U32 emon_unc_offset[MAX_EMON_GROUPS];
+        U16 device_index;
+        U16 num_units;
+        U32 device_with_intr_events; // contains event(s) to read at trigger event sampling
+        U32 num_module_events;
+        U32 reserved2;
+        U64 reserved3;
+};
+
+#define DEV_UNC_CONFIG_dispatch_id(cfg) ((cfg)->dispatch_id)
+#define DEV_UNC_CONFIG_results_offset(cfg) ((cfg)->results_offset)
+#define DEV_UNC_CONFIG_emon_unc_offset(cfg, grp_num) \
+        ((cfg)->emon_unc_offset[grp_num])
+#define DEV_UNC_CONFIG_device_type(cfg) ((cfg)->device_type)
+#define DEV_UNC_CONFIG_device_scope(cfg) ((cfg)->device_scope)
+#define DEV_UNC_CONFIG_num_events(cfg) ((cfg)->num_events)
+#define DEV_UNC_CONFIG_device_index(cfg) ((cfg)->device_index)
+#define DEV_UNC_CONFIG_num_units(cfg) ((cfg)->num_units)
+#define DEV_UNC_CONFIG_device_with_intr_events(cfg) \
+        ((cfg)->device_with_intr_events)
+
+/*
+ * X86 processor code descriptor
+ */
+typedef struct CodeDescriptor_s {
+        union {
+                U32 lowWord; // low dword of descriptor
+                struct { // low broken out by fields
+                        U16 limitLow; // segment limit 15:00
+                        U16 baseLow; // segment base 15:00
+                } s1;
+        } u1;
+        union {
+                U32 highWord; // high word of descriptor
+                struct { // high broken out by bit fields
+                        U32 baseMid : 8; // base 23:16
+                        U32 accessed : 1; // accessed
+                        U32 readable : 1; // readable
+                        U32 conforming : 1; // conforming code segment
+                        U32 oneOne : 2; // always 11
+                        U32 dpl : 2; // Dpl
+                        U32 pres : 1; // present bit
+                        U32 limitHi : 4; // limit 19:16
+                        U32 sys : 1; // available for use by system
+                        U32 reserved_0 : 1; // reserved, always 0
+                        U32 default_size : 1; // default operation size (1=32bit, 0=16bit)
+                        U32 granularity : 1; // granularity (1=32 bit, 0=20 bit)
+                        U32 baseHi : 8; // base hi 31:24
+                } s2;
+        } u2;
+} CodeDescriptor;
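+
+/*
+ * Illustrative sketch (editorial): reassembling the linear segment base and
+ * limit from the descriptor fields above, per the usual x86 descriptor
+ * layout. The helper names are hypothetical.
+ *
+ *     static U32
+ *     code_descriptor_base(const CodeDescriptor *csd)
+ *     {
+ *             return (U32)csd->u1.s1.baseLow |
+ *                    ((U32)csd->u2.s2.baseMid << 16) |
+ *                    ((U32)csd->u2.s2.baseHi << 24);
+ *     }
+ *
+ *     static U32
+ *     code_descriptor_limit(const CodeDescriptor *csd)
+ *     {
+ *             U32 limit = (U32)csd->u1.s1.limitLow |
+ *                         ((U32)csd->u2.s2.limitHi << 16);
+ *             // With granularity set, the limit is in 4 KB pages
+ *             return csd->u2.s2.granularity ? ((limit << 12) | 0xFFF) : limit;
+ *     }
+ */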
+
+/*
+ * Module record. These are emitted whenever a DLL or EXE is loaded or unloaded.
+ * The filename fields may be 0 on an unload. The records represent a module for a
+ * certain span of time, delineated by the load / unload sample counts.
+ * Note:
+ * The structure contains 64 bit fields which may cause the compiler to pad the
+ * length of the structure to an 8 byte boundary.
+ */
+typedef struct ModuleRecord_s {
+        U16 recLength; // total length of this record (including this length,
+                       // always U32 multiple) output from sampler is variable
+                       // length (pathname at end of record) sampfile builder moves
+                       // path names to a separate "literal pool" area
+                       // so that these records become fixed length, and can be treated
+                       // as an array see modrecFixedLen in header
+
+        U16 segmentType : 2; // V86, 16, 32, 64 (see MODE_ defines), maybe inaccurate for Win95
+                             // .. a 16 bit module may become a 32 bit module, inferred by
+                             // ..looking at 1st sample record that matches the module selector
+        U16 loadEvent : 1; // 0 for load, 1 for unload
+        U16 processed : 1; // 0 for load, 1 for unload
+        U16 reserved0 : 12;
+
+        U16 selector; // code selector or V86 segment
+        U16 segmentNameLength; // length of the segment name if the segmentNameSet bit is set
+        U32 segmentNumber; // segment number, Win95 (and now Java) can have multiple pieces for one module
+        union {
+                U32 flags; // all the flags as one dword
+                struct {
+                        U32 exe : 1; // this module is an exe
+                        U32 globalModule : 1; // globally loaded module. There may be multiple
+                                              // module records for a global module, but the samples
+                                              // will only point to the 1st one, the others will be
+                                              // ignored. NT's Kernel32 is an example of this.
+                                              // REVISIT this??
+                        U32 bogusWin95 : 1; // "bogus" win95 module. By bogus, we mean a
+                                            // module that has a pid of 0, no length and no base.
+                                            // Selector actually used as a 32 bit module.
+                        U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid
+                        U32 sampleFound : 1; // at least one sample referenced this module
+                        U32 tscUsed : 1; // tsc set when record written
+                        U32 duplicate : 1; // 1st pass analysis has determined this is a
+                                           // duplicate load
+                        U32 globalModuleTB5 : 1; // module mapped into all processes on system
+                        U32 segmentNameSet : 1; // set if the segment name was collected
+                                                // (initially done for xbox collections)
+                        U32 firstModuleRecInProcess : 1; // if the pidCreatesTrackedInModuleRecs flag is set
+                                                         // in the SampleHeaderEx struct and this flag
+                                                         // is set, the associated module indicates
+                                                         // the beginning of a new process
+                        U32 source : 1; // 0 for path in target system, 1 for path in host system (offloaded)
+                        U32 unknownLoadAddress : 1; // 0 for valid loadAddr64 value, 1 for invalid loadAddr64 value
+                        U32 reserved1 : 20;
+                } s1;
+        } u2;
+        U64 length64; // module length
+        U64 loadAddr64; // load address
+        U32 pidRecIndex; // process ID rec index (index into start of pid record section).
+                         // .. (see pidRecIndexRaw). If pidRecIndex == 0 and pidRecIndexRaw == 1
+                         // ..then this is a kernel or global module. Can validly
+                         // ..be 0 if not raw (array index). Use ReturnPid() to access this
+                         // ..field
+        U32 osid; // OS identifier
+        U64 unloadTsc; // TSC collected on an unload event
+        U32 path; // module path name (section offset on disk)
+                  // ..when initially written by sampler name is at end of this
+                  // ..struct, when merged with main file names are pooled at end
+                  // ..of ModuleRecord Section so ModulesRecords can be
+                  // ..fixed length
+        U16 pathLength; // path name length (includes terminating \0)
+        U16 filenameOffset; // offset into path name of base filename
+        U32 segmentName; // offset to the segmentName from the beginning of the
+                         // module section in a processed module section
+                         // (s/b 0 in a raw module record)
+                         // in a raw module record, the segment name will follow the
+                         // module name and the module name's terminating NULL char
+        U32 page_offset_high;
+        U64 tsc; // time stamp counter module event occurred
+        U32 parent_pid; // Parent PID of the process
+        U32 page_offset_low;
+} ModuleRecord;
+
+#define MR_unloadTscSet(x, y) ((x)->unloadTsc = (y))
+#define MR_unloadTscGet(x) ((x)->unloadTsc)
+
+#define MR_page_offset_Set(x, y) \
+        { \
+                (x)->page_offset_low = (y)&0xFFFFFFFF; \
+                (x)->page_offset_high = ((y) >> 32) & 0xFFFFFFFF; \
+        }
+#define MR_page_offset_Get(x) \
+        ((((U64)(x)->page_offset_high) << 32) | (x)->page_offset_low)
+
+// Accessor macros for ModuleRecord
+#define MODULE_RECORD_rec_length(x) ((x)->recLength)
+#define MODULE_RECORD_segment_type(x) ((x)->segmentType)
+#define MODULE_RECORD_load_event(x) ((x)->loadEvent)
+#define MODULE_RECORD_processed(x) ((x)->processed)
+#define MODULE_RECORD_selector(x) ((x)->selector)
+#define MODULE_RECORD_segment_name_length(x) ((x)->segmentNameLength)
+#define MODULE_RECORD_segment_number(x) ((x)->segmentNumber)
+#define MODULE_RECORD_flags(x) ((x)->u2.flags)
+#define MODULE_RECORD_exe(x) ((x)->u2.s1.exe)
+#define MODULE_RECORD_global_module(x) ((x)->u2.s1.globalModule)
+#define MODULE_RECORD_bogus_win95(x) ((x)->u2.s1.bogusWin95)
+#define MODULE_RECORD_pid_rec_index_raw(x) ((x)->u2.s1.pidRecIndexRaw)
+#define MODULE_RECORD_sample_found(x) ((x)->u2.s1.sampleFound)
+#define MODULE_RECORD_tsc_used(x) ((x)->u2.s1.tscUsed)
+#define MODULE_RECORD_duplicate(x) ((x)->u2.s1.duplicate)
+#define MODULE_RECORD_global_module_tb5(x) ((x)->u2.s1.globalModuleTB5)
+#define MODULE_RECORD_segment_name_set(x) ((x)->u2.s1.segmentNameSet)
+#define MODULE_RECORD_first_module_rec_in_process(x) \
+        ((x)->u2.s1.firstModuleRecInProcess)
+#define MODULE_RECORD_source(x) ((x)->u2.s1.source)
+#define MODULE_RECORD_unknown_load_address(x) ((x)->u2.s1.unknownLoadAddress)
+#define MODULE_RECORD_length64(x) ((x)->length64)
+#define MODULE_RECORD_load_addr64(x) ((x)->loadAddr64)
+#define MODULE_RECORD_pid_rec_index(x) ((x)->pidRecIndex)
+#define MODULE_RECORD_unload_tsc(x) ((x)->unloadTsc)
+#define MODULE_RECORD_path(x) ((x)->path)
+#define MODULE_RECORD_path_length(x) ((x)->pathLength)
+#define MODULE_RECORD_filename_offset(x) ((x)->filenameOffset)
+#define MODULE_RECORD_segment_name(x) ((x)->segmentName)
+#define MODULE_RECORD_tsc(x) ((x)->tsc)
+#define MODULE_RECORD_parent_pid(x) ((x)->parent_pid)
+#define MODULE_RECORD_osid(x) ((x)->osid)
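+
+/*
+ * Illustrative sketch (editorial): the 64-bit page offset is carried in two
+ * U32 halves so the record layout stays stable; MR_page_offset_Set/Get do
+ * the split and the reassembly. Example values are arbitrary.
+ *
+ *     ModuleRecord mr;
+ *     U64 offset = 0x123456789AULL;
+ *
+ *     MR_page_offset_Set(&mr, offset);
+ *     // mr.page_offset_low  == 0x3456789A
+ *     // mr.page_offset_high == 0x12
+ *     // MR_page_offset_Get(&mr) == 0x123456789AULL again
+ */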
+
+/*
+ * Sample record. Size can be determined by looking at the header record.
+ * There can be up to 3 sections. The SampleFileHeader defines the presence
+ * of sections and their offsets. Within a sample file, all of the sample
+ * records have the same number of sections and the same size. However,
+ * different sample record sections and sizes can exist in different
+ * sample files. Since recording counters and the time stamp counter for
+ * each sample can be space consuming, the user can determine whether or not
+ * this information is kept at sample collection time.
+ */
+
+typedef struct SampleRecordPC_s { // Program Counter section
+        U32 descriptor_id;
+        U32 osid; // OS identifier
+        union {
+                struct {
+                        U64 iip; // IA64 interrupt instruction pointer
+                        U64 ipsr; // IA64 interrupt processor status register
+                } s1;
+                struct {
+                        U32 eip; // IA32 instruction pointer
+                        U32 eflags; // IA32 eflags
+                        CodeDescriptor csd; // IA32 code seg descriptor (8 bytes)
+                } s2;
+        } u1;
+        U16 cs; // IA32 cs (0 for IA64)
+        union {
+                U16 cpuAndOS; // cpu and OS info as one word
+                struct { // cpu and OS info broken out
+                        U16 cpuNum : 12; // cpu number (0 - 4095)
+                        U16 notVmid0 : 1; // win95, vmid0 flag (1 means NOT vmid 0)
+                        U16 codeMode : 2; // processor mode, see MODE_ defines
+                        U16 uncore_valid : 1; // identifies if the uncore count is valid
+                } s3;
+        } u2;
+        U32 tid; // OS thread ID (may get reused, see tidIsRaw)
+        U32 pidRecIndex; // process ID rec index (index into start of pid
+                         // record section) .. can validly be 0 if not raw
+                         // (array index). Use ReturnPid() to
+                         // ..access this field .. (see pidRecIndexRaw)
+        union {
+                U32 bitFields2;
+                struct {
+                        U32 mrIndex : 20; // module record index (index into start of
+                                          // module rec section) .. (see mrIndexNone)
+                        U32 eventIndex : 8; // index into the Events section
+                        U32 tidIsRaw : 1; // tid is raw OS tid
+                        U32 IA64PC : 1; // TRUE=this is a IA64 PC sample record
+                        U32 pidRecIndexRaw : 1; // pidRecIndex is raw OS pid
+                        U32 mrIndexNone : 1; // no mrIndex (unknown module)
+                } s4;
+        } u3;
+        U64 tsc; // processor timestamp counter
+} SampleRecordPC, *PSampleRecordPC;
+
+#define SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id)
+#define SAMPLE_RECORD_osid(x) ((x)->osid)
+#define SAMPLE_RECORD_iip(x) ((x)->u1.s1.iip)
+#define SAMPLE_RECORD_ipsr(x) ((x)->u1.s1.ipsr)
+#define SAMPLE_RECORD_eip(x) ((x)->u1.s2.eip)
+#define SAMPLE_RECORD_eflags(x) ((x)->u1.s2.eflags)
+#define SAMPLE_RECORD_csd(x) ((x)->u1.s2.csd)
+#define SAMPLE_RECORD_cs(x) ((x)->cs)
+#define SAMPLE_RECORD_cpu_and_os(x) ((x)->u2.cpuAndOS)
+#define SAMPLE_RECORD_cpu_num(x) ((x)->u2.s3.cpuNum)
+#define SAMPLE_RECORD_uncore_valid(x) ((x)->u2.s3.uncore_valid)
+#define SAMPLE_RECORD_not_vmid0(x) ((x)->u2.s3.notVmid0)
+#define SAMPLE_RECORD_code_mode(x) ((x)->u2.s3.codeMode)
+#define SAMPLE_RECORD_tid(x) ((x)->tid)
+#define SAMPLE_RECORD_pid_rec_index(x) ((x)->pidRecIndex)
+#define SAMPLE_RECORD_bit_fields2(x) ((x)->u3.bitFields2)
+#define SAMPLE_RECORD_mr_index(x) ((x)->u3.s4.mrIndex)
+#define SAMPLE_RECORD_event_index(x) ((x)->u3.s4.eventIndex)
+#define SAMPLE_RECORD_tid_is_raw(x) ((x)->u3.s4.tidIsRaw)
+#define SAMPLE_RECORD_ia64_pc(x) ((x)->u3.s4.IA64PC)
+#define SAMPLE_RECORD_pid_rec_index_raw(x) ((x)->u3.s4.pidRecIndexRaw)
+#define SAMPLE_RECORD_mr_index_none(x) ((x)->u3.s4.mrIndexNone)
+#define SAMPLE_RECORD_tsc(x) ((x)->tsc)
+
+// end of SampleRecord sections
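+
+/*
+ * Illustrative sketch (editorial): picking the right instruction pointer out
+ * of a SampleRecordPC with the accessors above. The helper is hypothetical.
+ *
+ *     static U64
+ *     sample_record_ip(PSampleRecordPC rec)
+ *     {
+ *             // IA64 records carry iip; IA32 records carry eip
+ *             if (SAMPLE_RECORD_ia64_pc(rec)) {
+ *                     return SAMPLE_RECORD_iip(rec);
+ *             }
+ *             return (U64)SAMPLE_RECORD_eip(rec);
+ *     }
+ */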
+
+/* Uncore Sample Record definition. This is a skinny sample record used by uncore boxes
+   to record samples. The sample record consists of a descriptor id, cpu info and timestamp. */
+
+typedef struct UncoreSampleRecordPC_s {
+        U32 descriptor_id;
+        U32 osid;
+        U16 cpuNum;
+        U16 pkgNum;
+        union {
+                U32 flags;
+                struct {
+                        U32 uncore_valid : 1; // identifies if the uncore count is valid
+                        U32 reserved1 : 31;
+                } s1;
+        } u1;
+        U64 reserved2;
+        U64 tsc; // processor timestamp counter
+} UncoreSampleRecordPC, *PUncoreSampleRecordPC;
+
+#define UNCORE_SAMPLE_RECORD_descriptor_id(x) ((x)->descriptor_id)
+#define UNCORE_SAMPLE_RECORD_osid(x) ((x)->osid)
+#define UNCORE_SAMPLE_RECORD_cpu_num(x) ((x)->cpuNum)
+#define UNCORE_SAMPLE_RECORD_pkg_num(x) ((x)->pkgNum)
+#define UNCORE_SAMPLE_RECORD_uncore_valid(x) ((x)->u1.s1.uncore_valid)
+#define UNCORE_SAMPLE_RECORD_tsc(x) ((x)->tsc)
+
+// end of UncoreSampleRecord section
+
+// Definitions for user markers data
+// The instances of these structures will be written to the user markers temp file.
+#define MARKER_DEFAULT_TYPE "Default_Marker"
+#define MARKER_DEFAULT_ID 0
+#define MAX_MARKER_LENGTH 136
+
+#define MARK_ID 4
+#define MARK_DATA 2
+#define THREAD_INFO 8
+
+/* do not use it at this moment
+typedef enum {
+        SMRK_USER_DEFINED = 0,
+        SMRK_THREAD_NAME,
+        SMRK_WALLCLOCK,
+        SMRK_TEXT,
+        SMRK_TYPE_ID
+} SMRK_TYPE;
+*/
+
+/*
+ * Common Register descriptions
+ */
+
+/*
+ * Bits used in the debug control register
+ */
+#define DEBUG_CTL_LBR 0x0000001
+#define DEBUG_CTL_BTF 0x0000002
+#define DEBUG_CTL_TR 0x0000040
+#define DEBUG_CTL_BTS 0x0000080
+#define DEBUG_CTL_BTINT 0x0000100
+#define DEBUG_CTL_BTS_OFF_OS 0x0000200
+#define DEBUG_CTL_BTS_OFF_USR 0x0000400
+#define DEBUG_CTL_FRZ_LBR_ON_PMI 0x0000800
+#define DEBUG_CTL_FRZ_PMON_ON_PMI 0x0001000
+#define DEBUG_CTL_ENABLE_UNCORE_PMI_BIT 0x0002000
+
+#define DEBUG_CTL_NODE_lbr_get(reg) ((reg) & DEBUG_CTL_LBR)
+#define DEBUG_CTL_NODE_lbr_set(reg) ((reg) |= DEBUG_CTL_LBR)
+#define DEBUG_CTL_NODE_lbr_clear(reg) ((reg) &= ~DEBUG_CTL_LBR)
+
+#define DEBUG_CTL_NODE_btf_get(reg) ((reg) & DEBUG_CTL_BTF)
+#define DEBUG_CTL_NODE_btf_set(reg) ((reg) |= DEBUG_CTL_BTF)
+#define DEBUG_CTL_NODE_btf_clear(reg) ((reg) &= ~DEBUG_CTL_BTF)
+
+#define DEBUG_CTL_NODE_tr_get(reg) ((reg) & DEBUG_CTL_TR)
+#define DEBUG_CTL_NODE_tr_set(reg) ((reg) |= DEBUG_CTL_TR)
+#define DEBUG_CTL_NODE_tr_clear(reg) ((reg) &= ~DEBUG_CTL_TR)
+
+#define DEBUG_CTL_NODE_bts_get(reg) ((reg) & DEBUG_CTL_BTS)
+#define DEBUG_CTL_NODE_bts_set(reg) ((reg) |= DEBUG_CTL_BTS)
+#define DEBUG_CTL_NODE_bts_clear(reg) ((reg) &= ~DEBUG_CTL_BTS)
+
+#define DEBUG_CTL_NODE_btint_get(reg) ((reg) & DEBUG_CTL_BTINT)
+#define DEBUG_CTL_NODE_btint_set(reg) ((reg) |= DEBUG_CTL_BTINT)
+#define DEBUG_CTL_NODE_btint_clear(reg) ((reg) &= ~DEBUG_CTL_BTINT)
+
+#define DEBUG_CTL_NODE_bts_off_os_get(reg) ((reg) & DEBUG_CTL_BTS_OFF_OS)
+#define DEBUG_CTL_NODE_bts_off_os_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_OS)
+#define DEBUG_CTL_NODE_bts_off_os_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_OS)
+
+#define DEBUG_CTL_NODE_bts_off_usr_get(reg) ((reg) & DEBUG_CTL_BTS_OFF_USR)
+#define DEBUG_CTL_NODE_bts_off_usr_set(reg) ((reg) |= DEBUG_CTL_BTS_OFF_USR)
+#define DEBUG_CTL_NODE_bts_off_usr_clear(reg) ((reg) &= ~DEBUG_CTL_BTS_OFF_USR)
+
+#define DEBUG_CTL_NODE_frz_lbr_on_pmi_get(reg) ((reg) & DEBUG_CTL_FRZ_LBR_ON_PMI)
+#define DEBUG_CTL_NODE_frz_lbr_on_pmi_set(reg) ((reg) |= DEBUG_CTL_FRZ_LBR_ON_PMI)
+#define DEBUG_CTL_NODE_frz_lbr_on_pmi_clear(reg) \
+        ((reg) &= ~DEBUG_CTL_FRZ_LBR_ON_PMI)
+
+#define DEBUG_CTL_NODE_frz_pmon_on_pmi_get(reg) \
+        ((reg) & DEBUG_CTL_FRZ_PMON_ON_PMI)
+#define DEBUG_CTL_NODE_frz_pmon_on_pmi_set(reg) \
+        ((reg) |= DEBUG_CTL_FRZ_PMON_ON_PMI)
+#define DEBUG_CTL_NODE_frz_pmon_on_pmi_clear(reg) \
+        ((reg) &= ~DEBUG_CTL_FRZ_PMON_ON_PMI)
+
+#define DEBUG_CTL_NODE_enable_uncore_pmi_get(reg) \
+        ((reg) & DEBUG_CTL_ENABLE_UNCORE_PMI_BIT)
+#define DEBUG_CTL_NODE_enable_uncore_pmi_set(reg) \
+        ((reg) |= DEBUG_CTL_ENABLE_UNCORE_PMI_BIT)
+#define DEBUG_CTL_NODE_enable_uncore_pmi_clear(reg) \
+        ((reg) &= ~DEBUG_CTL_ENABLE_UNCORE_PMI_BIT)
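+
+/*
+ * Illustrative sketch (editorial): the *_set/_clear macros mutate a scratch
+ * variable holding the IA32_DEBUGCTL image, not the MSR itself; the caller
+ * still has to write the value back. Example values are arbitrary.
+ *
+ *     U64 debugctl = 0;
+ *
+ *     DEBUG_CTL_NODE_lbr_set(debugctl);            // record last branches
+ *     DEBUG_CTL_NODE_frz_lbr_on_pmi_set(debugctl); // freeze LBRs on PMI
+ *     // ... write debugctl to the MSR via the driver's MSR helpers ...
+ *     DEBUG_CTL_NODE_lbr_clear(debugctl);
+ */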
+
+/*
+ * Bits used in the LBR control register (Arch LBRs)
+ */
+#define LBR_CTL_EN 0x0000001
+#define LBR_CTL_OS 0x0000002
+#define LBR_CTL_USR 0x0000004
+#define LBR_CTL_EN_ALL 0x0000007
+#define LBR_CTL_CALL_STACK 0x0000008
+/* LBR filter bits */
+#define LBR_CTL_FILTER_JCC 0x0010000
+#define LBR_CTL_FILTER_NEAR_REL_JMP 0x0020000
+#define LBR_CTL_FILTER_NEAR_IND_JMP 0x0040000
+#define LBR_CTL_FILTER_NEAR_REL_CALL 0x0080000
+#define LBR_CTL_FILTER_NEAR_IND_CALL 0x0100000
+#define LBR_CTL_FILTER_NEAR_RET 0x0200000
+#define LBR_CTL_FILTER_OTHER_BR 0x0400000
+#define LBR_CTL_FILTER_ALL_BR_TYPES 0x07F0000
+
+#define LBR_CTL_NODE_lbr_en_get(reg) ((reg) & LBR_CTL_EN)
+#define LBR_CTL_NODE_lbr_en_set(reg) ((reg) |= LBR_CTL_EN)
+#define LBR_CTL_NODE_lbr_en_clear(reg) ((reg) &= ~LBR_CTL_EN)
+
+#define LBR_CTL_NODE_lbr_en_all_get(reg) ((reg) & LBR_CTL_EN_ALL)
+#define LBR_CTL_NODE_lbr_en_all_set(reg) ((reg) |= LBR_CTL_EN_ALL)
+#define LBR_CTL_NODE_lbr_en_all_clear(reg) ((reg) &= ~LBR_CTL_EN_ALL)
+
+#define LBR_CTL_NODE_filter_all_br_get(reg) ((reg) & LBR_CTL_FILTER_ALL_BR_TYPES)
+#define LBR_CTL_NODE_filter_all_br_set(reg) ((reg) |= LBR_CTL_FILTER_ALL_BR_TYPES)
+#define LBR_CTL_NODE_filter_all_br_clear(reg) \
+        ((reg) &= ~LBR_CTL_FILTER_ALL_BR_TYPES)
+
+/*
+ * @macro SEP_VERSION_NODE_S
+ * @brief
+ * This structure supports versioning in Sep. The field major indicates the major version,
+ * minor indicates the minor version and api indicates the api version for the current
+ * sep build. This structure is initialized at the time when the driver is loaded.
+ */
+
+typedef struct SEP_VERSION_NODE_S SEP_VERSION_NODE;
+typedef SEP_VERSION_NODE *SEP_VERSION;
+
+struct SEP_VERSION_NODE_S {
+        union {
+                U32 sep_version;
+                struct {
+                        S32 major : 8;
+                        S32 minor : 8;
+                        S32 api : 8;
+                        S32 update : 8;
+                } s1;
+        } u1;
+};
+
+#define SEP_VERSION_NODE_sep_version(version) ((version)->u1.sep_version)
+#define SEP_VERSION_NODE_major(version) ((version)->u1.s1.major)
+#define SEP_VERSION_NODE_minor(version) ((version)->u1.s1.minor)
+#define SEP_VERSION_NODE_api(version) ((version)->u1.s1.api)
+#define SEP_VERSION_NODE_update(version) ((version)->u1.s1.update)
+
+#define MAX_RELEASE_STRING 256
+typedef struct LINUX_VERSION_NODE_S LINUX_VERSION_NODE;
+typedef LINUX_VERSION_NODE *LINUX_VERSION;
+struct LINUX_VERSION_NODE_S {
+        S32 major;
+        S32 minor;
+        S32 revision;
+        S8 patch[MAX_RELEASE_STRING];
+};
+
+#define LINUX_VERSION_NODE_major(version) ((version).major)
+#define LINUX_VERSION_NODE_minor(version) ((version).minor)
+#define LINUX_VERSION_NODE_revision(version) ((version).revision)
+#define LINUX_VERSION_NODE_patch(version) ((version).patch)
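+
+/*
+ * Illustrative sketch (editorial): in SEP_VERSION_NODE above, four 8-bit
+ * fields overlay a single U32, so a version can be packed and unpacked in
+ * place. The numbers below are arbitrary examples.
+ *
+ *     SEP_VERSION_NODE ver;
+ *
+ *     SEP_VERSION_NODE_major(&ver) = 5;
+ *     SEP_VERSION_NODE_minor(&ver) = 2;
+ *     SEP_VERSION_NODE_api(&ver) = 1;
+ *     SEP_VERSION_NODE_update(&ver) = 0;
+ *     // SEP_VERSION_NODE_sep_version(&ver) now holds all four fields packed
+ *     // into one U32 (exact byte order follows the bit-field layout)
+ */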
+
+/*
+ * The VTSA_SYS_INFO_STRUCT information that is shared across kernel mode
+ * and user mode code, very specifically for tb5 file generation
+ */
+
+typedef enum {
+        GT_UNK = 0,
+        GT_PER_CPU,
+        GT_PER_CHIPSET,
+        GT_CPUID,
+        GT_NODE,
+        GT_SYSTEM,
+        GT_SAMPLE_RECORD_INFO
+} GEN_ENTRY_TYPES;
+
+typedef enum {
+        GST_UNK = 0,
+        GST_X86,
+        GST_ITANIUM,
+        GST_SA, // strong arm
+        GST_XSC,
+        GST_EM64T,
+        GST_CS860
+} GEN_ENTRY_SUBTYPES;
+
+typedef struct __fixed_size_pointer {
+        union {
+                U64 fs_force_alignment;
+                struct {
+                        U32 fs_unused;
+                        U32 is_ptr : 1;
+                } s1;
+        } u1;
+        union {
+                U64 fs_offset;
+                void *fs_ptr;
+        } u2;
+} VTSA_FIXED_SIZE_PTR;
+
+#define VTSA_FIXED_SIZE_PTR_is_ptr(fsp) ((fsp)->u1.s1.is_ptr)
+#define VTSA_FIXED_SIZE_PTR_fs_offset(fsp) ((fsp)->u2.fs_offset)
+#define VTSA_FIXED_SIZE_PTR_fs_ptr(fsp) ((fsp)->u2.fs_ptr)
+
+typedef struct __generic_array_header {
+        //
+        // Information related to the generic header
+        //
+        U32 hdr_size; // size of this generic header
+                      // (for versioning and real data starts
+                      // after the header)
+
+        U32 next_field_hdr_padding; // make sure the next field is 8-byte aligned
+
+        //
+        // VTSA_FIXED_SIZE_PTR should always be on an 8-byte boundary...
+        //
+        // pointer to the next generic header if there is one
+        //
+        VTSA_FIXED_SIZE_PTR hdr_next_gen_hdr;
+
+        U32 hdr_reserved[7]; // padding for future use - force to 64 bytes...
+
+        //
+        // Information related to the array this header is describing
+        //
+        U32 array_num_entries;
+        U32 array_entry_size;
+        U16 array_type; // from the GEN_ENTRY_TYPES enumeration
+        U16 array_subtype; // from the GEN_ENTRY_SUBTYPES enumeration
+} VTSA_GEN_ARRAY_HDR;
+
+#define VTSA_GEN_ARRAY_HDR_hdr_size(gah) ((gah)->hdr_size)
+#define VTSA_GEN_ARRAY_HDR_hdr_next_gen_hdr(gah) ((gah)->hdr_next_gen_hdr)
+#define VTSA_GEN_ARRAY_HDR_array_num_entries(gah) ((gah)->array_num_entries)
+#define VTSA_GEN_ARRAY_HDR_array_entry_size(gah) ((gah)->array_entry_size)
+#define VTSA_GEN_ARRAY_HDR_array_type(gah) ((gah)->array_type)
+#define VTSA_GEN_ARRAY_HDR_array_subtype(gah) ((gah)->array_subtype)
+
+typedef struct __cpuid_x86 {
+        U32 cpuid_eax_input;
+        U32 cpuid_eax;
+        U32 cpuid_ebx;
+        U32 cpuid_ecx;
+        U32 cpuid_edx;
+} VTSA_CPUID_X86;
+
+#define VTSA_CPUID_X86_cpuid_eax_input(cid) ((cid)->cpuid_eax_input)
+#define VTSA_CPUID_X86_cpuid_eax(cid) ((cid)->cpuid_eax)
+#define VTSA_CPUID_X86_cpuid_ebx(cid) ((cid)->cpuid_ebx)
+#define VTSA_CPUID_X86_cpuid_ecx(cid) ((cid)->cpuid_ecx)
+#define VTSA_CPUID_X86_cpuid_edx(cid) ((cid)->cpuid_edx)
+
+typedef struct __cpuid_ipf {
+        U64 cpuid_select;
+        U64 cpuid_val;
+} VTSA_CPUID_IPF;
+
+#define VTSA_CPUID_IPF_cpuid_select(cid) ((cid)->cpuid_select)
+#define VTSA_CPUID_IPF_cpuid_val(cid) ((cid)->cpuid_val)
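+
+/*
+ * Illustrative sketch (editorial): a VTSA_FIXED_SIZE_PTR is either a live
+ * pointer (is_ptr set) or a file offset relative to some base. Presumably a
+ * reader resolves it roughly like this; the helper is hypothetical.
+ *
+ *     static void *
+ *     vtsa_fixed_size_ptr_resolve(VTSA_FIXED_SIZE_PTR *fsp, char *file_base)
+ *     {
+ *             if (VTSA_FIXED_SIZE_PTR_is_ptr(fsp)) {
+ *                     return VTSA_FIXED_SIZE_PTR_fs_ptr(fsp);
+ *             }
+ *             return file_base + VTSA_FIXED_SIZE_PTR_fs_offset(fsp);
+ *     }
+ */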
+
+typedef struct __generic_per_cpu {
+        //
+        // per cpu information
+        //
+        U32 cpu_number; // cpu number (as defined by the OS)
+        U32 cpu_speed_mhz; // cpu speed (in MHz)
+        U32 cpu_fsb_mhz; // Front Side Bus speed (in MHz) (if known)
+        U32 cpu_cache_L2; // ??? USER: cpu L2 (marketing definition) cache size (if known)
+
+        //
+        // And pointer to other structures. Keep this on an 8-byte boundary
+        //
+        // "pointer" to generic array header that should contain
+        // cpuid information for this cpu
+        //
+        VTSA_FIXED_SIZE_PTR cpu_cpuid_array;
+
+        S64 cpu_tsc_offset; // TSC offset from CPU 0 computed as (TSC CPU N - TSC CPU 0)
+        //
+        // intel processor number (from marketing).
+        // Currently 3 decimal digits (3xx, 5xx and 7xx)
+        //
+        U32 cpu_intel_processor_number;
+
+        U32 cpu_cache_L3; // ??? USER: cpu L3 (marketing definition) cache size (if known)
+
+        U64 platform_id;
+
+        //
+        // package/mapping information
+        //
+        // The hierarchy for uniquely identifying a logical processor
+        // in a system is node number/id (from the node structure),
+        // package number, core number, and thread number.
+        // Core number is for identifying a core within a package.
+        //
+        // Actually, on Itanium getting all this information is
+        // pretty involved, with a complicated algorithm using PAL calls.
+        // I don't know how important all this stuff is to the user.
+        // Maybe we can just have the placeholder now and figure out
+        // how to fill them later.
+        //
+        U16 cpu_package_num; // package number for this cpu (if known)
+        U16 cpu_core_num; // core number (if known)
+        U16 cpu_hw_thread_num; // hw thread number inside the core (if known)
+
+        U16 cpu_threads_per_core; // total number of h/w threads per core (if known)
+        U16 cpu_module_id; // Processor module number
+        U16 cpu_num_modules; // Number of processor modules
+        U32 cpu_core_type; // Core type for hetero
+        U32 arch_perfmon_ver;
+        U32 num_gp_counters;
+        U32 num_fixed_counters;
+        U32 reserved1;
+        U64 reserved2;
+        U64 reserved3;
+
+} VTSA_GEN_PER_CPU;
+
+#define VTSA_GEN_PER_CPU_cpu_number(p_cpu) ((p_cpu)->cpu_number)
+#define VTSA_GEN_PER_CPU_cpu_speed_mhz(p_cpu) ((p_cpu)->cpu_speed_mhz)
+#define VTSA_GEN_PER_CPU_cpu_fsb_mhz(p_cpu) ((p_cpu)->cpu_fsb_mhz)
+#define VTSA_GEN_PER_CPU_cpu_cache_L2(p_cpu) ((p_cpu)->cpu_cache_L2)
+#define VTSA_GEN_PER_CPU_cpu_cpuid_array(p_cpu) ((p_cpu)->cpu_cpuid_array)
+#define VTSA_GEN_PER_CPU_cpu_tsc_offset(p_cpu) ((p_cpu)->cpu_tsc_offset)
+#define VTSA_GEN_PER_CPU_cpu_intel_processor_number(p_cpu) \
+        ((p_cpu)->cpu_intel_processor_number)
+#define VTSA_GEN_PER_CPU_cpu_cache_L3(p_cpu) ((p_cpu)->cpu_cache_L3)
+#define VTSA_GEN_PER_CPU_platform_id(p_cpu) ((p_cpu)->platform_id)
+#define VTSA_GEN_PER_CPU_cpu_package_num(p_cpu) ((p_cpu)->cpu_package_num)
+#define VTSA_GEN_PER_CPU_cpu_core_num(p_cpu) ((p_cpu)->cpu_core_num)
+#define VTSA_GEN_PER_CPU_cpu_hw_thread_num(p_cpu) ((p_cpu)->cpu_hw_thread_num)
+#define VTSA_GEN_PER_CPU_cpu_threads_per_core(p_cpu) \
+        ((p_cpu)->cpu_threads_per_core)
+#define VTSA_GEN_PER_CPU_cpu_module_num(p_cpu) ((p_cpu)->cpu_module_id)
+#define VTSA_GEN_PER_CPU_cpu_num_modules(p_cpu) ((p_cpu)->cpu_num_modules)
+#define VTSA_GEN_PER_CPU_cpu_core_type(p_cpu) ((p_cpu)->cpu_core_type)
+#define VTSA_GEN_PER_CPU_arch_perfmon_ver(p_cpu) ((p_cpu)->arch_perfmon_ver)
+#define VTSA_GEN_PER_CPU_num_gp_counters(p_cpu) ((p_cpu)->num_gp_counters)
+#define VTSA_GEN_PER_CPU_num_fixed_counters(p_cpu) ((p_cpu)->num_fixed_counters)
+
+typedef struct __node_info {
+        U32 node_type_from_shell;
+        U32 node_id; // The node number/id (if known)
+
+        U32 node_num_available; // total number of cpus on this node
+        U32 node_num_used; // USER: number used based on cpu mask at time of run
+
+        U64 node_physical_memory; // amount of physical memory (bytes) on this node
+
+        //
+        // pointer to the first generic header that
+        // contains the per-cpu information
+        //
+        // Keep the VTSA_FIXED_SIZE_PTR on an 8-byte boundary...
+        //
+        VTSA_FIXED_SIZE_PTR node_percpu_array;
+
+        U32 node_reserved[2]; // leave some space
+
+} VTSA_NODE_INFO;
+
+#define VTSA_NODE_INFO_node_type_from_shell(vni) ((vni)->node_type_from_shell)
+#define VTSA_NODE_INFO_node_id(vni) ((vni)->node_id)
+#define VTSA_NODE_INFO_node_num_available(vni) ((vni)->node_num_available)
+#define VTSA_NODE_INFO_node_num_used(vni) ((vni)->node_num_used)
+#define VTSA_NODE_INFO_node_physical_memory(vni) ((vni)->node_physical_memory)
+#define VTSA_NODE_INFO_node_percpu_array(vni) ((vni)->node_percpu_array)
+
+typedef struct __sys_info {
+        //
+        // Keep this on an 8-byte boundary
+        //
+        VTSA_FIXED_SIZE_PTR node_array; // the per-node information
+
+        U64 min_app_address; // USER: lower allowed user space address (if known)
+        U64 max_app_address; // USER: upper allowed user space address (if known)
+        U32 page_size; // Current page size
+        U32 allocation_granularity; // USER: Granularity of allocation requests (if known)
+        U32 reserved1; // added for future fields
+        U32 reserved2; // alignment purpose
+        U64 reserved3[3]; // added for future fields
+
+} VTSA_SYS_INFO;
+
+#define VTSA_SYS_INFO_node_array(sys_info) ((sys_info)->node_array)
+#define VTSA_SYS_INFO_min_app_address(sys_info) ((sys_info)->min_app_address)
+#define VTSA_SYS_INFO_max_app_address(sys_info) ((sys_info)->max_app_address)
+#define VTSA_SYS_INFO_page_size(sys_info) ((sys_info)->page_size)
+#define VTSA_SYS_INFO_allocation_granularity(sys_info) \
+        ((sys_info)->allocation_granularity)
+
+typedef struct DRV_TOPOLOGY_INFO_NODE_S DRV_TOPOLOGY_INFO_NODE;
+typedef DRV_TOPOLOGY_INFO_NODE *DRV_TOPOLOGY_INFO;
+
+struct DRV_TOPOLOGY_INFO_NODE_S {
+        U32 cpu_number; // cpu number (as defined by the OS)
+        U16 cpu_package_num; // package number for this cpu (if known)
+        U16 cpu_core_num; // core number (if known)
+        U16 cpu_hw_thread_num; // T0 or T1 if HT enabled
+        U16 reserved1;
+        S32 socket_master;
+        S32 core_master;
+        S32 thr_master;
+        U32 cpu_module_num;
+        U32 cpu_module_master;
+        U32 cpu_num_modules;
+        U32 cpu_core_type;
+        U32 arch_perfmon_ver;
+        U32 num_gp_counters;
+        U32 num_fixed_counters;
+        U32 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+};
+
+#define DRV_TOPOLOGY_INFO_cpu_number(dti) ((dti)->cpu_number)
+#define DRV_TOPOLOGY_INFO_cpu_package_num(dti) ((dti)->cpu_package_num)
+#define DRV_TOPOLOGY_INFO_cpu_core_num(dti) ((dti)->cpu_core_num)
+#define DRV_TOPOLOGY_INFO_socket_master(dti) ((dti)->socket_master)
+#define DRV_TOPOLOGY_INFO_core_master(dti) ((dti)->core_master)
+#define DRV_TOPOLOGY_INFO_thr_master(dti) ((dti)->thr_master)
+#define DRV_TOPOLOGY_INFO_cpu_hw_thread_num(dti) ((dti)->cpu_hw_thread_num)
+#define DRV_TOPOLOGY_INFO_cpu_module_num(dti) ((dti)->cpu_module_num)
+#define DRV_TOPOLOGY_INFO_cpu_module_master(dti) ((dti)->cpu_module_master)
+#define DRV_TOPOLOGY_INFO_cpu_num_modules(dti) ((dti)->cpu_num_modules)
+#define DRV_TOPOLOGY_INFO_cpu_core_type(dti) ((dti)->cpu_core_type)
+#define DRV_TOPOLOGY_INFO_arch_perfmon_ver(dti) ((dti)->arch_perfmon_ver)
+#define DRV_TOPOLOGY_INFO_num_gp_counters(dti) ((dti)->num_gp_counters)
+#define DRV_TOPOLOGY_INFO_num_fixed_counters(dti) ((dti)->num_fixed_counters)
+
+#define VALUE_TO_BE_DISCOVERED 0
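+
+/*
+ * Illustrative sketch (editorial): walking from a VTSA_SYS_INFO (defined
+ * above) to each node's per-cpu data. This assumes the fixed-size pointers
+ * have already been resolved to live pointers (is_ptr set) and that each one
+ * points at a VTSA_GEN_ARRAY_HDR immediately followed by its entries; given
+ * a VTSA_SYS_INFO *sys_info, the loop is hypothetical.
+ *
+ *     VTSA_GEN_ARRAY_HDR *node_hdr =
+ *             (VTSA_GEN_ARRAY_HDR *)VTSA_FIXED_SIZE_PTR_fs_ptr(
+ *                     &VTSA_SYS_INFO_node_array(sys_info));
+ *     VTSA_NODE_INFO *nodes =
+ *             (VTSA_NODE_INFO *)((char *)node_hdr +
+ *                                VTSA_GEN_ARRAY_HDR_hdr_size(node_hdr));
+ *     U32 i;
+ *
+ *     for (i = 0; i < VTSA_GEN_ARRAY_HDR_array_num_entries(node_hdr); i++) {
+ *             // nodes[i] describes one node; its node_percpu_array leads to
+ *             // a GT_PER_CPU array of VTSA_GEN_PER_CPU in the same fashion
+ *     }
+ */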
+
+// dimm information
+typedef struct DRV_DIMM_INFO_NODE_S DRV_DIMM_INFO_NODE;
+typedef DRV_DIMM_INFO_NODE *DRV_DIMM_INFO;
+
+struct DRV_DIMM_INFO_NODE_S {
+        U32 platform_id;
+        U32 channel_num;
+        U32 rank_num;
+        U32 value;
+        U8 mc_num;
+        U8 dimm_valid;
+        U8 valid_value;
+        U8 rank_value;
+        U8 density_value;
+        U8 width_value;
+        U16 socket_num;
+        U64 reserved1;
+        U64 reserved2;
+};
+
+#define DRV_DIMM_INFO_platform_id(di) ((di)->platform_id)
+#define DRV_DIMM_INFO_channel_num(di) ((di)->channel_num)
+#define DRV_DIMM_INFO_rank_num(di) ((di)->rank_num)
+#define DRV_DIMM_INFO_value(di) ((di)->value)
+#define DRV_DIMM_INFO_mc_num(di) ((di)->mc_num)
+#define DRV_DIMM_INFO_dimm_valid(di) ((di)->dimm_valid)
+#define DRV_DIMM_INFO_valid_value(di) ((di)->valid_value)
+#define DRV_DIMM_INFO_rank_value(di) ((di)->rank_value)
+#define DRV_DIMM_INFO_density_value(di) ((di)->density_value)
+#define DRV_DIMM_INFO_width_value(di) ((di)->width_value)
+#define DRV_DIMM_INFO_socket_num(di) ((di)->socket_num)
+
+// Platform information; obtained from the driver
+#define MAX_PACKAGES 16
+#define MAX_CHANNELS 8
+#define MAX_RANKS 3
+
+typedef struct DRV_PLATFORM_INFO_NODE_S DRV_PLATFORM_INFO_NODE;
+typedef DRV_PLATFORM_INFO_NODE *DRV_PLATFORM_INFO;
+
+struct DRV_PLATFORM_INFO_NODE_S {
+        U64 info; // platform info
+        U64 ddr_freq_index; // freq table index
+        U8 misc_valid; // misc enabled valid bit
+        U8 reserved1; // added for alignment purpose
+        U16 reserved2;
+        U32 vmm_timer_freq; // timer frequency from VMM on SoFIA (in Hz)
+        U64 misc_info; // misc enabled info
+        U64 ufs_freq; // ufs frequency (HSX only)
+        DRV_DIMM_INFO_NODE dimm_info[MAX_PACKAGES * MAX_CHANNELS * MAX_RANKS];
+        U64 energy_multiplier; // Value of energy multiplier
+        U64 reserved3;
+        U64 reserved4;
+        U64 reserved5;
+        U64 reserved6;
+};
+
+#define DRV_PLATFORM_INFO_info(data) ((data)->info)
+#define DRV_PLATFORM_INFO_ddr_freq_index(data) ((data)->ddr_freq_index)
+#define DRV_PLATFORM_INFO_misc_valid(data) ((data)->misc_valid)
+#define DRV_PLATFORM_INFO_misc_info(data) ((data)->misc_info)
+#define DRV_PLATFORM_INFO_ufs_freq(data) ((data)->ufs_freq)
+#define DRV_PLATFORM_INFO_dimm_info(data) ((data)->dimm_info)
+#define DRV_PLATFORM_INFO_energy_multiplier(data) ((data)->energy_multiplier)
+#define DRV_PLATFORM_INFO_vmm_timer_freq(data) ((data)->vmm_timer_freq)
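+
+/*
+ * Illustrative sketch (editorial): dimm_info is a flattened
+ * package x channel x rank array. Assuming that index ordering (which this
+ * header does not spell out), one slot would be found as follows; the helper
+ * is hypothetical.
+ *
+ *     static DRV_DIMM_INFO
+ *     platform_dimm_slot(DRV_PLATFORM_INFO data, U32 pkg, U32 chan, U32 rank)
+ *     {
+ *             U32 idx = (pkg * MAX_CHANNELS + chan) * MAX_RANKS + rank;
+ *
+ *             return &DRV_PLATFORM_INFO_dimm_info(data)[idx];
+ *     }
+ */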
+
+// Platform frequency information; obtained from the platform picker
+typedef struct PLATFORM_FREQ_INFO_NODE_S PLATFORM_FREQ_INFO_NODE;
+typedef PLATFORM_FREQ_INFO_NODE *PLATFORM_FREQ_INFO;
+
+struct PLATFORM_FREQ_INFO_NODE_S {
+        float multiplier; // freq multiplier
+        double *table; // freq table
+        U32 table_size; // freq table size
+        U32 default_core_crystal_clock_freq;
+        U64 reserved1;
+        U64 reserved2;
+        U64 reserved3;
+};
+#define PLATFORM_FREQ_INFO_multiplier(data) ((data)->multiplier)
+#define PLATFORM_FREQ_INFO_table(data) ((data)->table)
+#define PLATFORM_FREQ_INFO_table_size(data) ((data)->table_size)
+#define PLATFORM_FREQ_INFO_default_core_crystal_clock_freq(data) \
+        ((data)->default_core_crystal_clock_freq)
+
+/**************************************************************
+ * PMT DEVICE TOPOLOGY STRUCTS START
+***************************************************************/
+#define MAX_PMT_DEVICES 32
+#define MAX_PMT_TILES 16
+
+typedef struct PMT_DEVICE_TILE_NODE_S PMT_DEVICE_TILE_NODE;
+struct PMT_DEVICE_TILE_NODE_S {
+        U32 tile_id;
+        U32 domain;
+        U32 bus;
+        U32 device;
+        U32 function;
+        U32 pmt_valid;
+        U32 oobmsm_deviceid;
+        U32 found;
+        U32 unit_id;
+        U64 pmt_endpoint_guid;
+        U64 reserved1;
+        U64 reserved2;
+        U64 reserved3;
+};
+
+typedef struct PMT_TOPOLOGY_DISCOVERY_NODE_S PMT_TOPOLOGY_DISCOVERY_NODE;
+struct PMT_TOPOLOGY_DISCOVERY_NODE_S {
+        U32 id;
+        U32 domain;
+        U32 bus;
+        U32 device;
+        U32 function;
+        U32 deviceid;
+        U32 num_tiles;
+        U32 num_entries_found;
+        U32 device_scan;
+        U64 reserved1;
+        U64 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+        PMT_DEVICE_TILE_NODE pmt_tiles[MAX_PMT_TILES];
+};
+
+typedef struct PMT_TOPOLOGY_DISCOVERY_LIST_NODE_S PMT_TOPOLOGY_DISCOVERY_LIST_NODE;
+typedef PMT_TOPOLOGY_DISCOVERY_LIST_NODE *PMT_TOPOLOGY_DISCOVERY_LIST;
+struct PMT_TOPOLOGY_DISCOVERY_LIST_NODE_S {
+        U32 num_pmt_devices;
+        U32 topology_detected;
+        PMT_TOPOLOGY_DISCOVERY_NODE pmt_device[MAX_PMT_DEVICES];
+};
+
+#define PMT_TOPOLOGY_DISCOVERY_LIST_num_pmt_devices(x) ((x)->num_pmt_devices)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_topology_detected(x) ((x)->topology_detected)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_pmt_device(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index])
+#define PMT_TOPOLOGY_DISCOVERY_LIST_id(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].id)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_domain(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].domain)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_bus(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].bus)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_device(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].device)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_function(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].function)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_deviceid(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].deviceid)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_num_tiles(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].num_tiles)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_num_entries_found(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].num_entries_found)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_device_scan(x, pmt_dev_index) \
+        ((x)->pmt_device[pmt_dev_index].device_scan)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_id(x, pmt_dev_index, \
+                                            pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].tile_id)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_domain(x, pmt_dev_index, \
+                                                pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].domain)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_bus(x, pmt_dev_index, \
+                                             pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].bus)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_device(x, pmt_dev_index, \
+                                                pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].device)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_function(x, pmt_dev_index, \
+                                                  pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].function)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_found(x, pmt_dev_index, \
+                                               pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].found)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_unit_id(x, pmt_dev_index, \
+                                                 pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].unit_id)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_oobmsm_deviceid(x, pmt_dev_index, \
+                                                         pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].oobmsm_deviceid)
+#define PMT_TOPOLOGY_DISCOVERY_LIST_tile_pmt_endpoint_guid(x, pmt_dev_index, \
+                                                           pmt_dev_tile_index) \
+        ((x)->pmt_device[pmt_dev_index].pmt_tiles[pmt_dev_tile_index].pmt_endpoint_guid)
+
+typedef struct PMT_DEVICE_INFO_NODE_S PMT_DEVICE_INFO_NODE;
+typedef PMT_DEVICE_INFO_NODE *PMT_DEVICE_INFO;
+struct PMT_DEVICE_INFO_NODE_S {
+        U32 device_id;
+        U32 domain;
+        U32 bus;
+        U32 device;
+        U32 function;
+        U32 pmt_index;
+        U64 guid;
+        U64 reserved1;
+        U64 reserved2;
+};
+/**************************************************************
+ * PMT DEVICE TOPOLOGY STRUCTS END
+***************************************************************/
+
+/******************************************************************************************
+ * DEVICE INFO STRUCTS START (used in platform picker for detecting each core/uncore device)
+*******************************************************************************************/
+typedef struct DEVICE_INFO_NODE_S DEVICE_INFO_NODE;
+typedef DEVICE_INFO_NODE *DEVICE_INFO; //NEEDED in PP
+
+struct DEVICE_INFO_NODE_S {
+        S8 *dll_name;
+        PVOID dll_handle;
+        S8 *cpu_name;
+        S8 *pmu_name;
+        DRV_STCHAR *event_db_file_name;
+        //PLATFORM_IDENTITY plat_identity; // this is undefined right now. Please take this as a structure containing U64
+        U32 plat_type; // device type (e.g., DEVICE_INFO_CORE, etc. ... see enum below)
+        U32 plat_sub_type; // cti_type (e.g., CTI_Sandybridge, etc., ... see env_info_types.h)
+        S32 dispatch_id; // this will be set in user mode dlls and will be unique across all IPF, IA32 (including MIDS).
+        ECB *ecb;
+        EVENT_CONFIG ec;
+        DEV_CONFIG pcfg;
+        DEV_UNC_CONFIG pcfg_unc;
+        U32 num_of_groups;
+        U32 size_of_alloc; // size of each event control block
+        PVOID drv_event;
+        U32 num_events;
+        U32 event_id_index; // event id index of device (basically how many events processed before this device)
+        U32 num_counters;
+        U32 group_index;
+        U32 num_packages;
+        U32 num_units;
+        U32 device_type;
+        U32 core_type;
+        U32 pmu_clone_id; // cti_type of platform to impersonate in device DLLs
+        U32 device_scope;
+        U32 num_subunits;
+        U64 reserved1;
+        U64 reserved2;
+        PMT_DEVICE_INFO_NODE pmt_device;
+};
+
+#define MAX_EVENT_NAME_LENGTH 256
+
+#define DEVICE_INFO_dll_name(pdev) ((pdev)->dll_name)
+#define DEVICE_INFO_dll_handle(pdev) ((pdev)->dll_handle)
+#define DEVICE_INFO_cpu_name(pdev) ((pdev)->cpu_name)
+#define DEVICE_INFO_pmu_name(pdev) ((pdev)->pmu_name)
+#define DEVICE_INFO_event_db_file_name(pdev) ((pdev)->event_db_file_name)
+#define DEVICE_INFO_plat_type(pdev) ((pdev)->plat_type)
+#define DEVICE_INFO_plat_sub_type(pdev) ((pdev)->plat_sub_type)
+#define DEVICE_INFO_pmu_clone_id(pdev) ((pdev)->pmu_clone_id)
+#define DEVICE_INFO_dispatch_id(pdev) ((pdev)->dispatch_id)
+#define DEVICE_INFO_ecb(pdev) ((pdev)->ecb)
+#define DEVICE_INFO_ec(pdev) ((pdev)->ec)
+#define DEVICE_INFO_pcfg(pdev) ((pdev)->pcfg)
+#define DEVICE_INFO_pcfg_unc(pdev) ((pdev)->pcfg_unc)
+#define DEVICE_INFO_num_groups(pdev) ((pdev)->num_of_groups)
+#define DEVICE_INFO_size_of_alloc(pdev) ((pdev)->size_of_alloc)
+#define DEVICE_INFO_drv_event(pdev) ((pdev)->drv_event)
+#define DEVICE_INFO_num_events(pdev) ((pdev)->num_events)
+#define DEVICE_INFO_event_id_index(pdev) ((pdev)->event_id_index)
+#define DEVICE_INFO_num_counters(pdev) ((pdev)->num_counters)
+#define DEVICE_INFO_group_index(pdev) ((pdev)->group_index)
+#define DEVICE_INFO_num_packages(pdev) ((pdev)->num_packages)
+#define DEVICE_INFO_num_units(pdev) ((pdev)->num_units)
+#define DEVICE_INFO_device_type(pdev) ((pdev)->device_type)
+#define DEVICE_INFO_core_type(pdev) ((pdev)->core_type)
+#define DEVICE_INFO_device_scope(pdev) ((pdev)->device_scope)
+#define DEVICE_INFO_num_subunits(pdev) ((pdev)->num_subunits)
+#define DEVICE_INFO_pmt_device(pdev) ((pdev)->pmt_device)
+#define DEVICE_INFO_pmt_device_id(pdev) ((pdev)->pmt_device.device_id)
+#define DEVICE_INFO_pmt_device_domain(pdev) ((pdev)->pmt_device.domain)
+#define DEVICE_INFO_pmt_device_bus(pdev) ((pdev)->pmt_device.bus)
+#define DEVICE_INFO_pmt_device_device(pdev) ((pdev)->pmt_device.device)
+#define DEVICE_INFO_pmt_device_function(pdev) ((pdev)->pmt_device.function)
+#define DEVICE_INFO_pmt_device_index(pdev) ((pdev)->pmt_device.pmt_index)
+
+typedef struct DEVICE_INFO_DATA_NODE_S DEVICE_INFO_DATA_NODE;
+typedef DEVICE_INFO_DATA_NODE *DEVICE_INFO_DATA; //NEEDED in PP
+
+struct DEVICE_INFO_DATA_NODE_S {
+        DEVICE_INFO pdev_info;
+        U32 num_elements;
+        U32 num_allocated;
+        U64 reserved1;
+        U64 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+};
+
+#define DEVICE_INFO_DATA_pdev_info(d) ((d)->pdev_info)
+#define DEVICE_INFO_DATA_num_elements(d) ((d)->num_elements)
+#define DEVICE_INFO_DATA_num_allocated(d) ((d)->num_allocated)
+
+typedef enum {
+        DEVICE_INFO_CORE = 0,
+        DEVICE_INFO_UNCORE = 1,
+        DEVICE_INFO_CHIPSET = 2,
+        DEVICE_INFO_GFX = 3,
+        DEVICE_INFO_PWR = 4,
+        DEVICE_INFO_TELEMETRY = 5
+} DEVICE_INFO_TYPE;
+
+typedef enum {
+        INVALID_TERMINATE_TYPE = 0,
+        STOP_TERMINATE,
+        CANCEL_TERMINATE
+} ABNORMAL_TERMINATE_TYPE;
+
+typedef enum {
+        DEVICE_SCOPE_PACKAGE = 0,
+        DEVICE_SCOPE_SYSTEM = 1
+} DEVICE_SCOPE_TYPE;
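+
+/*
+ * Illustrative sketch (editorial): DEVICE_INFO_DATA describes a dynamically
+ * sized array of DEVICE_INFO_NODE entries, so enumerating detected devices
+ * by type might look like this, given a DEVICE_INFO_DATA dev_data; the loop
+ * is hypothetical.
+ *
+ *     U32 i, num_uncore = 0;
+ *
+ *     for (i = 0; i < DEVICE_INFO_DATA_num_elements(dev_data); i++) {
+ *             DEVICE_INFO pdev = &DEVICE_INFO_DATA_pdev_info(dev_data)[i];
+ *
+ *             if (DEVICE_INFO_plat_type(pdev) == DEVICE_INFO_UNCORE) {
+ *                     num_uncore++;
+ *             }
+ *     }
+ */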
+
+/**************************************************************
+ * DEVICE_INFO STRUCTS END
+***************************************************************/
+
+/**************************************************************
+ * UNCORE PMU TOPOLOGY STRUCTS START
+***************************************************************/
+typedef struct PCIFUNC_INFO_NODE_S PCIFUNC_INFO_NODE;
+typedef PCIFUNC_INFO_NODE *PCIFUNC_INFO;
+
+struct PCIFUNC_INFO_NODE_S {
+        U32 valid;
+        U32 num_entries; // the number of entries found with the same dev/func but different bus_no
+        U64 deviceId;
+        U64 reserved1;
+        U64 reserved2;
+};
+
+#define PCIFUNC_INFO_NODE_valid(x) ((x)->valid)
+#define PCIFUNC_INFO_NODE_deviceId(x) ((x)->deviceId)
+#define PCIFUNC_INFO_NODE_num_entries(x) ((x)->num_entries)
+
+typedef struct PCIDEV_INFO_NODE_S PCIDEV_INFO_NODE;
+typedef PCIDEV_INFO_NODE *PCIDEV_INFO;
+
+struct PCIDEV_INFO_NODE_S {
+        PCIFUNC_INFO_NODE func_info[MAX_PCI_FUNCNO];
+        U32 valid;
+        U32 dispatch_id;
+        U64 reserved1;
+        U64 reserved2;
+};
+
+#define PCIDEV_INFO_NODE_func_info(x, i) ((x).func_info[i])
+#define PCIDEV_INFO_NODE_valid(x) ((x).valid)
+
+typedef struct UNCORE_PCIDEV_NODE_S UNCORE_PCIDEV_NODE;
+
+struct UNCORE_PCIDEV_NODE_S {
+        PCIDEV_INFO_NODE pcidev[MAX_PCI_DEVNO];
+        U32 dispatch_id;
+        U32 scan;
+        U32 num_uncore_units;
+        U32 num_deviceid_entries;
+        U8 dimm_device1;
+        U8 dimm_device2;
+        U16 reserved1;
+        U32 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+        U32 deviceid_list[MAX_PCI_DEVNO];
+};
+
+// Structure used to perform uncore device discovery using PCI device scan
+
+typedef struct UNCORE_TOPOLOGY_INFO_NODE_S UNCORE_TOPOLOGY_INFO_NODE;
+typedef UNCORE_TOPOLOGY_INFO_NODE *UNCORE_TOPOLOGY_INFO;
+
+struct UNCORE_TOPOLOGY_INFO_NODE_S {
+        UNCORE_PCIDEV_NODE device[MAX_DEVICES];
+};
+
+#define UNCORE_TOPOLOGY_INFO_device(x, dev_index) ((x)->device[dev_index])
+#define UNCORE_TOPOLOGY_INFO_device_dispatch_id(x, dev_index) \
+        ((x)->device[dev_index].dispatch_id)
+#define UNCORE_TOPOLOGY_INFO_device_scan(x, dev_index) \
+        ((x)->device[dev_index].scan)
+#define UNCORE_TOPOLOGY_INFO_pcidev_valid(x, dev_index, devno) \
+        ((x)->device[dev_index].pcidev[devno].valid)
+#define UNCORE_TOPOLOGY_INFO_pcidev_dispatch_id(x, dev_index, devno) \
+        ((x)->device[dev_index].pcidev[devno].dispatch_id)
+#define UNCORE_TOPOLOGY_INFO_pcidev(x, dev_index, devno) \
+        ((x)->device[dev_index].pcidev[devno])
+#define UNCORE_TOPOLOGY_INFO_num_uncore_units(x, dev_index) \
+        ((x)->device[dev_index].num_uncore_units)
+#define UNCORE_TOPOLOGY_INFO_num_deviceid_entries(x, dev_index) \
+        ((x)->device[dev_index].num_deviceid_entries)
+#define UNCORE_TOPOLOGY_INFO_dimm_device1(x, dev_index) \
+        ((x)->device[dev_index].dimm_device1)
+#define UNCORE_TOPOLOGY_INFO_dimm_device2(x, dev_index) \
+        ((x)->device[dev_index].dimm_device2)
+#define UNCORE_TOPOLOGY_INFO_deviceid(x, dev_index, deviceid_idx) \
+        ((x)->device[dev_index].deviceid_list[deviceid_idx])
+#define UNCORE_TOPOLOGY_INFO_pcidev_set_funcno_valid(x, dev_index, devno, \
+                                                     funcno) \
+        ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid = 1)
+#define UNCORE_TOPOLOGY_INFO_pcidev_is_found_in_platform(x, dev_index, devno, \
+                                                         funcno) \
+        ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries)
+#define UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(x, dev_index, devno, \
+                                                          funcno) \
+        ((x)->device[dev_index].pcidev[devno].func_info[funcno].valid ? TRUE : FALSE)
+#define UNCORE_TOPOLOGY_INFO_pcidev_is_device_found(x, dev_index, devno, \
+                                                    funcno) \
+        ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries > 0)
+#define UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(x, dev_index, devno, \
+                                                      funcno) \
+        ((x)->device[dev_index].pcidev[devno].func_info[funcno].num_entries)
+
+// Structures used to store platform uncore discovery table
+
+typedef enum {
+        DISCOVERY_UNIT_CHA = 0,
+        DISCOVERY_UNIT_IIO,
+        DISCOVERY_UNIT_IRP,
+        DISCOVERY_UNIT_M2PCIE,
+        DISCOVERY_UNIT_PCU, // 4
+        DISCOVERY_UNIT_UBOX,
+        DISCOVERY_UNIT_MC,
+        DISCOVERY_UNIT_M2M,
+        DISCOVERY_UNIT_UPI, // 8
+        DISCOVERY_UNIT_M3UPI,
+        DISCOVERY_UNIT_PCIE,
+        DISCOVERY_UNIT_MDF,
+        DISCOVERY_UNIT_CXLCM,
+        DISCOVERY_UNIT_CXLDP,
+        DISCOVERY_UNIT_HBM, // 14
+} UNCORE_DISCOVERY_TYPE_IDS;
+
+typedef struct UNCORE_DISCOVERY_UNIT_NODE_S UNCORE_DISCOVERY_UNIT_NODE;
+typedef UNCORE_DISCOVERY_UNIT_NODE *UNCORE_DISCOVERY_UNIT;
+
+struct UNCORE_DISCOVERY_UNIT_NODE_S {
+        union {
+                struct {
+                        U64 num_gp_ctrs : 8;
+                        U64 evtsel0_offset : 8;
+                        U64 ctr_width : 8;
+                        U64 ctr0_offset : 8;
+                        U64 unit_status_offset : 8;
+                        U64 rsvd : 21;
+                        U64 pmu_access_type : 3;
+                } bits;
+                U64 bit_field_0;
+        } u1;
+        U64 unit_ctrl_addr;
+        union {
+                struct {
+                        U64 unit_type : 16;
+                        U64 unit_id : 16;
+                        U64 rsvd : 32;
+                } bits;
+                U64 bit_field_1;
+        } u2;
+        U64 trace_info;
+};
+
+#define UNCORE_DISCOVERY_UNIT_num_gp_ctrs(x) ((x)->u1.bits.num_gp_ctrs)
+#define UNCORE_DISCOVERY_UNIT_evtsel0_offset(x) ((x)->u1.bits.evtsel0_offset)
+#define UNCORE_DISCOVERY_UNIT_ctr_width(x) ((x)->u1.bits.ctr_width)
+#define UNCORE_DISCOVERY_UNIT_ctr0_offset(x) ((x)->u1.bits.ctr0_offset)
+#define UNCORE_DISCOVERY_UNIT_unit_status_offset(x) \
+        ((x)->u1.bits.unit_status_offset)
+#define UNCORE_DISCOVERY_UNIT_pmu_access_type(x) ((x)->u1.bits.pmu_access_type)
+#define UNCORE_DISCOVERY_UNIT_bit_field_0(x) ((x)->u1.bit_field_0)
+#define UNCORE_DISCOVERY_UNIT_unit_ctrl_addr(x) ((x)->unit_ctrl_addr)
+#define UNCORE_DISCOVERY_UNIT_unit_type(x) ((x)->u2.bits.unit_type)
+#define UNCORE_DISCOVERY_UNIT_unit_id(x) ((x)->u2.bits.unit_id)
+#define UNCORE_DISCOVERY_UNIT_bit_field_1(x) ((x)->u2.bit_field_1)
+#define UNCORE_DISCOVERY_UNIT_trace_info(x) ((x)->trace_info)
+
+typedef struct UNCORE_DISCOVERY_GLOBAL_NODE_S UNCORE_DISCOVERY_GLOBAL_NODE;
+typedef UNCORE_DISCOVERY_GLOBAL_NODE *UNCORE_DISCOVERY_GLOBAL;
+
+struct UNCORE_DISCOVERY_GLOBAL_NODE_S {
+        union {
+                struct {
+                        U64 unit_type : 8;
+                        U64 block_stride : 8;
+                        U64 max_blocks : 10;
+                        U64 rsvd : 36;
+                        U64 access_type : 2;
+                } bits;
+                U64 bit_field_0;
+        } u1;
+        U64 global_ctrl_addr;
+        union {
+                struct {
+                        U64 gbl_ctr_status_offset : 8;
+                        U64 num_block_status_bits : 16;
+                        U64 rsvd : 40;
+                } bits;
+                U64 bit_field_1;
+        } u2;
+        U64 trace_info;
+};
+
+#define UNCORE_DISCOVERY_GLOBAL_unit_type(x) ((x)->u1.bits.unit_type)
+#define UNCORE_DISCOVERY_GLOBAL_block_stride(x) ((x)->u1.bits.block_stride)
+#define UNCORE_DISCOVERY_GLOBAL_max_blocks(x) ((x)->u1.bits.max_blocks)
+#define UNCORE_DISCOVERY_GLOBAL_access_type(x) ((x)->u1.bits.access_type)
+#define UNCORE_DISCOVERY_GLOBAL_bit_field_0(x) ((x)->u1.bit_field_0)
+#define UNCORE_DISCOVERY_GLOBAL_global_ctrl_addr(x) ((x)->global_ctrl_addr)
+#define UNCORE_DISCOVERY_GLOBAL_gbl_ctr_status_offset(x) \
+        ((x)->u2.bits.gbl_ctr_status_offset)
+#define UNCORE_DISCOVERY_GLOBAL_num_block_status_bits(x) \
+        ((x)->u2.bits.num_block_status_bits)
+#define UNCORE_DISCOVERY_GLOBAL_bit_field_1(x) ((x)->u2.bit_field_1)
+#define UNCORE_DISCOVERY_GLOBAL_trace_info(x) ((x)->trace_info)
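+
+/*
+ * Illustrative sketch (editorial): in the uncore discovery scheme the
+ * per-unit offsets are taken relative to the unit control register, so the
+ * n-th general-purpose counter and event-select registers would presumably
+ * be located like this; the helpers are hypothetical and unverified.
+ *
+ *     static U64
+ *     discovery_unit_ctr_addr(UNCORE_DISCOVERY_UNIT unit, U64 n)
+ *     {
+ *             return UNCORE_DISCOVERY_UNIT_unit_ctrl_addr(unit) +
+ *                    UNCORE_DISCOVERY_UNIT_ctr0_offset(unit) + n;
+ *     }
+ *
+ *     static U64
+ *     discovery_unit_evtsel_addr(UNCORE_DISCOVERY_UNIT unit, U64 n)
+ *     {
+ *             return UNCORE_DISCOVERY_UNIT_unit_ctrl_addr(unit) +
+ *                    UNCORE_DISCOVERY_UNIT_evtsel0_offset(unit) + n;
+ *     }
+ */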
+
+typedef struct UNCORE_DISCOVERY_TABLE_NODE_S UNCORE_DISCOVERY_TABLE_NODE;
+typedef UNCORE_DISCOVERY_TABLE_NODE *UNCORE_DISCOVERY_TABLE;
+
+struct UNCORE_DISCOVERY_TABLE_NODE_S {
+        DRV_PCI_DEVICE_ENTRY_NODE discovery_entry_node;
+        UNCORE_DISCOVERY_GLOBAL_NODE discovery_global_node;
+        UNCORE_DISCOVERY_UNIT_NODE
+                discovery_unit_node[MAX_DISCOVERY_UNCORE_UNITS];
+};
+
+#define UNCORE_DISCOVERY_TABLE_discovery_entry_node(x) ((x)->discovery_entry_node)
+#define UNCORE_DISCOVERY_TABLE_discovery_entry_bus_no(x) \
+        ((x)->discovery_entry_node.bus_no)
+#define UNCORE_DISCOVERY_TABLE_discovery_entry_dev_no(x) \
+        ((x)->discovery_entry_node.dev_no)
+#define UNCORE_DISCOVERY_TABLE_discovery_entry_func_no(x) \
+        ((x)->discovery_entry_node.func_no)
+#define UNCORE_DISCOVERY_TABLE_discovery_entry_bar_address(x) \
+        ((x)->discovery_entry_node.bar_address)
+#define UNCORE_DISCOVERY_TABLE_discovery_global_node(x) \
+        ((x)->discovery_global_node)
+#define UNCORE_DISCOVERY_TABLE_discovery_unit_node(x) ((x)->discovery_unit_node)
+#define UNCORE_DISCOVERY_TABLE_discovery_unit_node_entry(x, unit_id) \
+        ((x)->discovery_unit_node[unit_id])
+
+typedef struct UNCORE_DISCOVERY_DVSEC_CONFIG_NODE_S UNCORE_DISCOVERY_DVSEC_CONFIG_NODE;
+typedef UNCORE_DISCOVERY_DVSEC_CONFIG_NODE *UNCORE_DISCOVERY_DVSEC_CONFIG;
+
+struct UNCORE_DISCOVERY_DVSEC_CONFIG_NODE_S {
+        U32 pci_ext_cap_id; // Capability ID
+        U32 dvsec_offset; // DVSEC offset
+        U32 dvsec_id_mask; // DVSEC_ID mask
+        U32 dvsec_id_pmon; // Type ID for discovery
+        U32 dvsec_bir_mask; // BIR mask
+        U32 map_size; // map size
+};
+
+#define UNCORE_DISCOVERY_DVSEC_pci_ext_cap_id(x) ((x)->pci_ext_cap_id)
+#define UNCORE_DISCOVERY_DVSEC_dvsec_offset(x) ((x)->dvsec_offset)
+#define UNCORE_DISCOVERY_DVSEC_dvsec_id_mask(x) ((x)->dvsec_id_mask)
+#define UNCORE_DISCOVERY_DVSEC_dvsec_id_pmon(x) ((x)->dvsec_id_pmon)
+#define UNCORE_DISCOVERY_DVSEC_dvsec_bir_mask(x) ((x)->dvsec_bir_mask)
+#define UNCORE_DISCOVERY_DVSEC_map_size(x) ((x)->map_size)
+
+typedef struct UNCORE_DISCOVERY_TABLE_LIST_NODE_S UNCORE_DISCOVERY_TABLE_LIST_NODE;
+typedef UNCORE_DISCOVERY_TABLE_LIST_NODE *UNCORE_DISCOVERY_TABLE_LIST;
+
+struct UNCORE_DISCOVERY_TABLE_LIST_NODE_S {
+        UNCORE_DISCOVERY_DVSEC_CONFIG_NODE dvsec_config_node;
+        UNCORE_DISCOVERY_TABLE_NODE discovery_table_list[MAX_PACKAGES];
+};
+
+#define UNCORE_DISCOVERY_dvsec_config_node(x) ((x)->dvsec_config_node)
+#define UNCORE_DISCOVERY_pci_ext_cap_id(x) ((x)->dvsec_config_node.pci_ext_cap_id)
+#define UNCORE_DISCOVERY_dvsec_offset(x) ((x)->dvsec_config_node.dvsec_offset)
+#define UNCORE_DISCOVERY_dvsec_id_mask(x) ((x)->dvsec_config_node.dvsec_id_mask)
+#define UNCORE_DISCOVERY_dvsec_id_pmon(x) ((x)->dvsec_config_node.dvsec_id_pmon)
+#define UNCORE_DISCOVERY_dvsec_bir_mask(x) ((x)->dvsec_config_node.dvsec_bir_mask)
+#define UNCORE_DISCOVERY_map_size(x) ((x)->dvsec_config_node.map_size)
+#define UNCORE_DISCOVERY_table_list(x) ((x)->discovery_table_list)
+#define UNCORE_DISCOVERY_table_list_entry(x, i) ((x)->discovery_table_list[i])
+
+typedef enum {
+        CORE_TOPOLOGY_NODE = 0,
+        UNCORE_TOPOLOGY_NODE_IMC = 1,
+        UNCORE_TOPOLOGY_NODE_UBOX = 2,
+        UNCORE_TOPOLOGY_NODE_QPI = 3,
+        UNCORE_TOPOLOGY_NODE_IIO = 4,
+        UNCORE_TOPOLOGY_NODE_MCHBM = 5,
+        UNCORE_TOPOLOGY_NODE_PMEM = 6,
+        MAX_TOPOLOGY_DEV = 16,
+} UNCORE_TOPOLOGY_NODE_INDEX_TYPE;
+
+/**************************************************************
+ * UNCORE PMU TOPOLOGY STRUCTS END
+***************************************************************/
+
+/**************************************************************
+ * OLD PLATFORM TOPOLOGY STRUCTS (for backward compatibility)
+ * New struct definitions are below after the old block
+***************************************************************/
+//Define to support backward compatibility (changed max value for the new struct definitions)
+#define MAX_TOPOLOGY_DEV_OLD 4
+
+typedef struct PLATFORM_TOPOLOGY_REG_NODE_S PLATFORM_TOPOLOGY_REG_NODE;
+typedef PLATFORM_TOPOLOGY_REG_NODE *PLATFORM_TOPOLOGY_REG;
+
+struct PLATFORM_TOPOLOGY_REG_NODE_S {
+        U32 bus;
+        U32 device;
+        U32 function;
+        U32 reg_id;
+        U64 reg_mask;
+        U64 reg_value[MAX_PACKAGES];
+        U8 reg_type;
+        U8 device_valid;
+        U16 reserved1;
+        U32 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+};
+
+#define PLATFORM_TOPOLOGY_REG_bus(x, i) ((x)[(i)].bus)
+#define PLATFORM_TOPOLOGY_REG_device(x, i) ((x)[(i)].device)
+#define PLATFORM_TOPOLOGY_REG_function(x, i) ((x)[(i)].function)
+#define PLATFORM_TOPOLOGY_REG_reg_id(x, i) ((x)[(i)].reg_id)
+#define PLATFORM_TOPOLOGY_REG_reg_mask(x, i) ((x)[(i)].reg_mask)
+#define PLATFORM_TOPOLOGY_REG_reg_type(x, i) ((x)[(i)].reg_type)
+#define PLATFORM_TOPOLOGY_REG_device_valid(x, i) ((x)[(i)].device_valid)
+#define PLATFORM_TOPOLOGY_REG_reg_value(x, i, package_no) \
+        ((x)[(i)].reg_value[package_no])
+
+typedef struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S PLATFORM_TOPOLOGY_DISCOVERY_NODE;
+typedef PLATFORM_TOPOLOGY_DISCOVERY_NODE *PLATFORM_TOPOLOGY_DISCOVERY;
+
+struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_S {
+        U32 device_index;
+        U32 device_id;
+        U32 num_registers;
+        U8 scope;
+        U8 prog_valid;
+        U16 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+        U64 reserved5;
+        PLATFORM_TOPOLOGY_REG_NODE topology_regs[MAX_REGS];
+};
+/**************************************************************
+ * OLD PLATFORM TOPOLOGY STRUCTS END
+***************************************************************/
+/**************************************************************
+ * NEW PLATFORM TOPOLOGY STRUCTS
+ * (modified to query more package specific values from driver and support MMIO)
+ * (Increased the number of devices to be supported)
+***************************************************************/
+typedef struct PLATFORM_TOPOLOGY_REG_VALUE_NODE_S PLATFORM_TOPOLOGY_REG_VALUE_NODE;
+typedef PLATFORM_TOPOLOGY_REG_VALUE_NODE *PLATFORM_TOPOLOGY_REG_VALUE;
+
+struct PLATFORM_TOPOLOGY_REG_VALUE_NODE_S {
+        U64 reg_value;
+        U16 bus_number;
+        U16 domain_number;
+        U32 reserved1;
+        U64 reserved2;
+        U64 reserved3;
+};
+
+typedef struct PLATFORM_TOPOLOGY_REG_NODE_EXT_S PLATFORM_TOPOLOGY_REG_NODE_EXT;
+typedef PLATFORM_TOPOLOGY_REG_NODE_EXT *PLATFORM_TOPOLOGY_REG_EXT;
+
+struct PLATFORM_TOPOLOGY_REG_NODE_EXT_S {
+        U32 bus;
+        U32 device;
+        U32 function;
+        U32 reg_id;
+        U64 reg_mask;
+        U8 reg_type;
+        U8 device_valid;
+        U16 unit_index;
+
+        //MMIO specific regs
+        U32 base_mmio_offset;
+        U32 mmio_map_size;
+        U32 main_bar_offset;
+        U32 secondary_bar_offset;
+        U8 main_bar_shift;
+        U8 secondary_bar_shift;
+        U16 reserved1;
+        U64 main_bar_mask;
+        U64 secondary_bar_mask;
+
+        U32 device_id;
+        U32 reserved2;
+        U64 reserved3;
+        U64 reserved4;
+        //Package specific values
+        PLATFORM_TOPOLOGY_REG_VALUE_NODE reg_value_list[MAX_PACKAGES];
+};
((x)[(i)].reg_id)
+#define PLATFORM_TOPOLOGY_REG_EXT_reg_mask(x, i) ((x)[(i)].reg_mask)
+#define PLATFORM_TOPOLOGY_REG_EXT_reg_type(x, i) ((x)[(i)].reg_type)
+#define PLATFORM_TOPOLOGY_REG_EXT_device_valid(x, i) ((x)[(i)].device_valid)
+#define PLATFORM_TOPOLOGY_REG_EXT_unit_index(x, i) ((x)[(i)].unit_index)
+#define PLATFORM_TOPOLOGY_REG_EXT_base_mmio_offset(x, i) \
+	((x)[(i)].base_mmio_offset)
+#define PLATFORM_TOPOLOGY_REG_EXT_mmio_map_size(x, i) ((x)[(i)].mmio_map_size)
+#define PLATFORM_TOPOLOGY_REG_EXT_main_bar_offset(x, i) ((x)[(i)].main_bar_offset)
+#define PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_offset(x, i) \
+	((x)[(i)].secondary_bar_offset)
+#define PLATFORM_TOPOLOGY_REG_EXT_main_bar_shift(x, i) ((x)[(i)].main_bar_shift)
+#define PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_shift(x, i) \
+	((x)[(i)].secondary_bar_shift)
+#define PLATFORM_TOPOLOGY_REG_EXT_main_bar_mask(x, i) ((x)[(i)].main_bar_mask)
+#define PLATFORM_TOPOLOGY_REG_EXT_secondary_bar_mask(x, i) \
+	((x)[(i)].secondary_bar_mask)
+#define PLATFORM_TOPOLOGY_REG_EXT_device_id(x, i) ((x)[(i)].device_id)
+#define PLATFORM_TOPOLOGY_REG_EXT_reg_value(x, i, package_no) \
+	((x)[(i)].reg_value_list[package_no].reg_value)
+#define PLATFORM_TOPOLOGY_REG_EXT_bus_number(x, i, package_no) \
+	((x)[(i)].reg_value_list[package_no].bus_number)
+#define PLATFORM_TOPOLOGY_REG_EXT_domain_number(x, i, package_no) \
+	((x)[(i)].reg_value_list[package_no].domain_number)
+
+typedef struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT_S PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT;
+typedef PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT *PLATFORM_TOPOLOGY_DISCOVERY_EXT;
+
+struct PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT_S {
+	U32 device_index;
+	U32 device_id;
+	U32 num_registers;
+	U8 scope;
+	U8 prog_valid;
+	U8 distinct_buses;
+	U8 reserved2;
+	U64 reserved3;
+	U64 reserved4;
+	U64 reserved5;
+	PLATFORM_TOPOLOGY_REG_NODE_EXT topology_regs[MAX_REGS];
+};
+// Structure used to discover the uncore device topology
+typedef struct PLATFORM_TOPOLOGY_PROG_NODE_S PLATFORM_TOPOLOGY_PROG_NODE;
+typedef PLATFORM_TOPOLOGY_PROG_NODE *PLATFORM_TOPOLOGY_PROG;
+
+struct PLATFORM_TOPOLOGY_PROG_NODE_S {
+	U32 num_devices;
+	PLATFORM_TOPOLOGY_DISCOVERY_NODE topology_device
+		[MAX_TOPOLOGY_DEV_OLD]; // Kept for backward compatibility
+	PLATFORM_TOPOLOGY_DISCOVERY_NODE_EXT
+		topology_device_ext[MAX_TOPOLOGY_DEV]; // New topology struct array
+};
+
+// OLD PLATFORM TOPOLOGY STRUCT (for backward compatibility)
+typedef struct PLATFORM_TOPOLOGY_PROG_OLD_NODE_S PLATFORM_TOPOLOGY_PROG_OLD_NODE;
+typedef PLATFORM_TOPOLOGY_PROG_OLD_NODE *PLATFORM_TOPOLOGY_PROG_OLD;
+struct PLATFORM_TOPOLOGY_PROG_OLD_NODE_S {
+	U32 num_devices;
+	PLATFORM_TOPOLOGY_DISCOVERY_NODE topology_device[MAX_TOPOLOGY_DEV_OLD];
+};
+
+#define PLATFORM_TOPOLOGY_PROG_num_devices(x) ((x)->num_devices)
+#define PLATFORM_TOPOLOGY_PROG_topology_device(x, dev_index) \
+	((x)->topology_device[dev_index])
+#define PLATFORM_TOPOLOGY_PROG_topology_device_device_index(x, dev_index) \
+	((x)->topology_device[dev_index].device_index)
+#define PLATFORM_TOPOLOGY_PROG_topology_device_device_id(x, dev_index) \
+	((x)->topology_device[dev_index].device_id)
+#define PLATFORM_TOPOLOGY_PROG_topology_device_scope(x, dev_index) \
+	((x)->topology_device[dev_index].scope)
+#define PLATFORM_TOPOLOGY_PROG_topology_device_num_registers(x, dev_index) \
+	((x)->topology_device[dev_index].num_registers)
+#define PLATFORM_TOPOLOGY_PROG_topology_device_prog_valid(x, dev_index) \
+	((x)->topology_device[dev_index].prog_valid)
+#define
PLATFORM_TOPOLOGY_PROG_topology_topology_regs(x, dev_index) \ + ((x)->topology_device[dev_index].topology_regs) + +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device(x, dev_index) \ + ((x)->topology_device_ext[dev_index]) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_device_index(x, dev_index) \ + ((x)->topology_device_ext[dev_index].device_index) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_device_id(x, dev_index) \ + ((x)->topology_device_ext[dev_index].device_id) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_scope(x, dev_index) \ + ((x)->topology_device_ext[dev_index].scope) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_num_registers(x, dev_index) \ + ((x)->topology_device_ext[dev_index].num_registers) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_prog_valid(x, dev_index) \ + ((x)->topology_device_ext[dev_index].prog_valid) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_device_distinct_buses(x, dev_index) \ + ((x)->topology_device_ext[dev_index].distinct_buses) +#define PLATFORM_TOPOLOGY_PROG_EXT_topology_topology_regs(x, dev_index) \ + ((x)->topology_device_ext[dev_index].topology_regs) + +/************************************************************** + * NEW PLATFORM TOPOLOGY STRUCTS END +***************************************************************/ + +/************************************************************** + * FPGA TOPOLOGY STRUCTS START +***************************************************************/ +typedef struct FPGA_GB_DISCOVERY_NODE_S FPGA_GB_DISCOVERY_NODE; + +struct FPGA_GB_DISCOVERY_NODE_S { + U16 bar_num; + U16 feature_id; + U32 device_id; + U64 afu_id_l; + U64 afu_id_h; + U32 feature_offset; + U32 feature_len; + U8 scan; + U8 valid; + U16 reserved1; + U32 reserved2; +}; + +typedef struct FPGA_GB_DEV_NODE_S FPGA_GB_DEV_NODE; +typedef FPGA_GB_DEV_NODE *FPGA_GB_DEV; + +struct FPGA_GB_DEV_NODE_S { + U32 num_devices; + FPGA_GB_DISCOVERY_NODE fpga_gb_device[MAX_DEVICES]; +}; + +#define FPGA_GB_DEV_num_devices(x) ((x)->num_devices) +#define FPGA_GB_DEV_device(x, dev_index) ((x)->fpga_gb_device[dev_index]) +#define FPGA_GB_DEV_bar_num(x, dev_index) ((x)->fpga_gb_device[dev_index].bar_num) +#define FPGA_GB_DEV_feature_id(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_id) +#define FPGA_GB_DEV_device_id(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].device_id) +#define FPGA_GB_DEV_afu_id_low(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].afu_id_l) +#define FPGA_GB_DEV_afu_id_high(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].afu_id_h) +#define FPGA_GB_DEV_feature_offset(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_offset) +#define FPGA_GB_DEV_feature_len(x, dev_index) \ + ((x)->fpga_gb_device[dev_index].feature_len) +#define FPGA_GB_DEV_scan(x, dev_index) ((x)->fpga_gb_device[dev_index].scan) +#define FPGA_GB_DEV_valid(x, dev_index) ((x)->fpga_gb_device[dev_index].valid) + +/************************************************************** + * FPGA TOPOLOGY STRUCTS END +***************************************************************/ + +typedef enum { + UNCORE_TOPOLOGY_INFO_NODE_IMC = 0, + UNCORE_TOPOLOGY_INFO_NODE_QPILL = 1, + UNCORE_TOPOLOGY_INFO_NODE_HA = 2, + UNCORE_TOPOLOGY_INFO_NODE_R3 = 3, + UNCORE_TOPOLOGY_INFO_NODE_R2 = 4, + UNCORE_TOPOLOGY_INFO_NODE_IRP = 5, + UNCORE_TOPOLOGY_INFO_NODE_IMC_UCLK = 6, + UNCORE_TOPOLOGY_INFO_NODE_EDC_ECLK = 7, + UNCORE_TOPOLOGY_INFO_NODE_EDC_UCLK = 8, + UNCORE_TOPOLOGY_INFO_NODE_M2M = 9, + UNCORE_TOPOLOGY_INFO_NODE_HFI_RXE = 10, + UNCORE_TOPOLOGY_INFO_NODE_HFI_TXE = 11, 
+ UNCORE_TOPOLOGY_INFO_NODE_FPGA_CACHE = 12, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_FAB = 13, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_THERMAL = 14, + UNCORE_TOPOLOGY_INFO_NODE_FPGA_POWER = 15, + UNCORE_TOPOLOGY_INFO_NODE_SOC_SA = 16, + UNCORE_TOPOLOGY_INFO_NODE_EDRAM = 17, + UNCORE_TOPOLOGY_INFO_NODE_IIO = 18, + UNCORE_TOPOLOGY_INFO_NODE_PMT = 19, + UNCORE_TOPOLOGY_INFO_NODE_MCHBM = 20, + UNCORE_TOPOLOGY_INFO_NODE_M2HBM = 21, + UNCORE_TOPOLOGY_INFO_NODE_PMEM_FC = 22, + UNCORE_TOPOLOGY_INFO_NODE_PMEM_MC = 23, + UNCORE_TOPOLOGY_INFO_NODE_CXLCM = 24, + UNCORE_TOPOLOGY_INFO_NODE_CXLDP = 25, + UNCORE_TOPOLOGY_INFO_NODE_OOBMSM = 26 +} UNCORE_TOPOLOGY_INFO_NODE_INDEX_TYPE; + +/************************************************************** + * SIDEBAND INFO STRUCTS START +***************************************************************/ +typedef struct SIDEBAND_INFO_NODE_S SIDEBAND_INFO_NODE; +typedef SIDEBAND_INFO_NODE *SIDEBAND_INFO; + +struct SIDEBAND_INFO_NODE_S { + U32 tid; + U32 pid; + U64 tsc; +}; + +#define SIDEBAND_INFO_pid(x) ((x)->pid) +#define SIDEBAND_INFO_tid(x) ((x)->tid) +#define SIDEBAND_INFO_tsc(x) ((x)->tsc) + +typedef struct SIDEBAND_INFO_EXT_NODE_S SIDEBAND_INFO_EXT_NODE; +typedef SIDEBAND_INFO_EXT_NODE *SIDEBAND_INFO_EXT; + +struct SIDEBAND_INFO_EXT_NODE_S { + U32 tid; + U32 pid; + U64 tsc; + U32 osid; + U32 cpuid; + U16 old_thread_state; + U16 reason; + U32 old_tid; + U64 reserved1; +}; + +#define SIDEBAND_INFO_EXT_pid(x) ((x)->pid) +#define SIDEBAND_INFO_EXT_tid(x) ((x)->tid) +#define SIDEBAND_INFO_EXT_tsc(x) ((x)->tsc) +#define SIDEBAND_INFO_EXT_osid(x) ((x)->osid) +#define SIDEBAND_INFO_EXT_cpuid(x) ((x)->cpuid) +#define SIDEBAND_INFO_EXT_old_thread_state(x) ((x)->old_thread_state) +#define SIDEBAND_INFO_EXT_reason(x) ((x)->reason) +#define SIDEBAND_INFO_EXT_old_tid(x) ((x)->old_tid) + +/************************************************************** + * SIDEBAND INFO STRUCTS END +***************************************************************/ + +/************************************************************** + * SAMPLE DROP INFO STRUCTS START +***************************************************************/ +typedef struct SAMPLE_DROP_NODE_S SAMPLE_DROP_NODE; +typedef SAMPLE_DROP_NODE *SAMPLE_DROP; + +struct SAMPLE_DROP_NODE_S { + U32 os_id; + U32 cpu_id; + U32 sampled; + U32 dropped; +}; + +#define SAMPLE_DROP_os_id(x) ((x)->os_id) +#define SAMPLE_DROP_cpu_id(x) ((x)->cpu_id) +#define SAMPLE_DROP_sampled(x) ((x)->sampled) +#define SAMPLE_DROP_dropped(x) ((x)->dropped) + +#define MAX_SAMPLE_DROP_NODES 20 + +typedef struct SAMPLE_DROP_INFO_NODE_S SAMPLE_DROP_INFO_NODE; +typedef SAMPLE_DROP_INFO_NODE *SAMPLE_DROP_INFO; + +struct SAMPLE_DROP_INFO_NODE_S { + U32 size; + SAMPLE_DROP_NODE drop_info[MAX_SAMPLE_DROP_NODES]; +}; + +#define SAMPLE_DROP_INFO_size(x) ((x)->size) +#define SAMPLE_DROP_INFO_drop_info(x, index) ((x)->drop_info[index]) + +/************************************************************** + * SAMPLE DROP INFO STRUCTS END +***************************************************************/ + +#define IS_PEBS_SAMPLE_RECORD(sample_record) \ + ((SAMPLE_RECORD_pid_rec_index(sample_record) == (U32)-1) && \ + (SAMPLE_RECORD_tid(sample_record) == (U32)-1)) + +/* + * VMM vendor information + */ +#define KVM_SIGNATURE "KVMKVMKVM\0\0\0" +#define XEN_SIGNATURE "XenVMMXenVMM" +#define VMWARE_SIGNATURE "VMwareVMware" +#define HYPERV_SIGNATURE "Microsoft Hv" + +#define DRV_VMM_UNKNOWN 0 +#define DRV_VMM_MOBILEVISOR 1 +#define DRV_VMM_KVM 2 +#define DRV_VMM_XEN 3 +#define DRV_VMM_HYPERV 4 
+#define DRV_VMM_VMWARE 5
+#define DRV_VMM_ACRN 6
+
+#define DRV_TYPE_UNKNOWN 0
+#define DRV_TYPE_PUBLIC 1
+#define DRV_TYPE_NDA 2
+#define DRV_TYPE_PRIVATE 3
+
+/*
+ * @macro DRV_SETUP_INFO_NODE_S
+ * @brief
+ * This structure supports driver information such as NMI profiling mode.
+ */
+
+typedef struct DRV_SETUP_INFO_NODE_S DRV_SETUP_INFO_NODE;
+typedef DRV_SETUP_INFO_NODE *DRV_SETUP_INFO;
+
+struct DRV_SETUP_INFO_NODE_S {
+	union {
+		U64 modes;
+		struct {
+			U64 nmi_mode : 1;
+			U64 vmm_mode : 1;
+			U64 vmm_vendor : 8;
+			U64 vmm_guest_vm : 1;
+			U64 pebs_accessible : 1;
+			U64 cpu_hotplug_mode : 1;
+			U64 matrix_inaccessible : 1;
+			U64 page_table_isolation : 2;
+			U64 pebs_ignored_by_pti : 1;
+			U64 core_event_mux_unavailable : 1;
+			U64 profiling_version_unsupported : 1;
+			U64 tracepoints_available : 1;
+			U64 register_allowlist_detected : 1;
+			U64 drv_type : 3;
+			U64 non_arch_msr_blocked : 1;
+			U64 symbol_lookup_unavailable : 1;
+			U64 process_exit_hook_unavailable : 1;
+			U64 munmap_hook_unavailable : 1;
+			U64 sched_switch_hook_unavailable : 1;
+			U64 reserved1 : 35;
+		} s1;
+	} u1;
+	U32 vmm_version;
+	U32 reserved2;
+	U64 reserved3;
+	U64 reserved4;
+};
+
+#define DRV_SETUP_INFO_modes(info) ((info)->u1.modes)
+#define DRV_SETUP_INFO_nmi_mode(info) ((info)->u1.s1.nmi_mode)
+#define DRV_SETUP_INFO_vmm_mode(info) ((info)->u1.s1.vmm_mode)
+#define DRV_SETUP_INFO_vmm_vendor(info) ((info)->u1.s1.vmm_vendor)
+#define DRV_SETUP_INFO_vmm_guest_vm(info) ((info)->u1.s1.vmm_guest_vm)
+#define DRV_SETUP_INFO_pebs_accessible(info) ((info)->u1.s1.pebs_accessible)
+#define DRV_SETUP_INFO_cpu_hotplug_mode(info) ((info)->u1.s1.cpu_hotplug_mode)
+#define DRV_SETUP_INFO_matrix_inaccessible(info) \
+	((info)->u1.s1.matrix_inaccessible)
+#define DRV_SETUP_INFO_page_table_isolation(info) \
+	((info)->u1.s1.page_table_isolation)
+#define DRV_SETUP_INFO_pebs_ignored_by_pti(info) \
+	((info)->u1.s1.pebs_ignored_by_pti)
+#define DRV_SETUP_INFO_core_event_mux_unavailable(info) \
+	((info)->u1.s1.core_event_mux_unavailable)
+#define DRV_SETUP_INFO_profiling_version_unsupported(info) \
+	((info)->u1.s1.profiling_version_unsupported)
+#define DRV_SETUP_INFO_tracepoints_available(info) \
+	((info)->u1.s1.tracepoints_available)
+#define DRV_SETUP_INFO_register_allowlist_detected(info) \
+	((info)->u1.s1.register_allowlist_detected)
+#define DRV_SETUP_INFO_drv_type(info) ((info)->u1.s1.drv_type)
+#define DRV_SETUP_INFO_non_arch_msr_blocked(info) \
+	((info)->u1.s1.non_arch_msr_blocked)
+#define DRV_SETUP_INFO_symbol_lookup_unavailable(info) \
+	((info)->u1.s1.symbol_lookup_unavailable)
+#define DRV_SETUP_INFO_process_exit_hook_unavailable(info) \
+	((info)->u1.s1.process_exit_hook_unavailable)
+#define DRV_SETUP_INFO_munmap_hook_unavailable(info) \
+	((info)->u1.s1.munmap_hook_unavailable)
+#define DRV_SETUP_INFO_sched_switch_hook_unavailable(info) \
+	((info)->u1.s1.sched_switch_hook_unavailable)
+#define DRV_SETUP_INFO_vmm_version(info) ((info)->vmm_version)
+
+#define DRV_SETUP_INFO_PTI_DISABLED 0
+#define DRV_SETUP_INFO_PTI_KPTI 1
+#define DRV_SETUP_INFO_PTI_KAISER 2
+#define DRV_SETUP_INFO_PTI_VA_SHADOW 3
+#define DRV_SETUP_INFO_PTI_UNKNOWN 4
+
+/*
+    Type: task_info_t
+    Description:
+        Represents the equivalent of a Linux thread.
+    Fields:
+        o id: A unique identifier. May be `NULL_TASK_ID`.
+        o name: Human-readable name for this task
+        o address_space_id: The unique ID for the address space this task is
+          running in.
+ */ +struct task_info_node_s { + U64 id; + char name[32]; + U64 address_space_id; +}; + +/* + Type: REMOTE_SWITCH + Description: + Collection switch set on target +*/ +typedef struct REMOTE_SWITCH_NODE_S REMOTE_SWITCH_NODE; +typedef REMOTE_SWITCH_NODE *REMOTE_SWITCH; + +struct REMOTE_SWITCH_NODE_S { + U32 auto_mode : 1; + U32 adv_hotspot : 1; + U32 lbr_callstack : 2; + U32 full_pebs : 1; + U32 uncore_supported : 1; + U32 agent_mode : 2; + U32 sched_switch_enabled : 1; + U32 data_transfer_mode : 1; + U32 reserved1 : 22; + U32 reserved2; +}; + +#define REMOTE_SWITCH_auto_mode(x) ((x).auto_mode) +#define REMOTE_SWITCH_adv_hotspot(x) ((x).adv_hotspot) +#define REMOTE_SWITCH_lbr_callstack(x) ((x).lbr_callstack) +#define REMOTE_SWITCH_full_pebs(x) ((x).full_pebs) +#define REMOTE_SWITCH_uncore_supported(x) ((x).uncore_supported) +#define REMOTE_SWITCH_agent_mode(x) ((x).agent_mode) +#define REMOTE_SWITCH_sched_switch_enabled(x) ((x).sched_switch_enabled) +#define REMOTE_SWITCH_data_transfer_mode(x) ((x).data_transfer_mode) + +/* + Type: REMOTE_OS_INFO + Description: + Remote target OS system information +*/ +#define OSINFOLEN 256 +typedef struct REMOTE_OS_INFO_NODE_S REMOTE_OS_INFO_NODE; +typedef REMOTE_OS_INFO_NODE *REMOTE_OS_INFO; + +struct REMOTE_OS_INFO_NODE_S { + U32 os_family; + U32 reserved1; + S8 sysname[OSINFOLEN]; + S8 release[OSINFOLEN]; + S8 version[OSINFOLEN]; +}; + +#define REMOTE_OS_INFO_os_family(x) ((x).os_family) +#define REMOTE_OS_INFO_sysname(x) ((x).sysname) +#define REMOTE_OS_INFO_release(x) ((x).release) +#define REMOTE_OS_INFO_version(x) ((x).version) + +/* + Type: REMOTE_HARDWARE_INFO + Description: + Remote target hardware information +*/ +typedef struct REMOTE_HARDWARE_INFO_NODE_S REMOTE_HARDWARE_INFO_NODE; +typedef REMOTE_HARDWARE_INFO_NODE *REMOTE_HARDWARE_INFO; + +struct REMOTE_HARDWARE_INFO_NODE_S { + U32 num_cpus; + U32 family; + U32 model; + U32 stepping; + U64 tsc_freq; + U64 reserved2; + U64 reserved3; +}; + +#define REMOTE_HARDWARE_INFO_num_cpus(x) ((x).num_cpus) +#define REMOTE_HARDWARE_INFO_family(x) ((x).family) +#define REMOTE_HARDWARE_INFO_model(x) ((x).model) +#define REMOTE_HARDWARE_INFO_stepping(x) ((x).stepping) +#define REMOTE_HARDWARE_INFO_tsc_frequency(x) ((x).tsc_freq) + +/* + Type: SEP_AGENT_MODE + Description: + SEP mode on target agent +*/ +typedef enum { + NATIVE_AGENT = 0, + HOST_VM_AGENT, // Service OS in ACRN + GUEST_VM_AGENT // User OS in ACRN +} SEP_AGENT_MODE; + +/* + Type: DATA_TRANSFER_MODE + Description: + Data transfer mode from target agent to remote host +*/ +typedef enum { + IMMEDIATE_TRANSFER = 0, + DELAYED_TRANSFER // Send after collection is done +} DATA_TRANSFER_MODE; + +#define MAX_NUM_OS_ALLOWED 6 +#define TARGET_IP_NAMELEN 64 + +typedef struct TARGET_INFO_NODE_S TARGET_INFO_NODE; +typedef TARGET_INFO_NODE *TARGET_INFO; + +struct TARGET_INFO_NODE_S { + U32 num_of_agents; + U32 reserved; + U32 os_id[MAX_NUM_OS_ALLOWED]; + S8 ip_address[MAX_NUM_OS_ALLOWED][TARGET_IP_NAMELEN]; + REMOTE_OS_INFO_NODE os_info[MAX_NUM_OS_ALLOWED]; + REMOTE_HARDWARE_INFO_NODE hardware_info[MAX_NUM_OS_ALLOWED]; + REMOTE_SWITCH_NODE remote_switch[MAX_NUM_OS_ALLOWED]; +}; + +#define TARGET_INFO_num_of_agents(x) ((x)->num_of_agents) +#define TARGET_INFO_os_id(x, i) ((x)->os_id[i]) +#define TARGET_INFO_os_info(x, i) ((x)->os_info[i]) +#define TARGET_INFO_ip_address(x, i) ((x)->ip_address[i]) +#define TARGET_INFO_hardware_info(x, i) ((x)->hardware_info[i]) +#define TARGET_INFO_remote_switch(x, i) ((x)->remote_switch[i]) + +typedef struct 
CPU_MAP_TRACE_NODE_S CPU_MAP_TRACE_NODE;
+typedef CPU_MAP_TRACE_NODE *CPU_MAP_TRACE;
+
+struct CPU_MAP_TRACE_NODE_S {
+	U64 tsc;
+	U32 os_id;
+	U32 vcpu_id;
+	U32 pcpu_id;
+	U8 is_static : 1;
+	U8 initial : 1;
+	U8 reserved1 : 6;
+	U8 reserved2;
+	U16 reserved3;
+	U64 tsc_offset;
+};
+
+#define CPU_MAP_TRACE_tsc(x) ((x)->tsc)
+#define CPU_MAP_TRACE_os_id(x) ((x)->os_id)
+#define CPU_MAP_TRACE_vcpu_id(x) ((x)->vcpu_id)
+#define CPU_MAP_TRACE_pcpu_id(x) ((x)->pcpu_id)
+#define CPU_MAP_TRACE_is_static(x) ((x)->is_static)
+#define CPU_MAP_TRACE_initial(x) ((x)->initial)
+#define CPU_MAP_TRACE_tsc_offset(x) ((x)->tsc_offset)
+
+#define MAX_NUM_VCPU 64
+#define MAX_NUM_VM 16
+typedef struct CPU_MAP_TRACE_LIST_NODE_S CPU_MAP_TRACE_LIST_NODE;
+typedef CPU_MAP_TRACE_LIST_NODE *CPU_MAP_TRACE_LIST;
+
+struct CPU_MAP_TRACE_LIST_NODE_S {
+	U32 osid;
+	U8 num_entries;
+	U8 reserved1;
+	U16 reserved2;
+	CPU_MAP_TRACE_NODE entries[MAX_NUM_VCPU];
+};
+
+typedef struct VM_OSID_MAP_NODE_S VM_OSID_MAP_NODE;
+typedef VM_OSID_MAP_NODE *VM_OSID_MAP;
+struct VM_OSID_MAP_NODE_S {
+	U32 num_vms;
+	U32 reserved1;
+	U32 osid[MAX_NUM_VM];
+};
+
+typedef struct VM_SWITCH_TRACE_NODE_S VM_SWITCH_TRACE_NODE;
+typedef VM_SWITCH_TRACE_NODE *VM_SWITCH_TRACE;
+
+struct VM_SWITCH_TRACE_NODE_S {
+	U64 tsc;
+	U32 from_os_id;
+	U32 to_os_id;
+	U64 reason;
+	U64 reserved1;
+	U64 reserved2;
+};
+
+#define VM_SWITCH_TRACE_tsc(x) ((x)->tsc)
+#define VM_SWITCH_TRACE_from_os_id(x) ((x)->from_os_id)
+#define VM_SWITCH_TRACE_to_os_id(x) ((x)->to_os_id)
+#define VM_SWITCH_TRACE_reason(x) ((x)->reason)
+
+typedef struct EMON_BUFFER_DRIVER_HELPER_NODE_S EMON_BUFFER_DRIVER_HELPER_NODE;
+typedef EMON_BUFFER_DRIVER_HELPER_NODE *EMON_BUFFER_DRIVER_HELPER;
+
+struct EMON_BUFFER_DRIVER_HELPER_NODE_S {
+	U32 num_entries_per_package;
+	U32 num_cpu;
+	U32 power_num_package_events;
+	U32 power_num_module_events;
+	U32 power_num_thread_events;
+	U32 power_device_offset_in_package;
+	U32 core_num_events;
+	U32 core_index_to_thread_offset_map[];
+};
+
+#define EMON_BUFFER_DRIVER_HELPER_num_entries_per_package(x) \
+	((x)->num_entries_per_package)
+#define EMON_BUFFER_DRIVER_HELPER_num_cpu(x) ((x)->num_cpu)
+#define EMON_BUFFER_DRIVER_HELPER_power_num_package_events(x) \
+	((x)->power_num_package_events)
+#define EMON_BUFFER_DRIVER_HELPER_power_num_module_events(x) \
+	((x)->power_num_module_events)
+#define EMON_BUFFER_DRIVER_HELPER_power_num_thread_events(x) \
+	((x)->power_num_thread_events)
+#define EMON_BUFFER_DRIVER_HELPER_power_device_offset_in_package(x) \
+	((x)->power_device_offset_in_package)
+#define EMON_BUFFER_DRIVER_HELPER_core_num_events(x) ((x)->core_num_events)
+#define EMON_BUFFER_DRIVER_HELPER_core_index_to_thread_offset_map(x) \
+	((x)->core_index_to_thread_offset_map)
+
+// The EMON counts buffer follows this hardware topology: package -> device -> unit/thread -> event
+
+// Calculate the CORE thread offset
+// Used during initialization to calculate the cpu_index_to_thread_offset_map in emon_Create_Emon_Buffer_Descriptor()
+// EMON_BUFFER_CORE_THREAD_OFFSET =
+//	package_id * num_entries_per_package +	//package offset
+//	device_offset_in_package +	//device base offset
+//	(module_id * threads_per_module + core_id * threads_per_core + thread_id) * num_core_events	//thread offset
+#define EMON_BUFFER_CORE_THREAD_OFFSET(package_id, \
+	num_entries_per_package, device_offset_in_package, \
+	module_enum_supported, module_id, \
+	threads_per_module, core_id, \
+	threads_per_core, thread_id, num_core_events) \
+	(package_id *
num_entries_per_package + device_offset_in_package + \
+	(module_enum_supported * module_id * threads_per_module + core_id * threads_per_core + thread_id) \
+	* num_core_events)
+
+
+// Take cpu_index and cpu_index_to_thread_offset_map to get the thread_offset, and calculate the CORE event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_CORE_EVENT_OFFSET =
+//	cpu_index_to_thread_offset +	//thread offset
+//	core_event_id	//event_offset
+#define EMON_BUFFER_CORE_EVENT_OFFSET(cpu_index_to_thread_offset, \
+	core_event_id) \
+	(cpu_index_to_thread_offset + core_event_id)
+
+// Calculate the device-level UNCORE event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE =
+//	device_offset_in_package +	//device_offset_in_package
+//	device_unit_id * num_unit_events +	//unit_offset
+//	device_event_id	//event_offset
+#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET_IN_PACKAGE( \
+	device_offset_in_package, device_unit_id, num_unit_events, \
+	device_event_id) \
+	(device_offset_in_package + device_unit_id * num_unit_events + \
+	 device_event_id)
+
+// Take the 'device-level UNCORE event offset' and package_id, and calculate the UNCORE package-level event offset
+// Used by the emon_output.c printing function
+// EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET =
+//	package_id * num_entries_per_package +	//package_offset
+//	uncore_offset_in_package	//offset_in_package
+#define EMON_BUFFER_UNCORE_PACKAGE_EVENT_OFFSET( \
+	package_id, num_entries_per_package, uncore_offset_in_package) \
+	(package_id * num_entries_per_package + uncore_offset_in_package)
+
+// Take the 'device-level UNCORE event offset', and calculate the UNCORE system-level event offset
+// Used by the emon_output.c printing function
+// EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET =
+//	device_offset_in_system +	//device_offset_in_system
+//	device_unit_id * num_system_events +	//device_unit_offset
+//	device_event_id	//event_offset
+#define EMON_BUFFER_UNCORE_SYSTEM_EVENT_OFFSET(device_offset_in_system, \
+	device_unit_id, \
+	num_system_events, \
+	device_event_id) \
+	(device_offset_in_system + device_unit_id * num_system_events + \
+	 device_event_id)
+
+// Calculate the package level power event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET =
+//	package_id * num_entries_per_package +	//package offset
+//	device_offset_in_package +	//device offset
+//	package_event_offset	//power package event offset
+#define EMON_BUFFER_UNCORE_PACKAGE_POWER_EVENT_OFFSET( \
+	package_id, num_entries_per_package, device_offset_in_package, \
+	device_event_offset) \
+	(package_id * num_entries_per_package + device_offset_in_package + \
+	 device_event_offset)
+
+// Calculate the module level power event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET =
+//	package_id * num_entries_per_package +	//package offset
+//	device_offset_in_package +	//device offset
+//	num_package_events +	//package event offset
+//	module_id * num_module_events +	//module offset
+//	module_event_offset	//power module event offset
+#define EMON_BUFFER_UNCORE_MODULE_POWER_EVENT_OFFSET( \
+	package_id, num_entries_per_package, device_offset_in_package, \
+	num_package_events, module_id, num_module_events, device_event_offset) \
+	(package_id * num_entries_per_package + device_offset_in_package + \
+	 num_package_events + module_id * num_module_events + \
+	 device_event_offset)
+
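+/*
+ * Illustrative sketch (not part of the driver API): how a userland reader
+ * might combine the offset macros above to locate one core event count in
+ * the EMON buffer. All names and the calling context are hypothetical, and
+ * the block is compiled out on purpose.
+ */
+#if 0
+static U64
+example_read_core_event(U64 *emon_buffer, U32 *cpu_index_to_thread_offset_map,
+			U32 cpu_index, U32 core_event_id)
+{
+	/* thread offset is precomputed per CPU at init time via
+	 * EMON_BUFFER_CORE_THREAD_OFFSET() and stored in the map */
+	U32 thread_offset = cpu_index_to_thread_offset_map[cpu_index];
+
+	/* event offset = thread offset + event id */
+	return emon_buffer[EMON_BUFFER_CORE_EVENT_OFFSET(thread_offset,
+							 core_event_id)];
+}
+#endif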
+// Calculate the thread level power event offset
+// Used by the kernel and the emon_output.c printing function
+// EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET =
+//	package_id * num_entries_per_package +	//package offset
+//	device_offset_in_package +	//device offset
+//	num_package_events +	//package event offset
+//	num_modules_per_package * num_module_events +	//module offset
+//	(module_id * threads_per_module + core_id * threads_per_core + thread_id) * num_thread_events +	//thread offset
+//	thread_event_offset	//power thread event offset
+#define EMON_BUFFER_UNCORE_THREAD_POWER_EVENT_OFFSET( \
+	package_id, num_entries_per_package, device_offset_in_package, \
+	num_package_events, num_modules_per_package, num_module_events, \
+	module_enum_supported, module_id, threads_per_module, core_id, \
+	threads_per_core, thread_id, num_unit_events, device_event_offset) \
+	(package_id * num_entries_per_package + device_offset_in_package + \
+	 num_package_events + num_modules_per_package * \
+	 num_module_events + (module_enum_supported * module_id * \
+	 threads_per_module + core_id * threads_per_core + thread_id) \
+	 * num_unit_events + device_event_offset)
+
+// Take cpu_index and cpu_index_to_thread_offset_map to get the thread_offset, then move past the end of the core events
+// to reach the PMU metrics
+// EMON_BUFFER_CORE_PERF_METRIC_OFFSET =
+//	cpu_index_to_thread_offset +	//thread offset
+//	num_core_events +	//end of core events
+//	core_pmu_metrics_id	//index of the metric of interest
+#define EMON_BUFFER_CORE_PERF_METRIC_OFFSET( \
+	cpu_index_to_thread_offset, num_core_events, core_pmu_metrics_id) \
+	(cpu_index_to_thread_offset + num_core_events + core_pmu_metrics_id)
+
+/*
+ ************************************
+ *  DRIVER LOG BUFFER DECLARATIONS  *
+ ************************************
+ */
+
+#define DRV_MAX_NB_LOG_CATEGORIES 256 // Must be a multiple of 8
+#define DRV_NB_LOG_CATEGORIES 14
+#define DRV_LOG_CATEGORY_LOAD 0
+#define DRV_LOG_CATEGORY_INIT 1
+#define DRV_LOG_CATEGORY_DETECTION 2
+#define DRV_LOG_CATEGORY_ERROR 3
+#define DRV_LOG_CATEGORY_STATE_CHANGE 4
+#define DRV_LOG_CATEGORY_MARK 5
+#define DRV_LOG_CATEGORY_DEBUG 6
+#define DRV_LOG_CATEGORY_FLOW 7
+#define DRV_LOG_CATEGORY_ALLOC 8
+#define DRV_LOG_CATEGORY_INTERRUPT 9
+#define DRV_LOG_CATEGORY_TRACE 10
+#define DRV_LOG_CATEGORY_REGISTER 11
+#define DRV_LOG_CATEGORY_NOTIFICATION 12
+#define DRV_LOG_CATEGORY_WARNING 13
+
+#define LOG_VERBOSITY_UNSET 0xFF
+#define LOG_VERBOSITY_DEFAULT 0xFE
+#define LOG_VERBOSITY_NONE 0
+
+#define LOG_CHANNEL_MEMLOG 0x1
+#define LOG_CHANNEL_AUXMEMLOG 0x2
+#define LOG_CHANNEL_PRINTK 0x4
+#define LOG_CHANNEL_TRACEK 0x8
+#define LOG_CHANNEL_MOSTWHERE \
+	(LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK)
+#define LOG_CHANNEL_EVERYWHERE \
+	(LOG_CHANNEL_MEMLOG | LOG_CHANNEL_AUXMEMLOG | LOG_CHANNEL_PRINTK | \
+	 LOG_CHANNEL_TRACEK)
+#define LOG_CHANNEL_MASK LOG_CHANNEL_EVERYWHERE
+
+#define LOG_CONTEXT_REGULAR 0x10
+#define LOG_CONTEXT_INTERRUPT 0x20
+#define LOG_CONTEXT_NOTIFICATION 0x40
+#define LOG_CONTEXT_ALL \
+	(LOG_CONTEXT_REGULAR | LOG_CONTEXT_INTERRUPT | LOG_CONTEXT_NOTIFICATION)
+#define LOG_CONTEXT_MASK LOG_CONTEXT_ALL
+#define LOG_CONTEXT_SHIFT 4
+
+#define DRV_LOG_NOTHING 0
+#define DRV_LOG_FLOW_IN 1
+#define DRV_LOG_FLOW_OUT 2
+
+/*
+ * @macro DRV_LOG_ENTRY_NODE_S
+ * @brief
+ * This structure is used to store a log message from the driver.
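+ * Each entry is padded to exactly 128 bytes (see the struct definition
+ * below), so a raw dump of the log buffer can be walked entry by entry;
+ * the temporal tag is meant to order entries, and the integrity tag helps
+ * detect partially written ones.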
+ */
+
+#define DRV_LOG_MESSAGE_LENGTH 64
+#define DRV_LOG_FUNCTION_NAME_LENGTH 32
+
+typedef struct DRV_LOG_ENTRY_NODE_S DRV_LOG_ENTRY_NODE;
+typedef DRV_LOG_ENTRY_NODE *DRV_LOG_ENTRY;
+struct DRV_LOG_ENTRY_NODE_S {
+	char function_name[DRV_LOG_FUNCTION_NAME_LENGTH];
+	char message[DRV_LOG_MESSAGE_LENGTH];
+
+	U16 temporal_tag;
+	U16 integrity_tag;
+
+	U8 category;
+	U8 secondary_info; // Secondary attribute:
+			   // former driver state for STATE category
+			   // 'ENTER' or 'LEAVE' for FLOW and TRACE categories
+	U16 processor_id; // NB: not guaranteed to be accurate (due to preemption / core migration)
+
+	U64 tsc;
+
+	U16 nb_active_interrupts; // never 100% accurate, merely indicative
+	U8 active_drv_operation; // only 100% accurate for IOCTL-called functions
+	U8 driver_state;
+
+	U16 line_number; // as per the __LINE__ macro
+
+	U16 nb_active_notifications;
+
+	U64 reserved; // need padding to reach 128 bytes
+}; // this structure should be exactly 128 bytes long
+
+#define DRV_LOG_ENTRY_temporal_tag(ent) ((ent)->temporal_tag)
+#define DRV_LOG_ENTRY_integrity_tag(ent) ((ent)->integrity_tag)
+#define DRV_LOG_ENTRY_category(ent) ((ent)->category)
+#define DRV_LOG_ENTRY_secondary_info(ent) ((ent)->secondary_info)
+#define DRV_LOG_ENTRY_processor_id(ent) ((ent)->processor_id)
+#define DRV_LOG_ENTRY_tsc(ent) ((ent)->tsc)
+#define DRV_LOG_ENTRY_driver_state(ent) ((ent)->driver_state)
+#define DRV_LOG_ENTRY_active_drv_operation(ent) ((ent)->active_drv_operation)
+#define DRV_LOG_ENTRY_nb_active_interrupts(ent) ((ent)->nb_active_interrupts)
+#define DRV_LOG_ENTRY_nb_active_notifications(ent) \
+	((ent)->nb_active_notifications)
+#define DRV_LOG_ENTRY_line_number(ent) ((ent)->line_number)
+#define DRV_LOG_ENTRY_message(ent) ((ent)->message)
+#define DRV_LOG_ENTRY_function_name(ent) ((ent)->function_name)
+
+/*
+ * @macro DRV_LOG_BUFFER_NODE_S
+ * @brief
+ * Circular buffer structure storing the latest DRV_LOG_MAX_NB_ENTRIES driver messages
+ */
+
+#define DRV_LOG_SIGNATURE_SIZE 8 // Must be a multiple of 8
+#define DRV_LOG_SIGNATURE_0 'S'
+#define DRV_LOG_SIGNATURE_1 'e'
+#define DRV_LOG_SIGNATURE_2 'P'
+#define DRV_LOG_SIGNATURE_3 'd'
+#define DRV_LOG_SIGNATURE_4 'R'
+#define DRV_LOG_SIGNATURE_5 'v'
+#define DRV_LOG_SIGNATURE_6 '5'
+#define DRV_LOG_SIGNATURE_7 '\0'
+// The signature is "SePdRv5"; not declared as a string on purpose to avoid false positives when trying to identify the log buffer in a crash dump
+
+#define DRV_LOG_VERSION 1
+#define DRV_LOG_FILLER_BYTE 1
+
+#define DRV_LOG_DRIVER_VERSION_SIZE 64 // Must be a multiple of 8
+// 2MB buffer [*HAS TO BE* a power of 2!] [8192 entries = 1 MB]
+#define DRV_LOG_MAX_NB_PRI_ENTRIES (8192 * 2)
+// 1MB buffer [*HAS TO BE* a power of 2!]
+#define DRV_LOG_MAX_NB_AUX_ENTRIES (8192)
+#define DRV_LOG_MAX_NB_ENTRIES \
+	(DRV_LOG_MAX_NB_PRI_ENTRIES + DRV_LOG_MAX_NB_AUX_ENTRIES)
+
+
+typedef struct DRV_LOG_BUFFER_NODE_S DRV_LOG_BUFFER_NODE;
+typedef DRV_LOG_BUFFER_NODE *DRV_LOG_BUFFER;
+struct DRV_LOG_BUFFER_NODE_S {
+	// some signature to be able to locate the log even without -g; ASCII would help
+	// should we change the signature for each log's version instead of keeping it in a
+	// dedicated field?
+	char header_signature[DRV_LOG_SIGNATURE_SIZE];
+
+	U32 log_size; // filled with sizeof(this structure) at init.
+	U32 max_nb_pri_entries; // filled with the driver's "DRV_LOG_MAX_NB_PRI_ENTRIES" at init.
+
+	U32 max_nb_aux_entries; // filled with the driver's "DRV_LOG_MAX_NB_AUX_ENTRIES" at init.
+ U32 reserved1; + + U64 init_time; // primary log disambiguator + + U32 disambiguator; // used to differentiate the driver's version of the log when a full memory dump can contain some from userland + U32 log_version; // 0 at first, increase when format changes? + + U32 pri_entry_index; // should be incremented *atomically* as a means to (re)allocate the next primary log entry. + U32 aux_entry_index; // should be incremented *atomically* as a means to (re)allocate the next auxiliary log entry. + + char driver_version[DRV_LOG_DRIVER_VERSION_SIZE]; + + U8 driver_state; + U8 active_drv_operation; + U16 reserved2; + U32 nb_drv_operations; + + U32 nb_interrupts; + U16 nb_active_interrupts; + U16 nb_active_notifications; + + U32 nb_notifications; + U32 nb_driver_state_transitions; + + U8 contiguous_physical_memory; + U8 reserved3; + U16 reserved4; + U32 reserved5; + + U8 verbosities[DRV_MAX_NB_LOG_CATEGORIES]; + + DRV_LOG_ENTRY_NODE entries[DRV_LOG_MAX_NB_ENTRIES]; + + char footer_signature[DRV_LOG_SIGNATURE_SIZE]; +}; + +#define DRV_LOG_BUFFER_pri_entry_index(log) ((log)->pri_entry_index) +#define DRV_LOG_BUFFER_aux_entry_index(log) ((log)->aux_entry_index) +#define DRV_LOG_BUFFER_header_signature(log) ((log)->header_signature) +#define DRV_LOG_BUFFER_footer_signature(log) ((log)->footer_signature) +#define DRV_LOG_BUFFER_log_size(log) ((log)->log_size) +#define DRV_LOG_BUFFER_driver_version(log) ((log)->driver_version) +#define DRV_LOG_BUFFER_driver_state(log) ((log)->driver_state) +#define DRV_LOG_BUFFER_active_drv_operation(log) ((log)->active_drv_operation) +#define DRV_LOG_BUFFER_nb_interrupts(log) ((log)->nb_interrupts) +#define DRV_LOG_BUFFER_nb_active_interrupts(log) ((log)->nb_active_interrupts) +#define DRV_LOG_BUFFER_nb_notifications(log) ((log)->nb_notifications) +#define DRV_LOG_BUFFER_nb_active_notifications(log) \ + ((log)->nb_active_notifications) +#define DRV_LOG_BUFFER_nb_driver_state_transitions(log) \ + ((log)->nb_driver_state_transitions) +#define DRV_LOG_BUFFER_nb_drv_operations(log) ((log)->nb_drv_operations) +#define DRV_LOG_BUFFER_max_nb_pri_entries(log) ((log)->max_nb_pri_entries) +#define DRV_LOG_BUFFER_max_nb_aux_entries(log) ((log)->max_nb_aux_entries) +#define DRV_LOG_BUFFER_init_time(log) ((log)->init_time) +#define DRV_LOG_BUFFER_disambiguator(log) ((log)->disambiguator) +#define DRV_LOG_BUFFER_log_version(log) ((log)->log_version) +#define DRV_LOG_BUFFER_entries(log) ((log)->entries) +#define DRV_LOG_BUFFER_contiguous_physical_memory(log) \ + ((log)->contiguous_physical_memory) +#define DRV_LOG_BUFFER_verbosities(log) ((log)->verbosities) + +#define DRV_LOG_CONTROL_MAX_DATA_SIZE \ + DRV_MAX_NB_LOG_CATEGORIES // Must be a multiple of 8 + +typedef struct DRV_LOG_CONTROL_NODE_S DRV_LOG_CONTROL_NODE; +typedef DRV_LOG_CONTROL_NODE *DRV_LOG_CONTROL; + +struct DRV_LOG_CONTROL_NODE_S { + U32 command; + U32 reserved1; + // only DRV_NB_LOG_CATEGORIES elements will be used, but let's plan for backwards compatibility + // if LOG_CATEGORY_UNSET, then READ instead of WRITE + U8 data[DRV_LOG_CONTROL_MAX_DATA_SIZE]; + + U64 reserved2; // may later want to add support for resizing the buffer, or only log 100 first interrupts, etc. + U64 reserved3; + U64 reserved4; + U64 reserved5; +}; + +#define DRV_LOG_CONTROL_command(x) ((x)->command) +#define DRV_LOG_CONTROL_verbosities(x) ((x)->data) +// Userland 'MARK' messages use the 'data' field too. 
+#define DRV_LOG_CONTROL_message(x) ((x)->data)
+#define DRV_LOG_CONTROL_log_size(x) (*((U32 *)((x)->data)))
+
+#define DRV_LOG_CONTROL_COMMAND_NONE 0
+#define DRV_LOG_CONTROL_COMMAND_ADJUST_VERBOSITY 1
+#define DRV_LOG_CONTROL_COMMAND_MARK 2
+#define DRV_LOG_CONTROL_COMMAND_QUERY_SIZE 3
+#define DRV_LOG_CONTROL_COMMAND_BENCHMARK 4
+
+typedef struct DRV_IOCTL_STATUS_NODE_S DRV_IOCTL_STATUS_NODE;
+typedef DRV_IOCTL_STATUS_NODE *DRV_IOCTL_STATUS;
+
+struct DRV_IOCTL_STATUS_NODE_S {
+	U32 drv_status;
+	U8 reg_prog_type;
+	U8 reserved1;
+	U16 reserved2;
+	union {
+		struct {
+			U64 bus : 8;
+			U64 dev : 5;
+			U64 func : 3;
+			U64 offset : 16;
+			U64 rsvd : 32;
+		} s;
+		U64 reg_key1;
+	} u;
+	U32 reg_key2;
+	U64 counter_mask;
+	U32 reserved4;
+	U64 reserved5;
+	U64 reserved6;
+};
+
+#define DRV_IOCTL_STATUS_drv_status(x) ((x)->drv_status)
+#define DRV_IOCTL_STATUS_reg_prog_type(x) ((x)->reg_prog_type)
+#define DRV_IOCTL_STATUS_bus(x) ((x)->u.s.bus)
+#define DRV_IOCTL_STATUS_dev(x) ((x)->u.s.dev)
+#define DRV_IOCTL_STATUS_func(x) ((x)->u.s.func)
+#define DRV_IOCTL_STATUS_offset(x) ((x)->u.s.offset)
+#define DRV_IOCTL_STATUS_reg_key1(x) ((x)->u.reg_key1)
+#define DRV_IOCTL_STATUS_reg_key2(x) ((x)->reg_key2)
+#define DRV_IOCTL_STATUS_counter_mask(x) ((x)->counter_mask)
+
+/* common perf structures for the platform picker and sampling components */
+#define MAXNAMELEN 256
+
+typedef struct PERF_DEVICE_CONFIG_INFO_NODE_S PERF_DEVICE_CONFIG_INFO_NODE;
+typedef PERF_DEVICE_CONFIG_INFO_NODE *PERF_DEVICE_CONFIG_INFO;
+
+struct PERF_DEVICE_CONFIG_INFO_NODE_S {
+	S8 name[MAXNAMELEN]; // unit name
+	U32 device_type; // device which the unit belongs to
+	U32 unit_scope; // maps to socket level / thread level / system level { enum EVENT_SCOPE_TYPES }
+	U32 unc_unit_type; // freerun / programmable counters
+	U32 type_config_value; // type value in /sys/bus/event_source/devices/*/type
+	U32 mux_value; // Multiplex value
+	U32 unc_unit_id; // Index to identify different units
+	S8 cpu_mask[1024]; // cpumask value in /sys/bus/event_source/devices/*/cpumask
+			   // dependent on unit_scope: {socket (one entry per socket) / thread level (no entry) / system level (one entry)}
+			   // We don't expect cpu_mask to overflow the specified size, but in the event of overflow it spills into the reserved fields
+};
+
+#define PERF_DEVICE_CONFIG_INFO_name(x) ((x)->name)
+#define PERF_DEVICE_CONFIG_INFO_device_type(x) ((x)->device_type)
+#define PERF_DEVICE_CONFIG_INFO_unit_scope(x) ((x)->unit_scope)
+#define PERF_DEVICE_CONFIG_INFO_unc_unit_type(x) ((x)->unc_unit_type)
+#define PERF_DEVICE_CONFIG_INFO_type_config_value(x) ((x)->type_config_value)
+#define PERF_DEVICE_CONFIG_INFO_mux_value(x) ((x)->mux_value)
+#define PERF_DEVICE_CONFIG_INFO_cpu_mask(x) ((x)->cpu_mask)
+#define PERF_DEVICE_CONFIG_INFO_unc_unit_id(x) ((x)->unc_unit_id)
+
+typedef struct PERF_SYS_DEVICES_NODE_S PERF_SYS_DEVICES_NODE;
+typedef PERF_SYS_DEVICES_NODE *PERF_SYS_DEVICES;
+struct PERF_SYS_DEVICES_NODE_S {
+	U32 num_sys_devices;
+	U32 size_of_alloc;
+	PERF_DEVICE_CONFIG_INFO_NODE device_list[];
+};
+
+#define PERF_SYS_DEVICES_num_sys_devices(x) ((x)->num_sys_devices)
+#define PERF_SYS_DEVICES_size_of_alloc(x) ((x)->size_of_alloc)
+#define PERF_SYS_DEVICES_device_list(x) ((x)->device_list)
+#define PERF_SYS_DEVICES_device_config(x, i) ((x)->device_list[i])
+
+typedef enum {
+	IPT_BUFFER_CAPTURE = 0,
+	IPT_BUFFER_DROP,
+} IPT_INFO_DESCRIPTOR;
+
+typedef struct DRV_IPT_DROP_INFO_NODE_S DRV_IPT_DROP_INFO_NODE;
+typedef DRV_IPT_DROP_INFO_NODE *DRV_IPT_DROP_INFO;
+struct
DRV_IPT_DROP_INFO_NODE_S {
+	U16 length;
+	U16 descriptor_id;
+	U32 drop_size;
+	U64 offset;
+	U64 reserved2;
+	U64 reserved3;
+};
+
+#define DRV_IPT_DROP_INFO_length(x) ((x).length)
+#define DRV_IPT_DROP_INFO_descriptor_id(x) ((x).descriptor_id)
+#define DRV_IPT_DROP_INFO_drop_size(x) ((x).drop_size)
+#define DRV_IPT_DROP_INFO_offset(x) ((x).offset)
+
+typedef struct DRV_MSR_OP_NODE_S DRV_MSR_OP_NODE;
+typedef DRV_MSR_OP_NODE *DRV_MSR_OP;
+
+struct DRV_MSR_OP_NODE_S {
+	U32 reg_id;
+	S32 status;
+	U64 reg_read_val;
+	U64 reg_write_val;
+	U64 reserved;
+};
+
+#define DRV_MSR_OP_reg_id(x) ((x)->reg_id)
+#define DRV_MSR_OP_status(x) ((x)->status)
+#define DRV_MSR_OP_reg_read_val(x) ((x)->reg_read_val)
+#define DRV_MSR_OP_reg_write_val(x) ((x)->reg_write_val)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_types.h b/drivers/platform/x86/socperf/include/lwpmudrv_types.h
new file mode 100644
index 00000000000000..6f28de389e67df
--- /dev/null
+++ b/drivers/platform/x86/socperf/include/lwpmudrv_types.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2007 Intel Corporation
+ *
+ * This software and the related documents are Intel copyrighted materials, and your use of them
+ * is governed by the express license under which they were provided to you ("License"). Unless
+ * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose
+ * or transmit this software or the related documents without Intel's prior written permission.
+ *
+ * This software and the related documents are provided as is, with no express or implied
+ * warranties, other than those that are expressly stated in the License.
+*/
+
+#ifndef _LWPMUDRV_TYPES_H_
+#define _LWPMUDRV_TYPES_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(BUILD_DRV_ESX)
+//SR: added size_t def
+typedef unsigned long size_t;
+typedef unsigned long ssize_t;
+#endif
+
+typedef unsigned char U8;
+typedef char S8;
+typedef short S16;
+typedef unsigned short U16;
+typedef unsigned int U32;
+typedef int S32;
+#if defined(DRV_OS_WINDOWS)
+typedef unsigned __int64 U64;
+typedef __int64 S64;
+#elif defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || \
+	defined(DRV_OS_MAC) || defined(DRV_OS_ANDROID) || \
+	defined(DRV_OS_FREEBSD)
+typedef unsigned long long U64;
+typedef long long S64;
+typedef unsigned long ULONG;
+typedef void VOID;
+typedef void * LPVOID;
+
+#if defined(BUILD_DRV_ESX)
+//SR: added UWORD64 def
+typedef union _UWORD64 {
+	struct {
+		U32 low;
+		S32 hi;
+	} c;
+	S64 qword;
+} UWORD64, *PWORD64;
+#endif
+#else
+#error "Undefined OS"
+#endif
+
+#if defined(DRV_IA32)
+typedef S32 SIOP;
+typedef U32 UIOP;
+#elif defined(DRV_EM64T)
+typedef S64 SIOP;
+typedef U64 UIOP;
+#else
+#error "Unexpected Architecture seen"
+#endif
+
+typedef U32 DRV_BOOL;
+typedef void *PVOID;
+
+#if !defined(__DEFINE_STCHAR__)
+#define __DEFINE_STCHAR__
+#if defined(UNICODE)
+typedef wchar_t STCHAR;
+#define VTSA_T(x) L##x
+#else
+typedef char STCHAR;
+#define VTSA_T(x) x
+#endif
+#endif
+
+#if defined(DRV_OS_WINDOWS)
+#include <wchar.h>
+typedef wchar_t DRV_STCHAR;
+typedef wchar_t VTSA_CHAR;
+#else
+typedef char DRV_STCHAR;
+#endif
+
+//
+// Handy Defines
+//
+typedef U32 DRV_STATUS;
+
+#define MAX_STRING_LENGTH 1024
+#define MAXNAMELEN 256
+#define MAX_PATH_BUFFER_LENGTH 1100
+#define MAX_PATH_USER_LENGTH 1024
+
+#if defined(DRV_OS_WINDOWS)
+#define WIN_MAX_RELATIVE_PATH_LENGTH 200
+#define WIN_VOLUME_LABEL_INDICATOR L":\\"
+#define WIN_PATH_LENGTH_EXTEND_PREFIX L"\\\\?\\"
+#endif
+
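+/*
+ * Illustrative sketch (not part of the header): a compile-time check that
+ * the fixed-width typedefs above have the sizes their names promise. The
+ * DRV_SIZE_CHECK helper is hypothetical; it works even in C89 by declaring
+ * an array type whose size is negative when the assertion fails. Compiled
+ * out on purpose.
+ */
+#if 0
+#define DRV_SIZE_CHECK(t, n) \
+	typedef char t##_size_check[(sizeof(t) == (n)) ? 1 : -1]
+DRV_SIZE_CHECK(U8, 1);
+DRV_SIZE_CHECK(U16, 2);
+DRV_SIZE_CHECK(U32, 4);
+DRV_SIZE_CHECK(U64, 8);
+#endif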
+#if defined(DRV_OS_WINDOWS)
+#define UNLINK _unlink
+#define RENAME rename
+#define WCSDUP _wcsdup
+#endif
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_MAC) || \
+	defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD)
+#define UNLINK unlink
+#define RENAME rename
+#endif
+
+#if defined(DRV_OS_SOLARIS) && !defined(_KERNEL)
+//wcsdup is missing on Solaris
+#include <stdlib.h>
+#include <wchar.h>
+
+static inline wchar_t *
+solaris_wcsdup(const wchar_t *wc)
+{
+	wchar_t *tmp = (wchar_t *)malloc((wcslen(wc) + 1) * sizeof(wchar_t));
+	if (tmp) {
+		wcscpy(tmp, wc);
+	}
+	return tmp;
+}
+#define WCSDUP solaris_wcsdup
+#endif
+
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_FREEBSD) || defined(DRV_OS_MAC)
+#define WCSDUP wcsdup
+#endif
+
+#if !defined(_WCHAR_T_DEFINED)
+#if defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID) || defined(DRV_OS_SOLARIS)
+#if !defined(_GNU_SOURCE)
+#define _GNU_SOURCE
+#endif
+#endif
+#endif
+
+#if (defined(DRV_OS_LINUX) || defined(DRV_OS_ANDROID)) && !defined(__KERNEL__)
+#include <wchar.h>
+typedef wchar_t VTSA_CHAR;
+#endif
+
+#if (defined(DRV_OS_MAC) || defined(DRV_OS_FREEBSD) || \
+	defined(DRV_OS_SOLARIS)) && \
+	!defined(_KERNEL)
+#include <wchar.h>
+typedef wchar_t VTSA_CHAR;
+#endif
+
+#define TRUE 1
+#define FALSE 0
+
+#define ALIGN_4(x) (((x) + 3) & ~3)
+#define ALIGN_8(x) (((x) + 7) & ~7)
+#define ALIGN_16(x) (((x) + 15) & ~15)
+#define ALIGN_32(x) (((x) + 31) & ~31)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/socperf/include/lwpmudrv_version.h b/drivers/platform/x86/socperf/include/lwpmudrv_version.h
new file mode 100644
index 00000000000000..505227a7a1a211
--- /dev/null
+++ b/drivers/platform/x86/socperf/include/lwpmudrv_version.h
@@ -0,0 +1,148 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright (C) 2010-2021 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright (C) 2010-2021 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ +/* + * File : lwpmudrv_version.h + */ + +#ifndef _LWPMUDRV_VERSION_H_ +#define _LWPMUDRV_VERSION_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * @macro SOCPERF_VERSION_NODE_S + * @brief + * This structure supports versioning in Sep. The field major indicates the major version, + * minor indicates the minor version and api indicates the api version for the current + * sep build. This structure is initialized at the time when the driver is loaded. + */ + +typedef struct SOCPERF_VERSION_NODE_S SOCPERF_VERSION_NODE; +typedef SOCPERF_VERSION_NODE *SOCPERF_VERSION; + +struct SOCPERF_VERSION_NODE_S { + union { + U32 socperf_version; + struct { + S32 major :8; + S32 minor :8; + S32 api :8; + S32 update:8; + }s1; + }u1; +}; + +#define SOCPERF_VERSION_NODE_socperf_version(version) ((version)->u1.socperf_version) +#define SOCPERF_VERSION_NODE_major(version) ((version)->u1.s1.major) +#define SOCPERF_VERSION_NODE_minor(version) ((version)->u1.s1.minor) +#define SOCPERF_VERSION_NODE_api(version) ((version)->u1.s1.api) +#define SEP_VERSION_NODE_update(version) ((version)->u1.s1.update) + +#if defined(__cplusplus) +} +#endif + +// SOCPERF VERSIONING + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) +#define _STRINGIFY_W(x) L#x +#define STRINGIFY_W(x) _STRINGIFY_W(x) + +#define SOCPERF_MAJOR_VERSION 3 +#define SOCPERF_MINOR_VERSION 0 +#define SOCPERF_API_VERSION 0 +#define SOCPERF_UPDATE_VERSION 0 +#if SOCPERF_UPDATE_VERSION > 0 +#define SOCPERF_UPDATE_STRING " Update "STRINGIFY(SOCPERF_UPDATE_VERSION) +#else +#define SOCPERF_UPDATE_STRING "" +#endif + +#define SOCPERF_PRODUCT_NAME "Sampling Enabling Product" +#define PRODUCT_VERSION_DATE __DATE__ " at " __TIME__ +#define PRODUCT_COPYRIGHT "Copyright (C) 2011-2018 Intel Corporation. All rights reserved." +#define PRODUCT_DISCLAIMER "Warning: This computer program is protected under U.S. and international\ncopyright laws, and may only be used or copied in accordance with the terms\nof the license agreement. Except as permitted by such license, no part\nof this computer program may be reproduced, stored in a retrieval system,\nor transmitted in any form or by any means without the express written consent\nof Intel Corporation." 
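+/*
+ * For reference (not part of the header): the two-step STRINGIFY pattern
+ * above expands its argument before stringizing, so with the values above
+ * STRINGIFY(SOCPERF_MAJOR_VERSION) yields "3", whereas the single-step
+ * _STRINGIFY(SOCPERF_MAJOR_VERSION) would yield "SOCPERF_MAJOR_VERSION".
+ */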
+#define PRODUCT_VERSION "5.0" + +#define SOCPERF_NAME "socperf" +#define SOCPERF_NAME_W L"socperf" + +#define SOCPERF_MSG_PREFIX SOCPERF_NAME""STRINGIFY(SOCPERF_MAJOR_VERSION)"_"STRINGIFY(SOCPERF_MINOR_VERSION)":" +#define SOCPERF_VERSION_STR STRINGIFY(SOCPERF_MAJOR_VERSION)"."STRINGIFY(SOCPERF_MINOR_VERSION)"."STRINGIFY(SOCPERF_API_VERSION) + +#if defined(DRV_OS_WINDOWS) +#define SOCPERF_DRIVER_NAME SOCPERF_NAME STRINGIFY(SOCPERF_MAJOR_VERSION) +#define SOCPERF_DRIVER_NAME_W SOCPERF_NAME_W STRINGIFY_W(SOCPERF_MAJOR_VERSION) +#define SOCPERF_DEVICE_NAME SOCPERF_DRIVER_NAME +#endif + +#if defined(DRV_OS_LINUX) || defined(DRV_OS_SOLARIS) || defined(DRV_OS_ANDROID) || defined(DRV_OS_FREEBSD) +#define SOCPERF_DRIVER_NAME SOCPERF_NAME""STRINGIFY(SOCPERF_MAJOR_VERSION) +#define SOCPERF_SAMPLES_NAME SOCPERF_DRIVER_NAME"_s" +#define SOCPERF_DEVICE_NAME "/dev/"SOCPERF_DRIVER_NAME +#endif + +#if defined(DRV_OS_MAC) +#define SOCPERF_DRIVER_NAME SOCPERF_NAME""STRINGIFY(SOCPERF_MAJOR_VERSION) +#define SOCPERF_SAMPLES_NAME SOCPERF_DRIVER_NAME"_s" +#define SOCPERF_DEVICE_NAME SOCPERF_DRIVER_NAME +#endif + +#endif diff --git a/drivers/platform/x86/socperf/include/rise_errors.h b/drivers/platform/x86/socperf/include/rise_errors.h new file mode 100644 index 00000000000000..79d5ec4f7ad5d0 --- /dev/null +++ b/drivers/platform/x86/socperf/include/rise_errors.h @@ -0,0 +1,350 @@ +/* + * Copyright (C) 2004 Intel Corporation + * + * This software and the related documents are Intel copyrighted materials, and your use of them + * is governed by the express license under which they were provided to you ("License"). Unless + * the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose + * or transmit this software or the related documents without Intel's prior written permission. + * + * This software and the related documents are provided as is, with no express or implied + * warranties, other than those that are expressly stated in the License. +*/ + +#ifndef _RISE_ERRORS_H_ +#define _RISE_ERRORS_H_ + +// +// NOTE: +// +// 1) Before adding an error code, first make sure the error code doesn't +// already exist. If it does, use that, don't create a new one just because... +// +// 2) When adding an error code, add it to the end of the list. Don't insert +// error numbers in the middle of the list! For backwards compatibility, +// we don't want the numbers changing unless we really need them +// to for some reason (like we want to switch to negative error numbers) +// +// 3) Change the VT_LAST_ERROR_CODE macro to point to the (newly added) +// last error. This is done so SW can verify the number of error codes +// possible matches the number of error strings it has +// +// 4) Don't forget to update the error string table to include your +// error code (rise.c). Since the goal is something human readable +// you don't need to use abbreviations in there (ie. 
don't say "bad param", +// say "bad parameter" or "illegal parameter passed in") +// +// 5) Compile and run the test_rise app (in the test_rise directory) to +// verify things are still working +// +// + +#define VT_SUCCESS 0 +#define VT_FAILURE -1 + +/*************************************************************/ + +#define VT_INVALID_MAX_SAMP 1 +#define VT_INVALID_SAMP_PER_BUFF 2 +#define VT_INVALID_SAMP_INTERVAL 3 +#define VT_INVALID_PATH 4 +#define VT_TB5_IN_USE 5 +#define VT_INVALID_NUM_EVENTS 6 +#define VT_INTERNAL_ERROR 8 +#define VT_BAD_EVENT_NAME 9 +#define VT_NO_SAMP_SESSION 10 +#define VT_NO_EVENTS 11 +#define VT_MULTIPLE_RUNS 12 +#define VT_NO_SAM_PARAMS 13 +#define VT_SDB_ALREADY_EXISTS 14 +#define VT_SAMPLING_ALREADY_STARTED 15 +#define VT_TBS_NOT_SUPPORTED 16 +#define VT_INVALID_SAMPARAMS_SIZE 17 +#define VT_INVALID_EVENT_SIZE 18 +#define VT_ALREADY_PROCESSES 19 +#define VT_INVALID_EVENTS_PATH 20 +#define VT_INVALID_LICENSE 21 + +/******************************************************/ +//SEP error codes + +#define VT_SAM_ERROR 22 +#define VT_SAMPLE_FILE_ALREADY_MAPPED 23 +#define VT_INVALID_SAMPLE_FILE 24 +#define VT_UNKNOWN_SECTION_NUMBER 25 +#define VT_NO_MEMORY 26 +#define VT_ENV_VAR_NOT_FOUND 27 +#define VT_SAMPLE_FILE_NOT_MAPPED 28 +#define VT_BUFFER_OVERFLOW 29 +#define VT_USER_OP_COMPLETED 30 +#define VT_BINARY_NOT_FOUND 31 +#define VT_ISM_NOT_INITIALIZED 32 +#define VT_NO_SYMBOLS 33 +#define VT_SAMPLE_FILE_MAPPING_ERROR 34 +#define VT_BUFFER_NULL 35 +#define VT_UNEXPECTED_NULL_PTR 36 +#define VT_BINARY_LOAD_FAILED 37 +#define VT_FUNCTION_NOT_FOUND_IN_BINARY 38 +#define VT_ENTRY_NOT_FOUND 39 +#define VT_SEP_SYNTAX_ERROR 40 +#define VT_SEP_OPTIONS_ERROR 41 +#define VT_BAD_EVENT_MODIFIER 42 +#define VT_INCOMPATIBLE_PARAMS 43 +#define VT_FILE_OPEN_FAILED 44 +#define VT_EARLY_EXIT 45 +#define VT_TIMEOUT_RETURN 46 +#define VT_NO_CHILD_PROCESS 47 +#define VT_DRIVER_RUNNING 48 +#define VT_DRIVER_STOPPED 49 +#define VT_MULTIPLE_RUNS_NEEDED 50 +#define VT_QUIT_IMMEDIATE 51 +#define VT_DRIVER_INIT_FAILED 52 +#define VT_NO_TB5_CREATED 53 +#define VT_NO_WRITE_PERMISSION 54 +#define VT_DSA_INIT_FAILED 55 +#define VT_INVALID_CPU_MASK 56 +#define VT_SAMP_IN_RUNNING_STATE 57 +#define VT_SAMP_IN_PAUSE_STATE 58 +#define VT_SAMP_IN_STOP_STATE 59 +#define VT_SAMP_NO_SESSION 60 +#define VT_NOT_CONFIGURED 61 +#define VT_LAUNCH_BUILD64_FAILED 62 +#define VT_BAD_PARAMETER 63 +#define VT_ISM_INIT_FAILED 64 +#define VT_INVALID_STATE_TRANS 65 +#define VT_EARLY_EXIT_N_CANCEL 66 +#define VT_EVT_MGR_NOT_INIT 67 +#define VT_ISM_SECTION_ENUM_FAILED 68 +#define VT_VG_PARSER_ERROR 69 +#define VT_MISSING_VALUE_FOR_TOKEN 70 +#define VT_EMPTY_SAMPLE_FILE_NAME 71 +#define VT_UNEXPECTED_VALUE 72 +#define VT_NOT_IMPLEMENTED 73 +#define VT_MISSING_COL_DEPNDNCIES 74 +#define VT_DEP_COL_NOT_LIB_DEFINED 75 +#define VT_COL_NOT_REG_WITH_LIB 76 +#define VT_SECTION_ALREADY_IN_USE 77 +#define VT_SECTION_NOT_EXIST 78 +#define VT_STREAM_NOT_EXIST 79 +#define VT_INVALID_STREAM 80 +#define VT_STREAM_ALREADY_IN_USE 81 +#define VT_DATA_DESC_NOT_EXIST 82 +#define VT_INVALID_ERROR_CODE 83 +#define VT_INCOMPATIBLE_VERSION 84 +#define VT_LEGACY_DATA_NOT_EXIST 85 +#define VT_INVALID_READ_START 86 +#define VT_DRIVER_OPEN_FAILED 87 +#define VT_DRIVER_IOCTL_FAILED 88 +#define VT_SAMP_FILE_CREATE_FAILED 89 +#define VT_MODULE_FILE_CREATE_FAILED 90 +#define VT_INVALID_SAMPLE_FILE_NAME 91 +#define VT_INVALID_MODULE_FILE_NAME 92 +#define VT_FORK_CHILD_PROCESS_FAILED 93 +#define VT_UNEXPECTED_MISMATCH_IN_STRING_TYPES 94 +#define 
VT_INCOMPLETE_TB5_ENCOUNTERED 95 +#define VT_ERR_CONVERSION_FROM_STRING_2_NUMBER 96 +#define VT_INVALID_STRING 97 +#define VT_UNSUPPORTED_DATA_SIZE 98 +#define VT_TBRW_INIT_FAILED 99 +#define VT_PLUGIN_UNLOAD 100 +#define VT_PLUGIN_ENTRY_NULL 101 +#define VT_UNKNOWN_PLUGIN 102 +#define VT_BUFFER_TOO_SMALL 103 +#define VT_CANNOT_MODIFY_COLUMN 104 +#define VT_MULT_FILTERS_NOT_ALLOWED 105 +#define VT_ADDRESS_IN_USE 106 +#define VT_NO_MORE_MMAPS 107 +#define VT_MAX_PAGES_IN_DS_EXCEEDED 108 +#define VT_INVALID_COL_TYPE_IN_GROUP_INFO 109 +#define VT_AGG_FN_ON_VARCHAR_NOT_SUPP 110 +#define VT_INVALID_ACCESS_PERMS 111 +#define VT_NO_DATA_TO_DISPLAY 112 +#define VT_TB5_IS_NOT_BOUND 113 +#define VT_MISSING_GROUP_BY_COLUMN 114 +#define VT_SMRK_MAX_STREAMS_EXCEEDED 115 +#define VT_SMRK_STREAM_NOT_CREATED 116 +#define VT_SMRK_NOT_IMPL 117 +#define VT_SMRK_TYPE_NOT_IMPL 118 +#define VT_SMRK_TYPE_ALREADY_SET 119 +#define VT_SMRK_NO_STREAM 120 +#define VT_SMRK_INVALID_STREAM_TYPE 121 +#define VT_SMRK_STREAM_NOT_FOUND 122 +#define VT_SMRK_FAIL 123 +#define VT_SECTION_NOT_READABLE 124 +#define VT_SECTION_NOT_WRITEABLE 125 +#define VT_GLOBAL_SECTION_NOT_CLOSED 126 +#define VT_STREAM_SECTION_NOT_CLOSED 127 +#define VT_STREAM_NOT_CLOSED 128 +#define VT_STREAM_NOT_BOUND 129 +#define VT_NO_COLS_SPECIFIED 130 +#define VT_NOT_ALL_SECTIONS_CLOSED 131 +#define VT_SMRK_INVALID_PTR 132 +#define VT_UNEXPECTED_BIND_MISMATCH 133 +#define VT_WIN_TIMER_ERROR 134 +#define VT_ONLY_SNGL_DEPNDT_COL_ALLWD 135 +#define VT_BAD_MODULE 136 +#define VT_INPUT_SOURCE_INFO_NOT_SET 137 +#define VT_UNSUPPORTED_TIME_GRAN 138 +#define VT_NO_SAMPLES_COLLECTED 139 +#define VT_INVALID_CPU_TYPE_VERSION 140 +#define VT_BIND_UNEXPECTED_1STMODREC 141 +#define VT_BIND_MODULES_NOT_SORTED 142 +#define VT_UNEXPECTED_NUM_CPUIDS 143 +#define VT_UNSUPPORTED_ARCH_TYPE 144 +#define VT_NO_DATA_TO_WRITE 145 +#define VT_EM_TIME_SLICE_TOO_SMALL 146 +#define VT_EM_TOO_MANY_EVENT_GROUPS 147 +#define VT_EM_ZERO_GROUPS 148 +#define VT_EM_NOT_SUPPORTED 149 +#define VT_PMU_IN_USE 150 +#define VT_TOO_MANY_INTERRUPTS 151 +#define VT_MAX_SAMPLES_REACHED 152 +#define VT_MODULE_COLLECTION_FAILED 153 +#define VT_INCOMPATIBLE_DRIVER 154 +#define VT_UNABLE_LOCATE_TRIGGER_EVENT 155 +#define VT_COMMAND_NOT_HANDLED 156 +#define VT_DRIVER_VERSION_MISMATCH 157 +#define VT_MAX_MARKERS 158 +#define VT_DRIVER_COMM_FAILED 159 +#define VT_CHIPSET_CONFIG_FAILED 160 +#define VT_BAD_DATA_BASE 161 +#define VT_PAX_SERVICE_NOT_CONNECTED 162 +#define VT_PAX_SERVICE_ERROR 163 +#define VT_PAX_PMU_RESERVE_FAILED 164 +#define VT_INVALID_CPU_INFO_TYPE 165 +#define VT_CACHE_DOESNT_EXIST 166 +#define VT_UNSUPPORTED_UNCORE_ARCH_TYPE 167 +#define VT_EXCEEDED_MAX_EVENTS 168 +#define VT_MARKER_TIMER_FAILED 169 +#define VT_PAX_PMU_UNRESERVE_FAILED 170 +#define VT_MULTIPLE_PROCESSES_FOUND 171 +#define VT_NO_SUCH_PROCESS_FOUND 172 +#define VT_PCL_NOT_ENABLED 173 +#define VT_PCL_UID_CHECK 174 +#define VT_DEL_RESULTS_DIR_FAILED 175 +#define VT_NO_VALID_EVENTS 176 +#define VT_INVALID_EVENT 177 +#define VT_EVENTS_COUNTED 178 +#define VT_EVENTS_COLLECTED 179 +#define VT_UNSUPPORTED_GFX_ARCH_TYPE 180 +#define VT_GFX_CONFIG_FAILED 181 +#define VT_UNSUPPORTED_NON_NATIVE_MODE 182 +#define VT_INVALID_DEVICE 183 +#define VT_ENV_SETUP_FAILED 184 +#define VT_RESUME_NOT_RECEIVED 185 +#define VT_UNSUPPORTED_PWR_ARCH_TYPE 186 +#define VT_PWR_CONFIG_FAILED 187 +#define VT_NMI_WATCHDOG_FOUND 188 +#define VT_NO_PMU_RESOURCES 189 +#define VT_MIC_CARD_NOT_ONLINE 190 +#define VT_FREEZE_ON_PMI_NOT_AVAIL 191 +#define 
VT_FLUSH_FAILED 192 +#define VT_FLUSH_SUCCESS 193 +#define VT_WRITE_ERROR 194 +#define VT_NO_SPACE 195 +#define VT_MSR_ACCESS_ERROR 196 +#define VT_PEBS_NOT_SUPPORTED 197 +#define VT_LUA_PARSE_ERROR 198 +#define VT_COMM_CONNECTION_CLOSED_BY_REMOTE 199 +#define VT_COMM_LISTEN_ERROR 200 +#define VT_COMM_BIND_ERROR 201 +#define VT_COMM_ACCEPT_ERROR 202 +#define VT_COMM_SEND_ERROR 203 +#define VT_COMM_RECV_ERROR 204 +#define VT_COMM_SOCKET_ERROR 205 +#define VT_COMM_CONNECT_ERROR 206 +#define VT_TARGET_COLLECTION_MISMATCH 207 +#define VT_INVALID_SEP_DRIVER_LOG 208 +#define VT_COMM_PROTOCOL_VER_NOT_SUPPORTED 209 +#define VT_SAMP_IN_UNEXPECTED_STATE 210 +#define VT_COMM_RECV_BUF_RESIZE_ERROR 211 +#define VT_COMM_SEND_BUF_RESIZE_ERROR 212 +#define VT_NO_ADMIN_PRIVILEGE 213 +#define VT_CORE_EVT_MUX_NOT_SUPPORTED 214 +#define VT_OS_VERSION_NOT_SUPPORTED 215 +#define VT_COMM_RECV_TIMEOUT_SET_ERROR 216 +#define VT_COMM_RECV_TIMEOUT 217 +#define VT_COMM_NOT_COMPATIBLE 218 +#define VT_COMM_DATA_CHANNEL_UNAVAILABLE 219 +#define VT_COMM_INVALID_TARGET_OS_INFO 220 +#define VT_COMM_INVALID_TARGET_HARDWARE_INFO 221 +#define VT_COMM_INVALID_TARGET_COLLECT_SWITCH 222 +#define VT_NO_OFFCORE_SUPPORT 223 +#define VT_INVALID_EMON_STATIC_EVENT 224 +#define VT_ACRN_VMM_VERSION_MISMATCH 225 +#define VT_COMM_INVALID_NUM_OF_CORE_EVENTS 226 +#define VT_EVENT_NEEDS_EXPERIMENTAL_FLAG 227 +#define VT_INVALID_PROG_INFO 228 +#define VT_UNSUPPORTED_CPU_TOPOLOGY 229 +#define VT_IPT_NOT_SUPPORTED 230 +#define VT_EXCEED_OPEN_FILE_LIMIT 231 +#define VT_DRIVERLESS_SUCCESS 232 +#define VT_FEATURE_NOT_SUPPORTED_IN_DRIVER 233 +#define VT_UNCORE_DISCOVERY_NOT_SUPPORTED 234 +#define VT_UNCORE_DISCOVERY_DATA_INVALID 235 +#define VT_INCORRECT_COUNTER_MASK 236 +/* + * define error code for checking on async marker request + */ +#define VT_INVALID_MARKER_ID -1 + +/* + * ************************************************************ + * NOTE: after adding new error code(s), remember to also + * update the following: + * 1) VT_LAST_ERROR_CODE below + * 2) /src/rise.c + * 3) /driver/qnx/sepdk/include/rise_errors.h + * + * ************************************************************ + */ + +// +// To make error checking easier, the special VT_LAST_ERROR_CODE +// should be set to whatever is the last error on the list above +// +#define VT_LAST_ERROR_CODE VT_INCORRECT_COUNTER_MASK + +// +// Define a macro to determine success or failure.
Users of this +// error header file should use the macros instead of direct +// checks so that we can change the error numbers in the future +// (such as making negative numbers be an error indication and positive +// numbers being a success with a value indication) +// +#define VTSA_SUCCESS(x) ((x) == VT_SUCCESS) +#define VTSA_FAILED(x) (!VTSA_SUCCESS(x)) + +// +// These should be deprecated, but we'll keep them here just in case +// +#define SEP_IS_SUCCESS(x) VTSA_SUCCESS(x) +#define SEP_IS_FAILED(x) VTSA_FAILED(x) + +/************************************************************* + * API Error Codes + *************************************************************/ +#define VTAPI_INVALID_MAX_SAMP VT_INVALID_MAX_SAMP +#define VTAPI_INVALID_SAMP_PER_BUFF VT_INVALID_SAMP_PER_BUFF +#define VTAPI_INVALID_SAMP_INTERVAL VT_INVALID_SAMP_INTERVAL +#define VTAPI_INVALID_PATH VT_INVALID_PATH +#define VTAPI_TB5_IN_USE VT_TB5_IN_USE +#define VTAPI_INVALID_NUM_EVENTS VT_INVALID_NUM_EVENTS +#define VTAPI_INTERNAL_ERROR VT_INTERNAL_ERROR +#define VTAPI_BAD_EVENT_NAME VT_BAD_EVENT_NAME +#define VTAPI_NO_SAMP_SESSION VT_NO_SAMP_SESSION +#define VTAPI_NO_EVENTS VT_NO_EVENTS +#define VTAPI_MULTIPLE_RUNS VT_MULTIPLE_RUNS +#define VTAPI_NO_SAM_PARAMS VT_NO_SAM_PARAMS +#define VTAPI_SDB_ALREADY_EXISTS VT_SDB_ALREADY_EXISTS +#define VTAPI_SAMPLING_ALREADY_STARTED VT_SAMPLING_ALREADY_STARTED +#define VTAPI_TBS_NOT_SUPPORTED VT_TBS_NOT_SUPPORTED +#define VTAPI_INVALID_SAMPARAMS_SIZE VT_INVALID_SAMPARAMS_SIZE +#define VTAPI_INVALID_EVENT_SIZE VT_INVALID_EVENT_SIZE +#define VTAPI_ALREADY_PROCESSES VT_ALREADY_PROCESSES +#define VTAPI_INVALID_EVENTS_PATH VT_INVALID_EVENTS_PATH +#define VTAPI_INVALID_LICENSE VT_INVALID_LICENSE + +typedef int RISE_ERROR; +typedef void *RISE_PTR; + +#endif diff --git a/drivers/platform/x86/socperf/npk_uncore.c b/drivers/platform/x86/socperf/npk_uncore.c new file mode 100644 index 00000000000000..8d8ad63efa76f9 --- /dev/null +++ b/drivers/platform/x86/socperf/npk_uncore.c @@ -0,0 +1,519 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#include "lwpmudrv_defines.h" +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "inc/socperfdrv.h" +#include "inc/ecb_iterators.h" +#include "inc/pci.h" +#include "inc/control.h" +#include "inc/npk_uncore.h" + +extern LWPMU_DEVICE device_uncore; +static U32 counter_overflow[SOC_NPK_COUNTER_MAX_COUNTERS]; +static U64 counter_virtual_address; +static U64 mchbar_virtual_address; +static U64 mchbar_offset; + +/*! + * @fn static ULONG read_From_Register(U64 bar_virtual_address, + U64 mmio_offset, + U32 *data_val) + * + * @brief Reads register programming info + * + * @param bar_virtual_address - memory address + * mmio_offset - offset of the register + * data_val - register value read + * + * @return data from the counter register + * + * Special Notes: + */ +static void +read_From_Register(U64 bar_virtual_address, U64 mmio_offset, U32 *data_val) +{ + if (data_val) { + *data_val = readl((U32 *)((char *)(UIOP)(bar_virtual_address) + + mmio_offset)); + } + return; +} + +/*! + * @fn static ULONG write_To_Register(U64 bar_virtual_address, + U64 mmio_offset, + U32 value) + * + * @brief Write register programming info + * + * @param bar_virtual_address - memory address + * mmio_offset - offset of the register + * value - register value to be written + * + * @return none + * + * Special Notes: + */ +static void +write_To_Register(U64 bar_virtual_address, U64 mmio_offset, ULONG value) +{ + U32 read_reg = 0; + + writel(value, + (U32 *)(((char *)(UIOP)bar_virtual_address) + mmio_offset)); + read_From_Register(bar_virtual_address, mmio_offset, &read_reg); + + return; +} + +/*! 
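+ * Usage sketch for the two MMIO helpers above (illustrative only; bar_va
+ * and offset are hypothetical placeholders, not names from this driver):
+ *
+ *     U32 ctl = 0;
+ *     read_From_Register(bar_va, offset, &ctl);     // fetch current value
+ *     write_To_Register(bar_va, offset, ctl | 0x1); // write back, bit 0 set
+ *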
+ * @fn static VOID uncore_Reset_Counters(U32 dev_idx) + * + * @brief Reset counters + * + * @param dev_idx - device index + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Reset_Counters(U32 dev_idx) +{ + U32 data_reg = 0; + + if (counter_virtual_address) { + FOR_EACH_PCI_REG_RAW (pecb, i, dev_idx) { + if (ECB_entries_reg_type(pecb, i) == + PMU_REG_EVENT_SELECT) { + data_reg = + i + ECB_operations_register_len( + pecb, PMU_OPERATION_WRITE); + if (ECB_entries_reg_type(pecb, data_reg) == + PMU_REG_DATA) { + write_To_Register( + counter_virtual_address, + ECB_entries_reg_offset( + pecb, data_reg), + (ULONG)0); + } + write_To_Register(counter_virtual_address, + ECB_entries_reg_offset(pecb, + i), + (ULONG)SOC_NPK_UNCORE_STOP); + } + } + END_FOR_EACH_PCI_REG_RAW; + } + + return; +} + +/*! + * @fn static VOID uncore_Write_PMU(VOID*) + * + * @brief Initial write of PMU registers + * Walk through the entries and write the value of the register accordingly. + * When current_group = 0, then this is the first time this routine is called, + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Write_PMU(VOID *param) +{ + U32 dev_idx = *((U32 *)param); + ECB pecb; + DRV_PCI_DEVICE_ENTRY dpden; + U32 pci_address; + U32 bar_lo; + U64 bar_hi; + U64 final_bar; + U64 physical_address; + U32 dev_index = 0; + S32 bar_list[SOC_NPK_UNCORE_MAX_PCI_DEVICES]; + U32 bar_index = 0; + U64 virtual_address = 0; + U32 bar_name = 0; + DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; + U32 next_bar_offset = 0; + U64 mmio_offset = 0; + U32 i = 0; + U32 map_size = 0; + U32 cur_grp; + + if (device_uncore == NULL) { + SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); + return; + } + cur_grp = LWPMU_DEVICE_cur_group(device_uncore); + + pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; + if (pecb == NULL) { + SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); + return; + } + + for (dev_index = 0; dev_index < SOC_NPK_UNCORE_MAX_PCI_DEVICES; + dev_index++) { + bar_list[dev_index] = -1; + } + + // initialize the per-counter overflow numbers + for (i = 0; i < SOC_NPK_COUNTER_MAX_COUNTERS; i++) { + counter_overflow[i] = 0; + socperf_pcb[0].last_uncore_count[i] = 0; + } + + ECB_pcidev_entry_list(pecb) = + (DRV_PCI_DEVICE_ENTRY)((S8 *)pecb + + ECB_pcidev_list_offset(pecb)); + dpden = ECB_pcidev_entry_list(pecb); + + uncore_Reset_Counters(dev_idx); + + SOCPERF_PRINT_DEBUG( + "Inside VISA Driver Write PMU: Number of entries=%d\n", + ECB_num_pci_devices(pecb)); + for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); + dev_index++) { + curr_pci_entry = &dpden[dev_index]; + bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); + mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + curr_pci_entry); + + // UNC_MMIO programming + if (bar_list[bar_name] != -1) { + bar_index = bar_list[bar_name]; + virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( + &dpden[bar_index]); + DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = + DRV_PCI_DEVICE_ENTRY_virtual_address( + &dpden[bar_index]); + write_To_Register(virtual_address, mmio_offset, + (U32)DRV_PCI_DEVICE_ENTRY_value( + curr_pci_entry)); + continue; + } + + pci_address = FORM_PCI_ADDR( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry)); + bar_lo = SOCPERF_PCI_Read_Ulong(pci_address); + SOCPERF_PRINT_DEBUG( + "The bus=%x device=%x function=%x offset=%x\n", + 
DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry)); + next_bar_offset = + DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry) + + SOC_NPK_UNCORE_NEXT_ADDR_OFFSET; + pci_address = FORM_PCI_ADDR( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + next_bar_offset); + bar_hi = SOCPERF_PCI_Read_Ulong(pci_address); + SOCPERF_PRINT_DEBUG( + "The bus=%x device=%x function=%x offset=%x\n", + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + next_bar_offset); + final_bar = (bar_hi << SOC_NPK_UNCORE_BAR_ADDR_SHIFT) | bar_lo; + if (bar_name == UNC_MCHBAR) { + final_bar &= SOC_NPK_UNCORE_MCHBAR_ADDR_MASK; + map_size = SOC_NPK_UNCORE_MCHBAR_MMIO_PAGE_SIZE; + } else { + final_bar &= SOC_NPK_UNCORE_BAR_ADDR_MASK; + map_size = SOC_NPK_UNCORE_NPK_BAR_MMIO_PAGE_SIZE; + } + DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = final_bar; + physical_address = + DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry); + + if (physical_address) { + DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = + (U64)(UIOP)ioremap(physical_address, map_size); + virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( + curr_pci_entry); + + write_To_Register(virtual_address, mmio_offset, + (U32)DRV_PCI_DEVICE_ENTRY_value( + curr_pci_entry)); + bar_list[bar_name] = dev_index; + if (counter_virtual_address == 0) { + counter_virtual_address = virtual_address; + } + if (mchbar_virtual_address == 0 && + bar_name == UNC_MCHBAR) { + mchbar_virtual_address = virtual_address; + mchbar_offset = mmio_offset; + } + } + } + + return; +} + +/*! + * @fn static VOID uncore_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when sampling/driver stops + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Disable_PMU(PVOID param) +{ + U32 dev_idx = *((U32 *)param); + + if (GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_PREPARE_STOP) { + uncore_Reset_Counters(dev_idx); + if (mchbar_virtual_address) { + write_To_Register(mchbar_virtual_address, mchbar_offset, + 0x0); + iounmap((void *)(UIOP)(mchbar_virtual_address)); + SOCPERF_PRINT_DEBUG("Unmapping MCHBAR address=%x\n", + mchbar_virtual_address); + } + if (counter_virtual_address) { + iounmap((void *)(UIOP)(counter_virtual_address)); + SOCPERF_PRINT_DEBUG("Unmapping NPKBAR address=%x\n", + counter_virtual_address); + } + counter_virtual_address = 0; + mchbar_virtual_address = 0; + mchbar_offset = 0; + } + + return; +} + +/*! + * @fn static VOID uncore_Initialize(PVOID) + * + * @brief Initialize any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Initialize(VOID *param) +{ + counter_virtual_address = 0; + mchbar_virtual_address = 0; + mchbar_offset = 0; + return; +} + +/*! + * @fn static VOID uncore_Clean_Up(PVOID) + * + * @brief Reset any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Clean_Up(VOID *param) +{ + counter_virtual_address = 0; + mchbar_virtual_address = 0; + mchbar_offset = 0; + return; +} + +/* ------------------------------------------------------------------------- */ +/*! 
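+ * Counter totalling sketch (mirrors the read loop below, with the
+ * per-event array bookkeeping elided): the counters are read as 32-bit
+ * values, so a raw value smaller than the previous one implies a wrap:
+ *
+ *     if (data_val < last_count)
+ *             overflows++;
+ *     last_count  = data_val;
+ *     total_count = data_val + overflows * SOC_NPK_COUNTER_MAX_COUNT;
+ *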
+ * @fn uncore_Read_Data() + * + * @param None + * + * @return None No return needed + * + * @brief Read the counters + * + */ +static VOID uncore_Read_Data(PVOID data_buffer) +{ + U32 event_id = 0; + U64 *data; + int data_index; + U32 data_val = 0; + U32 data_reg = 0; + U64 total_count = 0; + U32 event_index = 0; + U32 cur_grp; + + if (device_uncore == NULL) { + SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); + return; + } + cur_grp = LWPMU_DEVICE_cur_group(device_uncore); + + if (GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_UNINITIALIZED || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_IDLE || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_RESERVED || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_PREPARE_STOP || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_STOPPED) { + SOCPERF_PRINT_ERROR("ERROR: RETURNING EARLY from Read_Data\n"); + return; + } + + if (data_buffer == NULL) { + return; + } + + data = (U64 *)data_buffer; + data_index = 0; + + // Write GroupID + data[data_index] = cur_grp + 1; + // Increment the data index as the event id starts from zero + data_index++; + + FOR_EACH_PCI_REG_RAW (pecb, i, dev_idx) { + if (ECB_entries_reg_type(pecb, i) == PMU_REG_EVENT_SELECT) { + write_To_Register(counter_virtual_address, + ECB_entries_reg_offset(pecb, i), + (ULONG)SOC_NPK_UNCORE_SAMPLE_DATA); + + data_reg = i + ECB_operations_register_len( + pecb, PMU_OPERATION_WRITE); + if (ECB_entries_reg_type(pecb, data_reg) == + PMU_REG_DATA) { + read_From_Register( + counter_virtual_address, + ECB_entries_reg_offset(pecb, data_reg), + &data_val); + if (data_val < + socperf_pcb[0] + .last_uncore_count[event_index]) { + counter_overflow[event_index]++; + } + socperf_pcb[0].last_uncore_count[event_index] = + data_val; + total_count = data_val + + counter_overflow[event_index] * + SOC_NPK_COUNTER_MAX_COUNT; + event_index++; + data[data_index + event_id] = total_count; + SOCPERF_PRINT_DEBUG("DATA[%d]=%llu\n", event_id, + total_count); + event_id++; + } + } + } + END_FOR_EACH_PCI_REG_RAW; + + return; +} + +/* + * Initialize the dispatch table + */ +DISPATCH_NODE npk_dispatch = { + .init = uncore_Initialize, // initialize + .fini = NULL, // destroy + .write = uncore_Write_PMU, // write + .freeze = uncore_Disable_PMU, // freeze + .restart = NULL, // restart + .read_data = NULL, // read + .check_overflow = NULL, // check for overflow + .swap_group = NULL, + .read_lbrs = NULL, + .clean_up = uncore_Clean_Up, + .hw_errata = NULL, + .read_power = NULL, + .check_overflow_errata = NULL, + .read_counts = NULL, // read counts + .check_overflow_gp_errata = NULL, + .read_ro = NULL, + .platform_info = NULL, + .trigger_read = NULL, + .read_current_data = uncore_Read_Data, + .create_mem = NULL, + .check_status = NULL, + .read_mem = NULL, + .stop_mem = NULL +}; diff --git a/drivers/platform/x86/socperf/pci.c b/drivers/platform/x86/socperf/pci.c new file mode 100644 index 00000000000000..fc131752b72ba6 --- /dev/null +++ b/drivers/platform/x86/socperf/pci.c @@ -0,0 +1,197 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved.
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "socperfdrv.h" +#include "pci.h" + +/* ------------------------------------------------------------------------- */ +/*! 
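+ * Mapping sketch shared by the two memory-address helpers below: the
+ * caller's physical address is split into a page-aligned base plus an
+ * offset, since ioremap() operates at page granularity:
+ *
+ *     offset       = addr & ~PAGE_MASK;
+ *     aligned_addr = addr & PAGE_MASK;
+ *     base         = ioremap(aligned_addr, PAGE_SIZE);
+ *     value        = readl(base + offset);
+ *     iounmap(base);
+ *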
+ * @fn extern int SOCPERF_PCI_Read_From_Memory_Address(addr, val) + * + * @param addr - physical address in mmio + * @param val - value at this address + * + * @return status + * + * @brief Read memory mapped i/o physical location + * + */ +extern int SOCPERF_PCI_Read_From_Memory_Address(U32 addr, U32 *val) +{ + U32 aligned_addr, offset, value; + PVOID base; + + if (addr == 0) { + return OS_INVALID; + } + + SOCPERF_PRINT_DEBUG( + "SOCPERF_PCI_Read_From_Memory_Address: reading physical address:%x\n", + addr); + offset = addr & ~PAGE_MASK; + aligned_addr = addr & PAGE_MASK; + SOCPERF_PRINT_DEBUG( + "SOCPERF_PCI_Read_From_Memory_Address: aligned physical address:%x,offset:%x\n", + aligned_addr, offset); + + base = ioremap(aligned_addr, PAGE_SIZE); + if (base == NULL) { + return OS_INVALID; + } + + value = readl(base + offset); + *val = value; + SOCPERF_PRINT_DEBUG( + "SOCPERF_PCI_Read_From_Memory_Address: value at this physical address:%x\n", + value); + + iounmap(base); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern int SOCPERF_PCI_Write_To_Memory_Address(addr, val) + * + * @param addr - physical address in mmio + * @param val - value to be written + * + * @return status + * + * @brief Write to memory mapped i/o physical location + * + */ +extern int SOCPERF_PCI_Write_To_Memory_Address(U32 addr, U32 val) +{ + U32 aligned_addr, offset; + PVOID base; + + if (addr == 0) { + return OS_INVALID; + } + + SOCPERF_PRINT_DEBUG( + "SOCPERF_PCI_Write_To_Memory_Address: writing physical address:%x with value:%x\n", + addr, val); + offset = addr & ~PAGE_MASK; + aligned_addr = addr & PAGE_MASK; + SOCPERF_PRINT_DEBUG( + "SOCPERF_PCI_Write_To_Memory_Address: aligned physical address:%x,offset:%x\n", + aligned_addr, offset); + + base = ioremap(aligned_addr, PAGE_SIZE); + if (base == NULL) { + return OS_INVALID; + } + + writel(val, base + offset); + + iounmap(base); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern int SOCPERF_PCI_Read_Ulong(pci_address) + * + * @param pci_address - PCI configuration address + * + * @return value at this location + * + * @brief Reads a ULONG from PCI configuration space + * + */ +extern int SOCPERF_PCI_Read_Ulong(U32 pci_address) +{ + U32 temp_ulong = 0; + + outl(pci_address, PCI_ADDR_IO); + temp_ulong = inl(PCI_DATA_IO); + + return temp_ulong; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn extern void SOCPERF_PCI_Write_Ulong(pci_address, value) + * + * @param pci_address - PCI configuration address + * @param value - Value to be written + * + * @return None + * + * @brief Writes a ULONG to PCI configuration space + * + */ +extern void SOCPERF_PCI_Write_Ulong(U32 pci_address, U32 value) +{ + outl(pci_address, PCI_ADDR_IO); + outl(value, PCI_DATA_IO); + + return; +} diff --git a/drivers/platform/x86/socperf/pmu_list.c b/drivers/platform/x86/socperf/pmu_list.c new file mode 100644 index 00000000000000..aa4aad32826d32 --- /dev/null +++ b/drivers/platform/x86/socperf/pmu_list.c @@ -0,0 +1,414 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved.
+ + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2019-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *********************************************************************************************** +*/ + +#include "lwpmudrv_defines.h" +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "socperfdrv.h" +#include "inc/control.h" +#include "inc/utility.h" + +#include "inc/pmu_info_struct.h" +#include "inc/pmu_info_mmio.h" +#include "inc/pmu_info.h" +#include "inc/pmu_list.h" + +static S32 pmu_info_index = -1; + +static PMU_SEARCH_NODE *mmio_root; + +static void +pmu_list_Lookup_PMU_Info(const PMU_INFO_NODE *pmu_list, + U32 family, + U32 model, + U32 stepping) +{ + S32 i = 0; + + while (!(pmu_list[i].family == 0 && pmu_list[i].model == 0 && + pmu_list[i].stepping_to == 0)) { + if (pmu_list[i].family == family && + pmu_list[i].model == model && + pmu_list[i].stepping_from <= stepping && + pmu_list[i].stepping_to >= stepping) { + pmu_info_index = i; + return; + } + i++; + } +} + +/**************************************************************************************** + * Common helper functions for search algorithm + ****************************************************************************************/ + +static S32 +pmu_list_Max_Height(PMU_SEARCH_NODE *node_left, PMU_SEARCH_NODE *node_right) +{ + if (node_left && node_right) { + return (node_left->height > node_right->height) ? + node_left->height : + node_right->height; + } else if (node_left) { + return node_left->height; + } else if (node_right) { + return node_right->height; + } + return 0; +} + +static PMU_SEARCH_NODE * +pmu_list_Right_Rotate(PMU_SEARCH_NODE *node) +{ + PMU_SEARCH_NODE *nn, *r_child_nn; + + nn = node->left; + r_child_nn = nn->right; + + // Rotate + nn->right = node; // node becomes right child + node->left = r_child_nn; // original right child becomes left child + + // update height + node->height = 1 + pmu_list_Max_Height(node->left, node->right); + nn->height = 1 + pmu_list_Max_Height(nn->left, nn->right); + + return nn; +} + +static PMU_SEARCH_NODE * +pmu_list_Left_Rotate(PMU_SEARCH_NODE *node) +{ + PMU_SEARCH_NODE *nn, *l_child_nn; + + nn = node->right; + l_child_nn = nn->left; + + // Rotate + nn->left = node; // node becomes left child + node->right = l_child_nn; // original left child becomes right child + + // update height + node->height = 1 + pmu_list_Max_Height(node->left, node->right); + nn->height = 1 + pmu_list_Max_Height(nn->left, nn->right); + + return nn; +} + +static PMU_SEARCH_NODE * +pmu_list_Create_Node(U64 key, S32 range, void *addr) +{ + PMU_SEARCH_NODE *temp = (PMU_SEARCH_NODE *)SOCPERF_Allocate_Memory( + sizeof(PMU_SEARCH_NODE)); + temp->key = key; + temp->range = (U16)range; + temp->left = NULL; + temp->right = NULL; + temp->height = 1; + temp->addr = addr; + return temp; +} + +static void pmu_list_Delete_Tree(PMU_SEARCH_NODE *node) +{ + if (node == NULL) { + return; + } + + pmu_list_Delete_Tree(node->left); + pmu_list_Delete_Tree(node->right); + + node->left = NULL; + node->right = NULL; + + SOCPERF_Free_Memory(node); +} + +/**************************************************************************************** + * Range is not used: for PCI and MMIO + ****************************************************************************************/ + +static PMU_SEARCH_NODE * +pmu_list_Balance_Tree(PMU_SEARCH_NODE *node, U64 key) +{ + S32 height_delta = 0; + + if (node->left && node->right) { + height_delta = node->left->height - node->right->height; + } else if (node->left) { + height_delta = node->left->height; + } else if
(node->right) { + height_delta = 0 - (node->right->height); + } + + if (height_delta == 0) { + // tree is balanced + return node; + } + // if Delta > 1, balance left tree + else if ((height_delta > 1) && (node->key > key)) { + node = pmu_list_Right_Rotate(node); + } else if ((height_delta > 1) && (node->key < key)) { + node->left = pmu_list_Left_Rotate(node->left); + node = pmu_list_Right_Rotate(node); + } + // if Delta < -1, balance right tree + else if ((height_delta < -1) && (node->key < key)) { + node = pmu_list_Left_Rotate(node); + } else if ((height_delta < -1) && (node->key > key)) { + node->right = pmu_list_Right_Rotate(node->right); + node = pmu_list_Left_Rotate(node); + } + return node; +} + +static PMU_SEARCH_NODE * +pmu_list_Insert_Node(PMU_SEARCH_NODE *node, U64 key, void *addr) +{ + if (node == NULL) { + // make it root, range = 0 + node = pmu_list_Create_Node(key, 0, addr); + } else if (node->key < key) { + // insert to right sub tree + node->right = pmu_list_Insert_Node(node->right, key, addr); + } else if (node->key > key) { + // insert to left sub tree + node->left = pmu_list_Insert_Node(node->left, key, addr); + } else { + // do nothing + return node; + } + + // update height + node->height = 1 + pmu_list_Max_Height(node->left, node->right); + + // Balance the tree + return pmu_list_Balance_Tree(node, key); +} + +static PMU_SEARCH_NODE * +pmu_list_binary_search(PMU_SEARCH_NODE *node, U64 key) +{ + if (node == NULL) { + return NULL; + } + if (node->key == key) { + return node; + } else if (node->key < key) { + return pmu_list_binary_search(node->right, key); + } else { + return pmu_list_binary_search(node->left, key); + } +} + +extern DRV_BOOL +PMU_LIST_Check_MMIO(PMU_MMIO_BAR_INFO_NODE primary, + PMU_MMIO_BAR_INFO_NODE secondary, + U32 offset) +{ + PMU_SEARCH_NODE *temp; + U64 key; + PMU_MMIO_UNIT_INFO_NODE *unit_info = NULL; + DRV_BOOL ret = FALSE; + + SOCPERF_PRINT_DEBUG(""); + + if (pmu_info_index == -1 || mmio_root == NULL) { + SOCPERF_PRINT_DEBUG("Success"); + return FALSE; + } + + if (primary.bar_prog_type == MMIO_SINGLE_BAR_TYPE || + primary.bar_prog_type == MMIO_DUAL_BAR_TYPE) { + key = (U64)primary.u.reg << 32 | offset; + } else if (primary.bar_prog_type == MMIO_DIRECT_BAR_TYPE) { + key = (U64)primary.mask << 32 | offset; + } else { + SOCPERF_PRINT_ERROR("Invalid BAR prog type %d", + primary.bar_prog_type); + SOCPERF_PRINT_DEBUG("Success"); + return FALSE; + } + + temp = pmu_list_binary_search(mmio_root, key); + if (temp != NULL) { + if (primary.bar_prog_type == MMIO_DIRECT_BAR_TYPE) { + ret = TRUE; + } else if (primary.bar_prog_type == MMIO_SINGLE_BAR_TYPE) { + unit_info = (PMU_MMIO_UNIT_INFO_NODE *)temp->addr; + if (unit_info && + (unit_info->primary.mask == primary.mask) && + (unit_info->primary.shift == primary.shift)) { + ret = TRUE; + } + } else if (primary.bar_prog_type == MMIO_DUAL_BAR_TYPE) { + unit_info = (PMU_MMIO_UNIT_INFO_NODE *)temp->addr; + if (unit_info && + (unit_info->primary.mask == primary.mask) && + (unit_info->primary.shift == primary.shift) && + (unit_info->secondary.u.s.offset == + secondary.u.s.offset) && + (unit_info->secondary.mask == secondary.mask) && + (unit_info->secondary.shift == secondary.shift)) { + ret = TRUE; + } + } + } + + SOCPERF_PRINT_DEBUG("Success"); + return ret; +} + +extern OS_STATUS PMU_LIST_Initialize(void) +{ + U64 rax, rbx, rcx, rdx; + U32 family, model, stepping; + + SOCPERF_PRINT_DEBUG(""); + + SOCPERF_UTILITY_Read_Cpuid(0x1, &rax, &rbx, &rcx, &rdx); + + family = (U32)(rax >> 8 & 0x0f); + model = (U32)(rax >> 
12 & 0xf0); /* extended model bits */ + model |= (U32)(rax >> 4 & 0x0f); + stepping = (U32)(rax & 0x0f); + + pmu_info_index = -1; + pmu_list_Lookup_PMU_Info(pmu_info_list, family, model, stepping); + + SOCPERF_PRINT("PMU check enabled! F%x.M%x.S%x index=%d\n", family, + model, stepping, pmu_info_index); + + SOCPERF_PRINT_DEBUG("Success"); + return OS_SUCCESS; +} + +extern OS_STATUS PMU_LIST_Build_MMIO_List(void) +{ + U32 unit_idx = 0; + U32 reg_idx = 0; + U64 key; + PMU_MMIO_UNIT_INFO_NODE *unit_info_list; + + SOCPERF_PRINT_DEBUG(""); + + if (pmu_info_index == -1) { + SOCPERF_PRINT("No MMIO list information detected!\n"); + SOCPERF_PRINT_DEBUG("Success"); + return OS_SUCCESS; + } + + unit_info_list = pmu_info_list[pmu_info_index].mmio_info_list; + + if (!unit_info_list) { + SOCPERF_PRINT("No MMIO list information detected!\n"); + return OS_SUCCESS; + } + + SOCPERF_DRV_MEMSET(&key, 0, sizeof(U64)); + + while (unit_info_list[unit_idx] + .reg_offset_list) { //Iterate through unit list + reg_idx = 0; + while (unit_info_list[unit_idx].reg_offset_list[reg_idx] != + 0x0) { //Iterate through offset list + switch (unit_info_list[unit_idx].primary.bar_prog_type) { + case MMIO_SINGLE_BAR_TYPE: + case MMIO_DUAL_BAR_TYPE: + key = (U64)unit_info_list[unit_idx].primary.u.reg + << 32 | + (U64)unit_info_list[unit_idx] + .reg_offset_list[reg_idx]; + mmio_root = pmu_list_Insert_Node( + mmio_root, key, + (void *)(&unit_info_list[unit_idx])); + break; + case MMIO_DIRECT_BAR_TYPE: + key = (U64)unit_info_list[unit_idx].primary.mask + << 32 | + (U64)unit_info_list[unit_idx] + .reg_offset_list[reg_idx]; + mmio_root = pmu_list_Insert_Node( + mmio_root, key, + (void *)(&unit_info_list[unit_idx])); + break; + } + reg_idx++; + } + unit_idx++; + } + + SOCPERF_PRINT_DEBUG("Success"); + return OS_SUCCESS; +} + +extern OS_STATUS PMU_LIST_Clean_Up(void) +{ + SOCPERF_PRINT_DEBUG(""); + + pmu_info_index = -1; + + if (mmio_root) { + pmu_list_Delete_Tree(mmio_root); + mmio_root = NULL; + } + + SOCPERF_PRINT_DEBUG("Success"); + return OS_SUCCESS; +} diff --git a/drivers/platform/x86/socperf/soc_uncore.c b/drivers/platform/x86/socperf/soc_uncore.c new file mode 100644 index 00000000000000..d1b9761268d20c --- /dev/null +++ b/drivers/platform/x86/socperf/soc_uncore.c @@ -0,0 +1,948 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2013-2021 Intel Corporation. All rights reserved. + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#include "lwpmudrv_defines.h" +#include +#include + +#include "lwpmudrv_types.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" + +#include "socperfdrv.h" +#include "control.h" +#include "soc_uncore.h" +#include "inc/ecb_iterators.h" +#include "inc/pci.h" + +#if defined(PCI_HELPERS_API) +#include +#elif defined(DRV_CHROMEOS) +#include +static struct pci_dev *pci_root; +#define PCI_DEVFN(slot, func) ((((slot)&0x1f) << 3) | ((func)&0x07)) +#endif + +static U32 counter_overflow[UNCORE_MAX_COUNTERS]; +static U32 counter_port_id; +static U64 trace_virtual_address; + +#if defined(DRV_CHROMEOS) +/*! + * @fn static VOID get_pci_device_handle(U32 bus_no, + U32 dev_no, + U32 func_no) + * + * @brief Get PCI device handle to be able to read/write + * + * @param bus_no - bus number + * dev_no - device number + * func_no - function number + * + * @return None + * + * Special Notes: + */ +static void get_pci_device_handle(U32 bus_no, U32 dev_no, U32 func_no) +{ + if (!pci_root) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + pci_root = pci_get_domain_bus_and_slot( + 0, bus_no, PCI_DEVFN(dev_no, func_no)); +#else + pci_root = pci_get_bus_and_slot(bus_no, + PCI_DEVFN(dev_no, func_no)); +#endif + if (!pci_root) { + SOCPERF_PRINT_DEBUG("Unable to get pci device handle"); + } + } + + return; +} +#endif + +/*! 
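+ * Sideband command encoding used by the register helpers below (taken
+ * directly from their bodies): the 32-bit message-bus command packs the
+ * opcode, port ID, low offset bits and byte enables as
+ *
+ *     cmd = (op_code << SOC_UNCORE_OP_CODE_SHIFT)
+ *         + (port_id << SOC_UNCORE_PORT_ID_SHIFT)
+ *         + (mmio_offset_lo << 8)
+ *         + (SOC_UNCORE_BYTE_ENABLES << 4);
+ *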
+ * @fn static VOID write_To_Register(U32 bus_no, + U32 dev_no, + U32 func_no, + U32 port_id, + U32 op_code, + U64 mmio_offset, + ULONG value) + * + * @brief Writes Uncore programming info + * + * @param bus_no - bus number + * dev_no - device number + * func_no - function number + * port_id - port id + * op_code - operation code + * mmio_offset - mmio offset + * value - data to be written to the register + * + * @return None + * + * Special Notes: + */ +static void +write_To_Register(U32 bus_no, + U32 dev_no, + U32 func_no, + U32 port_id, + U32 op_code, + U64 mmio_offset, + ULONG value) +{ + U32 cmd = 0; + U32 mmio_offset_lo; + U32 mmio_offset_hi; +#if !defined(DRV_CHROMEOS) && !defined(PCI_HELPERS_API) + U32 pci_address; +#endif + + mmio_offset_hi = mmio_offset & SOC_UNCORE_OFFSET_HI_MASK; + mmio_offset_lo = mmio_offset & SOC_UNCORE_OFFSET_LO_MASK; + cmd = (op_code << SOC_UNCORE_OP_CODE_SHIFT) + + (port_id << SOC_UNCORE_PORT_ID_SHIFT) + (mmio_offset_lo << 8) + + (SOC_UNCORE_BYTE_ENABLES << 4); + SOCPERF_PRINT_DEBUG("write off=%llx value=%x\n", mmio_offset, value); + +#if defined(PCI_HELPERS_API) + intel_mid_msgbus_write32_raw_ext(cmd, mmio_offset_hi, value); +#elif defined(DRV_CHROMEOS) + if (!pci_root) { + get_pci_device_handle(bus_no, dev_no, func_no); + } + pci_write_config_dword(pci_root, SOC_UNCORE_MDR_REG_OFFSET, value); + pci_write_config_dword(pci_root, SOC_UNCORE_MCRX_REG_OFFSET, + mmio_offset_hi); + pci_write_config_dword(pci_root, SOC_UNCORE_MCR_REG_OFFSET, cmd); +#else + pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, + SOC_UNCORE_MDR_REG_OFFSET); + SOCPERF_PCI_Write_Ulong((ULONG)pci_address, (ULONG)value); + pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, + SOC_UNCORE_MCRX_REG_OFFSET); + SOCPERF_PCI_Write_Ulong((ULONG)pci_address, mmio_offset_hi); + pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, + SOC_UNCORE_MCR_REG_OFFSET); + SOCPERF_PCI_Write_Ulong((ULONG)pci_address, cmd); +#endif + + return; +} + +/*!
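+ * Read sequence sketch (mirrors the generic PCI path below; bus, dev and
+ * func stand for the function arguments): program the extended offset
+ * register, issue the command, then fetch the result:
+ *
+ *     SOCPERF_PCI_Write_Ulong(FORM_PCI_ADDR(bus, dev, func,
+ *                                           SOC_UNCORE_MCRX_REG_OFFSET),
+ *                             mmio_offset_hi);
+ *     SOCPERF_PCI_Write_Ulong(FORM_PCI_ADDR(bus, dev, func,
+ *                                           SOC_UNCORE_MCR_REG_OFFSET),
+ *                             cmd);
+ *     data = SOCPERF_PCI_Read_Ulong(FORM_PCI_ADDR(bus, dev, func,
+ *                                                 SOC_UNCORE_MDR_REG_OFFSET));
+ *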
+ * @fn static ULONG read_From_Register(U32 bus_no, + U32 dev_no, + U32 func_no, + U32 port_id, + U32 op_code, + U64 mmio_offset) + * + * @brief Reads Uncore programming info + * + * @param bus_no - bus number + * dev_no - device number + * func_no - function number + * port_id - port id + * op_code - operation code + * mmio_offset - mmio offset + * + * @return data from the counter + * + * Special Notes: + */ +static void +read_From_Register(U32 bus_no, + U32 dev_no, + U32 func_no, + U32 port_id, + U32 op_code, + U64 mmio_offset, + U32 *data_val) +{ + U32 data = 0; + U32 cmd = 0; + U32 mmio_offset_hi; + U32 mmio_offset_lo; +#if !defined(DRV_CHROMEOS) && !defined(PCI_HELPERS_API) + U32 pci_address; +#endif + + mmio_offset_hi = mmio_offset & SOC_UNCORE_OFFSET_HI_MASK; + mmio_offset_lo = mmio_offset & SOC_UNCORE_OFFSET_LO_MASK; + cmd = (op_code << SOC_UNCORE_OP_CODE_SHIFT) + + (port_id << SOC_UNCORE_PORT_ID_SHIFT) + (mmio_offset_lo << 8) + + (SOC_UNCORE_BYTE_ENABLES << 4); + +#if defined(PCI_HELPERS_API) + data = intel_mid_msgbus_read32_raw_ext(cmd, mmio_offset_hi); +#elif defined(DRV_CHROMEOS) + if (!pci_root) { + get_pci_device_handle(bus_no, dev_no, func_no); + } + pci_write_config_dword(pci_root, SOC_UNCORE_MCRX_REG_OFFSET, + mmio_offset_hi); + pci_write_config_dword(pci_root, SOC_UNCORE_MCR_REG_OFFSET, cmd); + pci_read_config_dword(pci_root, SOC_UNCORE_MDR_REG_OFFSET, &data); +#else + pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, + SOC_UNCORE_MCRX_REG_OFFSET); + SOCPERF_PCI_Write_Ulong((ULONG)pci_address, mmio_offset_hi); + pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, + SOC_UNCORE_MCR_REG_OFFSET); + SOCPERF_PCI_Write_Ulong((ULONG)pci_address, cmd); + pci_address = FORM_PCI_ADDR(bus_no, dev_no, func_no, + SOC_UNCORE_MDR_REG_OFFSET); + data = SOCPERF_PCI_Read_Ulong(pci_address); +#endif + SOCPERF_PRINT_DEBUG("read off=%llx value=%x\n", mmio_offset, data); + if (data_val) { + *data_val = data; + } + + return; +} + +/*! + * @fn static VOID uncore_Reset_Counters(U32 dev_idx) + * + * @brief Reset counters + * + * @param dev_idx - device index + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Reset_Counters(U32 dev_idx) +{ + U32 data_reg = 0; + + if (counter_port_id != 0) { + FOR_EACH_PCI_REG_RAW (pecb, i, dev_idx) { + if (ECB_entries_reg_type(pecb, i) == + PMU_REG_EVENT_SELECT) { + data_reg = + i + ECB_operations_register_len( + pecb, PMU_OPERATION_WRITE); + if (ECB_entries_reg_type(pecb, data_reg) == + PMU_REG_DATA) { + write_To_Register( + ECB_entries_bus_no(pecb, + data_reg), + ECB_entries_dev_no(pecb, + data_reg), + ECB_entries_func_no(pecb, + data_reg), + counter_port_id, + SOC_COUNTER_WRITE_OP_CODE, + ECB_entries_reg_offset( + pecb, data_reg), + (ULONG)0); + } + write_To_Register(ECB_entries_bus_no(pecb, i), + ECB_entries_dev_no(pecb, i), + ECB_entries_func_no(pecb, i), + counter_port_id, + SOC_COUNTER_WRITE_OP_CODE, + ECB_entries_reg_offset(pecb, + i), + (ULONG)SOC_UNCORE_STOP); + } + } + END_FOR_EACH_PCI_REG_RAW; + } + + return; +} + +/*! + * @fn static VOID uncore_Write_PMU(VOID*) + * + * @brief Initial write of PMU registers + * Walk through the entries and write the value of the register accordingly. 
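+ * BAR assembly sketch (as coded below): each 64-bit BAR is rebuilt from
+ * two 32-bit config-space reads, one at bar_offset and one at
+ * bar_offset + SOC_UNCORE_NEXT_ADDR_OFFSET, then masked:
+ *
+ *     final_bar = (bar_hi << SOC_UNCORE_BAR_ADDR_SHIFT) | bar_lo;
+ *     final_bar &= SOC_UNCORE_BAR_ADDR_MASK;
+ *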
+ * When current_group = 0, then this is the first time this routine is called, + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Write_PMU(VOID *param) +{ + U32 dev_idx; + ECB pecb; + DRV_PCI_DEVICE_ENTRY dpden; + U32 pci_address; + U32 bar_lo; + U64 bar_hi; + U64 final_bar; + U64 physical_address; + U32 dev_index = 0; + S32 bar_list[SOC_UNCORE_MAX_PCI_DEVICES]; + U32 bar_index = 0; + U32 map_size = 0; + U64 virtual_address = 0; + U32 bar_name = 0; + DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; + U32 next_bar_offset = 0; + U64 mmio_offset = 0; + U64 map_base = 0; + U32 i = 0; + U32 cur_grp; + + dev_idx = *((U32 *)param); + if (device_uncore == NULL) { + SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); + return; + } + cur_grp = LWPMU_DEVICE_cur_group(device_uncore); + + pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; + if (pecb == NULL) { + SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); + return; + } + + for (dev_index = 0; dev_index < SOC_UNCORE_MAX_PCI_DEVICES; + dev_index++) { + bar_list[dev_index] = -1; + } + + // initialize the per-counter overflow numbers + for (i = 0; i < UNCORE_MAX_COUNTERS; i++) { + counter_overflow[i] = 0; + socperf_pcb[0].last_uncore_count[i] = 0; + } + + ECB_pcidev_entry_list(pecb) = + (DRV_PCI_DEVICE_ENTRY)((S8 *)pecb + + ECB_pcidev_list_offset(pecb)); + dpden = ECB_pcidev_entry_list(pecb); + + uncore_Reset_Counters(dev_idx); + + for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); + dev_index++) { + curr_pci_entry = &dpden[dev_index]; + bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); + mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + curr_pci_entry); + + if (counter_port_id == 0 && + DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == + UNC_COUNTER) { + counter_port_id = + DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry); + uncore_Reset_Counters(dev_idx); + } + if (DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == + UNC_PCICFG) { + if (bar_name == UNC_SOCPCI && + (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == + UNC_MUX || + DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == + UNC_COUNTER) && + DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) == + UNC_OP_WRITE) { + SOCPERF_PRINT_DEBUG( + "dev_index=%d OFFSET=%x VAL=%x\n", + dev_index, + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_value( + curr_pci_entry)); + write_To_Register( + DRV_PCI_DEVICE_ENTRY_bus_no( + curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no( + curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no( + curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_port_id( + curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_op_code( + curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + curr_pci_entry), + (ULONG)DRV_PCI_DEVICE_ENTRY_value( + curr_pci_entry)); + } + continue; + } + // UNC_MMIO programming + if (bar_list[bar_name] != -1) { + bar_index = bar_list[bar_name]; + virtual_address = DRV_PCI_DEVICE_ENTRY_virtual_address( + &dpden[bar_index]); + DRV_PCI_DEVICE_ENTRY_virtual_address(curr_pci_entry) = + DRV_PCI_DEVICE_ENTRY_virtual_address( + &dpden[bar_index]); + writel(DRV_PCI_DEVICE_ENTRY_value(curr_pci_entry), + (U32 *)(((char *)(UIOP)virtual_address) + + mmio_offset)); + continue; + } + pci_address = FORM_PCI_ADDR( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry)); + bar_lo = SOCPERF_PCI_Read_Ulong(pci_address); + next_bar_offset = + 
DRV_PCI_DEVICE_ENTRY_bar_offset(curr_pci_entry) + + SOC_UNCORE_NEXT_ADDR_OFFSET; + pci_address = FORM_PCI_ADDR( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + next_bar_offset); + bar_hi = SOCPERF_PCI_Read_Ulong(pci_address); + final_bar = (bar_hi << SOC_UNCORE_BAR_ADDR_SHIFT) | bar_lo; + final_bar &= SOC_UNCORE_BAR_ADDR_MASK; + DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry) = final_bar; + physical_address = + DRV_PCI_DEVICE_ENTRY_bar_address(curr_pci_entry); + if (physical_address) { + map_size = SOC_UNCORE_OTHER_BAR_MMIO_PAGE_SIZE; + map_base = (mmio_offset / map_size) * map_size; + if (mmio_offset > map_size) { + physical_address = physical_address + map_base; + } + } + } + + return; +} + +/*! + * @fn static VOID uncore_Disable_PMU(PVOID) + * + * @brief Unmap the virtual address when sampling/driver stops + * + * @param param - device index + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Disable_PMU(PVOID param) +{ + U32 dev_idx = *((U32 *)param); + + if (GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_PREPARE_STOP) { + uncore_Reset_Counters(dev_idx); + } + + return; +} + +/*! + * @fn static VOID uncore_Stop_Mem(VOID) + * + * @brief Stop trace + * + * @param param - None + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Stop_Mem(VOID) +{ + ECB pecb; + DRV_PCI_DEVICE_ENTRY dpden; + U32 bar_name = 0; + DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; + U64 mmio_offset = 0; + U32 dev_index = 0; + U32 data_val = 0; + U32 cur_grp; + + if (device_uncore == NULL) { + SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); + return; + } + cur_grp = LWPMU_DEVICE_cur_group(device_uncore); + + pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[cur_grp]; + if (pecb == NULL) { + SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); + return; + } + + ECB_pcidev_entry_list(pecb) = + (DRV_PCI_DEVICE_ENTRY)((S8 *)pecb + + ECB_pcidev_list_offset(pecb)); + dpden = ECB_pcidev_entry_list(pecb); + + for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); + dev_index++) { + curr_pci_entry = &dpden[dev_index]; + bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); + mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + curr_pci_entry); + + if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == + UNC_STOP && + DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == + UNC_PCICFG && + bar_name == UNC_SOCPCI && + DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) == + UNC_OP_READ) { + SOCPERF_PRINT_DEBUG( + "op=%d port=%d offset=%x val=%x\n", + DRV_PCI_DEVICE_ENTRY_op_code(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), + mmio_offset, data_val); + read_From_Register( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), + SOC_COUNTER_READ_OP_CODE, mmio_offset, + &data_val); + SOCPERF_PRINT_DEBUG( + "op=%d port=%d offset=%x val=%x\n", + DRV_PCI_DEVICE_ENTRY_op_code(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), + mmio_offset, data_val); + write_To_Register( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), + SOC_COUNTER_WRITE_OP_CODE, mmio_offset, + (ULONG)(data_val | 0x2000)); + } + } + + return; +} + +/*! 
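+ * Lifecycle note (an inference from the soc_uncore_dispatch table at the
+ * end of this file, not a normative contract): the SoCPerf core drives
+ * this PMU roughly as
+ *
+ *     .init -> .write -> collection -> .freeze -> .clean_up
+ *
+ * with .create_mem / .check_status / .read_mem / .stop_mem reserved for
+ * the trace-memory path.
+ *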
+ * @fn static VOID uncore_Initialize(PVOID) + * + * @brief Initialize any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Initialize(VOID *param) +{ + return; +} + +/*! + * @fn static VOID uncore_Clean_Up(PVOID) + * + * @brief Reset any registers or addresses + * + * @param param + * + * @return None + * + * Special Notes: + */ +static VOID uncore_Clean_Up(VOID *param) +{ + if (trace_virtual_address) { + iounmap((void *)(UIOP)trace_virtual_address); + trace_virtual_address = 0; + } + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn uncore_Read_Data() + * + * @param None + * + * @return None No return needed + * + * @brief Read the counters + * + */ +static VOID uncore_Read_Data(PVOID data_buffer) +{ + U32 event_id = 0; + U64 *data; + int data_index; + U32 data_val = 0; + U32 data_reg = 0; + U64 total_count = 0; + U32 event_index = 0; + U32 cur_grp; + + if (device_uncore == NULL) { + SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); + return; + } + cur_grp = LWPMU_DEVICE_cur_group(device_uncore); + + if (GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_UNINITIALIZED || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_IDLE || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_RESERVED || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_PREPARE_STOP || + GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_STOPPED) { + SOCPERF_PRINT_ERROR("ERROR: RETURNING EARLY from Read_Data\n"); + return; + } + + if (data_buffer == NULL) { + return; + } + + data = data_buffer; + data_index = 0; + + preempt_disable(); + + // Write GroupID + data[data_index] = cur_grp + 1; + // Increment the data index as the event id starts from zero + data_index++; + + FOR_EACH_PCI_REG_RAW (pecb, i, dev_idx) { + if (ECB_entries_reg_type(pecb, i) == PMU_REG_EVENT_SELECT) { + write_To_Register(ECB_entries_bus_no(pecb, i), + ECB_entries_dev_no(pecb, i), + ECB_entries_func_no(pecb, i), + counter_port_id, + SOC_COUNTER_WRITE_OP_CODE, + ECB_entries_reg_offset(pecb, i), + (ULONG)SOC_UNCORE_SAMPLE_DATA); + + data_reg = i + ECB_operations_register_len( + pecb, PMU_OPERATION_WRITE); + if (ECB_entries_reg_type(pecb, data_reg) == + PMU_REG_DATA) { + read_From_Register( + ECB_entries_bus_no(pecb, data_reg), + ECB_entries_dev_no(pecb, data_reg), + ECB_entries_func_no(pecb, data_reg), + counter_port_id, + SOC_COUNTER_READ_OP_CODE, + ECB_entries_reg_offset(pecb, data_reg), + &data_val); + if (data_val < + socperf_pcb[0] + .last_uncore_count[event_index]) { + counter_overflow[event_index]++; + } + socperf_pcb[0].last_uncore_count[event_index] = + data_val; + total_count = data_val + + counter_overflow[event_index] * + UNCORE_MAX_COUNT; + event_index++; + data[data_index + event_id] = total_count; + event_id++; + } + } + } + END_FOR_EACH_PCI_REG_RAW; + + preempt_enable(); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*!
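+ * Address assembly sketch (as coded below; low32 and high32 are shorthand
+ * for the values read at reg_index 1 and 2): the trace buffer physical
+ * address comes back as two 32-bit halves that are recombined:
+ *
+ *     odla_physical_address = low32 | ((U64)high32 << 32);
+ *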
+ * @fn uncore_Create_Mem() + * + * @param memory_size - requested trace memory size (not used by this implementation) + * @param trace_buffer - receives the trace buffer physical address + * + * @return None No return needed + * + * @brief Locate the trace memory region and map it + * + */ +static VOID uncore_Create_Mem(U32 memory_size, U64 *trace_buffer) +{ + ECB pecb; + DRV_PCI_DEVICE_ENTRY dpden; + U32 bar_name = 0; + DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL; + U64 mmio_offset = 0; + U32 dev_index = 0; + U32 data_val = 0; + U32 reg_index = 0; + U64 physical_high = 0; + U64 odla_physical_address = 0; + + if (device_uncore == NULL) { + SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n"); + return; + } + pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[0]; + if (pecb == NULL) { + SOCPERF_PRINT_ERROR("ERROR: null pecb!\n"); + return; + } + + if (!trace_buffer) { + return; + } + + ECB_pcidev_entry_list(pecb) = + (DRV_PCI_DEVICE_ENTRY)((S8 *)pecb + + ECB_pcidev_list_offset(pecb)); + dpden = ECB_pcidev_entry_list(pecb); + + for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb); + dev_index++) { + curr_pci_entry = &dpden[dev_index]; + bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry); + mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio( + curr_pci_entry); + + if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) == + UNC_MEMORY && + DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) == + UNC_PCICFG && + bar_name == UNC_SOCPCI && + DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) == + UNC_OP_WRITE) { + read_From_Register( + DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), + SOC_COUNTER_READ_OP_CODE, mmio_offset, + &data_val); + if (reg_index == 1) { + odla_physical_address = data_val; + } else if (reg_index == 2) { + physical_high = data_val; + odla_physical_address = odla_physical_address | + (physical_high << 32); + } + SOCPERF_PRINT_DEBUG( + "op=%d port=%d offset=%x val=%x\n", + DRV_PCI_DEVICE_ENTRY_op_code(curr_pci_entry), + DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry), + mmio_offset, data_val); + reg_index++; + } + continue; + } + SOCPERF_PRINT_DEBUG("Physical Address=%llx\n", odla_physical_address); + if (odla_physical_address) { + trace_virtual_address = (U64)(UIOP)ioremap( + odla_physical_address, 1024 * sizeof(U64)); + SOCPERF_PRINT_DEBUG("PHY=%llx ODLA VIRTUAL ADDRESS=%llx\n", + odla_physical_address, + trace_virtual_address); + if (trace_buffer) { + *trace_buffer = odla_physical_address; + } + } + + return; +} + +/* ------------------------------------------------------------------------- */ +/*!
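+ * Output sketch (mirrors the loop below): every matching UNC_STATUS
+ * register value is appended to the caller's buffer and the slot count is
+ * reported back:
+ *
+ *     trace_buffer[n++] = data_val;  // one slot per status register
+ *     *num_entries      = n;
+ *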
+ * @fn uncore_Check_Status(U64 *trace_buffer, U32 *num_entries)
+ *
+ * @param trace_buffer - buffer to fill with the status values
+ * @param num_entries  - out parameter for the number of entries read
+ *
+ * @return None
+ *
+ * @brief Read the trace status registers
+ *
+ */
+static VOID uncore_Check_Status(U64 *trace_buffer, U32 *num_entries)
+{
+	U32 dev_index = 0;
+	ECB pecb;
+	DRV_PCI_DEVICE_ENTRY dpden;
+	U32 bar_name = 0;
+	DRV_PCI_DEVICE_ENTRY curr_pci_entry = NULL;
+	U64 mmio_offset = 0;
+	U32 data_val = 0;
+	U32 data_index = 0;
+
+	if (device_uncore == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: NULL device_uncore!\n");
+		return;
+	}
+	pecb = (ECB)LWPMU_DEVICE_PMU_register_data(device_uncore)[0];
+	if (pecb == NULL) {
+		SOCPERF_PRINT_ERROR("ERROR: null pecb!\n");
+		return;
+	}
+	if (!trace_buffer) {
+		return;
+	}
+
+	ECB_pcidev_entry_list(pecb) =
+		(DRV_PCI_DEVICE_ENTRY)((S8 *)pecb +
+				       ECB_pcidev_list_offset(pecb));
+	dpden = ECB_pcidev_entry_list(pecb);
+
+	for (dev_index = 0; dev_index < ECB_num_pci_devices(pecb);
+	     dev_index++) {
+		curr_pci_entry = &dpden[dev_index];
+		bar_name = DRV_PCI_DEVICE_ENTRY_bar_name(curr_pci_entry);
+		mmio_offset = DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(
+			curr_pci_entry);
+
+		if (DRV_PCI_DEVICE_ENTRY_prog_type(curr_pci_entry) ==
+			    UNC_STATUS &&
+		    DRV_PCI_DEVICE_ENTRY_config_type(curr_pci_entry) ==
+			    UNC_PCICFG &&
+		    bar_name == UNC_SOCPCI &&
+		    DRV_PCI_DEVICE_ENTRY_operation(curr_pci_entry) ==
+			    UNC_OP_READ) {
+			read_From_Register(
+				DRV_PCI_DEVICE_ENTRY_bus_no(curr_pci_entry),
+				DRV_PCI_DEVICE_ENTRY_dev_no(curr_pci_entry),
+				DRV_PCI_DEVICE_ENTRY_func_no(curr_pci_entry),
+				DRV_PCI_DEVICE_ENTRY_port_id(curr_pci_entry),
+				SOC_COUNTER_READ_OP_CODE, mmio_offset,
+				&data_val);
+			SOCPERF_PRINT_DEBUG("TRACE STATUS=%x\n", data_val);
+			trace_buffer[data_index] = data_val;
+			data_index++;
+		}
+	}
+
+	if (num_entries) {
+		*num_entries = data_index;
+	}
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
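+ * (Status note for uncore_Check_Status above: every PCI entry marked
+ * UNC_STATUS/UNC_OP_READ is read into trace_buffer in list order, and the
+ * number of entries read is returned through num_entries.)
+ *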
+ * @fn uncore_Read_Mem(U64 start_address, U64 *trace_buffer, U32 num_entries)
+ *
+ * @param start_address - unused; reads always start at trace_virtual_address
+ * @param trace_buffer  - buffer to fill with the trace data
+ * @param num_entries   - number of entries to read
+ *
+ * @return None
+ *
+ * @brief Read the trace memory
+ *
+ */
+static VOID
+uncore_Read_Mem(U64 start_address, U64 *trace_buffer, U32 num_entries)
+{
+	U32 data_index = 0;
+	U32 data_value = 0;
+
+	if (num_entries == 0 || !trace_buffer) {
+		return;
+	}
+	SOCPERF_PRINT_DEBUG(
+		"Reading memory for num_entries=%d from address=%llx\n",
+		num_entries, trace_virtual_address);
+	for (data_index = 0; data_index < num_entries; data_index++) {
+		if (trace_virtual_address) {
+			data_value = readl((U64 *)(UIOP)trace_virtual_address +
+					   data_index);
+
+			SOCPERF_PRINT_DEBUG("DATA VALUE=%x\n", data_value);
+			*(trace_buffer + data_index) = data_value;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Initialize the dispatch table
+ */
+DISPATCH_NODE soc_uncore_dispatch = {
+	.init = uncore_Initialize, // initialize
+	.fini = NULL, // destroy
+	.write = uncore_Write_PMU, // write
+	.freeze = uncore_Disable_PMU, // freeze
+	.restart = NULL, // restart
+	.read_data = NULL, // read
+	.check_overflow = NULL, // check for overflow
+	.swap_group = NULL,
+	.read_lbrs = NULL,
+	.clean_up = uncore_Clean_Up,
+	.hw_errata = NULL,
+	.read_power = NULL,
+	.check_overflow_errata = NULL,
+	.read_counts = NULL, // read counts
+	.check_overflow_gp_errata = NULL,
+	.read_ro = NULL,
+	.platform_info = NULL,
+	.trigger_read = NULL,
+	.read_current_data = uncore_Read_Data,
+	.create_mem = uncore_Create_Mem,
+	.check_status = uncore_Check_Status,
+	.read_mem = uncore_Read_Mem,
+	.stop_mem = uncore_Stop_Mem
+};
diff --git a/drivers/platform/x86/socperf/socperfdrv.c b/drivers/platform/x86/socperf/socperfdrv.c
new file mode 100644
index 00000000000000..a48edcf198ff13
--- /dev/null
+++ b/drivers/platform/x86/socperf/socperfdrv.c
@@ -0,0 +1,1826 @@
+/* ***********************************************************************************************
+
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+
+  Copyright (C) 2005-2021 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  The full GNU General Public License is included in this distribution
+  in the file called LICENSE.GPL.
+
+  BSD LICENSE
+
+  Copyright (C) 2005-2021 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#include "lwpmudrv_defines.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_version.h" +#include "lwpmudrv_ecb.h" +#include "lwpmudrv_struct.h" +#include "lwpmudrv_ioctl.h" +#include "inc/ecb_iterators.h" +#include "socperfdrv.h" +#include "control.h" +#include "inc/utility.h" +#include "inc/pci.h" +#include "pmu_info_struct.h" +#include "pmu_list.h" + +MODULE_AUTHOR("Copyright(C) 2007-2018 Intel Corporation"); +MODULE_VERSION(SOCPERF_NAME"_"SOCPERF_VERSION_STR); +MODULE_LICENSE("Dual BSD/GPL"); + +typedef struct LWPMU_DEV_NODE_S LWPMU_DEV_NODE; +typedef LWPMU_DEV_NODE *LWPMU_DEV; + +struct LWPMU_DEV_NODE_S { + long buffer; + struct semaphore sem; + struct cdev cdev; +}; + +#define LWPMU_DEV_buffer(dev) ((dev)->buffer) +#define LWPMU_DEV_sem(dev) ((dev)->sem) +#define LWPMU_DEV_cdev(dev) ((dev)->cdev) + +/* Global variables of the driver */ +SOCPERF_VERSION_NODE socperf_drv_version; +U64 *read_unc_ctr_info; +DISPATCH dispatch_uncore; +DRV_CONFIG drv_cfg; +EVENT_CONFIG socperf_global_ec; +volatile S32 socperf_abnormal_terminate; +LWPMU_DEV socperf_control; + +LWPMU_DEVICE device_uncore; +CPU_STATE socperf_pcb; +size_t socperf_pcb_size; +UNCORE_TOPOLOGY_INFO_NODE uncore_topology; + +#if defined(DRV_USE_UNLOCKED_IOCTL) +static struct mutex ioctl_lock; +#endif + +#define PMU_DEVICES 1 // pmu control + +static dev_t lwpmu_DevNum; /* the major and minor parts for SOCPERF base */ + +#if !defined(DRV_UDEV_UNAVAILABLE) +static struct class *pmu_class; +#endif + +#define DRV_DEVICE_DELIMITER "!" + +#if !defined(DRV_USE_UNLOCKED_IOCTL) +#define MUTEX_INIT(lock) +#define MUTEX_LOCK(lock) +#define MUTEX_UNLOCK(lock) +#else +#define MUTEX_INIT(lock) mutex_init(&(lock)); +#define MUTEX_LOCK(lock) mutex_lock(&(lock)) +#define MUTEX_UNLOCK(lock) mutex_unlock(&(lock)) +#endif + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Initialize_State(void) + * + * @param none + * + * @return OS_STATUS + * + * @brief Allocates the memory needed at load time. Initializes all the + * @brief necessary state variables with the default values. 
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Initialize_State(VOID) +{ + S32 i, max_cpu_id = 0; + + for_each_possible_cpu(i) { + if (cpu_present(i)) { + if (i > max_cpu_id) { + max_cpu_id = i; + } + } + } + max_cpu_id++; + + /* + * Machine Initializations + * Abstract this information away into a separate entry point + * + * Question: Should we allow for the use of Hot-cpu + * add/subtract functionality while the driver is executing? + */ + if (max_cpu_id > num_present_cpus()) { + GLOBAL_STATE_num_cpus(socperf_driver_state) = max_cpu_id; + } else { + GLOBAL_STATE_num_cpus(socperf_driver_state) = + num_present_cpus(); + } + GLOBAL_STATE_active_cpus(socperf_driver_state) = num_online_cpus(); + GLOBAL_STATE_cpu_count(socperf_driver_state) = 0; + GLOBAL_STATE_dpc_count(socperf_driver_state) = 0; + GLOBAL_STATE_num_em_groups(socperf_driver_state) = 0; + GLOBAL_STATE_current_phase(socperf_driver_state) = + DRV_STATE_UNINITIALIZED; + + SOCPERF_PRINT_DEBUG( + "lwpmudrv_Initialize_State: num_cpus=%d, active_cpus=%d\n", + GLOBAL_STATE_num_cpus(socperf_driver_state), + GLOBAL_STATE_active_cpus(socperf_driver_state)); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID SOCPERF_Read_Data + * + * @brief Reads counter data + * + * @param param data_buffer - buffer for reading counter data. + * + * @return None + * + * Special Notes: + * + */ +extern VOID SOCPERF_Read_Data3(PVOID data_buffer) +{ + if (dispatch_uncore && dispatch_uncore->read_current_data) { + dispatch_uncore->read_current_data(data_buffer); + } + SOCPERF_PRINT_DEBUG("SOCPERF_Read_Data called\n"); + return; +} +EXPORT_SYMBOL(SOCPERF_Read_Data3); + +/********************************************************************* + * Internal Driver functions + * Should be called only from the lwpmudrv_DeviceControl routine + *********************************************************************/ + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Local function that handles the LWPMU_IOCTL_VERSION call. + * @brief Returns the version number of the kernel mode sampling. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Version(IOCTL_ARGS arg) +{ + OS_STATUS status; + + // Check if enough space is provided for collecting the data + if ((arg->len_drv_to_usr != sizeof(U32)) || + (arg->buf_drv_to_usr == NULL)) { + return OS_FAULT; + } + + status = put_user( + SOCPERF_VERSION_NODE_socperf_version(&socperf_drv_version), + (U32 *)arg->buf_drv_to_usr); + + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static VOID lwpmudrv_Clean_Up(DRV_BOOL) + * + * @param DRV_BOOL finish - Flag to call finish + * + * @return VOID + * + * @brief Cleans up the memory allocation. 
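+ * @brief Frees the per-group ECB buffers, then the uncore device node and
+ * @brief its config/event structures, then the per-cpu state (socperf_pcb).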
+ *
+ * Special Notes
+ */
+static VOID lwpmudrv_Clean_Up(DRV_BOOL finish)
+{
+	U32 i = 0;
+
+	if (dispatch_uncore && dispatch_uncore->clean_up) {
+		dispatch_uncore->clean_up((VOID *)&i);
+	}
+
+	if (device_uncore) {
+		EVENT_CONFIG ec;
+		if (LWPMU_DEVICE_PMU_register_data(device_uncore)) {
+			ec = LWPMU_DEVICE_ec(device_uncore);
+			for (i = 0; i < EVENT_CONFIG_num_groups_unc(ec); i++) {
+				SOCPERF_Free_Memory(
+					LWPMU_DEVICE_PMU_register_data(
+						device_uncore)[i]);
+			}
+		}
+		LWPMU_DEVICE_pcfg(device_uncore) =
+			SOCPERF_Free_Memory(LWPMU_DEVICE_pcfg(device_uncore));
+		LWPMU_DEVICE_ec(device_uncore) =
+			SOCPERF_Free_Memory(LWPMU_DEVICE_ec(device_uncore));
+		device_uncore = SOCPERF_Free_Memory(device_uncore);
+	}
+
+	socperf_pcb = SOCPERF_Free_Memory(socperf_pcb);
+	socperf_pcb_size = 0;
+	GLOBAL_STATE_num_em_groups(socperf_driver_state) = 0;
+	GLOBAL_STATE_num_descriptors(socperf_driver_state) = 0;
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Initialize_Driver(PVOID buf_drv_to_usr, U32 len_drv_to_usr)
+ *
+ * @param buf_drv_to_usr - pointer to the input buffer
+ * @param len_drv_to_usr - size of the input buffer
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_INIT_DRIVER call.
+ * @brief Copies the driver configuration from user space into drv_cfg.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Initialize_Driver(PVOID buf_drv_to_usr, U32 len_drv_to_usr)
+{
+	if (buf_drv_to_usr == NULL) {
+		SOCPERF_PRINT_ERROR("buf_drv_to_usr ERROR!\n");
+		return OS_FAULT;
+	}
+
+	drv_cfg = SOCPERF_Allocate_Memory(len_drv_to_usr);
+	if (!drv_cfg) {
+		SOCPERF_PRINT_ERROR("Memory allocation failure for drv_cfg!\n");
+		return OS_NO_MEM;
+	}
+
+	if (copy_from_user(drv_cfg, buf_drv_to_usr, len_drv_to_usr)) {
+		SOCPERF_PRINT_ERROR("Failed to copy from user!\n");
+		drv_cfg = SOCPERF_Free_Memory(drv_cfg);
+		return OS_FAULT;
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Initialize_Uncore(PVOID buf_drv_to_usr, U32 len_drv_to_usr)
+ *
+ * @param buf_drv_to_usr - pointer to the input buffer
+ * @param len_drv_to_usr - size of the input buffer
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_INIT call.
+ * @brief Allocates the uncore device and per-cpu state, copies the uncore
+ * @brief configuration, and selects the dispatch table needed to make the
+ * @brief driver operational.
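+ * @brief The UNINITIALIZED -> IDLE transition is claimed atomically with
+ * @brief cmpxchg so that concurrent INIT ioctls cannot both proceed.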
+ * + * Special Notes + */ +static OS_STATUS +lwpmudrv_Initialize_Uncore(PVOID buf_drv_to_usr, U32 len_drv_to_usr) +{ + DEV_UNC_CONFIG pcfg_unc; + U32 previous_state; + U32 i = 0; + + SOCPERF_PRINT_DEBUG("Entered lwpmudrv_Initialize_UNC\n"); + previous_state = + cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state), + DRV_STATE_UNINITIALIZED, DRV_STATE_IDLE); + + if (previous_state != DRV_STATE_UNINITIALIZED) { + SOCPERF_PRINT_ERROR("OS_IN_PROGRESS error!\n"); + return OS_IN_PROGRESS; + } + /* + * Program State Initializations: + * Foreach device, copy over pcfg_unc and configure dispatch table + */ + if (buf_drv_to_usr == NULL) { + SOCPERF_PRINT_ERROR("in_buff ERROR!\n"); + return OS_FAULT; + } + if (len_drv_to_usr != sizeof(DEV_UNC_CONFIG_NODE)) { + SOCPERF_PRINT_ERROR( + "Got len_drv_to_usr=%d, expecting size=%d\n", + len_drv_to_usr, (int)sizeof(DEV_UNC_CONFIG_NODE)); + return OS_FAULT; + } + + device_uncore = SOCPERF_Allocate_Memory(sizeof(LWPMU_DEVICE_NODE)); + if (!device_uncore) { + SOCPERF_PRINT_ERROR( + "Memory allocation failure for device_uncore!\n"); + return OS_NO_MEM; + } + socperf_pcb_size = GLOBAL_STATE_num_cpus(socperf_driver_state) * + sizeof(CPU_STATE_NODE); + socperf_pcb = SOCPERF_Allocate_Memory(socperf_pcb_size); + if (!socperf_pcb) { + SOCPERF_PRINT_ERROR( + "Memory allocation failure for socperf_pcb!\n"); + return OS_NO_MEM; + } + + // allocate memory + LWPMU_DEVICE_pcfg(device_uncore) = + SOCPERF_Allocate_Memory(sizeof(DEV_UNC_CONFIG_NODE)); + if (!LWPMU_DEVICE_pcfg(device_uncore)) { + SOCPERF_PRINT_ERROR( + "Memory allocation failure for LWPMU_DEVICE_pcfg(device_uncore)!\n"); + return OS_NO_MEM; + } + // copy over pcfg_unc + if (copy_from_user(LWPMU_DEVICE_pcfg(device_uncore), buf_drv_to_usr, + len_drv_to_usr)) { + SOCPERF_PRINT_ERROR("Failed to copy from user"); + return OS_FAULT; + } + // configure dispatch from dispatch_id + pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(device_uncore); + + LWPMU_DEVICE_dispatch(device_uncore) = SOCPERF_UTILITY_Configure_CPU( + DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); + if (LWPMU_DEVICE_dispatch(device_uncore) == NULL) { + SOCPERF_PRINT_ERROR("Unable to configure CPU"); + return OS_FAULT; + } + + LWPMU_DEVICE_em_groups_count(device_uncore) = 0; + LWPMU_DEVICE_cur_group(device_uncore) = 0; + SOCPERF_PRINT_DEBUG( + "SocPerf Driver Config : uncore dispatch id = %d\n", + DEV_UNC_CONFIG_dispatch_id(pcfg_unc)); + dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); + if (dispatch_uncore && dispatch_uncore->init) { + dispatch_uncore->init((VOID *)&i); + } + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS socperf_Terminate(void) + * + * @param none + * + * @return OS_STATUS + * + * @brief Local function that handles the LWPMUDRV_IOCTL_TERMINATE call. + * @brief Cleans up the interrupt handler and resets the PMU state. 
+ * + * Special Notes + */ +static OS_STATUS socperf_Terminate(VOID) +{ + U32 previous_state; + + if (GLOBAL_STATE_current_phase(socperf_driver_state) == + DRV_STATE_UNINITIALIZED) { + return OS_SUCCESS; + } + + previous_state = + cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state), + DRV_STATE_STOPPED, DRV_STATE_UNINITIALIZED); + if (previous_state != DRV_STATE_STOPPED) { + SOCPERF_PRINT_ERROR( + "socperf_Terminate: Sampling is in progress, cannot terminate.\n"); + return OS_IN_PROGRESS; + } + + GLOBAL_STATE_current_phase(socperf_driver_state) = + DRV_STATE_UNINITIALIZED; + lwpmudrv_Clean_Up(TRUE); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Trigger_Read(void) + * + * @param - none + * + * @return - OS_STATUS + * + * @brief Read the Counter Data. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Trigger_Read(VOID) +{ + dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); + if (dispatch_uncore && dispatch_uncore->trigger_read) { + dispatch_uncore->trigger_read(); + } + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Init_PMU(void) + * + * @param - none + * + * @return - OS_STATUS + * + * @brief Initialize the PMU and the driver state in preparation for data collection. + * + * Special Notes + */ +static OS_STATUS lwpmudrv_Init_PMU(VOID) +{ + U32 i = 0; + + if (GLOBAL_STATE_current_phase(socperf_driver_state) != + DRV_STATE_IDLE) { + return OS_IN_PROGRESS; + } + dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore); + if (dispatch_uncore && dispatch_uncore->write) { + dispatch_uncore->write((VOID *)&i); + } + SOCPERF_PRINT_DEBUG( + "lwpmudrv_Init_PMU: IOCTL_Init_PMU - finished initial Write\n"); + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS lwpmudrv_Set_EM_Config_UNC(IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Set the number of em groups in the global state node. + * @brief Also, copy the EVENT_CONFIG struct that has been passed in, + * @brief into a global location for now. 
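+ * @brief One ECB pointer slot is allocated per uncore event-multiplexing
+ * @brief group; SOCPERF_Switch_Group3 later rotates through these groups.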
+ * + * Special Notes + */ +static OS_STATUS lwpmudrv_Set_EM_Config_Uncore(IOCTL_ARGS arg) +{ + EVENT_CONFIG ec; + SOCPERF_PRINT_DEBUG("enter lwpmudrv_Set_EM_Config_UNC\n"); + if (GLOBAL_STATE_current_phase(socperf_driver_state) != + DRV_STATE_IDLE) { + return OS_IN_PROGRESS; + } + + if (arg->buf_usr_to_drv == NULL || arg->len_usr_to_drv == 0) { + return OS_INVALID; + } + // allocate memory + LWPMU_DEVICE_ec(device_uncore) = + SOCPERF_Allocate_Memory(sizeof(EVENT_CONFIG_NODE)); + if (!LWPMU_DEVICE_ec(device_uncore)) { + SOCPERF_PRINT_ERROR( + "Memory allocation failure for LWPMU_DEVICE_ec(device_uncore)!\n"); + return OS_NO_MEM; + } + if (copy_from_user(LWPMU_DEVICE_ec(device_uncore), arg->buf_usr_to_drv, + arg->len_usr_to_drv)) { + return OS_FAULT; + } + // configure num_groups from ec of the specific device + ec = (EVENT_CONFIG)LWPMU_DEVICE_ec(device_uncore); + LWPMU_DEVICE_PMU_register_data(device_uncore) = SOCPERF_Allocate_Memory( + EVENT_CONFIG_num_groups_unc(ec) * sizeof(VOID *)); + if (!LWPMU_DEVICE_PMU_register_data(device_uncore)) { + SOCPERF_PRINT_ERROR( + "Memory allocation failure for LWPMU_DEVICE_PMU_register_data(device_uncore)!\n"); + return OS_NO_MEM; + } + LWPMU_DEVICE_em_groups_count(device_uncore) = 0; + + return OS_SUCCESS; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static OS_STATUS socperf_Configure_Events_Uncore (IOCTL_ARGS arg) + * + * @param arg - pointer to the IOCTL_ARGS structure + * + * @return OS_STATUS + * + * @brief Make a copy of the uncore registers that need to be programmed + * @brief for the next event set used for event multiplexing + * + * Special Notes + */ +static OS_STATUS socperf_Configure_Events_Uncore(IOCTL_ARGS arg) +{ + OS_STATUS status = OS_SUCCESS; + VOID **PMU_register_data_unc; + S32 em_groups_count_unc; + ECB ecb; + EVENT_CONFIG ec_unc; + U32 group_id = 0; + ECB in_ecb = NULL; + PMU_MMIO_BAR_INFO_NODE primary; + PMU_MMIO_BAR_INFO_NODE secondary; + U32 idx, reg_id; + DRV_PCI_DEVICE_ENTRY cur_entry = NULL; + DRV_PCI_DEVICE_ENTRY dpden = NULL; + U32 dev_index; + U32 previous_state; + + if (GLOBAL_STATE_current_phase(socperf_driver_state) != + DRV_STATE_IDLE) { + return OS_IN_PROGRESS; + } + + em_groups_count_unc = LWPMU_DEVICE_em_groups_count(device_uncore); + PMU_register_data_unc = LWPMU_DEVICE_PMU_register_data(device_uncore); + ec_unc = LWPMU_DEVICE_ec(device_uncore); + + if (ec_unc == NULL) { + SOCPERF_PRINT_ERROR( + "socperf_Configure_Events_Uncore: ec_unc is NULL!\n"); + return OS_INVALID; + } + + if (em_groups_count_unc >= (S32)EVENT_CONFIG_num_groups_unc(ec_unc)) { + SOCPERF_PRINT_ERROR( + "socperf_Configure_Events_Uncore: Number of Uncore EM groups exceeded the initial configuration."); + return OS_INVALID; + } + if (arg->buf_usr_to_drv == NULL || + arg->len_usr_to_drv < sizeof(ECB_NODE)) { + SOCPERF_PRINT_ERROR( + "socperf_Configure_Events_Uncore: args are invalid."); + return OS_INVALID; + } + // size is in len_usr_to_drv, data is pointed to by buf_usr_to_drv + // + in_ecb = SOCPERF_Allocate_Memory(arg->len_usr_to_drv); + if (!in_ecb) { + SOCPERF_PRINT_ERROR( + "socperf_Configure_Events_Uncore: ECB memory allocation failed\n"); + return OS_NO_MEM; + } + if (copy_from_user(in_ecb, arg->buf_usr_to_drv, arg->len_usr_to_drv)) { + SOCPERF_PRINT_ERROR( + "socperf_Configure_Events_Uncore: ECB copy failed\n"); + in_ecb = SOCPERF_Free_Memory(in_ecb); + return OS_NO_MEM; + } + + if (ECB_device_type(in_ecb) != DEVICE_UNC_SOCPERF) { + SOCPERF_PRINT_ERROR("Invalid Device Type: %d", + 
ECB_device_type(in_ecb));
+		status = OS_INVALID;
+		goto clean_return;
+	}
+
+	for (idx = 0; idx < ECB_num_entries(in_ecb); idx++) {
+		if (ECB_entries_reg_prog_type(in_ecb, idx) ==
+		    PMU_REG_PROG_MMIO) {
+			reg_id = ECB_entries_reg_id(in_ecb, idx);
+			if (reg_id == 0) {
+				continue;
+			}
+
+			memset(&primary, 0, sizeof(PMU_MMIO_BAR_INFO_NODE));
+			memset(&secondary, 0, sizeof(PMU_MMIO_BAR_INFO_NODE));
+			ECB_pcidev_entry_list(in_ecb) =
+				(DRV_PCI_DEVICE_ENTRY)((S8 *)in_ecb +
+						       ECB_pcidev_list_offset(
+							       in_ecb));
+			dpden = ECB_pcidev_entry_list(in_ecb);
+			if (!dpden) {
+				continue;
+			}
+
+			for (dev_index = 0;
+			     dev_index < ECB_num_pci_devices(in_ecb);
+			     dev_index++) {
+				cur_entry = &dpden[dev_index];
+
+				primary.u.s.bus =
+					DRV_PCI_DEVICE_ENTRY_bus_no(cur_entry);
+				primary.u.s.dev =
+					DRV_PCI_DEVICE_ENTRY_dev_no(cur_entry);
+				primary.u.s.func =
+					DRV_PCI_DEVICE_ENTRY_func_no(cur_entry);
+				primary.u.s.offset =
+					DRV_PCI_DEVICE_ENTRY_base_offset_for_mmio(
+						cur_entry);
+				primary.mask =
+					DRV_PCI_DEVICE_ENTRY_mask(cur_entry);
+				primary.bar_prog_type = MMIO_SINGLE_BAR_TYPE;
+
+				if (!PMU_LIST_Check_MMIO(primary, secondary,
+							 reg_id)) {
+					SOCPERF_PRINT_ERROR(
+						"Invalid MMIO information! Offset:0x%x, B%d.D%d.F%d.O0x%x, M0x%llx.S%d",
+						reg_id, primary.u.s.bus,
+						primary.u.s.dev,
+						primary.u.s.func,
+						primary.u.s.offset,
+						primary.mask, primary.shift);
+					status = OS_INVALID;
+					goto clean_return;
+				} else {
+					SOCPERF_PRINT_DEBUG(
+						"Verified the MMIO info. Offset:0x%x, B%d.D%d.F%d.O0x%x, M0x%llx.S%d",
+						reg_id, primary.u.s.bus,
+						primary.u.s.dev,
+						primary.u.s.func,
+						primary.u.s.offset,
+						primary.mask, primary.shift);
+				}
+			}
+		} else {
+			SOCPERF_PRINT_ERROR("Invalid Prog Type: %d",
+					    ECB_entries_reg_prog_type(in_ecb,
+								      idx));
+		}
+	}
+
+	group_id = ECB_group_id(in_ecb);
+	if (group_id >= EVENT_CONFIG_num_groups_unc(ec_unc)) {
+		SOCPERF_PRINT_ERROR(
+			"socperf_Configure_Events_Uncore: group_id is larger than total number of groups\n");
+		in_ecb = SOCPERF_Free_Memory(in_ecb);
+		status = OS_INVALID;
+		goto clean_return;
+	}
+
+	PMU_register_data_unc[group_id] = in_ecb;
+
+	//
+	// Make a copy of the data for global use.
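+	// Note: in_ecb already holds a copy of the user buffer; this second
+	// copy_from_user() re-reads the same user memory into the slot that
+	// is retained in PMU_register_data_unc.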
+	//
+	if (copy_from_user(PMU_register_data_unc[group_id], arg->buf_usr_to_drv,
+			   arg->len_usr_to_drv)) {
+		SOCPERF_PRINT_ERROR(
+			"socperf_Configure_Events_Uncore: ECB copy failed\n");
+		PMU_register_data_unc[group_id] = NULL;
+		in_ecb = SOCPERF_Free_Memory(in_ecb);
+		status = OS_FAULT;
+		goto clean_return;
+	}
+
+	// at this point, we know the number of uncore events for this device,
+	// so allocate the results buffer per thread for uncore only for event based uncore counting
+	if (em_groups_count_unc == 0) {
+		ecb = PMU_register_data_unc[0];
+		if (ecb == NULL) {
+			PMU_register_data_unc[group_id] = NULL;
+			in_ecb = SOCPERF_Free_Memory(in_ecb);
+			status = OS_INVALID;
+			goto clean_return;
+		}
+		LWPMU_DEVICE_num_events(device_uncore) = ECB_num_events(ecb);
+	}
+	LWPMU_DEVICE_em_groups_count(device_uncore) = group_id + 1;
+
+clean_return:
+	if (status != OS_SUCCESS) {
+		previous_state = cmpxchg(
+			&GLOBAL_STATE_current_phase(socperf_driver_state),
+			DRV_STATE_IDLE, DRV_STATE_UNINITIALIZED);
+
+		if (previous_state != DRV_STATE_IDLE) {
+			SOCPERF_PRINT_ERROR(
+				"socperf_Configure_Events_Uncore: Unable to exit properly - State is %d\n",
+				GLOBAL_STATE_current_phase(
+					socperf_driver_state));
+		}
+	}
+
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS socperf_Start(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMU_IOCTL_START call.
+ * @brief Set up the OS hooks for process/thread/load notifications.
+ * @brief Write the initial set of MSRs.
+ *
+ * Special Notes
+ */
+static OS_STATUS socperf_Start(VOID)
+{
+	OS_STATUS status = OS_SUCCESS;
+	U32 previous_state;
+	U32 i = 0;
+
+	/*
+	 * To Do: Check for state == STATE_IDLE and only then enable sampling
+	 */
+	previous_state =
+		cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state),
+			DRV_STATE_IDLE, DRV_STATE_RUNNING);
+	if (previous_state != DRV_STATE_IDLE) {
+		SOCPERF_PRINT_ERROR(
+			"socperf_Start: Unable to start sampling - State is %d\n",
+			GLOBAL_STATE_current_phase(socperf_driver_state));
+		return OS_IN_PROGRESS;
+	}
+
+	if (dispatch_uncore && dispatch_uncore->restart) {
+		dispatch_uncore->restart((VOID *)&i);
+	}
+
+	return status;
+}
+
+/*
+ * @fn socperf_Prepare_Stop();
+ *
+ * @param NONE
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMUDRV_IOCTL_STOP call.
+ * @brief Cleans up the interrupt handler.
+ */
+static OS_STATUS socperf_Prepare_Stop(VOID)
+{
+	U32 i = 0;
+	U32 current_state = GLOBAL_STATE_current_phase(socperf_driver_state);
+
+	SOCPERF_PRINT_DEBUG("socperf_Prepare_Stop: About to stop sampling\n");
+	GLOBAL_STATE_current_phase(socperf_driver_state) =
+		DRV_STATE_PREPARE_STOP;
+
+	if (current_state == DRV_STATE_UNINITIALIZED) {
+		return OS_SUCCESS;
+	}
+
+	if (dispatch_uncore && dispatch_uncore->freeze) {
+		dispatch_uncore->freeze((VOID *)&i);
+	}
+
+	return OS_SUCCESS;
+}
+
+/*
+ * @fn socperf_Finish_Stop();
+ *
+ * @param NONE
+ * @return OS_STATUS
+ *
+ * @brief Local function that handles the LWPMUDRV_IOCTL_STOP call.
+ * @brief Cleans up the interrupt handler.
+ */
+static OS_STATUS socperf_Finish_Stop(VOID)
+{
+	OS_STATUS status = OS_SUCCESS;
+
+	GLOBAL_STATE_current_phase(socperf_driver_state) = DRV_STATE_STOPPED;
+
+	return status;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
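+ * State note: the RUNNING -> PAUSED transition below is claimed with
+ * cmpxchg; pausing an already-paused collection returns
+ * VT_SAMP_IN_PAUSE_STATE, and any other state returns VT_SAMP_IN_STOP_STATE.
+ *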
+ * @fn static OS_STATUS lwpmudrv_Pause(void)
+ *
+ * @param - none
+ *
+ * @return OS_STATUS
+ *
+ * @brief Pause the collection
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Pause(VOID)
+{
+	U32 previous_state;
+	U32 i = 0;
+
+	previous_state =
+		cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state),
+			DRV_STATE_RUNNING, DRV_STATE_PAUSED);
+	if (previous_state == DRV_STATE_RUNNING) {
+		dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore);
+		if (dispatch_uncore && dispatch_uncore->freeze) {
+			dispatch_uncore->freeze((VOID *)&i);
+		}
+	} else {
+		if (previous_state == DRV_STATE_PAUSED) {
+			return VT_SAMP_IN_PAUSE_STATE;
+		}
+		SOCPERF_PRINT_ERROR(
+			"There is no sampling collection running at this time\n");
+		return VT_SAMP_IN_STOP_STATE;
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Resume(void)
+ *
+ * @param - none
+ *
+ * @return OS_STATUS
+ *
+ * @brief Resume the sampling after a pause. Assumption: the pause duration
+ * @brief will be long enough for all interrupts to be processed and no
+ * @brief active sampling to occur.
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Resume(VOID)
+{
+	U32 previous_state;
+	U32 i = 0;
+
+	previous_state =
+		cmpxchg(&GLOBAL_STATE_current_phase(socperf_driver_state),
+			DRV_STATE_PAUSED, DRV_STATE_RUNNING);
+
+	if (previous_state == DRV_STATE_PAUSED) {
+		dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore);
+		if (dispatch_uncore && dispatch_uncore->restart) {
+			dispatch_uncore->restart((VOID *)&i);
+		}
+		SOCPERF_PRINT_DEBUG("Resuming the sampling collection...\n");
+	} else {
+		SOCPERF_PRINT_DEBUG(
+			"There is no paused sampling collection at this time.\n");
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Read_Uncore_Counts(PVOID buf_usr_to_drv, U32 len_usr_to_drv)
+ *
+ * @param - buf_usr_to_drv - output buffer
+ *          len_usr_to_drv - output buffer length
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Read the Counter Data.
+ *
+ * Special Notes
+ */
+static OS_STATUS
+lwpmudrv_Read_Uncore_Counts(PVOID buf_usr_to_drv, U32 len_usr_to_drv)
+{
+	if (buf_usr_to_drv == NULL) {
+		SOCPERF_PRINT_ERROR(
+			"lwpmudrv_Read_Uncore_Counts: counter buffer is NULL\n");
+		return OS_FAULT;
+	}
+
+	if (dispatch_uncore && dispatch_uncore->read_current_data) {
+		dispatch_uncore->read_current_data(buf_usr_to_drv);
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS SOCPERF_Switch_Group3(void)
+ *
+ * @param none
+ *
+ * @return OS_STATUS
+ *
+ * @brief Switch the current uncore group that is being collected.
+ *
+ * Special Notes
+ *     This routine is called from the user mode code to handle the multiple uncore group
+ *     situation. 
4 distinct steps are taken:
+ *     Step 1: Pause the sampling
+ *     Step 2: Increment the current uncore group count
+ *     Step 3: Write the new group to the uncore PMU
+ *     Step 4: Resume sampling
+ */
+OS_STATUS SOCPERF_Switch_Group3(VOID)
+{
+	OS_STATUS status = OS_SUCCESS;
+	U32 current_state = GLOBAL_STATE_current_phase(socperf_driver_state);
+	U32 i = 0;
+	DEV_UNC_CONFIG pcfg_unc;
+
+	SOCPERF_PRINT_DEBUG("Switching Uncore Group...\n");
+	if (current_state != DRV_STATE_RUNNING &&
+	    current_state != DRV_STATE_PAUSED) {
+		return status;
+	}
+	status = lwpmudrv_Pause();
+	LWPMU_DEVICE_cur_group(device_uncore)++;
+	LWPMU_DEVICE_cur_group(device_uncore) %=
+		LWPMU_DEVICE_em_groups_count(device_uncore);
+	dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore);
+	if (dispatch_uncore && dispatch_uncore->write) {
+		dispatch_uncore->write((VOID *)&i);
+	}
+
+	pcfg_unc = (DEV_UNC_CONFIG)LWPMU_DEVICE_pcfg(device_uncore);
+	if (pcfg_unc && (DRV_CONFIG_start_paused(drv_cfg) == FALSE)) {
+		status = lwpmudrv_Resume();
+	}
+
+	return status;
+}
+EXPORT_SYMBOL(SOCPERF_Switch_Group3);
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Create_Mem(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Create the trace memory and return its physical address.
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Create_Mem(IOCTL_ARGS arg)
+{
+	U32 memory_size = 0;
+	U64 trace_phys_address = 0;
+
+	if (arg->buf_usr_to_drv == NULL || arg->len_usr_to_drv == 0) {
+		SOCPERF_PRINT_ERROR(
+			"lwpmudrv_Create_Mem: input buffer is NULL\n");
+		return OS_FAULT;
+	}
+
+	if (copy_from_user(&memory_size, (U32 *)arg->buf_usr_to_drv,
+			   sizeof(U32))) {
+		return OS_FAULT;
+	}
+
+	if (arg->buf_drv_to_usr == NULL || arg->len_drv_to_usr == 0) {
+		SOCPERF_PRINT_ERROR(
+			"lwpmudrv_Create_Mem: output buffer is NULL\n");
+		return OS_FAULT;
+	}
+	SOCPERF_PRINT_DEBUG("Read size=%llx\n", arg->len_drv_to_usr);
+	SOCPERF_PRINT_DEBUG("Write size=%llx\n", arg->len_usr_to_drv);
+	if (arg->len_drv_to_usr != sizeof(U64)) {
+		return OS_FAULT;
+	}
+
+	dispatch_uncore = LWPMU_DEVICE_dispatch(device_uncore);
+	if (dispatch_uncore && dispatch_uncore->create_mem) {
+		dispatch_uncore->create_mem(memory_size, &trace_phys_address);
+	} else {
+		SOCPERF_PRINT_ERROR("dispatch table could not be called\n");
+	}
+
+	if (copy_to_user(arg->buf_drv_to_usr, (void *)&trace_phys_address,
+			 sizeof(U64))) {
+		return OS_FAULT;
+	}
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static OS_STATUS lwpmudrv_Check_Status(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Read the trace status registers.
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Check_Status(IOCTL_ARGS arg)
+{
+	U32 num_entries = 0;
+	U64 *status_data = NULL;
+
+	if ((arg->len_drv_to_usr == 0) || (arg->buf_drv_to_usr == NULL)) {
+		return OS_FAULT;
+	}
+
+	status_data = SOCPERF_Allocate_Memory(arg->len_drv_to_usr);
+	if (!status_data) {
+		return OS_NO_MEM;
+	}
+	if (dispatch_uncore && dispatch_uncore->check_status) {
+		dispatch_uncore->check_status(status_data, &num_entries);
+	}
+
+	if (copy_to_user(arg->buf_drv_to_usr, (void *)status_data,
+			 num_entries * sizeof(U64))) {
+		SOCPERF_Free_Memory(status_data);
+		return OS_FAULT;
+	}
+	SOCPERF_Free_Memory(status_data);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
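+ * Sizing note: the output length selects the entry count below, i.e.
+ * num_entries = len_drv_to_usr / sizeof(U64), so the user buffer should be
+ * a whole number of U64 slots.
+ *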
+ * @fn static OS_STATUS lwpmudrv_Read_Mem(IOCTL_ARGS arg)
+ *
+ * @param arg - pointer to the IOCTL_ARGS structure
+ *
+ * @return - OS_STATUS
+ *
+ * @brief Read the trace memory into the user buffer.
+ *
+ * Special Notes
+ */
+static OS_STATUS lwpmudrv_Read_Mem(IOCTL_ARGS arg)
+{
+	U64 start_address = 0;
+	U64 *mem_address = NULL;
+	U32 mem_size = 0;
+	U32 num_entries = 0;
+
+	if (arg->buf_usr_to_drv == NULL || arg->len_usr_to_drv == 0) {
+		SOCPERF_PRINT_ERROR(
+			"lwpmudrv_Read_Mem: input buffer is NULL\n");
+		return OS_FAULT;
+	}
+
+	if (copy_from_user(&start_address, (U64 *)arg->buf_usr_to_drv,
+			   sizeof(U64))) {
+		return OS_FAULT;
+	}
+
+	if ((arg->len_drv_to_usr == 0) || (arg->buf_drv_to_usr == NULL)) {
+		return OS_FAULT;
+	}
+	mem_size = (U32)arg->len_drv_to_usr;
+	mem_address = SOCPERF_Allocate_Memory(mem_size);
+	if (!mem_address) {
+		return OS_NO_MEM;
+	}
+
+	num_entries = (U32)(mem_size / sizeof(U64));
+	if (dispatch_uncore && dispatch_uncore->read_mem) {
+		dispatch_uncore->read_mem(start_address, mem_address,
+					  num_entries);
+	}
+	if (copy_to_user(arg->buf_drv_to_usr, (void *)mem_address, mem_size)) {
+		SOCPERF_Free_Memory(mem_address);
+		return OS_FAULT;
+	}
+	SOCPERF_Free_Memory(mem_address);
+
+	return OS_SUCCESS;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static VOID lwpmudrv_Stop_Mem(void)
+ *
+ * @param - none
+ *
+ * @return - none
+ *
+ * @brief Stop Mem
+ *
+ * Special Notes
+ */
+extern VOID lwpmudrv_Stop_Mem(VOID)
+{
+	SOCPERF_PRINT_DEBUG("Entered lwpmudrv_Stop_Mem\n");
+
+	if (dispatch_uncore && dispatch_uncore->stop_mem) {
+		dispatch_uncore->stop_mem();
+	}
+
+	SOCPERF_PRINT_DEBUG("Exited lwpmudrv_Stop_Mem\n");
+
+	return;
+}
+
+/*!
+ * @fn static VOID socperfdrv_PCI_Scan_For_Uncore(U32)
+ *
+ * @brief Scan the PCI devices to find if the uncore device is accessible
+ *
+ * @param dev_node - uncore device index for scannable devices
+ *
+ * @return None
+ *
+ * Special Notes:
+ */
+
+static VOID socperfdrv_PCI_Scan_For_Uncore(U32 dev_node)
+{
+	U32 device_id;
+	U32 value;
+	U32 vendor_id;
+	U32 busno = 0;
+	U32 j, k;
+	U32 pci_address;
+
+	SOCPERF_PRINT_DEBUG("Scanning PCI bus for uncore dev_node: %u.",
+			    dev_node);
+
+	for (j = 0; j < MAX_PCI_DEVNO; j++) {
+		if (!(UNCORE_TOPOLOGY_INFO_pcidev_valid(&uncore_topology,
+							dev_node, j))) {
+			continue;
+		}
+		for (k = 0; k < MAX_PCI_FUNCNO; k++) {
+			if (!(UNCORE_TOPOLOGY_INFO_pcidev_is_devno_funcno_valid(
+				    &uncore_topology, dev_node, j, k))) {
+				continue;
+			}
+			pci_address = FORM_PCI_ADDR(busno, j, k, 0);
+			value = SOCPERF_PCI_Read_Ulong(pci_address);
+			CONTINUE_IF_NOT_GENUINE_INTEL_DEVICE(value, vendor_id,
+							     device_id);
+			SOCPERF_PRINT_DEBUG(
+				"Uncore bus=%d, dev=%d, func=%d device ID = 0x%x.",
+				busno, j, k, device_id);
+			if (device_id != 0xFFFF) {
+				UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(
+					&uncore_topology, dev_node, j, k)++;
+				SOCPERF_PRINT_DEBUG(
+					"Found device 0x%x at BDF(%x:%x:%x) [%u unit(s) so far].",
+					device_id, busno, j, k,
+					UNCORE_TOPOLOGY_INFO_pcidev_num_entries_found(
+						&uncore_topology, dev_node, j,
+						k));
+			}
+		}
+	}
+
+	return;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
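+ * Scan note: for every device node the user asks to scan,
+ * socperfdrv_PCI_Scan_For_Uncore above walks bus 0 across all valid
+ * device/function numbers and counts the Intel devices that answer with a
+ * valid device id.
+ *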
+ * @fn U64 socperdrv_Get_Uncore_Topology + * + * @brief Reads uncore topology information + * + * @param arg Pointer to the IOCTL structure + * + * @return status + * + * Special Notes: + * + */ +static OS_STATUS socperdrv_Get_Uncore_Topology(IOCTL_ARGS args) +{ + U32 dev; + static UNCORE_TOPOLOGY_INFO_NODE req_uncore_topology; + + if (args->buf_usr_to_drv == NULL) { + SOCPERF_PRINT_ERROR( + "Invalid arguments (buf_usr_to_drv is NULL)!"); + return OS_INVALID; + } + if (args->len_usr_to_drv != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) { + SOCPERF_PRINT_ERROR( + "Invalid arguments (unexpected len_usr_to_drv value)!"); + return OS_INVALID; + } + if (args->buf_drv_to_usr == NULL) { + SOCPERF_PRINT_ERROR( + "Invalid arguments (buf_drv_to_usr is NULL)!"); + return OS_INVALID; + } + if (args->len_drv_to_usr != sizeof(UNCORE_TOPOLOGY_INFO_NODE)) { + SOCPERF_PRINT_ERROR( + "Invalid arguments (unexpected len_drv_to_usr value)!"); + return OS_INVALID; + } + + memset((char *)&req_uncore_topology, 0, + sizeof(UNCORE_TOPOLOGY_INFO_NODE)); + if (copy_from_user(&req_uncore_topology, args->buf_usr_to_drv, + args->len_usr_to_drv)) { + SOCPERF_PRINT_ERROR("Memory copy failure!"); + return OS_FAULT; + } + + for (dev = 0; dev < MAX_DEVICES; dev++) { + // skip if user does not require to scan this device + if (!UNCORE_TOPOLOGY_INFO_device_scan(&req_uncore_topology, + dev)) { + continue; + } + // skip if this device has been discovered + if (UNCORE_TOPOLOGY_INFO_device_scan(&uncore_topology, dev)) { + continue; + } + memcpy((U8 *)&(UNCORE_TOPOLOGY_INFO_device(&uncore_topology, + dev)), + (U8 *)&(UNCORE_TOPOLOGY_INFO_device(&req_uncore_topology, + dev)), + sizeof(UNCORE_PCIDEV_NODE)); + socperfdrv_PCI_Scan_For_Uncore(dev); + } + + if (copy_to_user(args->buf_drv_to_usr, &uncore_topology, + args->len_drv_to_usr)) { + SOCPERF_PRINT_ERROR("Memory copy failure!"); + return OS_FAULT; + } + + return OS_SUCCESS; +} + +/******************************************************************************* + * External Driver functions - Open + * This function is common to all drivers + *******************************************************************************/ + +static int socperf_Open(struct inode *inode, struct file *filp) +{ + SOCPERF_PRINT_DEBUG("lwpmu_Open called on maj:%d, min:%d\n", + imajor(inode), iminor(inode)); + filp->private_data = container_of(inode->i_cdev, LWPMU_DEV_NODE, cdev); + + return 0; +} + +/******************************************************************************* + * External Driver functions + * These functions are registered into the file operations table that + * controls this device. 
+ * Open, Close, Read, Write, Release
+ *******************************************************************************/
+
+static ssize_t
+socperf_Read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
+{
+	unsigned long retval;
+
+	/* Transferring data to user space */
+	SOCPERF_PRINT_DEBUG("lwpmu_Read dispatched with count=%d\n",
+			    (S32)count);
+	if (copy_to_user(buf, &LWPMU_DEV_buffer(socperf_control), 1)) {
+		retval = OS_FAULT;
+		return retval;
+	}
+	/* Advance the read position as appropriate */
+	if (*f_pos == 0) {
+		*f_pos += 1;
+		return 1;
+	}
+
+	return 0;
+}
+
+static ssize_t
+socperf_Write(struct file *filp, const char *buf, size_t count, loff_t *f_pos)
+{
+	unsigned long retval;
+
+	SOCPERF_PRINT_DEBUG("lwpmu_Write dispatched with count=%d\n",
+			    (S32)count);
+	if (copy_from_user(&LWPMU_DEV_buffer(socperf_control), buf + count - 1,
+			   1)) {
+		retval = OS_FAULT;
+		return retval;
+	}
+
+	return 1;
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn extern IOCTL_OP_TYPE socperf_Service_IOCTL(IOCTL_USE_INODE, filp, cmd, arg)
+ *
+ * @param IOCTL_USE_INODE - Used for pre 2.6.32 kernels
+ * @param struct file *filp - file pointer
+ * @param unsigned int cmd - IOCTL command
+ * @param unsigned long arg - args to the IOCTL command
+ *
+ * @return OS_STATUS
+ *
+ * @brief Worker function that handles IOCTL requests from the user mode.
+ *
+ * Special Notes
+ */
+extern IOCTL_OP_TYPE
+socperf_Service_IOCTL(IOCTL_USE_INODE struct file *filp,
+		      unsigned int cmd,
+		      IOCTL_ARGS_NODE local_args)
+{
+	int status = OS_SUCCESS;
+
+	switch (cmd) {
+	/*
+	 * Common IOCTL commands
+	 */
+	case DRV_OPERATION_VERSION:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_VERSION\n");
+		status = lwpmudrv_Version(&local_args);
+		break;
+
+	case DRV_OPERATION_RESERVE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_RESERVE\n");
+		break;
+
+	case DRV_OPERATION_INIT_PMU:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_PMU\n");
+		status = lwpmudrv_Init_PMU();
+		break;
+
+	case DRV_OPERATION_START:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_START\n");
+		status = socperf_Start();
+		break;
+
+	case DRV_OPERATION_STOP:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_STOP\n");
+		status = socperf_Prepare_Stop();
+		break;
+
+	case DRV_OPERATION_PAUSE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_PAUSE\n");
+		status = lwpmudrv_Pause();
+		break;
+
+	case DRV_OPERATION_RESUME:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_RESUME\n");
+		status = lwpmudrv_Resume();
+		break;
+
+	case DRV_OPERATION_TERMINATE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_TERMINATE\n");
+		status = socperf_Terminate();
+		break;
+
+	case DRV_OPERATION_INIT_DRIVER:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_DRIVER\n");
+		status = lwpmudrv_Initialize_Driver(local_args.buf_usr_to_drv,
+						    local_args.len_usr_to_drv);
+		break;
+
+	case DRV_OPERATION_INIT_UNCORE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_INIT_UNCORE\n");
+		status = lwpmudrv_Initialize_Uncore(local_args.buf_usr_to_drv,
+						    local_args.len_usr_to_drv);
+		break;
+	case DRV_OPERATION_EM_GROUPS_UNCORE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_EM_GROUPS_UNC\n");
+		status = lwpmudrv_Set_EM_Config_Uncore(&local_args);
+		break;
+
+	case DRV_OPERATION_EM_CONFIG_NEXT_UNCORE:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_EM_CONFIG_NEXT_UNC\n");
+		status = socperf_Configure_Events_Uncore(&local_args);
+		break;
+
+	case DRV_OPERATION_TIMER_TRIGGER_READ:
+		lwpmudrv_Trigger_Read();
+		break;
+
+	case DRV_OPERATION_READ_UNCORE_DATA:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_READ_UNCORE_DATA\n");
+		status =
lwpmudrv_Read_Uncore_Counts(local_args.buf_drv_to_usr,
+						    local_args.len_drv_to_usr);
+		break;
+
+	case DRV_OPERATION_CREATE_MEM:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_CREATE_MEM\n");
+		lwpmudrv_Create_Mem(&local_args);
+		break;
+
+	case DRV_OPERATION_READ_MEM:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_READ_MEM\n");
+		lwpmudrv_Read_Mem(&local_args);
+		break;
+
+	case DRV_OPERATION_CHECK_STATUS:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_CHECK_STATUS\n");
+		lwpmudrv_Check_Status(&local_args);
+		break;
+
+	case DRV_OPERATION_STOP_MEM:
+		SOCPERF_PRINT_DEBUG(" DRV_OPERATION_STOP_MEM\n");
+		lwpmudrv_Stop_Mem();
+		break;
+
+	case DRV_OPERATION_GET_UNCORE_TOPOLOGY:
+		SOCPERF_PRINT_DEBUG("DRV_OPERATION_GET_UNCORE_TOPOLOGY.");
+		status = socperdrv_Get_Uncore_Topology(&local_args);
+		break;
+
+	/*
+	 * if none of the above, treat as unknown/illegal IOCTL command
+	 */
+	default:
+		SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d number:%d\n",
+				    _IOC_TYPE(cmd), _IOC_NR(cmd));
+		status = OS_ILLEGAL_IOCTL;
+		break;
+	}
+
+	if (cmd == DRV_OPERATION_STOP &&
+	    GLOBAL_STATE_current_phase(socperf_driver_state) ==
+		    DRV_STATE_PREPARE_STOP) {
+		status = socperf_Finish_Stop();
+	}
+
+	return status;
+}
+
+extern long
+socperf_Device_Control(IOCTL_USE_INODE struct file *filp,
+		       unsigned int cmd,
+		       unsigned long arg)
+{
+	int status = OS_SUCCESS;
+	IOCTL_ARGS_NODE local_args;
+
+#if !defined(DRV_USE_UNLOCKED_IOCTL)
+	SOCPERF_PRINT_DEBUG(
+		"lwpmu_DeviceControl(0x%x) called on inode maj:%d, min:%d\n",
+		cmd, imajor(inode), iminor(inode));
+#endif
+	SOCPERF_PRINT_DEBUG("type: %d, subcommand: %d\n", _IOC_TYPE(cmd),
+			    _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) {
+		SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d\n", _IOC_TYPE(cmd));
+		return OS_ILLEGAL_IOCTL;
+	}
+
+	MUTEX_LOCK(ioctl_lock);
+	if (arg) {
+		status = copy_from_user(&local_args, (IOCTL_ARGS)arg,
+					sizeof(IOCTL_ARGS_NODE));
+		if (status) {
+			MUTEX_UNLOCK(ioctl_lock);
+			return OS_FAULT;
+		}
+	} else {
+		memset(&local_args, 0, sizeof(IOCTL_ARGS_NODE));
+	}
+
+	status = socperf_Service_IOCTL(IOCTL_USE_INODE filp, _IOC_NR(cmd),
+				       local_args);
+	MUTEX_UNLOCK(ioctl_lock);
+
+	return status;
+}
+
+#if defined(CONFIG_COMPAT) && defined(DRV_EM64T)
+extern long
+socperf_Device_Control_Compat(struct file *filp,
+			      unsigned int cmd,
+			      unsigned long arg)
+{
+	int status = OS_SUCCESS;
+	IOCTL_COMPAT_ARGS_NODE local_args_compat;
+	IOCTL_ARGS_NODE local_args;
+
+	memset(&local_args_compat, 0, sizeof(IOCTL_COMPAT_ARGS_NODE));
+	SOCPERF_PRINT_DEBUG("Compat: type: %d, subcommand: %d\n",
+			    _IOC_TYPE(cmd), _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != LWPMU_IOC_MAGIC) {
+		SOCPERF_PRINT_ERROR("Unknown IOCTL magic:%d\n", _IOC_TYPE(cmd));
+		return OS_ILLEGAL_IOCTL;
+	}
+
+	MUTEX_LOCK(ioctl_lock);
+	if (arg) {
+		status = copy_from_user(&local_args_compat,
+					(IOCTL_COMPAT_ARGS)arg,
+					sizeof(IOCTL_COMPAT_ARGS_NODE));
+		if (status) {
+			MUTEX_UNLOCK(ioctl_lock);
+			return OS_FAULT;
+		}
+	}
+	local_args.len_drv_to_usr = local_args_compat.len_drv_to_usr;
+	local_args.len_usr_to_drv = local_args_compat.len_usr_to_drv;
+	local_args.buf_drv_to_usr =
+		(char *)compat_ptr(local_args_compat.buf_drv_to_usr);
+	local_args.buf_usr_to_drv =
+		(char *)compat_ptr(local_args_compat.buf_usr_to_drv);
+
+	status = socperf_Service_IOCTL(filp, _IOC_NR(cmd), local_args);
+	MUTEX_UNLOCK(ioctl_lock);
+
+	return status;
+}
+#endif
+
+/*
+ * @fn SOCPERF_Abnormal_Terminate(void)
+ *
+ * @brief This routine is called from linuxos_Exit_Task_Notify if the user process has
+ *        been killed by an uncatchable signal (example kill -9). The state variable
+ *        socperf_abnormal_terminate is set to 1 and the clean up routines are called.
In this
+ *        code path the OS notifier hooks should not be unloaded.
+ *
+ * @param None
+ *
+ * @return OS_STATUS
+ *
+ * Special Notes:
+ *
+ */
+extern int SOCPERF_Abnormal_Terminate(void)
+{
+	int status = OS_SUCCESS;
+
+	socperf_abnormal_terminate = 1;
+	SOCPERF_PRINT_DEBUG(
+		"Abnormal-Termination: Calling socperf_Prepare_Stop\n");
+	status = socperf_Prepare_Stop();
+	SOCPERF_PRINT_DEBUG(
+		"Abnormal-Termination: Calling socperf_Finish_Stop\n");
+	status = socperf_Finish_Stop();
+	SOCPERF_PRINT_DEBUG(
+		"Abnormal-Termination: Calling lwpmudrv_Terminate\n");
+	status = socperf_Terminate();
+
+	return status;
+}
+
+/*****************************************************************************************
+ *
+ *   Driver Entry / Exit functions that will be called when the driver is loaded and
+ *   unloaded
+ *
+ ****************************************************************************************/
+
+/*
+ * Structure that declares the usual file access functions
+ * First one is for lwpmu_c, the control functions
+ */
+static struct file_operations socperf_Fops = {
+	.owner = THIS_MODULE,
+	IOCTL_OP = socperf_Device_Control,
+#if defined(CONFIG_COMPAT) && defined(DRV_EM64T)
+	.compat_ioctl = socperf_Device_Control_Compat,
+#endif
+	.read = socperf_Read,
+	.write = socperf_Write,
+	.open = socperf_Open,
+	.release = NULL,
+	.llseek = NULL,
+};
+
+/*!
+ * @fn static int lwpmu_setup_cdev(dev, fops, dev_number)
+ *
+ * @param LWPMU_DEV dev - pointer to the device object
+ * @param struct file_operations *fops - pointer to the file operations struct
+ * @param dev_t dev_number - major/minor device number
+ *
+ * @return OS_STATUS
+ *
+ * @brief Set up the device object.
+ *
+ * Special Notes
+ */
+static int
+lwpmu_setup_cdev(LWPMU_DEV dev, struct file_operations *fops, dev_t dev_number)
+{
+	cdev_init(&LWPMU_DEV_cdev(dev), fops);
+	LWPMU_DEV_cdev(dev).owner = THIS_MODULE;
+	LWPMU_DEV_cdev(dev).ops = fops;
+
+	return cdev_add(&LWPMU_DEV_cdev(dev), dev_number, 1);
+}
+
+/* ------------------------------------------------------------------------- */
+/*!
+ * @fn static int socperf_Load(void)
+ *
+ * @param none
+ *
+ * @return STATUS
+ *
+ * @brief Load the driver module into the kernel. Set up the driver object.
+ * @brief Set up the initial state of the driver and allocate the memory
+ * @brief needed to keep basic state information.
+ */
+static int socperf_Load(VOID)
+{
+	int num_cpus;
+	OS_STATUS status = OS_SUCCESS;
+
+	SOCPERF_Memory_Tracker_Init();
+
+	/* Get one major device number and one minor number. 
*/ + /* The result is formatted as major+minor(0) */ + /* One minor number is for control (lwpmu_c), */ + SOCPERF_PRINT("SocPerf Driver loading...\n"); + SOCPERF_PRINT("SocPerf Driver about to register chrdev...\n"); + + lwpmu_DevNum = MKDEV(0, 0); + status = alloc_chrdev_region(&lwpmu_DevNum, 0, PMU_DEVICES, + SOCPERF_DRIVER_NAME); + SOCPERF_PRINT("SocPerf Driver: result of alloc_chrdev_region is %d\n", + status); + if (status < 0) { + SOCPERF_PRINT_ERROR( + "SocPerf driver failed to alloc chrdev_region!\n"); + return status; + } + SOCPERF_PRINT("SocPerf Driver: major number is %d\n", + MAJOR(lwpmu_DevNum)); + status = lwpmudrv_Initialize_State(); + if (status < 0) { + SOCPERF_PRINT_ERROR( + "SocPerf driver failed to initialize state!\n"); + return status; + } + num_cpus = GLOBAL_STATE_num_cpus(socperf_driver_state); + SOCPERF_PRINT("SocPerf Driver: detected %d CPUs in lwpmudrv_Load\n", + num_cpus); + + /* Allocate memory for the control structures */ + socperf_control = SOCPERF_Allocate_Memory(sizeof(LWPMU_DEV_NODE)); + + if (!socperf_control) { + SOCPERF_Free_Memory(socperf_control); + return OS_NO_MEM; + } + + /* Register the file operations with the OS */ +#if !defined(DRV_UDEV_UNAVAILABLE) + SOCPERF_PRINT("SocPerf Driver: creating device %s...\n", + SOCPERF_DRIVER_NAME DRV_DEVICE_DELIMITER "c"); + pmu_class = class_create(THIS_MODULE, SOCPERF_DRIVER_NAME); + if (IS_ERR(pmu_class)) { + SOCPERF_PRINT_ERROR( + "Error registering SocPerf control class\n"); + } + device_create(pmu_class, NULL, lwpmu_DevNum, NULL, + SOCPERF_DRIVER_NAME DRV_DEVICE_DELIMITER "c"); +#endif + + status = lwpmu_setup_cdev(socperf_control, &socperf_Fops, lwpmu_DevNum); + if (status) { + SOCPERF_PRINT_ERROR("Error %d adding lwpmu as char device\n", + status); + return status; + } + + MUTEX_INIT(ioctl_lock); + + PMU_LIST_Initialize(); + PMU_LIST_Build_MMIO_List(); + + /* + * Initialize the SocPerf driver version (done once at driver load time) + */ + SOCPERF_VERSION_NODE_major(&socperf_drv_version) = + SOCPERF_MAJOR_VERSION; + SOCPERF_VERSION_NODE_minor(&socperf_drv_version) = + SOCPERF_MINOR_VERSION; + SOCPERF_VERSION_NODE_api(&socperf_drv_version) = SOCPERF_API_VERSION; + // + // Display driver version information + // + SOCPERF_PRINT("SocPerf Driver v%d.%d.%d has been loaded.\n", + SOCPERF_VERSION_NODE_major(&socperf_drv_version), + SOCPERF_VERSION_NODE_minor(&socperf_drv_version), + SOCPERF_VERSION_NODE_api(&socperf_drv_version)); + + return status; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn static int lwpmu_Unload(void) + * + * @param none + * + * @return none + * + * @brief Remove the driver module from the kernel. 
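+ * @brief Teardown mirrors socperf_Load: the PMU list, per-cpu state, udev
+ * @brief device, char device, and device region are released before the
+ * @brief memory tracker is torn down.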
+ */ +static VOID socperf_Unload(VOID) +{ + SOCPERF_PRINT("SocPerf Driver unloading...\n"); + + PMU_LIST_Clean_Up(); + + socperf_pcb = SOCPERF_Free_Memory(socperf_pcb); + socperf_pcb_size = 0; + +#if !defined(DRV_UDEV_UNAVAILABLE) + unregister_chrdev(MAJOR(lwpmu_DevNum), SOCPERF_DRIVER_NAME); + device_destroy(pmu_class, lwpmu_DevNum); +#endif + + cdev_del(&LWPMU_DEV_cdev(socperf_control)); + unregister_chrdev_region(lwpmu_DevNum, PMU_DEVICES); + +#if !defined(DRV_UDEV_UNAVAILABLE) + class_destroy(pmu_class); +#endif + + socperf_control = SOCPERF_Free_Memory(socperf_control); + + SOCPERF_Memory_Tracker_Free(); + + // + // Display driver version information + // + SOCPERF_PRINT("SocPerf Driver v%d.%d.%d has been unloaded.\n", + SOCPERF_VERSION_NODE_major(&socperf_drv_version), + SOCPERF_VERSION_NODE_minor(&socperf_drv_version), + SOCPERF_VERSION_NODE_api(&socperf_drv_version)); + + return; +} + +/* Declaration of the init and exit functions */ +module_init(socperf_Load); +module_exit(socperf_Unload); diff --git a/drivers/platform/x86/socperf/utility.c b/drivers/platform/x86/socperf/utility.c new file mode 100644 index 00000000000000..42c4ee59466143 --- /dev/null +++ b/drivers/platform/x86/socperf/utility.c @@ -0,0 +1,185 @@ +/* *********************************************************************************************** + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright (C) 2005-2021 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *********************************************************************************************** +*/ + +#include "lwpmudrv_defines.h" +#include +#include +#include +#include + +#include "lwpmudrv_types.h" +#include "rise_errors.h" +#include "lwpmudrv_ecb.h" +#include "socperfdrv.h" +#include "utility.h" +#if defined(DRV_SOFIA) +#include "noc_uncore.h" +#elif defined(DRV_BUTTER) +#include "axi_uncore.h" +#else +#include "soc_uncore.h" +#include "haswellunc_sa.h" +#include "npk_uncore.h" +#endif + +volatile int config_done; + +extern VOID SOCPERF_UTILITY_Read_TSC(U64 *pTsc) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + *pTsc = rdtsc_ordered(); +#else + rdtscll(*(pTsc)); +#endif + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID SOCPERF_UTILITY_Read_Cpuid + * + * @brief executes the cpuid_function of cpuid and returns values + * + * @param IN cpuid_function + * OUT rax - results of the cpuid instruction in the + * OUT rbx - corresponding registers + * OUT rcx + * OUT rdx + * + * @return none + * + * Special Notes: + * + * + */ +extern VOID +SOCPERF_UTILITY_Read_Cpuid(U64 cpuid_function, + U64 *rax_value, + U64 *rbx_value, + U64 *rcx_value, + U64 *rdx_value) +{ + U32 function = (U32)cpuid_function; + U32 *eax = (U32 *)rax_value; + U32 *ebx = (U32 *)rbx_value; + U32 *ecx = (U32 *)rcx_value; + U32 *edx = (U32 *)rdx_value; + + *eax = function; + + __asm__("cpuid" + : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) + : "a"(function), "b"(*ebx), "c"(*ecx), "d"(*edx)); + + return; +} + +/* ------------------------------------------------------------------------- */ +/*! + * @fn VOID SOCPERF_UTILITY_Configure_CPU + * + * @brief Reads the CPU information from the hardware + * + * @param param dispatch_id - The id of the dispatch table. + * + * @return Pointer to the correct dispatch table for the CPU architecture + * + * Special Notes: + * + */ +extern DISPATCH SOCPERF_UTILITY_Configure_CPU(U32 dispatch_id) +{ + DISPATCH dispatch = NULL; + switch (dispatch_id) { +#if defined(DRV_SOFIA) + case 1000: + SOCPERF_PRINT_DEBUG( + "Set up the SoC Uncore NOC dispatch table\n"); + dispatch = &noc_dispatch; + break; +#elif defined(DRV_BUTTER) + case 1100: + SOCPERF_PRINT_DEBUG( + "Set up the SoC Uncore AXI dispatch table\n"); + dispatch = &axi_dispatch; + break; +#else + case 230: + SOCPERF_PRINT_DEBUG("Set up the Haswell SA dispatch table\n"); + dispatch = &socperf_hswunc_sa_dispatch; + break; + case 700: + SOCPERF_PRINT_DEBUG("Set up the SOC Uncore dispatch table\n"); + dispatch = &soc_uncore_dispatch; + break; + case 701: + SOCPERF_PRINT_DEBUG( + "Set up the SoC Uncore NPK dispatch table\n"); + dispatch = &npk_dispatch; + break; +#endif + default: + dispatch = NULL; + SOCPERF_PRINT_ERROR( + "Architecture not supported (dispatch_id=%d)\n", + dispatch_id); + break; + } + + return dispatch; +}
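+
+/*
+ * Illustrative usage sketch (comment only, not part of the driver proper):
+ * a caller selects a dispatch table by id and invokes its init hook, the
+ * same pattern lwpmudrv_Initialize_Uncore follows in socperfdrv.c:
+ *
+ *	U32 dummy = 0;
+ *	DISPATCH d = SOCPERF_UTILITY_Configure_CPU(700);
+ *	if (d && d->init) {
+ *		d->init((VOID *)&dummy);
+ *	}
+ */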