Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
233 commits
Select commit. Hold shift + click to select a range
7144737
blk-mq: avoid extending delays of active hctx from blk_mq_delay_run_h…
Jan 31, 2022
b46c9d7
dmaengine: idxd: Add a new DSA device ID for Granite Rapids-D platform
Aug 28, 2024
140baf6
selftests/sgx: Ignore OpenSSL 3.0 deprecated functions warning
kaccardi Aug 12, 2022
60ab96f
x86/sgx: Improve comments for sgx_encl_lookup/alloc_backing()
kaccardi Aug 12, 2022
0c7571a
x86/sgx: Allow enclaves to use Asynchrounous Exit Notification
hansendc Jul 20, 2022
bb2f3ad
KVM/VMX: Allow exposing EDECCSSA user leaf function to KVM guest
kaihuang Nov 1, 2022
555ae3e
KVM: x86/pmu: Do not mask LVTPC when handling a PMI on AMD platforms
sandip4n Apr 5, 2024
23358e8
KVM: x86: Snapshot if a vCPU's vendor model is AMD vs. Intel compatible
sean-jc Apr 5, 2024
5f330ca
KVM: x86: Manually retrieve CPUID.0x1 when getting FMS for RESET/INIT
sean-jc Sep 29, 2021
98c747a
KVM: vPMU: Fill get_msr MSR_CORE_PERF_GLOBAL_OVF_CTRL w/ 0
Oct 19, 2021
4788985
KVM: x86: Drop current_vcpu for kvm_running_vcpu + kvm_arch_vcpu vari…
sean-jc Nov 11, 2021
e4b79d5
KVM: Move x86's perf guest info callbacks to generic KVM
sean-jc Nov 11, 2021
8e67498
KVM: x86/svm: Add module param to control PMU virtualization
Nov 17, 2021
f12d637
KVM: x86: avoid out of bounds indices for fixed performance counters
bonzini Dec 9, 2021
7b47078
KVM: x86/pmu: Setup pmc->eventsel for fixed PMCs
Nov 30, 2021
04ec220
KVM: x86/pmu: Reuse pmc_perf_hw_id() and drop find_fixed_event()
Nov 30, 2021
cdcdb98
KVM: x86/pmu: Add pmc->intr to refactor kvm_perf_overflow{_intr}()
Nov 30, 2021
2dadc63
KVM: x86: Update vPMCs when retiring instructions
ehankland Nov 30, 2021
5563768
KVM: SVM: include CR3 in initial VMSA state for SEV-ES guests
mdroth Dec 16, 2021
2ca1a6d
KVM: x86: Do runtime CPUID update before updating vcpu->arch.cpuid_en…
vittyvk Jan 17, 2022
9680830
KVM: x86: Making the module parameter of vPMU more common
Jan 11, 2022
5fc1b09
KVM: x86/pmu: Use binary search to check filtered events
jsmattsonjr Jan 15, 2022
e1bd356
KVM: x86: Remove defunct pre_block/post_block kvm_x86_ops hooks
sean-jc Dec 8, 2021
cf73ec2
KVM: x86: Move CPUID.(EAX=0x12,ECX=1) mangling to __kvm_update_cpuid_…
vittyvk Jan 24, 2022
3fb89ff
KVM: x86: skip host CPUID call for hypervisor leaves
bonzini Oct 28, 2021
7c54aca
KVM: x86: Drop export for .tlb_flush_current() static_call key
sean-jc Jan 28, 2022
2775efa
KVM: x86: Rename kvm_x86_ops pointers to align w/ preferred vendor names
sean-jc Jan 28, 2022
8d64051
KVM: nVMX: Refactor PMU refresh to avoid referencing kvm_x86_ops.pmu_ops
sean-jc Jan 28, 2022
785cfb3
KVM: x86: Use more verbose names for mem encrypt kvm_x86_ops hooks
sean-jc Jan 28, 2022
9039b5e
KVM: x86: return 1 unconditionally for availability of KVM_CAP_VAPIC
bonzini Feb 15, 2022
7b366b6
KVM: x86: use static_call_cond for optional callbacks
bonzini Feb 1, 2022
a16cf72
KVM: x86: remove KVM_X86_OP_NULL and mark optional kvm_x86_ops
bonzini Dec 9, 2021
afb27a8
KVM: x86: warn on incorrectly NULL members of kvm_x86_ops
bonzini Dec 9, 2021
9a494cd
KVM: x86: allow defining return-0 static calls
bonzini Feb 15, 2022
b2dd728
KVM: x86: Fix pointer mistmatch warning when patching RET0 static calls
sean-jc Feb 23, 2022
b9f1d39
KVM: x86: add support for CPUID leaf 0x80000021
bonzini Oct 28, 2021
636fd0b
KVM: x86: synthesize CPUID leaf 0x80000021h if useful
bonzini Oct 21, 2021
df2f17f
KVM: x86: Fix clang -Wimplicit-fallthrough in do_host_cpuid()
nathanchance Mar 22, 2022
abdeecc
KVM: x86: Move lookup of indexed CPUID leafs to helper
mdroth Feb 24, 2022
9a87d74
KVM: x86: Move kvm_ops_static_call_update() to x86.c
Mar 29, 2022
09b4e2c
KVM: x86: Copy kvm_pmu_ops by value to eliminate layer of indirection
Mar 29, 2022
49171e8
KVM: x86: Move .pmu_ops to kvm_x86_init_ops and tag as __initdata
Mar 29, 2022
4dfad70
KVM: x86: Use static calls to reduce kvm_pmu_ops overhead
Mar 29, 2022
441874b
KVM: x86: work around QEMU issue with synthetic CPUID leaves
bonzini Apr 29, 2022
400175f
kvm: x86/pmu: Fix the compare function used by the pmu event filter
suomilewis May 17, 2022
479e50b
KVM: x86/pmu: Add IA32_PEBS_ENABLE MSR emulation for extended PEBS
Apr 11, 2022
e450f10
KVM: x86/pmu: Reprogram PEBS event to emulate guest PEBS counter
Apr 11, 2022
75a4edd
KVM: x86/pmu: Add IA32_DS_AREA MSR emulation to support guest DS
Apr 11, 2022
fa70532
KVM: x86/pmu: Add PEBS_DATA_CFG MSR emulation to support adaptive PEBS
Apr 11, 2022
23173e2
KVM: x86/pmu: Move pmc_speculative_in_use() to arch/x86/kvm/pmu.h
Apr 11, 2022
7a5a669
KVM: x86/pmu: Disable guest PEBS temporarily in two rare situations
Apr 11, 2022
bef7a60
KVM: x86/pmu: Add kvm_pmu_cap to optimize perf_get_x86_pmu_capability
Apr 11, 2022
7dcf03f
KVM: x86/pmu: remove useless prototype
bonzini May 20, 2022
5692c7f
KVM: x86/pmu: Don't overwrite the pmu->global_ctrl when refreshing
May 10, 2022
9ef3700
KVM: x86: always allow host-initiated writes to PMU MSRs
bonzini May 25, 2022
a46cc19
KVM: x86/pmu: Extract check_pmu_event_filter() handling both GP and f…
May 18, 2022
3e28a6e
KVM: x86/pmu: Pass only "struct kvm_pmc *pmc" to reprogram_counter()
May 18, 2022
992c2a5
KVM: x86/pmu: Drop "u64 eventsel" for reprogram_gp_counter()
May 18, 2022
0528ef1
KVM: x86/pmu: Drop "u8 ctrl, int idx" for reprogram_fixed_counter()
May 18, 2022
304ee84
KVM: x86/pmu: Use only the uniform interface reprogram_counter()
bonzini May 25, 2022
879ea56
KVM: x86/pmu: Use PERF_TYPE_RAW to merge reprogram_{gp,fixed}counter()
May 18, 2022
17b7c6b
KVM: x86/pmu: Update global enable_pmu when PMU is undetected
May 18, 2022
d7a8b29
KVM: x86/pmu: Restrict advanced features based on module enable_pmu
Jun 1, 2022
6d173d0
Revert "KVM: x86: always allow host-initiated writes to PMU MSRs"
sean-jc Jun 11, 2022
52fae24
KVM: VMX: Use vcpu_get_perf_capabilities() to get guest-visible value
sean-jc Jun 11, 2022
1184801
KVM: x86: Ignore benign host accesses to "unsupported" PEBS and BTS MSRs
sean-jc Jun 11, 2022
8d4b1b2
KVM: x86: Provide per VM capability for disabling PMU virtualization
Feb 23, 2022
196c6f8
KVM: x86: Add dedicated helper to get CPUID entry with significant index
sean-jc Jul 12, 2022
647d8b9
KVM: x86: Refresh PMU after writes to MSR_IA32_PERF_CAPABILITIES
sean-jc Jul 27, 2022
859f21c
perf/x86/core: Completely disable guest PEBS via guest's global_ctrl
Aug 31, 2022
5d077d0
KVM: x86/pmu: Avoid setting BIT_ULL(-1) to pmu->host_cross_mapped_mask
Aug 31, 2022
f2f7124
KVM: x86/pmu: Don't generate PEBS records for emulated instructions
Aug 31, 2022
4614c3c
KVM: x86/pmu: Refactor PERF_GLOBAL_CTRL update helper for reuse by PEBS
Sep 22, 2022
db432d6
KVM: x86/pmu: Avoid using PEBS perf_events for normal counters
Aug 31, 2022
ff4544f
KVM: x86/pmu: Limit the maximum number of supported Intel GP counters
Sep 19, 2022
d0ae290
KVM: x86/pmu: Limit the maximum number of supported AMD GP counters
Sep 19, 2022
e41445b
KVM: x86/pmu: Defer reprogram_counter() to kvm_pmu_handle_event()
Sep 23, 2022
c69179b
KVM: x86/pmu: Clear "reprogram" bit if counter is disabled or disallowed
sean-jc Sep 23, 2022
238bbf4
KVM: x86/pmu: Defer counter emulated overflow via pmc->prev_counter
Sep 23, 2022
9768d32
KVM: x86: Update KVM-only leaf handling to allow for 100% KVM-only leafs
sean-jc Nov 25, 2022
b78fca5
KVM: x86: Advertise that the SMM_CTL MSR is not supported
jsmattsonjr Oct 7, 2022
8da054c
KVM: x86: Move Intel Processor Trace interrupt handler to vmx.c
sean-jc Nov 11, 2021
31ca65b
KVM: x86: Move hardware setup/unsetup to init/exit
sean-jc Nov 30, 2022
8ebf35d
KVM: x86: Move guts of kvm_arch_init() to standalone helper
sean-jc Nov 30, 2022
6421eb4
KVM: x86: Serialize vendor module initialization (hardware setup)
sean-jc Nov 30, 2022
13932e4
KVM: x86: Move open-coded CPUID leaf 0x80000021 EAX bit propagation code
kimphillamd Jan 24, 2023
61f9c1c
x86/cpu, kvm: Add the Null Selector Clears Base feature
kimphillamd Jan 24, 2023
dbd0b1e
x86/cpu: Support AMD Automatic IBRS
kimphillamd Jan 24, 2023
0a5d010
KVM: x86: Propagate the AMD Automatic IBRS feature to the guest
kimphillamd Jan 24, 2023
c0824d3
KVM: x86/pmu: Cap kvm_pmu_cap.num_counters_gp at KVM's internal max
sean-jc Jan 24, 2023
f1e57b6
KVM: x86/pmu: Use separate array for defining "PMU MSRs to save"
sean-jc Jan 24, 2023
d2d518f
docs: kvm: x86: Fix broken field list
zulinx86 Mar 31, 2023
1fccdd2
KVM: x86/pmu: Rename pmc_is_enabled() to pmc_is_globally_enabled()
Feb 14, 2023
b710ab7
KVM: VMX: Refactor intel_pmu_{g,}set_msr() to align with other helpers
sean-jc Jan 27, 2023
c67d0ba
KVM: x86/pmu: Rewrite reprogram_counters() to improve performance
Feb 14, 2023
72abc24
KVM: x86/pmu: Fix a typo in kvm_pmu_request_counter_reprogam()
Mar 10, 2023
a680d13
KVM: x86/pmu: Prevent the PMU from counting disallowed events
suomilewis Mar 7, 2023
1c922b3
KVM: x86/pmu: Rename global_ovf_ctrl_mask to global_status_mask
sean-jc Jun 3, 2023
035a361
KVM: x86/pmu: Move reprogram_counters() to pmu.h
Jun 3, 2023
bfbb534
KVM: x86/pmu: Reject userspace attempts to set reserved GLOBAL_STATUS…
Jun 3, 2023
e02a316
KVM: x86/pmu: Move handling PERF_GLOBAL_CTRL and friends to common x86
Jun 3, 2023
7c903e4
KVM: x86/pmu: Provide Intel PMU's pmc_is_enabled() as generic x86 code
Jun 3, 2023
3a66023
KVM: x86: Explicitly zero cpuid "0xa" leaf when PMU is disabled
Jun 3, 2023
e851abd
KVM: x86/pmu: Disable vPMU if the minimum num of counters isn't met
Jun 3, 2023
9eedb0a
KVM: x86/pmu: Advertise PERFCTR_CORE iff the min nr of counters is met
Jun 3, 2023
2b3b102
KVM: x86/pmu: Constrain the num of guest counters with kvm_pmu_cap
Jun 3, 2023
2516aad
KVM: x86/cpuid: Add a KVM-only leaf to redirect AMD PerfMonV2 flag
Jun 3, 2023
5b615a7
KVM: x86/svm/pmu: Add AMD PerfMonV2 support
Jun 3, 2023
2fe25f8
KVM: x86/cpuid: Add AMD CPUID ExtPerfMonAndDbg leaf 0x80000022
Jun 3, 2023
640290f
x86/cpu: Enable STIBP on AMD if Automatic IBRS is enabled
kimphillamd Jul 20, 2023
5bd46ea
KVM: x86: Acquire SRCU read lock when handling fastpath MSR writes
sean-jc Jul 21, 2023
10a1dfb
KVM: x86/pmu: Truncate counter value to allowed width on write
May 4, 2023
ffcdf9a
KVM: x86: Get CPL directly when checking if loaded vCPU is in kernel …
Nov 23, 2023
9202540
KVM: x86/pmu: fix masking logic for MSR_CORE_PERF_GLOBAL_CTRL
bonzini Jan 4, 2024
73c6e5b
KVM: x86/pmu: Zero out pmu->all_valid_pmc_idx each time it's refreshed
Apr 4, 2023
6b65c71
KVM: x86/pmu: WARN and bug the VM if PMU is refreshed after vCPU has run
sean-jc Mar 11, 2023
d8470cc
KVM: x86/pmu: Zero out PMU metadata on AMD if PMU is disabled
sean-jc Nov 10, 2023
60ee5a6
KVM: x86/pmu: Fix type length error when reading pmu->fixed_ctr_ctrl
mzhang3579 Jan 23, 2024
e39bd20
KVM: x86/pmu: Synthesize at most one PMI per VM-exit
jsmattsonjr Sep 25, 2023
4c30a2e
KVM: x86: Use actual kvm_cpuid.base for clearing KVM_FEATURE_PV_UNHALT
vittyvk Feb 28, 2024
0a8b855
KVM: x86/pmu: Expose CPUIDs feature bits PDCM, DS, DTES64
Apr 11, 2022
74164f2
KVM: x86/pmu: Disable support for adaptive PEBS
sean-jc Mar 7, 2024
26d7b5d
KVM: x86: Fix errant brace in KVM capability handling
Jun 13, 2022
ee2265a
KVM: x86/cpuid: generalize kvm_update_kvm_cpuid_base() and also captu…
Jan 6, 2023
3f4fbc0
KVM: x86: Introduce __kvm_get_hypervisor_cpuid() helper
vittyvk Feb 28, 2024
0549f63
KVM: x86/cpuid: Refactor host/guest CPU model consistency check
Apr 11, 2022
4b91f90
iommu/amd: Update struct iommu_dev_data definition
hegdevasant Jul 6, 2022
d445c29
iommu/amd: Introduce pci segment structure
hegdevasant Jul 6, 2022
81cb900
iommu/amd: Introduce per PCI segment device table
ssuthiku-amd Jul 6, 2022
c0ebd61
iommu/amd: Introduce per PCI segment rlookup table
ssuthiku-amd Jul 6, 2022
6bb92a8
iommu/amd: Introduce per PCI segment irq_lookup_table
hegdevasant Jul 6, 2022
87e4b44
iommu/amd: Introduce per PCI segment dev_data_list
hegdevasant Jul 6, 2022
c1a1b50
iommu/amd: Introduce per PCI segment old_dev_tbl_cpy
ssuthiku-amd Jul 6, 2022
6239b18
iommu/amd: Introduce per PCI segment alias_table
ssuthiku-amd Jul 6, 2022
01aa315
iommu/amd: Introduce per PCI segment unity map list
hegdevasant Jul 6, 2022
1c3531d
iommu/amd: Introduce per PCI segment last_bdf
hegdevasant Jul 6, 2022
0f36dcd
iommu/amd: Introduce per PCI segment device table size
hegdevasant Jul 6, 2022
2fcbabe
iommu/amd: Introduce per PCI segment alias table size
hegdevasant Jul 6, 2022
6437c5b
iommu/amd: Introduce per PCI segment rlookup table size
hegdevasant Jul 6, 2022
ed964e6
iommu/amd: Convert to use per PCI segment irq_lookup_table
hegdevasant Jul 6, 2022
3719530
iommu/amd: Convert to use rlookup_amd_iommu helper function
ssuthiku-amd Jul 6, 2022
10a2512
iommu/amd: Update irq_remapping_alloc to use IOMMU lookup helper func…
ssuthiku-amd Jul 6, 2022
ca12483
iommu/amd: Introduce struct amd_ir_data.iommu
ssuthiku-amd Jul 6, 2022
ead45d2
iommu/amd: Update amd_irte_ops functions
ssuthiku-amd Jul 6, 2022
a6661a2
iommu/amd: Update alloc_irq_table and alloc_irq_index
ssuthiku-amd Jul 6, 2022
469acfd
iommu/amd: Convert to use per PCI segment rlookup_table
hegdevasant Jul 6, 2022
5a6847a
iommu/amd: Update set_dte_entry and clear_dte_entry
ssuthiku-amd Jul 6, 2022
6edf5d8
iommu/amd: Update iommu_ignore_device
ssuthiku-amd Jul 6, 2022
e3c0274
iommu/amd: Update dump_dte_entry
ssuthiku-amd Jul 6, 2022
c551801
iommu/amd: Update set_dte_irq_entry
ssuthiku-amd Jul 6, 2022
c43c573
iommu/amd: Update (un)init_device_table_dma()
ssuthiku-amd Jul 6, 2022
06d26ac
iommu/amd: Update set_dev_entry_bit() and get_dev_entry_bit()
ssuthiku-amd Jul 6, 2022
c6b7626
iommu/amd: Remove global amd_iommu_[dev_table/alias_table/last_bdf]
ssuthiku-amd Jul 6, 2022
861a0db
iommu/amd: Flush upto last_bdf only
hegdevasant Jul 6, 2022
f1a04a8
iommu/amd: Introduce get_device_sbdf_id() helper function
ssuthiku-amd Jul 6, 2022
5bc975a
iommu/amd: Include PCI segment ID when initialize IOMMU
ssuthiku-amd Jul 6, 2022
0107040
iommu/amd: Specify PCI segment ID when getting pci device
ssuthiku-amd Jul 6, 2022
0de7a77
iommu/amd: Print PCI segment ID in error log messages
hegdevasant Jul 6, 2022
8973aaa
iommu/amd: Update device_state structure to include PCI seg ID
hegdevasant Jul 6, 2022
de73fee
iommu/amd: Update amd_iommu_fault structure to include PCI seg ID
hegdevasant Jul 6, 2022
db634e1
i2c: i801: Add support for Intel Ice Lake PCH-N
andy-shev Oct 1, 2021
5535238
i2c: i801: Improve handling of chip-specific feature definitions
hkallweit Nov 19, 2021
6663d6d
i2c: i801: Enlarge device name field in i801_ids table
jhnikula May 12, 2023
fe5e4da
i2c: i801: Hide Intel Birch Stream SoC TCO WDT
Sep 1, 2025
20052ea
KVM: x86: Advertise AVX-VNNI-INT8 CPUID to user space
Nov 25, 2022
0844146
x86/cpu: Add model number for Intel Clearwater Forest processor
aegl Jan 17, 2024
038f6eb
x86: KVM: Advertise CPUIDs for new instructions in Clearwater Forest
taosu-linux Nov 5, 2024
a1d95f1
x86/cpu: Add model number for another Intel Arrow Lake mobile processor
aegl Mar 22, 2024
f34cd38
x86/cpu/vfm: Add/initialize x86_vfm field to struct cpuinfo_x86
aegl Apr 16, 2024
8b8c454
x86/cpu/vfm: Add new macros to work with (vendor/family/model) values
aegl Apr 16, 2024
8ba4937
x86/cpu/vfm: Update arch/x86/include/asm/intel-family.h
aegl Apr 16, 2024
790af31
x86/cpu: Switch to new Intel CPU model defines
aegl May 20, 2024
075cfb6
x86/cpu/intel: Switch to new Intel CPU model defines
aegl May 20, 2024
1d8f56b
x86/cpu/intel: Drop stray FAM6 check with new Intel CPU model defines
andyhhp May 29, 2024
c5041d7
perf/x86/intel: Switch to new Intel CPU model defines
aegl May 20, 2024
e27b817
cpufreq: intel_pstate: Support Emerald Rapids OOB mode
spandruvada May 30, 2024
ecbe648
cpufreq: intel_pstate: Support Granite Rapids and Sierra Forest OOB mode
spandruvada Aug 2, 2024
d685de9
cpufreq: intel_pstate: Support Clearwater Forest OOB mode
spandruvada Aug 8, 2025
34ee634
cpufreq: intel_pstate: Update Balance-performance EPP for Granite Rapids
spandruvada Nov 12, 2024
86f76d6
cpufreq: intel_pstate: Add Granite Rapids support in no-HWP mode
lrq-max Jun 23, 2025
777a736
platform/x86: ISST: Add Clearwater Forest to support list
spandruvada Jan 3, 2025
9fa4361
platform/x86/intel/ifs: Add Clearwater Forest to CPU support list
jithu83 Dec 10, 2024
8acfd34
powercap: intel_rapl: Sort header files
zhang-rui Apr 8, 2024
00c7e4c
powercap: intel_rapl: Introduce APIs for PMU support
zhang-rui Apr 28, 2024
2bc531f
powercap: intel_rapl_tpmi: Enable PMU support
zhang-rui Apr 28, 2024
88742f0
memblock: use numa_valid_node() helper to check for invalid node ID
rppt Jun 14, 2024
83b8afd
EDAC/i10nm: Add Intel Grand Ridge micro-server support
qzhuo2 Jan 29, 2024
1cbc1ac
EDAC/{skx_common,i10nm}: Fix some missing error reports on Emerald Ra…
qzhuo2 Feb 14, 2025
4b7ed68
EDAC/{skx_common,i10nm}: Fix the loss of saved RRL for HBM pseudo cha…
qzhuo2 Apr 17, 2025
93ffc87
EDAC/{skx_common,i10nm}: Remove the AMAP register for determing DDR5
qzhuo2 Aug 29, 2024
be70871
EDAC/i10nm: Add Intel Clearwater Forest server support
qzhuo2 Dec 3, 2024
55ea77a
EDAC/{i10nm,skx,skx_common}: Support UV systems
Dec 13, 2024
b0e0527
EDAC/i10nm: Explicitly set the modes of the RRL register sets
qzhuo2 Apr 17, 2025
0bcbffc
EDAC/{skx_common,i10nm}: Structure the per-channel RRL registers
qzhuo2 Apr 17, 2025
82a6229
EDAC/{skx_common,i10nm}: Refactor enable_retry_rd_err_log()
qzhuo2 Apr 17, 2025
d496fee
EDAC/{skx_common,i10nm}: Refactor show_retry_rd_err_log()
qzhuo2 Apr 17, 2025
bbdeca1
EDAC/{skx_common,i10nm}: Add RRL support for Intel Granite Rapids server
qzhuo2 Apr 17, 2025
27ba8b3
EDAC/i10nm: Fix the bitwise operation between variables of different …
qzhuo2 Apr 24, 2025
87b703d
EDAC/i10nm: Add Intel Granite Rapids-D support
qzhuo2 Jul 4, 2025
03a811e
EDAC/{skx_common,i10nm}: Use scnprintf() for safer buffer handling
Jul 15, 2025
3d6d8ab
intel_idle: add Granite Rapids Xeon D support
dedekind Nov 7, 2024
00640bf
intel_idle: add Clearwater Forest SoC support
dedekind Dec 3, 2024
cb9bdff
perf/x86/intel/uncore: Clean up func_id
Jan 8, 2025
8239ce2
perf/x86/intel/uncore: Support more units on Granite Rapids
Jan 8, 2025
d98ccab
perf vendor events: Add Clearwaterforest events
captain5050 Feb 11, 2025
31ca895
perf/x86/intel/uncore: Switch to new Intel CPU model defines
aegl Apr 24, 2024
9b25bc0
perf/x86/intel/uncore: Add Clearwater Forest support
Dec 11, 2024
d9c533f
perf/x86/intel/uncore: Support MSR portal for discovery tables
Jul 7, 2025
57f441d
perf/x86/intel/uncore: Support customized MMIO map size
Jul 7, 2025
a48f967
perf/x86/intel: Use the common uarch name for the shared functions
Aug 29, 2023
0b850d7
perf/x86/intel: Factor out the initialization code for SPR
Aug 29, 2023
9c75a99
perf/x86/intel: Factor out the initialization code for ADL e-core
Aug 29, 2023
d888a62
perf/x86/intel: Apply the common initialization code for ADL
Aug 29, 2023
122be97
perf/x86/intel: Clean up the hybrid CPU type handling code
Aug 29, 2023
d70ac25
perf/x86/intel: Add common intel_pmu_init_hybrid()
Aug 29, 2023
832eb31
perf/x86/intel: Fix broken fixed event constraints extension
Sep 11, 2023
e73e16d
perf/x86/intel: Support the PEBS event mask
Jun 26, 2024
1987add
perf/x86: Support counter mask
Jun 26, 2024
8711bb8
perf/x86: Add Lunar Lake and Arrow Lake support
Jun 26, 2024
20cbd46
perf/x86/intel: Rename model-specific pebs_latency_data functions
Jun 26, 2024
094df19
perf/x86/intel: Support new data source for Lunar Lake
Jun 26, 2024
0c1514b
perf/x86: Add config_mask to represent EVENTSEL bitmask
Jun 26, 2024
b7ab333
perf/x86/intel: Support PERFEVTSEL extension
Jun 26, 2024
3c93fa3
perf/x86/intel: Support Perfmon MSRs aliasing
Jun 26, 2024
fdab2c2
perf/x86/intel: Add PMU support for Clearwater Forest
Apr 15, 2025
64c24c7
perf/x86/intel: Parse CPUID archPerfmonExt leaves for non-hybrid CPUs
Apr 15, 2025
a802deb
perf/x86/intel: Introduce pairs of PEBS static calls
Apr 15, 2025
e1401ef
x86,fs/resctrl: Remove inappropriate references to cacheinfo in the r…
beckwen Oct 9, 2025
bec088d
temp patch for upstream commit
beckwen Oct 13, 2025
c5187da
perf/x86/intel/uncore: Add events for Intel SPR IMC PMU
Apr 19, 2023
ed4ed94
perf/x86: Print PMU counters bitmap in x86_pmu_show_pmu_cap()
Aug 20, 2025
ef3b5d6
perf/x86/intel/cstate: Add Clearwater Forrest support
zhenyw May 30, 2024
755137d
add the unified config.velinux for test
bhe4 Oct 21, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 10 additions & 7 deletions Documentation/admin-guide/hw-vuln/spectre.rst
Original file line number Diff line number Diff line change
Expand Up @@ -484,11 +484,14 @@ Spectre variant 2

Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
boot, by setting the IBRS bit, and they're automatically protected against
Spectre v2 variant attacks, including cross-thread branch target injections
on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
Spectre v2 variant attacks.

Legacy IBRS systems clear the IBRS bit on exit to userspace and
therefore explicitly enable STIBP for that
On Intel's enhanced IBRS systems, this includes cross-thread branch target
injections on SMT systems (STIBP). In other words, Intel eIBRS enables
STIBP, too.

AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
the IBRS bit on exit to userspace, therefore both explicitly enable STIBP.

The retpoline mitigation is turned on by default on vulnerable
CPUs. It can be forced on or off by the administrator
Expand Down Expand Up @@ -622,9 +625,9 @@ kernel command line.
retpoline,generic Retpolines
retpoline,lfence LFENCE; indirect branch
retpoline,amd alias for retpoline,lfence
eibrs enhanced IBRS
eibrs,retpoline enhanced IBRS + Retpolines
eibrs,lfence enhanced IBRS + LFENCE
eibrs Enhanced/Auto IBRS
eibrs,retpoline Enhanced/Auto IBRS + Retpolines
eibrs,lfence Enhanced/Auto IBRS + LFENCE

Not specifying this option is equivalent to
spectre_v2=auto.
Expand Down
6 changes: 3 additions & 3 deletions Documentation/admin-guide/kernel-parameters.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5403,9 +5403,9 @@
retpoline,generic - Retpolines
retpoline,lfence - LFENCE; indirect branch
retpoline,amd - alias for retpoline,lfence
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
eibrs - Enhanced/Auto IBRS
eibrs,retpoline - Enhanced/Auto IBRS + Retpolines
eibrs,lfence - Enhanced/Auto IBRS + LFENCE
ibrs - use IBRS to protect kernel

Not specifying this option is equivalent to
Expand Down
22 changes: 22 additions & 0 deletions Documentation/virt/kvm/api.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7424,6 +7424,28 @@ of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace
the hypercalls whose corresponding bit is in the argument, and return
ENOSYS for the others.

8.35 KVM_CAP_PMU_CAPABILITY
---------------------------

:Capability: KVM_CAP_PMU_CAPABILITY
:Architectures: x86
:Type: vm
:Parameters: arg[0] is bitmask of PMU virtualization capabilities.
:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits

This capability alters PMU virtualization in KVM.

Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of
PMU virtualization capabilities that can be adjusted on a VM.

The argument to KVM_ENABLE_CAP is also a bitmask and selects specific
PMU virtualization capabilities to be applied to the VM. This can
only be invoked on a VM prior to the creation of VCPUs.

At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting
this capability will disable PMU virtualization for that VM. Usermode
should adjust CPUID leaf 0xA to reflect that the PMU is disabled.

9. Known KVM API problems
=========================

Expand Down
6 changes: 6 additions & 0 deletions Documentation/virt/kvm/locking.rst
Original file line number Diff line number Diff line change
Expand Up @@ -257,3 +257,9 @@ time it will be set using the Dirty tracking mechanism described above.
wakeup notification event since external interrupts from the
assigned devices happens, we will find the vCPU on the list to
wakeup.

``vendor_module_lock``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:Type: mutex
:Arch: x86
:Protects: loading a vendor module (kvm_amd or kvm_intel)
10 changes: 10 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -765,6 +765,16 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
void kvm_perf_init(void);
void kvm_perf_teardown(void);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 *
 * NOTE(review): the check is purely "a vCPU pointer is set"; it does not
 * distinguish host-side work done while the vCPU is loaded — presumably that
 * window is acceptable for attribution purposes, matching the comment above.
 * The IS_ENABLED() term compiles the whole expression to false (dead code)
 * when CONFIG_GUEST_PERF_EVENTS is not set.
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
Expand Down
5 changes: 5 additions & 0 deletions arch/arm64/kvm/arm.c
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu_mode_priv(vcpu);
}

/*
 * Return the vCPU's current program counter, used as the guest "instruction
 * pointer" by the generic perf guest-info callbacks when a sample lands in
 * guest context.
 */
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
Expand Down
24 changes: 12 additions & 12 deletions arch/x86/events/amd/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -433,7 +433,7 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (cmpxchg(nb->owners + i, event, NULL) == event)
break;
}
Expand Down Expand Up @@ -500,7 +500,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
* because of successive calls to x86_schedule_events() from
* hw_perf_group_sched_in() without hw_perf_enable()
*/
for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
if (new == -1 || hwc->idx == idx)
/* assign free slot, prefer hwc->idx */
old = cmpxchg(nb->owners + idx, NULL, event);
Expand Down Expand Up @@ -543,7 +543,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
/*
* initialize all possible NB constraints
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
Expand Down Expand Up @@ -739,7 +739,7 @@ static void amd_pmu_check_overflow(void)
* counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;

Expand All @@ -760,7 +760,7 @@ static void amd_pmu_enable_all(int added)

amd_brs_enable_all();

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
hwc = &cpuc->events[idx]->hw;

/* only activate events which are marked as active */
Expand Down Expand Up @@ -954,7 +954,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
/* Clear any reserved bits set by buggy microcode */
status &= amd_pmu_global_cntr_mask;

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
if (!test_bit(idx, cpuc->active_mask))
continue;

Expand Down Expand Up @@ -1289,7 +1289,7 @@ static __initconst const struct x86_pmu amd_pmu = {
.addr_offset = amd_pmu_addr_offset,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_counters = AMD64_NUM_COUNTERS,
.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
.add = amd_pmu_add_event,
.del = amd_pmu_del_event,
.cntval_bits = 48,
Expand Down Expand Up @@ -1388,7 +1388,7 @@ static int __init amd_core_pmu_init(void)
*/
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);

/* Check for Performance Monitoring v2 support */
if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
Expand All @@ -1398,9 +1398,9 @@ static int __init amd_core_pmu_init(void)
x86_pmu.version = 2;

/* Find the number of available Core PMCs */
x86_pmu.num_counters = ebx.split.num_core_pmc;
x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);

amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;

/* Update PMC handling functions */
x86_pmu.enable_all = amd_pmu_v2_enable_all;
Expand Down Expand Up @@ -1428,12 +1428,12 @@ static int __init amd_core_pmu_init(void)
* even numbered counter that has a consecutive adjacent odd
* numbered counter following it.
*/
for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
even_ctr_mask |= BIT_ULL(i);

pair_constraint = (struct event_constraint)
__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
x86_pmu.num_counters / 2, 0,
x86_pmu_max_num_counters(NULL) / 2, 0,
PERF_X86_EVENT_PAIR);

x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
Expand Down
Loading