Merge tag 'x86_bugs_zenbleed' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull Zen 2 errata fix from Borislav Petkov:
 "Fix an issue on AMD Zen2 processors called Zenbleed.

  The bug manifests itself as a data corruption issue when executing
  VZEROUPPER under certain microarchitectural conditions"

* tag 'x86_bugs_zenbleed' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu/amd: Add a Zenbleed fix
  x86/cpu/amd: Move the errata checking functionality up
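
For reference (an illustrative addition, not part of the pull request itself): whether the software fallback introduced here, bit 9 of MSR_AMD64_DE_CFG (MSR 0xc0011029), is currently set on a CPU can be checked from userspace. A minimal sketch, assuming the msr driver is loaded (modprobe msr) and root privileges; the MSR address and bit number are taken from the diff below, the rest is illustrative:

/* zenbleed_bit.c - report whether the Zen 2 FP backup fix bit is set.
 * Reads MSR_AMD64_DE_CFG (0xc0011029) on CPU 0 via /dev/cpu/0/msr, where the
 * file offset selects the MSR number. Build with: cc -o zenbleed_bit zenbleed_bit.c
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_DE_CFG                        0xc0011029
#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* The msr device returns the 64-bit MSR selected by the file offset. */
        if (pread(fd, &val, sizeof(val), MSR_AMD64_DE_CFG) != sizeof(val)) {
                perror("pread");
                close(fd);
                return 1;
        }
        close(fd);

        printf("DE_CFG = %#" PRIx64 ", FP backup fix bit %s\n", val,
               (val >> MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT) & 1 ? "set" : "clear");
        return 0;
}

On an affected Zen 2 part without the fixed microcode, zenbleed_check() below sets the bit, so the sketch should report it as set; once a fixed microcode revision is loaded the kernel clears it again.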
torvalds committed Jul 24, 2023
2 parents 6eaae19 + 522b1d6 commit 0a9266b
Showing 5 changed files with 133 additions and 72 deletions.
1 change: 1 addition & 0 deletions arch/x86/include/asm/microcode.h
@@ -5,6 +5,7 @@
#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include <asm/microcode_amd.h>

struct ucode_patch {
        struct list_head plist;
2 changes: 2 additions & 0 deletions arch/x86/include/asm/microcode_amd.h
@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family);
extern void load_ucode_amd_ap(unsigned int family);
extern int __init save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
extern void amd_check_microcode(void);
#else
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
static inline void load_ucode_amd_ap(unsigned int family) {}
static inline int __init
save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) {}
static inline void amd_check_microcode(void) {}
#endif
#endif /* _ASM_X86_MICROCODE_AMD_H */
1 change: 1 addition & 0 deletions arch/x86/include/asm/msr-index.h
@@ -545,6 +545,7 @@
#define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9

#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
199 changes: 127 additions & 72 deletions arch/x86/kernel/cpu/amd.c
@@ -27,18 +27,85 @@

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static const int amd_erratum_1054[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
* nodes_per_socket: Stores the number of nodes per socket.
* Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
* Node Identifiers[10:8]
*/
static u32 nodes_per_socket = 1;

/*
* AMD errata checking
*
* Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
* AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
* have an OSVW id assigned, which it takes as first argument. Both take a
* variable number of family-specific model-stepping ranges created by
* AMD_MODEL_RANGE().
*
* Example:
*
* const int amd_erratum_319[] =
* AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
* AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
* AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
*/

#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

static const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static const int amd_zenbleed[] =
        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
                           AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
                           AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;

                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
                if (osvw_id < osvw_len) {
                        u64 osvw_bits;

                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
                               osvw_bits);
                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
                }
        }

        /* OSVW unavailable or ID unknown, match family-model-stepping range */
        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
                    (ms <= AMD_MODEL_RANGE_END(range)))
                        return true;

        return false;
}
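
/*
 * Illustrative note (not part of this commit): a worked example of the range
 * encoding used above, for the first Zenbleed entry.
 *
 *   AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf)
 *     = (0x17 << 24) | (0x30 << 16) | (0x0 << 12) | (0x4f << 4) | 0xf
 *     = 0x173004ff
 *
 * For that value, AMD_MODEL_RANGE_FAMILY() yields 0x17, AMD_MODEL_RANGE_START()
 * yields 0x300 and AMD_MODEL_RANGE_END() yields 0x4ff. cpu_has_amd_erratum()
 * packs the CPU as ms = (model << 4) | stepping, so e.g. family 0x17,
 * model 0x31, stepping 0x0 gives ms = 0x310, which falls inside
 * [0x300, 0x4ff] and matches the erratum.
 */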

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
@@ -916,6 +983,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
        }
}

static bool cpu_has_zenbleed_microcode(void)
{
        u32 good_rev = 0;

        switch (boot_cpu_data.x86_model) {
        case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
        case 0x60 ... 0x67: good_rev = 0x0860010b; break;
        case 0x68 ... 0x6f: good_rev = 0x08608105; break;
        case 0x70 ... 0x7f: good_rev = 0x08701032; break;
        case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;

        default:
                return false;
                break;
        }

        if (boot_cpu_data.microcode < good_rev)
                return false;

        return true;
}

static void zenbleed_check(struct cpuinfo_x86 *c)
{
        if (!cpu_has_amd_erratum(c, amd_zenbleed))
                return;

        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return;

        if (!cpu_has(c, X86_FEATURE_AVX))
                return;

        if (!cpu_has_zenbleed_microcode()) {
                pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
                msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
        } else {
                msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
        }
}
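
/*
 * Illustrative note (not part of this commit): on affected parts without the
 * fixed microcode revision, the DE_CFG bit above acts as a software fallback;
 * per the pr_notice above, updating microcode remains the preferred ("most
 * optimal") fix, and once a fixed revision is loaded the bit is cleared again.
 */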

static void init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd(c);
@@ -1020,6 +1128,8 @@ static void init_amd(struct cpuinfo_x86 *c)
        if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
            cpu_has(c, X86_FEATURE_AUTOIBRS))
                WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

        zenbleed_check(c);
}

#ifdef CONFIG_X86_32
@@ -1115,73 +1225,6 @@ static const struct cpu_dev amd_cpu_dev = {

cpu_dev_register(amd_cpu_dev);

/*
* AMD errata checking
*
* Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
* AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
* have an OSVW id assigned, which it takes as first argument. Both take a
* variable number of family-specific model-stepping ranges created by
* AMD_MODEL_RANGE().
*
* Example:
*
* const int amd_erratum_319[] =
* AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
* AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
* AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
*/

#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

static const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;

                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
                if (osvw_id < osvw_len) {
                        u64 osvw_bits;

                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
                               osvw_bits);
                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
                }
        }

        /* OSVW unavailable or ID unknown, match family-model-stepping range */
        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
                    (ms <= AMD_MODEL_RANGE_END(range)))
                        return true;

        return false;
}

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
@@ -1235,3 +1278,15 @@ u32 amd_get_highest_perf(void)
        return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);

static void zenbleed_check_cpu(void *unused)
{
        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

        zenbleed_check(c);
}

void amd_check_microcode(void)
{
        on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
2 changes: 2 additions & 0 deletions arch/x86/kernel/cpu/common.c
@@ -2287,6 +2287,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info)

        perf_check_microcode();

        amd_check_microcode();

        store_cpu_caps(&curr_info);

        if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
