|
19 | 19 | #include <asm/microcode_intel.h> |
20 | 20 | #include <asm/hwcap2.h> |
21 | 21 | #include <asm/elf.h> |
| 22 | +#include <asm/cpu_device_id.h> |
| 23 | +#include <asm/cmdline.h> |
22 | 24 |
|
23 | 25 | #ifdef CONFIG_X86_64 |
24 | 26 | #include <linux/topology.h> |
|
31 | 33 | #include <asm/apic.h> |
32 | 34 | #endif |
33 | 35 |
|
| 36 | +enum split_lock_detect_state { |
| 37 | + sld_off = 0, |
| 38 | + sld_warn, |
| 39 | + sld_fatal, |
| 40 | +}; |
| 41 | + |
| 42 | +/* |
| 43 | + * Default to sld_off because most systems do not support split lock detection. |
| 44 | + * split_lock_setup() will switch this to sld_warn on systems that support |
| 45 | + * split lock detect, unless there is a command line override. |
| 46 | + */ |
| 47 | +static enum split_lock_detect_state sld_state = sld_off; |
| 48 | + |
34 | 49 | /* |
35 | 50 | * Processors which have self-snooping capability can handle conflicting |
36 | 51 | * memory type across CPUs by snooping its own cache. However, there exists |
@@ -570,6 +585,8 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c) |
570 | 585 | wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); |
571 | 586 | } |
572 | 587 |
|
| 588 | +static void split_lock_init(void); |
| 589 | + |
573 | 590 | static void init_intel(struct cpuinfo_x86 *c) |
574 | 591 | { |
575 | 592 | early_init_intel(c); |
@@ -684,6 +701,8 @@ static void init_intel(struct cpuinfo_x86 *c) |
684 | 701 | tsx_enable(); |
685 | 702 | if (tsx_ctrl_state == TSX_CTRL_DISABLE) |
686 | 703 | tsx_disable(); |
| 704 | + |
| 705 | + split_lock_init(); |
687 | 706 | } |
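Note: init_intel() is the per-CPU init hook registered through cpu_dev_register(intel_cpu_dev) at the bottom of this file, so the split_lock_init() call added above runs on every logical CPU as it is brought up. That is what actually programs MSR_TEST_CTRL on each core and sibling thread, after split_lock_setup() has chosen the policy once on the boot CPU.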
688 | 707 |
|
689 | 708 | #ifdef CONFIG_X86_32 |
@@ -945,3 +964,159 @@ static const struct cpu_dev intel_cpu_dev = { |
945 | 964 | }; |
946 | 965 |
|
947 | 966 | cpu_dev_register(intel_cpu_dev); |
| 967 | + |
| 968 | +#undef pr_fmt |
| 969 | +#define pr_fmt(fmt) "x86/split lock detection: " fmt |
| 970 | + |
| 971 | +static const struct { |
| 972 | + const char *option; |
| 973 | + enum split_lock_detect_state state; |
| 974 | +} sld_options[] __initconst = { |
| 975 | + { "off", sld_off }, |
| 976 | + { "warn", sld_warn }, |
| 977 | + { "fatal", sld_fatal }, |
| 978 | +}; |
| 979 | + |
| 980 | +static inline bool match_option(const char *arg, int arglen, const char *opt) |
| 981 | +{ |
| 982 | + int len = strlen(opt); |
| 983 | + |
| 984 | + return len == arglen && !strncmp(arg, opt, len); |
| 985 | +} |
| 986 | + |
| 987 | +static void __init split_lock_setup(void) |
| 988 | +{ |
| 989 | + char arg[20]; |
| 990 | + int i, ret; |
| 991 | + |
| 992 | + setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); |
| 993 | + sld_state = sld_warn; |
| 994 | + |
| 995 | + ret = cmdline_find_option(boot_command_line, "split_lock_detect", |
| 996 | + arg, sizeof(arg)); |
| 997 | + if (ret >= 0) { |
| 998 | + for (i = 0; i < ARRAY_SIZE(sld_options); i++) { |
| 999 | + if (match_option(arg, ret, sld_options[i].option)) { |
| 1000 | + sld_state = sld_options[i].state; |
| 1001 | + break; |
| 1002 | + } |
| 1003 | + } |
| 1004 | + } |
| 1005 | + |
| 1006 | + switch (sld_state) { |
| 1007 | + case sld_off: |
| 1008 | + pr_info("disabled\n"); |
| 1009 | + break; |
| 1010 | + |
| 1011 | + case sld_warn: |
| 1012 | + pr_info("warning about user-space split_locks\n"); |
| 1013 | + break; |
| 1014 | + |
| 1015 | + case sld_fatal: |
| 1016 | + pr_info("sending SIGBUS on user-space split_locks\n"); |
| 1017 | + break; |
| 1018 | + } |
| 1019 | +} |
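With the table and parser above, the mode can be selected on the kernel command line via the split_lock_detect= parameter; for example (parameter name taken from the cmdline_find_option() call above, the value being one of the sld_options strings):

    split_lock_detect=fatal

A value that matches none of the table entries simply falls out of the loop, leaving sld_state at the sld_warn default forced at the top of split_lock_setup().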
| 1020 | + |
| 1021 | +/* |
| 1022 | + * Locking is not required at the moment because only bit 29 of this |
| 1023 | + * MSR is implemented, and locking would not stop one thread's update |
| 1024 | + * from being immediately undone by the sibling thread. |
| 1025 | + * Use the "safe" versions of rdmsr/wrmsr here because although code |
| 1026 | + * checks CPUID and MSR bits to make sure the TEST_CTRL MSR should |
| 1027 | + * exist, there may be glitches in virtualization that leave a guest |
| 1028 | + * with an incorrect view of real h/w capabilities. |
| 1029 | + */ |
| 1030 | +static bool __sld_msr_set(bool on) |
| 1031 | +{ |
| 1032 | + u64 test_ctrl_val; |
| 1033 | + |
| 1034 | + if (rdmsrl_safe(MSR_TEST_CTRL, &test_ctrl_val)) |
| 1035 | + return false; |
| 1036 | + |
| 1037 | + if (on) |
| 1038 | + test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; |
| 1039 | + else |
| 1040 | + test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT; |
| 1041 | + |
| 1042 | + return !wrmsrl_safe(MSR_TEST_CTRL, test_ctrl_val); |
| 1043 | +} |
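For context, __sld_msr_set() only ever toggles the single architectural bit the comment above mentions. The constants it relies on are expected to come from msr-index.h in the same series; the sketch below shows the assumed definitions (TEST_CTRL is MSR 0x33 and split lock detect is bit 29 - verify against the actual header):

    #define MSR_TEST_CTRL                           0x00000033
    #define MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT     29
    #define MSR_TEST_CTRL_SPLIT_LOCK_DETECT         BIT(MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT)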
| 1044 | + |
| 1045 | +static void split_lock_init(void) |
| 1046 | +{ |
| 1047 | + if (sld_state == sld_off) |
| 1048 | + return; |
| 1049 | + |
| 1050 | + if (__sld_msr_set(true)) |
| 1051 | + return; |
| 1052 | + |
| 1053 | + /* |
| 1054 | + * If this is anything other than the boot-cpu, you've done |
| 1055 | + * funny things and you get to keep whatever pieces. |
| 1056 | + */ |
| 1057 | + pr_warn("MSR fail -- disabled\n"); |
| 1058 | + sld_state = sld_off; |
| 1059 | +} |
| 1060 | + |
| 1061 | +bool handle_user_split_lock(struct pt_regs *regs, long error_code) |
| 1062 | +{ |
| 1063 | + if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal) |
| 1064 | + return false; |
| 1065 | + |
| 1066 | + pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", |
| 1067 | + current->comm, current->pid, regs->ip); |
| 1068 | + |
| 1069 | + /* |
| 1070 | + * Disable the split lock detection for this task so it can make |
| 1071 | + * progress and set TIF_SLD so the detection is re-enabled via |
| 1072 | + * switch_to_sld() when the task is scheduled out. |
| 1073 | + */ |
| 1074 | + __sld_msr_set(false); |
| 1075 | + set_tsk_thread_flag(current, TIF_SLD); |
| 1076 | + return true; |
| 1077 | +} |
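handle_user_split_lock() returning true means the fault was absorbed; returning false means the caller should keep delivering SIGBUS. The intended caller is the #AC (alignment check) exception handler in traps.c, updated elsewhere in this series; a minimal sketch of that call site, under the assumption that the rest of the handler is unchanged:

    /* inside the #AC handler, after kernel-mode faults have been handled */
    if (handle_user_split_lock(regs, error_code))
        return;

    /* EFLAGS.AC was set or sld_state == sld_fatal: raise SIGBUS as before */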
| 1078 | + |
| 1079 | +/* |
| 1080 | + * This function is called only when switching between tasks with |
| 1081 | + * different split-lock detection modes. It sets the MSR for the |
| 1082 | + * mode of the new task. This is right most of the time, but since |
| 1083 | + * the MSR is shared by hyperthreads on a physical core there can |
| 1084 | + * be glitches when the two threads need different modes. |
| 1085 | + */ |
| 1086 | +void switch_to_sld(unsigned long tifn) |
| 1087 | +{ |
| 1088 | + __sld_msr_set(!(tifn & _TIF_SLD)); |
| 1089 | +} |
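The expected caller is the TIF-flag path of the context-switch code; a minimal sketch, assuming the hook sits with the other flag handling in __switch_to_xtra(), where tifp/tifn are the previous/next task's TIF words:

    /* only touch the MSR when the two tasks disagree on _TIF_SLD */
    if ((tifp ^ tifn) & _TIF_SLD)
        switch_to_sld(tifn);

Because switch_to_sld() takes the next task's flags, a task marked with TIF_SLD keeps detection off while it runs, and detection is switched back on when a task without the flag is scheduled in.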
| 1090 | + |
| 1091 | +#define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY} |
| 1092 | + |
| 1093 | +/* |
| 1094 | + * The following processors have the split lock detection feature. But |
| 1095 | + * since they don't have the IA32_CORE_CAPABILITIES MSR, the feature cannot |
| 1096 | + * be enumerated. Enable it by family and model matching on these |
| 1097 | + * processors. |
| 1098 | + */ |
| 1099 | +static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { |
| 1100 | + SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_X), |
| 1101 | + SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_L), |
| 1102 | + {} |
| 1103 | +}; |
| 1104 | + |
| 1105 | +void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) |
| 1106 | +{ |
| 1107 | + u64 ia32_core_caps = 0; |
| 1108 | + |
| 1109 | + if (c->x86_vendor != X86_VENDOR_INTEL) |
| 1110 | + return; |
| 1111 | + if (cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) { |
| 1112 | + /* Enumerate features reported in IA32_CORE_CAPABILITIES MSR. */ |
| 1113 | + rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); |
| 1114 | + } else if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
| 1115 | + /* Enumerate split lock detection by family and model. */ |
| 1116 | + if (x86_match_cpu(split_lock_cpu_ids)) |
| 1117 | + ia32_core_caps |= MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT; |
| 1118 | + } |
| 1119 | + |
| 1120 | + if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT) |
| 1121 | + split_lock_setup(); |
| 1122 | +} |
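Two details worth noting here. First, the family/model fallback is skipped under a hypervisor because a guest may see the host's model number without the host actually virtualizing split lock detection. Second, the MSR constants are again expected from msr-index.h; the assumed definitions are sketched below (IA32_CORE_CAPABILITIES is MSR 0xcf with split lock detection advertised in bit 5 - check the header for the authoritative values):

    #define MSR_IA32_CORE_CAPS                            0x000000cf
    #define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT      5
    #define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT          BIT(MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT)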