@@ -19,6 +19,8 @@
 #include <asm/microcode_intel.h>
 #include <asm/hwcap2.h>
 #include <asm/elf.h>
+#include <asm/cpu_device_id.h>
+#include <asm/cmdline.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -31,6 +33,20 @@
 #include <asm/apic.h>
 #endif
 
+enum split_lock_detect_state {
+	sld_off = 0,
+	sld_warn,
+	sld_fatal,
+};
+
+/*
+ * Default to sld_off because most systems do not support split lock
+ * detection. split_lock_setup() will switch this to sld_warn on systems
+ * that support split lock detect, unless there is a command line override.
+ */
+static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
+static u64 msr_test_ctrl_cache __ro_after_init;
+
 /*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
@@ -570,6 +586,8 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
 	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
 }
 
+static void split_lock_init(void);
+
 static void init_intel(struct cpuinfo_x86 *c)
 {
 	early_init_intel(c);
@@ -684,6 +702,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 		tsx_enable();
 	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
 		tsx_disable();
+
+	split_lock_init();
 }
 
 #ifdef CONFIG_X86_32
@@ -945,3 +965,166 @@ static const struct cpu_dev intel_cpu_dev = {
 };
 
 cpu_dev_register(intel_cpu_dev);
+
+#undef pr_fmt
+#define pr_fmt(fmt) "x86/split lock detection: " fmt
+
+static const struct {
+	const char *option;
+	enum split_lock_detect_state state;
+} sld_options[] __initconst = {
+	{ "off",	sld_off   },
+	{ "warn",	sld_warn  },
+	{ "fatal",	sld_fatal },
+};
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+	int len = strlen(opt);
+
+	return len == arglen && !strncmp(arg, opt, len);
+}
+
+static bool split_lock_verify_msr(bool on)
+{
+	u64 ctrl, tmp;
+
+	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
+		return false;
+	if (on)
+		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	else
+		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
+		return false;
+	rdmsrl(MSR_TEST_CTRL, tmp);
+	return ctrl == tmp;
+}
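
The write-then-read-back dance above is deliberate: rdmsrl_safe()/wrmsrl_safe()
catch the #GP raised when MSR_TEST_CTRL does not exist at all, and the final
comparison catches environments (typically hypervisors) that accept the write
but silently drop it. Below is a minimal user-space sketch of the same readback
through the msr driver; the file name is hypothetical, and it assumes root plus
a loaded msr module:

/* read_test_ctrl.c — observe MSR_TEST_CTRL (0x33) bit 29 from user space. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_TEST_CTRL			0x33
#define TEST_CTRL_SPLIT_LOCK_DETECT_BIT	29

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The msr driver interprets the file offset as the MSR index. */
	if (pread(fd, &val, sizeof(val), MSR_TEST_CTRL) != sizeof(val))
		return 1;	/* rdmsr faulted: MSR not implemented */
	printf("split lock detect: %s\n",
	       (val >> TEST_CTRL_SPLIT_LOCK_DETECT_BIT) & 1 ? "on" : "off");
	close(fd);
	return 0;
}
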
+
+static void __init split_lock_setup(void)
+{
+	enum split_lock_detect_state state = sld_warn;
+	char arg[20];
+	int i, ret;
+
+	if (!split_lock_verify_msr(false)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
+
+	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
+				  arg, sizeof(arg));
+	if (ret >= 0) {
+		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
+			if (match_option(arg, ret, sld_options[i].option)) {
+				state = sld_options[i].state;
+				break;
+			}
+		}
+	}
+
+	switch (state) {
+	case sld_off:
+		pr_info("disabled\n");
+		return;
+	case sld_warn:
+		pr_info("warning about user-space split_locks\n");
+		break;
+	case sld_fatal:
+		pr_info("sending SIGBUS on user-space split_locks\n");
+		break;
+	}
+
+	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+
+	if (!split_lock_verify_msr(true)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
+
+	sld_state = state;
+	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
+}
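
split_lock_setup() keys off the split_lock_detect= kernel parameter: "off",
"warn" (the default on capable hardware) and "fatal" map to the sld_options
table above. Here is a minimal user-space sketch (not part of the patch) that
provokes a split lock, useful for exercising the warn and fatal paths; it
assumes 64-byte cache lines, which holds on the CPUs this patch targets:

/* split_lock_test.c — perform a locked RMW that straddles a cache line. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = aligned_alloc(64, 128);

	if (!buf)
		return 1;
	/* A 4-byte object at offset 62 spans the 64-byte line boundary. */
	int *p = (int *)(buf + 62);

	*p = 0;
	/* Compiles to LOCK XADD; raises #AC once SLD is armed. */
	__atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);
	printf("survived the split lock, value %d\n", *p);
	free(buf);
	return 0;
}

Under split_lock_detect=warn the kernel logs the ratelimited "#AC" message and
the program completes; under split_lock_detect=fatal it is killed with SIGBUS
before the printf runs.
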
+
+/*
+ * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
+ * is not implemented as one thread could undo the setting of the other
+ * thread immediately after dropping the lock anyway.
+ */
+static void sld_update_msr(bool on)
+{
+	u64 test_ctrl_val = msr_test_ctrl_cache;
+
+	if (on)
+		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+
+	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
+}
+
+static void split_lock_init(void)
+{
+	split_lock_verify_msr(sld_state != sld_off);
+}
+
+bool handle_user_split_lock(struct pt_regs *regs, long error_code)
+{
+	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
+		return false;
+
+	pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
+			    current->comm, current->pid, regs->ip);
+
+	/*
+	 * Disable the split lock detection for this task so it can make
+	 * progress and set TIF_SLD so the detection is re-enabled via
+	 * switch_to_sld() when the task is scheduled out.
+	 */
+	sld_update_msr(false);
+	set_tsk_thread_flag(current, TIF_SLD);
+	return true;
+}
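
For context, the companion hunk in arch/x86/kernel/traps.c (not shown on this
page) wires this into the #AC handler roughly along these lines; a sketch
reconstructed from the same series, not a verbatim quote:

dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "alignment check", regs, error_code,
		       X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	/* A split lock in the kernel itself is a hard failure. */
	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	/* In warn mode this logs, disarms SLD for the task and returns true. */
	if (handle_user_split_lock(regs, error_code))
		return;

	/* sld_fatal (or a genuine #AC with EFLAGS.AC set): deliver SIGBUS. */
	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);
}
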
+
+/*
+ * This function is called only when switching between tasks with
+ * different split-lock detection modes. It sets the MSR for the
+ * mode of the new task. This is right most of the time, but since
+ * the MSR is shared by hyperthreads on a physical core there can
+ * be glitches when the two threads need different modes.
+ */
+void switch_to_sld(unsigned long tifn)
+{
+	sld_update_msr(!(tifn & _TIF_SLD));
+}
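
The caller lives in __switch_to_xtra() in arch/x86/kernel/process.c; as the
comment above says, it only fires when the outgoing and incoming task disagree
on TIF_SLD. A sketch of the relevant fragment, assuming tifp/tifn hold the two
tasks' thread flags as elsewhere in that function:

	/* Re-arm or disarm split lock detection for the incoming task. */
	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
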
+
+#define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY}
+
+/*
+ * The following processors have the split lock detection feature, but
+ * since they lack the IA32_CORE_CAPABILITIES MSR, the feature cannot be
+ * enumerated through it. Enable it by family and model matching on these
+ * processors instead.
+ */
+static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
+	SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_X),
+	SPLIT_LOCK_CPU(INTEL_FAM6_ICELAKE_L),
+	{}
+};
+
+void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
+{
+	u64 ia32_core_caps = 0;
+
+	if (c->x86_vendor != X86_VENDOR_INTEL)
+		return;
+	if (cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) {
+		/* Enumerate features reported in IA32_CORE_CAPABILITIES MSR. */
+		rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
+	} else if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+		/* Enumerate split lock detection by family and model. */
+		if (x86_match_cpu(split_lock_cpu_ids))
+			ia32_core_caps |= MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT;
+	}
+
+	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
+		split_lock_setup();
+}
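
Once split_lock_setup() forces X86_FEATURE_SPLIT_LOCK_DETECT, the feature shows
up as a flag in /proc/cpuinfo. A hypothetical user-space check (the flag string
is an assumption based on the usual lowercase cpufeature naming):

/* check_sld.c — report whether the kernel enumerated split lock detection. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "flags", 5) &&
		    strstr(line, "split_lock_detect")) {
			puts("split_lock_detect enumerated");
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	puts("split_lock_detect not enumerated");
	return 1;
}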