
Commit acac5e9

jpoimboe authored and suryasaimadhu committed
x86/speculation: Remove x86_spec_ctrl_mask
This mask has been made redundant by kvm_spec_ctrl_test_value(). And it doesn't even work when MSR interception is disabled, as the guest can just write to SPEC_CTRL directly.

Signed-off-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
1 parent bbb69e8 commit acac5e9
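
The commit message points at kvm_spec_ctrl_test_value() as the reason the mask is redundant: guest-supplied SPEC_CTRL values are validated in KVM at MSR-write time rather than clamped here in bugs.c. As a rough illustration only, the probe-style check below shows how such validation can work without a static mask: try the value on the host MSR and treat a faulting write as "unsupported bits". This is a sketch reconstructed from memory of arch/x86/kvm/x86.c, not a quote of the upstream function; details may differ.

/*
 * Sketch of a probe-style check for a guest-supplied SPEC_CTRL value.
 * Assumption: validation works by attempting the write on the host MSR
 * and restoring the previous contents; a faulting write means the value
 * contains bits this CPU does not support. Returns non-zero on failure.
 */
int kvm_spec_ctrl_test_value(u64 value)
{
        u64 saved_value;
        unsigned long flags;
        int ret = 0;

        local_irq_save(flags);

        if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
                ret = 1;
        else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
                ret = 1;
        else
                wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);

        local_irq_restore(flags);

        return ret;
}

This also explains the second point in the message: once KVM stops intercepting SPEC_CTRL (which it typically does after the first accepted guest write), guest writes reach the hardware directly, so any host-side clamping in x86_virt_spec_ctrl() could never have been enforced anyway.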

File tree

1 file changed: +1, -30 lines changed


arch/x86/kernel/cpu/bugs.c

Lines changed: 1 addition & 30 deletions
@@ -85,12 +85,6 @@ u64 spec_ctrl_current(void)
 }
 EXPORT_SYMBOL_GPL(spec_ctrl_current);
 
-/*
- * The vendor and possibly platform specific bits which can be modified in
- * x86_spec_ctrl_base.
- */
-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
-
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
@@ -146,10 +140,6 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
-	/* Allow STIBP in MSR_SPEC_CTRL if supported */
-	if (boot_cpu_has(X86_FEATURE_STIBP))
-		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
-
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
@@ -208,19 +198,10 @@ void __init check_bugs(void)
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-	u64 msrval, guestval, hostval = spec_ctrl_current();
+	u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
 	struct thread_info *ti = current_thread_info();
 
-	/* Is MSR_SPEC_CTRL implemented ? */
 	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
-		/*
-		 * Restrict guest_spec_ctrl to supported values. Clear the
-		 * modifiable bits in the host base value and or the
-		 * modifiable bits from the guest value.
-		 */
-		guestval = hostval & ~x86_spec_ctrl_mask;
-		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
-
 		if (hostval != guestval) {
 			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -1665,16 +1646,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 		break;
 	}
 
-	/*
-	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
-	 * bit in the mask to allow guests to use the mitigation even in the
-	 * case where the host does not enable it.
-	 */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
-	}
-
 	/*
 	 * We have three CPU feature flags that are in play here:
 	 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
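
Taking the x86_virt_spec_ctrl() hunk as a whole, the start of the function after this patch reads as below. This is reconstructed purely from the hunk above; the closing braces and the rest of the function are elided where the diff does not show them.

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	/* Guest value is used as-is; validation now happens in KVM. */
	u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
	struct thread_info *ti = current_thread_info();

	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/* ... remainder of the function is unchanged by this patch ... */
}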
