|
5 | 5 | use std::result; |
6 | 6 |
|
7 | 7 | use arch_gen::x86::msr_index::*; |
| 8 | +use bitflags::bitflags; |
8 | 9 | use kvm_bindings::{kvm_msr_entry, MsrList, Msrs}; |
9 | 10 | use kvm_ioctls::{Kvm, VcpuFd}; |
10 | 11 |
|
@@ -54,9 +55,74 @@ const MSR_KVM_POLL_CONTROL: u32 = 0x4b56_4d05; |
54 | 55 | const MSR_KVM_ASYNC_PF_INT: u32 = 0x4b56_4d06; |
55 | 56 |
|
56 | 57 | /// Taken from arch/x86/include/asm/msr-index.h |
57 | | -const MSR_IA32_SPEC_CTRL: u32 = 0x0000_0048; |
| 58 | +/// Spectre mitigations control MSR |
| 59 | +pub const MSR_IA32_SPEC_CTRL: u32 = 0x0000_0048; |
| 60 | +/// Architecture capabilities MSR |
| 61 | +pub const MSR_IA32_ARCH_CAPABILITIES: u32 = 0x0000_010a; |
| 62 | + |
58 | 63 | const MSR_IA32_PRED_CMD: u32 = 0x0000_0049; |
59 | 64 |
|
| 65 | +bitflags! { |
| 66 | + /// Feature flags enumerated in the IA32_ARCH_CAPABILITIES MSR. |
| 67 | + /// See https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/cpuid-enumeration-and-architectural-msrs.html |
| 68 | + #[derive(Default)] |
| 69 | + #[repr(C)] |
| 70 | + pub struct ArchCapaMSRFlags: u64 { |
| 71 | + /// The processor is not susceptible to Rogue Data Cache Load (RDCL). |
| 72 | + const RDCL_NO = 1 << 0; |
| 73 | + /// The processor supports enhanced Indirect Branch Restriction Speculation (IBRS) |
| 74 | + const IBRS_ALL = 1 << 1; |
| 75 | + /// The processor supports RSB Alternate. Alternative branch predictors may be used by RET instructions |
| 76 | + /// when the RSB is empty. Software using retpoline may be affected by this behavior. |
| 77 | + const RSBA = 1 << 2; |
| 78 | + /// A value of 1 indicates the hypervisor need not flush the L1D on VM entry. |
| 79 | + const SKIP_L1DFL_VMENTRY = 1 << 3; |
| 80 | + /// Processor is not susceptible to Speculative Store Bypass (SSB). |
| 81 | + const SSB_NO = 1 << 4; |
| 82 | + /// Processor is not susceptible to Microarchitectural Data Sampling (MDS). |
| 83 | + const MDS_NO = 1 << 5; |
| 84 | + /// The processor is not susceptible to a machine check error due to modifying the size of a code page |
| 85 | + /// without TLB invalidation. |
| 86 | + const IF_PSCHANGE_MC_NO = 1 << 6; |
| 87 | + /// The processor supports RTM_DISABLE and TSX_CPUID_CLEAR. |
| 88 | + const TSX_CTRL = 1 << 7; |
| 89 | + /// Processor is not susceptible to Intel® Transactional Synchronization Extensions |
| 90 | + /// (Intel® TSX) Asynchronous Abort (TAA). |
| 91 | + const TAA_NO = 1 << 8; |
| 92 | + // Bit 9 is reserved |
| 93 | + /// Processor supports IA32_MISC_PACKAGE_CTRLS MSR. |
| 94 | + const MISC_PACKAGE_CTRLS = 1 << 10; |
| 95 | + /// Processor supports setting and reading IA32_MISC_PACKAGE_CTLS[0] (ENERGY_FILTERING_ENABLE) bit. |
| 96 | + const ENERGY_FILTERING_CTL = 1 << 11; |
| 97 | + /// The processor supports data operand independent timing mode. |
| 98 | + const DOITM = 1 << 12; |
| 99 | + /// The processor is not affected by either the Shared Buffers Data Read (SBDR) vulnerability or the |
| 100 | + /// Sideband Stale Data Propagator (SSDP). |
| 101 | + const SBDR_SSDP_NO = 1 << 13; |
| 102 | + /// The processor is not affected by the Fill Buffer Stale Data Propagator (FBSDP). |
| 103 | + const FBSDP_NO = 1 << 14; |
| 104 | + /// The processor is not affected by vulnerabilities involving the Primary Stale Data Propagator (PSDP). |
| 105 | + const PSDP_NO = 1 << 15; |
| 106 | + // Bit 16 is reserved |
| 107 | + /// The processor will overwrite fill buffer values as part of MD_CLEAR operations with the VERW instruction. |
| 108 | + /// On these processors, L1D_FLUSH does not overwrite fill buffer values. |
| 109 | + const FB_CLEAR = 1 << 17; |
| 110 | + /// The processor supports read and write to the IA32_MCU_OPT_CTRL MSR (MSR 123H) and to the FB_CLEAR_DIS bit |
| 111 | + /// in that MSR (bit position 3). |
| 112 | + const FB_CLEAR_CTRL = 1 << 18; |
| 113 | + /// A value of 1 indicates the processor may have the RRSBA alternate prediction behavior, |
| 114 | + /// if not disabled by RRSBA_DIS_U or RRSBA_DIS_S. |
| 115 | + const RRSBA = 1 << 19; |
| 116 | + /// A value of 1 indicates BHI_NO branch prediction behavior, |
| 117 | + /// regardless of the value of IA32_SPEC_CTRL[BHI_DIS_S] MSR bit. |
| 118 | + const BHI_NO = 1 << 20; |
| 119 | + // Bits 21:22 are reserved |
| 120 | + /// If set, the IA32_OVERCLOCKING_STATUS MSR exists. |
| 121 | + const OVERCLOCKING_STATUS = 1 << 23; |
| 122 | + // Bits 24:63 are reserved |
| 123 | + } |
| 124 | +} |
| 125 | + |
60 | 126 | // Creates a MsrRange of one msr given as argument. |
61 | 127 | macro_rules! SINGLE_MSR { |
62 | 128 | ($msr:expr) => { |
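The new `ArchCapaMSRFlags` bitflags let a caller build the guest's `IA32_ARCH_CAPABILITIES` value from named bits instead of raw constants. A minimal sketch of that usage, assuming only the items added in this diff plus `kvm_bindings` (the chosen bits and the helper name `arch_capabilities_entry` are illustrative, not part of the change):

```rust
use kvm_bindings::kvm_msr_entry;

/// Illustrative only: build an IA32_ARCH_CAPABILITIES entry advertising a few
/// "not susceptible" bits. Which bits are safe to expose depends on the host CPU.
fn arch_capabilities_entry() -> kvm_msr_entry {
    // Combine named flags; bitflags implements BitOr for the generated struct.
    let caps = ArchCapaMSRFlags::RDCL_NO
        | ArchCapaMSRFlags::SKIP_L1DFL_VMENTRY
        | ArchCapaMSRFlags::MDS_NO;

    kvm_msr_entry {
        index: MSR_IA32_ARCH_CAPABILITIES,
        // bits() yields the raw u64 value written to the MSR.
        data: caps.bits(),
        ..Default::default()
    }
}
```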
@@ -188,8 +254,8 @@ pub fn msr_should_serialize(index: u32) -> bool { |
188 | 254 | ALLOWED_MSR_RANGES.iter().any(|range| range.contains(index)) |
189 | 255 | } |
190 | 256 |
|
191 | | -// Creates and populates required MSR entries for booting Linux on X86_64. |
192 | | -fn create_boot_msr_entries() -> Vec<kvm_msr_entry> { |
| 257 | +/// Creates and populates required MSR entries for booting Linux on X86_64. |
| 258 | +pub fn create_boot_msr_entries() -> Vec<kvm_msr_entry> { |
193 | 259 | let msr_entry_default = |msr| kvm_msr_entry { |
194 | 260 | index: msr, |
195 | 261 | data: 0x0, |
@@ -221,9 +287,8 @@ fn create_boot_msr_entries() -> Vec<kvm_msr_entry> { |
221 | 287 | /// # Arguments |
222 | 288 | /// |
223 | 289 | /// * `vcpu` - Structure for the VCPU that holds the VCPU's fd. |
224 | | -pub fn setup_msrs(vcpu: &VcpuFd) -> Result<()> { |
225 | | - let entry_vec = create_boot_msr_entries(); |
226 | | - let msrs = Msrs::from_entries(&entry_vec).map_err(Error::FamError)?; |
| 290 | +pub fn set_msrs(vcpu: &VcpuFd, msr_entries: &[kvm_msr_entry]) -> Result<()> { |
| 291 | + let msrs = Msrs::from_entries(msr_entries).map_err(Error::FamError)?; |
227 | 292 | vcpu.set_msrs(&msrs) |
228 | 293 | .map_err(Error::SetModelSpecificRegisters) |
229 | 294 | .and_then(|msrs_written| { |
@@ -271,7 +336,8 @@ mod tests { |
271 | 336 | let kvm = Kvm::new().unwrap(); |
272 | 337 | let vm = kvm.create_vm().unwrap(); |
273 | 338 | let vcpu = vm.create_vcpu(0).unwrap(); |
274 | | - setup_msrs(&vcpu).unwrap(); |
| 339 | + let msr_boot_entries = create_boot_msr_entries(); |
| 340 | + set_msrs(&vcpu, &msr_boot_entries).unwrap(); |
275 | 341 |
|
276 | 342 | // This test will check against the last MSR entry configured (the tenth one). |
277 | 343 | // See create_msr_entries() for details. |
|