diff --git a/resources/hiding_ci/build_and_install_kernel.sh b/resources/hiding_ci/build_and_install_kernel.sh
index 41acd53ba36..ea5d92806d0 100755
--- a/resources/hiding_ci/build_and_install_kernel.sh
+++ b/resources/hiding_ci/build_and_install_kernel.sh
@@ -29,6 +29,18 @@ check_userspace() {
   exit 1
 }
 
+install_build_deps() {
+  case $USERSPACE in
+    "UBUNTU")
+      apt-get update && apt-get install -y make bsdmainutils flex bison bc xz-utils libelf-dev elfutils libssl-dev
+      ;;
+    "AL2023")
+      yum groupinstall -y "Development Tools"
+      yum install -y make openssl-devel dkms
+      ;;
+  esac
+}
+
 tidy_up() {
   # Some cleanup after we are done
   echo "Cleaning up.."
@@ -154,6 +166,9 @@ update_boot_config() {
   esac
 }
 
+check_userspace
+install_build_deps
+
 KERNEL_URL=$(cat kernel_url)
 KERNEL_COMMIT_HASH=$(cat kernel_commit_hash)
 KERNEL_PATCHES_DIR=$(pwd)/linux_patches
@@ -210,7 +225,6 @@ echo "New kernel version:" $KERNEL_VERSION
 confirm "$@"
 
 check_root
-check_userspace
 
 echo "Installing kernel modules..."
 make INSTALL_MOD_STRIP=1 modules_install
diff --git a/resources/hiding_ci/kernel_commit_hash b/resources/hiding_ci/kernel_commit_hash
index 927b0259c87..f9edf6840ea 100644
--- a/resources/hiding_ci/kernel_commit_hash
+++ b/resources/hiding_ci/kernel_commit_hash
@@ -1 +1 @@
-d7b8f8e20813f0179d8ef519541a3527e7661d3a
+347e9f5043c89695b01e66b3ed111755afcf1911
diff --git a/resources/hiding_ci/kernel_config_overrides b/resources/hiding_ci/kernel_config_overrides
index 6cb1dd1f894..a46ecc5bde2 100644
--- a/resources/hiding_ci/kernel_config_overrides
+++ b/resources/hiding_ci/kernel_config_overrides
@@ -8,7 +8,6 @@ CONFIG_KVM_SW_PROTECTED_VM=y
 CONFIG_KVM_AMD=y
 CONFIG_KVM_INTEL=y
 CONFIG_KVM_AMD_SEV=y
-CONFIG_KVM_PRIVATE_MEM=y
 CONFIG_KVM_GENERIC_MMU_NOTIFIER=y
 CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
 CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES=y
diff --git a/src/firecracker/examples/uffd/fault_all_handler.rs b/src/firecracker/examples/uffd/fault_all_handler.rs
index 61d1569ccd4..c167333eb96 100644
--- a/src/firecracker/examples/uffd/fault_all_handler.rs
+++ b/src/firecracker/examples/uffd/fault_all_handler.rs
@@ -50,7 +50,7 @@ fn main() {
                 let are_we_faulted_yet = uffd_handler
                     .userfault_bitmap
                     .as_mut()
-                    .map_or(false, |bitmap| !bitmap.is_bit_set(bit));
+                    .is_some_and(|bitmap| !bitmap.is_bit_set(bit));
 
                 if are_we_faulted_yet {
                     // TODO: we currently ignore the result as we may attempt to
diff --git a/src/firecracker/examples/uffd/on_demand_handler.rs b/src/firecracker/examples/uffd/on_demand_handler.rs
index d8686b1af61..50e9fe8504e 100644
--- a/src/firecracker/examples/uffd/on_demand_handler.rs
+++ b/src/firecracker/examples/uffd/on_demand_handler.rs
@@ -119,10 +119,8 @@ fn main() {
                                 println!("uffdio_continue error: {:?}", err)
                             });
                         }
-                    } else {
-                        if !uffd_handler.serve_pf(addr.cast(), uffd_handler.page_size) {
-                            deferred_events.push(event);
-                        }
+                    } else if !uffd_handler.serve_pf(addr.cast(), uffd_handler.page_size) {
+                        deferred_events.push(event);
                     }
                 }
                 userfaultfd::Event::Remove { start, end } => {
diff --git a/src/firecracker/examples/uffd/uffd_utils.rs b/src/firecracker/examples/uffd/uffd_utils.rs
index 6f9c8cf2f80..c2b7d8fc1b6 100644
--- a/src/firecracker/examples/uffd/uffd_utils.rs
+++ b/src/firecracker/examples/uffd/uffd_utils.rs
@@ -247,9 +247,10 @@ impl UffdHandler {
         match (&guest_memfd, &userfault_bitmap_memfd) {
             (Some(guestmem_file), Some(bitmap_file)) => {
                 let guest_memfd_addr =
-                    Some(Self::mmap_helper(size, guestmem_file.as_raw_fd()) as *mut u8);
+                    Some(Self::mmap_helper(size, guestmem_file.as_raw_fd()).cast::<u8>());
 
-                let bitmap_ptr = Self::mmap_helper(size, bitmap_file.as_raw_fd()) as *mut AtomicU64;
+                let bitmap_ptr =
+                    Self::mmap_helper(size, bitmap_file.as_raw_fd()).cast::<AtomicU64>();
 
                 // SAFETY: The bitmap pointer is valid and the size is correct.
                 let userfault_bitmap = Some(unsafe {
@@ -302,7 +303,7 @@ impl UffdHandler {
         let addr = addr as u64;
         for region in &self.mem_regions {
             if region.contains(addr) {
-                return addr - region.base_host_virt_addr + region.offset as u64;
+                return addr - region.base_host_virt_addr + region.offset;
             }
         }
 
@@ -606,7 +607,7 @@ impl Runtime {
     ) -> UffdHandler {
         let mut message_buf = vec![0u8; 1024];
         let mut iovecs = [libc::iovec {
-            iov_base: message_buf.as_mut_ptr() as *mut libc::c_void,
+            iov_base: message_buf.as_mut_ptr().cast::<libc::c_void>(),
            iov_len: message_buf.len(),
         }];
         let mut fds = [0; 3];
@@ -686,7 +687,7 @@ impl Runtime {
             if pollfds[i].revents & libc::POLLIN != 0 {
                 nready -= 1;
                 if pollfds[i].fd == self.stream.as_raw_fd() {
-                    while let Some(fault_request) = uffd_msg_iter.next() {
+                    for fault_request in uffd_msg_iter.by_ref() {
                         let page_size = self.handler.page_size;
 
                         assert!(
diff --git a/src/vmm/src/arch/aarch64/vm.rs b/src/vmm/src/arch/aarch64/vm.rs
index 44897e42e41..d36705f17fb 100644
--- a/src/vmm/src/arch/aarch64/vm.rs
+++ b/src/vmm/src/arch/aarch64/vm.rs
@@ -8,14 +8,6 @@ use crate::arch::aarch64::gic::GicState;
 use crate::vstate::memory::{GuestMemoryExtension, GuestMemoryState};
 use crate::vstate::vm::{VmCommon, VmError};
 
-/// The VM type for this architecture that allows us to use guest_memfd. On ARM, all VMs
-/// support guest_memfd and no special type is needed (in fact, no concept of vm types really
-/// exists, and the correspoding field of the CREATE_VM ioctl determines IPA size instead,
-/// e.g. the size of the guest physical address space. This value cannot be hardcoded, hence
-/// `None` to let the `Vm` constructor now that just normal [`Kvm::create_vm`] should be called,
-/// which internally determines the preferred IPA size.
-pub const VM_TYPE_FOR_SECRET_FREEDOM: Option<u64> = None;
-
 /// Structure representing the current architecture's understand of what a "virtual machine" is.
 #[derive(Debug)]
 pub struct ArchVm {
diff --git a/src/vmm/src/arch/mod.rs b/src/vmm/src/arch/mod.rs
index 05f930682ab..61d65fea1a5 100644
--- a/src/vmm/src/arch/mod.rs
+++ b/src/vmm/src/arch/mod.rs
@@ -17,7 +17,7 @@ pub use aarch64::kvm::{Kvm, KvmArchError, OptionalCapabilities};
 #[cfg(target_arch = "aarch64")]
 pub use aarch64::vcpu::*;
 #[cfg(target_arch = "aarch64")]
-pub use aarch64::vm::{ArchVm, ArchVmError, VM_TYPE_FOR_SECRET_FREEDOM, VmState};
+pub use aarch64::vm::{ArchVm, ArchVmError, VmState};
 #[cfg(target_arch = "aarch64")]
 pub use aarch64::{
     ConfigurationError, MMIO_MEM_SIZE, MMIO_MEM_START, arch_memory_regions,
@@ -35,7 +35,7 @@ pub use x86_64::kvm::{Kvm, KvmArchError};
 #[cfg(target_arch = "x86_64")]
 pub use x86_64::vcpu::*;
 #[cfg(target_arch = "x86_64")]
-pub use x86_64::vm::{ArchVm, ArchVmError, VM_TYPE_FOR_SECRET_FREEDOM, VmState};
+pub use x86_64::vm::{ArchVm, ArchVmError, VmState};
 
 #[cfg(target_arch = "x86_64")]
 pub use crate::arch::x86_64::{
diff --git a/src/vmm/src/arch/x86_64/vm.rs b/src/vmm/src/arch/x86_64/vm.rs
index 09a1c03e6dc..176bfcbf2a4 100644
--- a/src/vmm/src/arch/x86_64/vm.rs
+++ b/src/vmm/src/arch/x86_64/vm.rs
@@ -5,8 +5,7 @@ use std::fmt;
 
 use kvm_bindings::{
     KVM_CLOCK_TSC_STABLE, KVM_IRQCHIP_IOAPIC, KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE,
-    KVM_PIT_SPEAKER_DUMMY, KVM_X86_SW_PROTECTED_VM, MsrList, kvm_clock_data, kvm_irqchip,
-    kvm_pit_config, kvm_pit_state2,
+    KVM_PIT_SPEAKER_DUMMY, MsrList, kvm_clock_data, kvm_irqchip, kvm_pit_config, kvm_pit_state2,
 };
 use kvm_ioctls::Cap;
 use serde::{Deserialize, Serialize};
@@ -47,9 +46,6 @@ pub enum ArchVmError {
     SetTssAddress(kvm_ioctls::Error),
 }
 
-/// The VM type for this architecture that allows us to use guest_memfd.
-pub const VM_TYPE_FOR_SECRET_FREEDOM: Option<u64> = Some(KVM_X86_SW_PROTECTED_VM as u64);
-
 /// Structure representing the current architecture's understand of what a "virtual machine" is.
 #[derive(Debug)]
 pub struct ArchVm {
diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs
index f72506ed9c9..5fa89050288 100644
--- a/src/vmm/src/builder.rs
+++ b/src/vmm/src/builder.rs
@@ -13,6 +13,7 @@ use std::sync::mpsc;
 use std::sync::{Arc, Mutex};
 
 use event_manager::{MutEventSubscriber, SubscriberOps};
+use kvm_ioctls::Cap;
 use libc::EFD_NONBLOCK;
 use linux_loader::cmdline::Cmdline as LoaderKernelCmdline;
 use utils::time::TimestampUs;
@@ -68,7 +69,7 @@ use crate::vmm_config::snapshot::{LoadSnapshotParams, MemBackendType};
 use crate::vstate::kvm::Kvm;
 use crate::vstate::memory::{MaybeBounce, create_memfd};
 use crate::vstate::vcpu::{Vcpu, VcpuError};
-use crate::vstate::vm::{KVM_GMEM_NO_DIRECT_MAP, Vm};
+use crate::vstate::vm::{GUEST_MEMFD_FLAG_NO_DIRECT_MAP, GUEST_MEMFD_FLAG_SUPPORT_SHARED, Vm};
 use crate::{EventManager, Vmm, VmmError, device_manager};
 
 /// Errors associated with starting the instance.
@@ -147,9 +148,15 @@ fn create_vmm_and_vcpus(
     instance_info: &InstanceInfo,
     event_manager: &mut EventManager,
     vcpu_count: u8,
-    kvm_capabilities: Vec<KvmCapability>,
+    mut kvm_capabilities: Vec<KvmCapability>,
     secret_free: bool,
 ) -> Result<(Vmm, Vec<Vcpu>), VmmError> {
+    if secret_free {
+        kvm_capabilities.push(KvmCapability::Add(Cap::GuestMemfd as u32));
+        kvm_capabilities.push(KvmCapability::Add(KVM_CAP_GMEM_SHARED_MEM));
+        kvm_capabilities.push(KvmCapability::Add(KVM_CAP_GMEM_NO_DIRECT_MAP));
+    }
+
     let kvm = Kvm::new(kvm_capabilities)?;
     // Set up Kvm Vm and register memory regions.
     // Build custom CPU config if a custom template is provided.
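
Reviewer note on the two builder.rs hunks around this point: the capabilities pushed in create_vmm_and_vcpus are verified by Kvm::new() before the VM is built, and the flags they guard are the ones passed to create_guest_memfd in the next hunk. For readers who want to poke at the kernel interface directly, here is a minimal sketch of the equivalent KVM_CREATE_GUEST_MEMFD call through kvm-ioctls; the flag values mirror the constants this diff adds in src/vmm/src/vstate/vm.rs, while the function name and the `vm`/`size` parameters are illustrative placeholders, not code from this tree:

    use std::os::fd::RawFd;

    use kvm_bindings::kvm_create_guest_memfd;
    use kvm_ioctls::VmFd;

    // Flag values as introduced in src/vmm/src/vstate/vm.rs by this change.
    const GUEST_MEMFD_FLAG_SUPPORT_SHARED: u64 = 1 << 0;
    const GUEST_MEMFD_FLAG_NO_DIRECT_MAP: u64 = 1 << 1;

    /// Sketch: ask KVM for a `size`-byte guest_memfd that userspace can mmap
    /// (shared) and that the kernel removes from its direct map.
    fn create_secret_free_guest_memfd(vm: &VmFd, size: u64) -> Result<RawFd, kvm_ioctls::Error> {
        let gmem = kvm_create_guest_memfd {
            size,
            flags: GUEST_MEMFD_FLAG_SUPPORT_SHARED | GUEST_MEMFD_FLAG_NO_DIRECT_MAP,
            ..Default::default()
        };
        // KVM rejects unknown flags, so on a kernel without the patches from
        // resources/hiding_ci this fails cleanly instead of misbehaving.
        vm.create_guest_memfd(gmem)
    }

Note the semantic shift relative to the removed KVM_GMEM_NO_DIRECT_MAP = 1: with the newly pinned kernel, bit 0 means "shared memory supported" and no-direct-map has moved to bit 1, which is why the call sites below set both bits together.
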
@@ -238,23 +245,21 @@ pub fn build_microvm_for_boot(
 
     let secret_free = vm_resources.machine_config.secret_free;
 
-    #[cfg(target_arch = "x86_64")]
-    if secret_free {
-        boot_cmdline.insert_str("no-kvmclock")?;
-    }
-
     let (mut vmm, mut vcpus) = create_vmm_and_vcpus(
         instance_info,
         event_manager,
         vm_resources.machine_config.vcpu_count,
         cpu_template.kvm_capabilities.clone(),
-        vm_resources.machine_config.secret_free,
+        secret_free,
     )?;
 
     let guest_memfd = match secret_free {
         true => Some(
             vmm.vm
-                .create_guest_memfd(vm_resources.memory_size(), KVM_GMEM_NO_DIRECT_MAP)
+                .create_guest_memfd(
+                    vm_resources.memory_size(),
+                    GUEST_MEMFD_FLAG_SUPPORT_SHARED | GUEST_MEMFD_FLAG_NO_DIRECT_MAP,
+                )
                 .map_err(VmmError::Vm)?,
         ),
         false => None,
@@ -268,9 +273,6 @@ pub fn build_microvm_for_boot(
         .register_memory_regions(guest_memory, None)
         .map_err(VmmError::Vm)?;
 
-    #[cfg(target_arch = "x86_64")]
-    vmm.vm.set_memory_private().map_err(VmmError::Vm)?;
-
     let entry_point = load_kernel(
         MaybeBounce::<_, 4096>::new_persistent(
             boot_config.kernel_file.try_clone().unwrap(),
@@ -518,6 +520,10 @@ fn memfd_to_slice(memfd: &Option<File>) -> Option<&mut [u8]> {
     }
 }
 
+const KVM_CAP_GMEM_SHARED_MEM: u32 = 243;
+const KVM_CAP_GMEM_NO_DIRECT_MAP: u32 = 244;
+const KVM_CAP_USERFAULT: u32 = 245;
+
 /// Builds and starts a microVM based on the provided MicrovmState.
 ///
 /// An `Arc` reference of the built `Vmm` is also plugged in the `EventManager`, while another
@@ -531,15 +537,13 @@ pub fn build_microvm_from_snapshot(
     params: &LoadSnapshotParams,
     vm_resources: &mut VmResources,
 ) -> Result<Arc<Mutex<Vmm>>, BuildMicrovmFromSnapshotError> {
-    // TODO: take it from kvm-bindings when userfault support is merged upstream
-    const KVM_CAP_USERFAULT: u32 = 241;
-
     // Build Vmm.
     debug!("event_start: build microvm from snapshot");
 
     let secret_free = vm_resources.machine_config.secret_free;
 
     let mut kvm_capabilities = microvm_state.kvm_state.kvm_cap_modifiers.clone();
+
     if secret_free {
         kvm_capabilities.push(KvmCapability::Add(KVM_CAP_USERFAULT));
     }
@@ -556,7 +560,10 @@ pub fn build_microvm_from_snapshot(
     let guest_memfd = match secret_free {
         true => Some(
             vmm.vm
-                .create_guest_memfd(vm_resources.memory_size(), KVM_GMEM_NO_DIRECT_MAP)
+                .create_guest_memfd(
+                    vm_resources.memory_size(),
+                    GUEST_MEMFD_FLAG_SUPPORT_SHARED | GUEST_MEMFD_FLAG_NO_DIRECT_MAP,
+                )
                 .map_err(VmmError::Vm)?,
         ),
         false => None,
@@ -622,9 +629,6 @@ pub fn build_microvm_from_snapshot(
     vmm.uffd = uffd;
     vmm.uffd_socket = socket;
 
-    #[cfg(target_arch = "x86_64")]
-    vmm.vm.set_memory_private().map_err(VmmError::Vm)?;
-
     #[cfg(target_arch = "x86_64")]
     {
         // Scale TSC to match, extract the TSC freq from the state if specified
diff --git a/src/vmm/src/vstate/vcpu.rs b/src/vmm/src/vstate/vcpu.rs
index f845244624f..72d2a178c57 100644
--- a/src/vmm/src/vstate/vcpu.rs
+++ b/src/vmm/src/vstate/vcpu.rs
@@ -328,13 +328,10 @@ impl Vcpu {
                 // does not panic on resume, see https://docs.kernel.org/virt/kvm/api.html .
                 // We do not want to fail if the call is not successful, because depending
                 // that may be acceptable depending on the workload.
-                // TODO: once kvmclock is supported with Secret Fredom, remove this condition.
#[cfg(target_arch = "x86_64")] - if self.userfault_resolved.is_none() { - if let Err(err) = self.kvm_vcpu.fd.kvmclock_ctrl() { - METRICS.vcpu.kvmclock_ctrl_fails.inc(); - warn!("KVM_KVMCLOCK_CTRL call failed {}", err); - } + if let Err(err) = self.kvm_vcpu.fd.kvmclock_ctrl() { + METRICS.vcpu.kvmclock_ctrl_fails.inc(); + warn!("KVM_KVMCLOCK_CTRL call failed {}", err); } return StateMachine::next(Self::paused); @@ -360,13 +357,10 @@ impl Vcpu { // does not panic on resume, see https://docs.kernel.org/virt/kvm/api.html . // We do not want to fail if the call is not successful, because depending // that may be acceptable depending on the workload. - // TODO: once kvmclock is supported with Secret Fredom, remove this condition. #[cfg(target_arch = "x86_64")] - if self.userfault_resolved.is_none() { - if let Err(err) = self.kvm_vcpu.fd.kvmclock_ctrl() { - METRICS.vcpu.kvmclock_ctrl_fails.inc(); - warn!("KVM_KVMCLOCK_CTRL call failed {}", err); - } + if let Err(err) = self.kvm_vcpu.fd.kvmclock_ctrl() { + METRICS.vcpu.kvmclock_ctrl_fails.inc(); + warn!("KVM_KVMCLOCK_CTRL call failed {}", err); } // Move to 'paused' state. diff --git a/src/vmm/src/vstate/vm.rs b/src/vmm/src/vstate/vm.rs index ed0e1b962be..5eec23ce044 100644 --- a/src/vmm/src/vstate/vm.rs +++ b/src/vmm/src/vstate/vm.rs @@ -13,16 +13,16 @@ use std::path::Path; use std::sync::{Arc, Condvar, Mutex}; use kvm_bindings::{ - KVM_MEM_GUEST_MEMFD, KVM_MEM_LOG_DIRTY_PAGES, KVM_MEMORY_ATTRIBUTE_PRIVATE, KVMIO, - kvm_create_guest_memfd, kvm_memory_attributes, kvm_userspace_memory_region, + KVM_MEM_GUEST_MEMFD, KVM_MEM_LOG_DIRTY_PAGES, KVMIO, kvm_create_guest_memfd, + kvm_userspace_memory_region, }; use kvm_ioctls::{Cap, VmFd}; use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::ioctl::ioctl_with_ref; use vmm_sys_util::ioctl_iow_nr; +use crate::arch::host_page_size; pub use crate::arch::{ArchVm as Vm, ArchVmError, VmState}; -use crate::arch::{VM_TYPE_FOR_SECRET_FREEDOM, host_page_size}; use crate::logger::info; use crate::persist::CreateSnapshotError; use crate::utils::u64_to_usize; @@ -34,7 +34,8 @@ use crate::vstate::memory::{ use crate::vstate::vcpu::VcpuError; use crate::{DirtyBitmap, Vcpu, mem_size_mib}; -pub(crate) const KVM_GMEM_NO_DIRECT_MAP: u64 = 1; +pub(crate) const GUEST_MEMFD_FLAG_SUPPORT_SHARED: u64 = 1 << 0; +pub(crate) const GUEST_MEMFD_FLAG_NO_DIRECT_MAP: u64 = 1 << 1; /// KVM userfault information #[derive(Copy, Clone, Default, Eq, PartialEq, Debug)] @@ -137,14 +138,7 @@ impl Vm { const MAX_ATTEMPTS: u32 = 5; let mut attempt = 1; let fd = loop { - let create_result = if secret_free && VM_TYPE_FOR_SECRET_FREEDOM.is_some() { - kvm.fd - .create_vm_with_type(VM_TYPE_FOR_SECRET_FREEDOM.unwrap()) - } else { - kvm.fd.create_vm() - }; - - match create_result { + match kvm.fd.create_vm() { Ok(fd) => break fd, Err(e) if e.errno() == libc::EINTR && attempt < MAX_ATTEMPTS => { info!("Attempt #{attempt} of KVM_CREATE_VM returned EINTR"); @@ -371,28 +365,6 @@ impl Vm { &self.common.guest_memory } - /// Sets the memory attributes on all guest_memfd-backed regions to private - pub fn set_memory_private(&self) -> Result<(), VmError> { - if !self.secret_free() { - return Ok(()); - } - - for region in self.guest_memory().iter() { - let attr = kvm_memory_attributes { - address: region.start_addr().0, - size: region.len(), - attributes: KVM_MEMORY_ATTRIBUTE_PRIVATE as u64, - ..Default::default() - }; - - self.fd() - .set_memory_attributes(attr) - .map_err(VmError::SetMemoryAttributes)? 
- } - - Ok(()) - } - /// Resets the KVM dirty bitmap for each of the guest's memory regions. pub fn reset_dirty_bitmap(&self) { self.guest_memory() diff --git a/tests/integration_tests/functional/test_kvm_ptp.py b/tests/integration_tests/functional/test_kvm_ptp.py index 4b44ca124eb..55c878d121f 100644 --- a/tests/integration_tests/functional/test_kvm_ptp.py +++ b/tests/integration_tests/functional/test_kvm_ptp.py @@ -2,9 +2,14 @@ # SPDX-License-Identifier: Apache-2.0 """Check that the kvm_ptp device works""" - import pytest +from framework.properties import global_props + +pytestmark = pytest.mark.skipif( + global_props.cpu_architecture == "aarch64", reason="currently broken on aarch64" +) + def test_kvm_ptp(uvm_any_booted): """Test kvm_ptp is usable""" diff --git a/tests/integration_tests/performance/test_huge_pages.py b/tests/integration_tests/performance/test_huge_pages.py index ba2705ff540..1c943cef30f 100644 --- a/tests/integration_tests/performance/test_huge_pages.py +++ b/tests/integration_tests/performance/test_huge_pages.py @@ -54,11 +54,6 @@ def check_hugetlbfs_in_use(pid: int, allocation_name: str): assert kernel_page_size_kib > 4 -@pytest.mark.skipif( - global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64", - reason="Huge page tests with secret hidden kernels on ARM currently fail", -) def test_hugetlbfs_boot(uvm_plain): """Tests booting a microvm with guest memory backed by 2MB hugetlbfs pages""" @@ -73,11 +68,6 @@ def test_hugetlbfs_boot(uvm_plain): ) -@pytest.mark.skipif( - global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64", - reason="Huge page tests with secret hidden kernels on ARM currently fail", -) def test_hugetlbfs_snapshot(microvm_factory, guest_kernel_linux_5_10, rootfs): """ Test hugetlbfs snapshot restore via uffd @@ -105,11 +95,6 @@ def test_hugetlbfs_snapshot(microvm_factory, guest_kernel_linux_5_10, rootfs): check_hugetlbfs_in_use(vm.firecracker_pid, "/anon_hugepage") -@pytest.mark.skipif( - global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64", - reason="Huge page tests with secret hidden kernels on ARM currently fail", -) def test_hugetlbfs_diff_snapshot(microvm_factory, uvm_plain): """ Test hugetlbfs differential snapshot support. @@ -150,11 +135,6 @@ def test_hugetlbfs_diff_snapshot(microvm_factory, uvm_plain): # Verify if the restored microvm works. 
-@pytest.mark.skipif( - global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64", - reason="Huge page tests with secret hidden kernels on ARM currently fail", -) @pytest.mark.parametrize("huge_pages", HugePagesConfig) def test_ept_violation_count( microvm_factory, @@ -231,11 +211,6 @@ def test_ept_violation_count( metrics.put_metric(metric, int(metric_value), "Count") -@pytest.mark.skipif( - global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64", - reason="Huge page tests with secret hidden kernels on ARM currently fail", -) def test_negative_huge_pages_plus_balloon(uvm_plain): """Tests that huge pages and memory ballooning cannot be used together""" uvm_plain.memory_monitor = None diff --git a/tests/integration_tests/performance/test_initrd.py b/tests/integration_tests/performance/test_initrd.py index 28df8159155..3845e5610c0 100644 --- a/tests/integration_tests/performance/test_initrd.py +++ b/tests/integration_tests/performance/test_initrd.py @@ -4,7 +4,6 @@ import pytest from framework.microvm import HugePagesConfig, Serial -from framework.properties import global_props INITRD_FILESYSTEM = "rootfs" @@ -21,11 +20,6 @@ def uvm_with_initrd(microvm_factory, guest_kernel, record_property, artifact_dir yield uvm -@pytest.mark.skipif( - global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64", - reason="Huge page tests with secret hidden kernels on ARM currently fail", -) @pytest.mark.parametrize("huge_pages", HugePagesConfig) def test_microvm_initrd_with_serial(uvm_with_initrd, huge_pages): """ diff --git a/tests/integration_tests/performance/test_snapshot.py b/tests/integration_tests/performance/test_snapshot.py index b2b0e2a4fbc..f89828e2725 100644 --- a/tests/integration_tests/performance/test_snapshot.py +++ b/tests/integration_tests/performance/test_snapshot.py @@ -13,7 +13,6 @@ import host_tools.drive as drive_tools from framework.microvm import HugePagesConfig, Microvm, SnapshotType -from framework.properties import global_props USEC_IN_MSEC = 1000 NS_IN_MSEC = 1_000_000 @@ -112,15 +111,6 @@ def test_restore_latency( We only test a single guest kernel, as the guest kernel does not "participate" in snapshot restore. """ - if ( - test_setup.huge_pages != HugePagesConfig.NONE - and global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64" - ): - pytest.skip( - "huge pages with secret hiding kernels on ARM are currently failing" - ) - vm = test_setup.boot_vm(microvm_factory, guest_kernel_linux_5_10, rootfs, False) metrics.set_dimensions( @@ -170,15 +160,6 @@ def test_post_restore_latency( if huge_pages != HugePagesConfig.NONE and uffd_handler is None: pytest.skip("huge page snapshots can only be restored using uffd") - if ( - huge_pages != HugePagesConfig.NONE - and global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64" - ): - pytest.skip( - "huge pages with secret hiding kernels on ARM are currently failing" - ) - if secret_free and uffd_handler is None: pytest.skip("Restoring from a file is not compatible with Secret Freedom") @@ -243,15 +224,6 @@ def test_population_latency( secret_free, ): """Collects population latency metrics (e.g. 
how long it takes UFFD handler to fault in all memory)""" - if ( - huge_pages != HugePagesConfig.NONE - and global_props.host_linux_version_tpl > (6, 1) - and global_props.cpu_architecture == "aarch64" - ): - pytest.skip( - "huge pages with secret hiding kernels on ARM are currently failing" - ) - if secret_free and huge_pages != HugePagesConfig.NONE: pytest.skip("Huge pages are not supported with Secret Freedom yet") diff --git a/tools/devtool b/tools/devtool index 9d12c58fd60..efbd0a6d56d 100755 --- a/tools/devtool +++ b/tools/devtool @@ -740,12 +740,6 @@ cmd_test() { env |grep -P "^(AWS_EMF_|BUILDKITE|CODECOV_)" > env.list if [[ $performance_tweaks -eq 1 ]]; then - if [[ "$(uname --machine)" == "x86_64" ]]; then - say "Detected CI and performance tests, tuning CPU frequency scaling and idle states for reduced variability" - - apply_performance_tweaks - fi - # It seems that even if the tests using huge pages run sequentially on ag=1 agents, right-sizing the huge pages # pool to the total number of huge pages used across all tests results in spurious failures with pool depletion # anyway (something else on the host seems to be stealing our huge pages, and we cannot "ear mark" them for @@ -796,10 +790,6 @@ cmd_test() { # undo performance tweaks (in case the instance gets recycled for a non-perf test) if [[ $performance_tweaks -eq 1 ]]; then - if [[ "$(uname --machine)" == "x86_64" ]]; then - unapply_performance_tweaks - fi - echo $huge_pages_old |sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages >/dev/null fi
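
A closing reviewer note on the provisional constants KVM_CAP_GMEM_SHARED_MEM (243), KVM_CAP_GMEM_NO_DIRECT_MAP (244) and KVM_CAP_USERFAULT (245) introduced in src/vmm/src/builder.rs: since they are not yet in kvm-bindings, nothing ties them to the running host at compile time, and a mismatch with the kernel pinned by resources/hiding_ci/kernel_commit_hash only surfaces as a failed capability check at boot. When debugging a CI host, a standalone probe along the following lines can distinguish "wrong kernel" from "Firecracker bug". This is a sketch that assumes kvm-ioctls' raw KVM_CHECK_EXTENSION wrapper, check_extension_raw; the constant values are copied from this diff and may be renumbered before the kernel patches land upstream:

    use std::os::raw::c_ulong;

    use kvm_ioctls::Kvm;

    // Provisional capability numbers from src/vmm/src/builder.rs in this diff.
    const KVM_CAP_GMEM_SHARED_MEM: u32 = 243;
    const KVM_CAP_GMEM_NO_DIRECT_MAP: u32 = 244;
    const KVM_CAP_USERFAULT: u32 = 245;

    fn main() {
        let kvm = Kvm::new().expect("failed to open /dev/kvm");
        for (name, cap) in [
            ("KVM_CAP_GMEM_SHARED_MEM", KVM_CAP_GMEM_SHARED_MEM),
            ("KVM_CAP_GMEM_NO_DIRECT_MAP", KVM_CAP_GMEM_NO_DIRECT_MAP),
            ("KVM_CAP_USERFAULT", KVM_CAP_USERFAULT),
        ] {
            // KVM_CHECK_EXTENSION returns 0 for capabilities the kernel does not know.
            let supported = kvm.check_extension_raw(c_ulong::from(cap)) > 0;
            println!("{name} ({cap}): {}", if supported { "ok" } else { "missing" });
        }
    }
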