1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion dev_tests/src/ratchet.rs
@@ -37,7 +37,7 @@ fn ratchet_globals() -> Result<()> {
("litebox/", 9),
("litebox_platform_linux_kernel/", 6),
("litebox_platform_linux_userland/", 5),
("litebox_platform_lvbs/", 20),
("litebox_platform_lvbs/", 22),
("litebox_platform_multiplex/", 1),
("litebox_platform_windows_userland/", 7),
("litebox_runner_linux_userland/", 1),
8 changes: 6 additions & 2 deletions litebox_common_linux/src/vmap.rs
Expand Up @@ -166,12 +166,16 @@ pub enum PhysPointerError {
NoMappingInfo,
#[error("Overflow occurred during calculation")]
Overflow,
#[error("Non-contiguous physical pages in the array")]
NonContiguousPages,
#[error("The operation is unsupported on this platform")]
UnsupportedOperation,
#[error("Unsupported permissions: {0:#x}")]
UnsupportedPermissions(u8),
#[error("Memory copy failed")]
CopyFailed,
#[error("Duplicate physical page address {0:#x} in the input array")]
DuplicatePhysicalAddress(usize),
#[error("Virtual address space exhausted in vmap region")]
VaSpaceExhausted,
#[error("Page-table frame allocation failed (out of memory)")]
FrameAllocationFailed,
}
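
This hunk removes `NonContiguousPages` (the platform can now map non-contiguous pages, per the lib.rs changes below) and adds variants that split caller mistakes from resource exhaustion. A sketch of how a call site might branch on the new variants — the `report` helper is hypothetical, not part of this change:

```rust
use litebox_common_linux::vmap::PhysPointerError;

// Hypothetical diagnostic helper, not part of this PR.
fn report(err: &PhysPointerError) -> &'static str {
    match err {
        PhysPointerError::DuplicatePhysicalAddress(_) => "same physical page passed twice",
        PhysPointerError::VaSpaceExhausted => "vmap VA region is full; unmap something first",
        PhysPointerError::FrameAllocationFailed => "out of memory for page-table frames",
        _ => "other physical-pointer failure",
    }
}
```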
1 change: 1 addition & 0 deletions litebox_platform_lvbs/Cargo.toml
@@ -17,6 +17,7 @@ spin = { version = "0.10.0", default-features = false, features = [
"once",
"rwlock",
] }
bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] }
arrayvec = { version = "0.7.6", default-features = false }
rangemap = { version = "1.5.1", features = ["const_fn"] }
thiserror = { version = "2.0.6", default-features = false }
271 changes: 242 additions & 29 deletions litebox_platform_lvbs/src/arch/x86/mm/paging.rs

Large diffs are not rendered by default.
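
The 271-line paging.rs diff is collapsed, and the `crate::mm::vmap` allocator it pairs with (called from lib.rs below) is not shown in this excerpt either. What follows is a minimal sketch of what a bitvec-backed version could look like, reconstructed only from the API surface used below (`vmap_allocator()`, `allocate_va_and_register_map`, `unregister_allocation`, `is_vmap_address`, the `VmapAllocError` variants) plus the new `bitvec` dependency above; the region bounds, bookkeeping, and locking are all assumptions, not the actual implementation:

```rust
// Sketch only. Region base/size and internal bookkeeping are hypothetical.
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::vec::Vec;
use bitvec::vec::BitVec;
use spin::{Once, RwLock};
use x86_64::{
    VirtAddr,
    structures::paging::{PhysFrame, Size4KiB},
};

const VMAP_BASE: u64 = 0xffff_a000_0000_0000; // hypothetical region base
const VMAP_SLOTS: usize = 1 << 20; // hypothetical: 4 GiB of VA in 4 KiB slots
const PAGE: u64 = 4096;

pub enum VmapAllocError {
    EmptyInput,
    DuplicateMapping,
    VaSpaceExhausted,
}

struct Inner {
    slots: BitVec,                   // one bit per VA slot; set = in use
    allocs: BTreeMap<u64, Vec<u64>>, // base VA -> physical frame addresses
    mapped: BTreeSet<u64>,           // every frame currently registered
}

pub struct VmapAllocator {
    inner: RwLock<Inner>,
}

impl VmapAllocator {
    pub fn allocate_va_and_register_map(
        &self,
        frames: &[PhysFrame<Size4KiB>],
    ) -> Result<VirtAddr, VmapAllocError> {
        if frames.is_empty() {
            return Err(VmapAllocError::EmptyInput);
        }
        let mut g = self.inner.write();
        let addrs: Vec<u64> = frames.iter().map(|f| f.start_address().as_u64()).collect();
        if addrs.iter().any(|a| g.mapped.contains(a)) {
            return Err(VmapAllocError::DuplicateMapping);
        }
        // First-fit scan for a run of `frames.len()` free slots.
        let (mut run, mut start, mut found) = (0usize, 0usize, None);
        for (i, bit) in g.slots.iter().by_vals().enumerate() {
            if bit {
                (run, start) = (0, i + 1);
            } else {
                run += 1;
                if run == frames.len() {
                    found = Some(start);
                    break;
                }
            }
        }
        let start = found.ok_or(VmapAllocError::VaSpaceExhausted)?;
        for i in start..start + frames.len() {
            g.slots.set(i, true);
        }
        g.mapped.extend(addrs.iter().copied());
        let base = VMAP_BASE + start as u64 * PAGE;
        g.allocs.insert(base, addrs);
        Ok(VirtAddr::new(base))
    }

    /// Returns the page count, or `None` if `base` was never allocated.
    pub fn unregister_allocation(&self, base: VirtAddr) -> Option<usize> {
        let mut g = self.inner.write();
        let addrs = g.allocs.remove(&base.as_u64())?;
        let start = ((base.as_u64() - VMAP_BASE) / PAGE) as usize;
        for i in start..start + addrs.len() {
            g.slots.set(i, false);
        }
        for a in &addrs {
            g.mapped.remove(a);
        }
        Some(addrs.len())
    }
}

pub fn is_vmap_address(va: VirtAddr) -> bool {
    (VMAP_BASE..VMAP_BASE + VMAP_SLOTS as u64 * PAGE).contains(&va.as_u64())
}

static VMAP_ALLOCATOR: Once<VmapAllocator> = Once::new();

pub fn vmap_allocator() -> &'static VmapAllocator {
    VMAP_ALLOCATOR.call_once(|| VmapAllocator {
        inner: RwLock::new(Inner {
            slots: BitVec::repeat(false, VMAP_SLOTS),
            allocs: BTreeMap::new(),
            mapped: BTreeSet::new(),
        }),
    })
}
```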

3 changes: 3 additions & 0 deletions litebox_platform_lvbs/src/host/per_cpu_variables.rs
@@ -263,6 +263,9 @@ impl PerCpuVariablesAsm {
pub fn set_vtl_return_addr(&self, addr: usize) {
self.vtl_return_addr.set(addr);
}
pub fn get_vtl_return_addr(&self) -> usize {
self.vtl_return_addr.get()
}
pub fn set_vtl0_state_top_addr(&self, addr: usize) {
self.vtl0_state_top_addr.set(addr);
}
246 changes: 156 additions & 90 deletions litebox_platform_lvbs/src/lib.rs
@@ -24,14 +24,12 @@ use litebox::{
shim::ContinueOperation,
utils::TruncateExt,
};
use litebox_common_linux::{
PunchthroughSyscall,
errno::Errno,
vmap::{
PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError,
VmapManager,
},
#[cfg(feature = "optee_syscall")]
use litebox_common_linux::vmap::{
PhysPageAddr, PhysPageAddrArray, PhysPageMapInfo, PhysPageMapPermissions, PhysPointerError,
VmapManager,
};
use litebox_common_linux::{PunchthroughSyscall, errno::Errno};
use x86_64::{
VirtAddr,
structures::paging::{
@@ -41,6 +39,9 @@ };
};
use zerocopy::{FromBytes, IntoBytes};

#[cfg(feature = "optee_syscall")]
use crate::mm::vmap::vmap_allocator;

extern crate alloc;

pub mod arch;
@@ -509,6 +510,7 @@ impl<Host: HostInterface> LinuxKernel<Host> {
.ok_or(DeallocationError::Unaligned)?,
false,
true,
false,
)
}
}
@@ -1055,7 +1057,7 @@ impl<Host: HostInterface, const ALIGN: usize> PageManagementProvider<ALIGN> for
FixedAddressBehavior::Hint | FixedAddressBehavior::NoReplace => {}
FixedAddressBehavior::Replace => {
// Clear the existing mappings first.
unsafe { current_pt.unmap_pages(range, true, true).unwrap() };
unsafe { current_pt.unmap_pages(range, true, true, false).unwrap() };
}
}
let flags = u32::from(initial_permissions.bits())
@@ -1077,7 +1079,7 @@ impl<Host: HostInterface, const ALIGN: usize> PageManagementProvider<ALIGN> for
unsafe {
self.page_table_manager
.current_page_table()
.unmap_pages(range, true, true)
.unmap_pages(range, true, true, false)
}
}

@@ -1169,23 +1171,21 @@ impl<Host: HostInterface> litebox::platform::SystemInfoProvider for LinuxKernel<
}
}

/// Checks whether the given physical addresses are contiguous with respect to ALIGN.
///
/// Note: This is a temporary check to let `VmapManager` work with this platform
/// which does not yet support virtually contiguous mapping of non-contiguous physical pages
/// (for now, it maps physical pages with a fixed offset).
#[cfg(feature = "optee_syscall")]
fn check_contiguity<const ALIGN: usize>(
addrs: &[PhysPageAddr<ALIGN>],
) -> Result<(), PhysPointerError> {
/// Checks whether the given physical addresses are contiguous with respect to ALIGN.
fn is_contiguous<const ALIGN: usize>(addrs: &[PhysPageAddr<ALIGN>]) -> bool {
for window in addrs.windows(2) {
let first = window[0].as_usize();
let second = window[1].as_usize();
if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? {
return Err(PhysPointerError::NonContiguousPages);
if let Some(expected) = first.checked_add(ALIGN) {
if second != expected {
return false;
}
} else {
return false;
}
}
Ok(())
true
}
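
A standalone sketch of the same windows-based check over raw `usize` addresses (`is_contiguous_raw` is hypothetical, since constructing `PhysPageAddr` values isn't shown in this diff):

```rust
// Same logic as is_contiguous above; the overflow case falls out
// naturally because checked_add returns None.
fn is_contiguous_raw(addrs: &[usize], align: usize) -> bool {
    addrs.windows(2).all(|w| w[0].checked_add(align) == Some(w[1]))
}

assert!(is_contiguous_raw(&[0x1000, 0x2000, 0x3000], 0x1000));
assert!(!is_contiguous_raw(&[0x1000, 0x3000], 0x1000));          // gap
assert!(!is_contiguous_raw(&[usize::MAX - 0xfff, 0x0], 0x1000)); // overflow
```

Note the behavior change in this hunk: an overflow while computing the expected next address now routes to the non-contiguous (vmap) path rather than returning `PhysPointerError::Overflow`.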

#[cfg(feature = "optee_syscall")]
@@ -1195,72 +1195,132 @@ impl<Host: HostInterface, const ALIGN: usize> VmapManager<ALIGN> for LinuxKernel
pages: &PhysPageAddrArray<ALIGN>,
perms: PhysPageMapPermissions,
) -> Result<PhysPageMapInfo<ALIGN>, PhysPointerError> {
// TODO: Remove this check once this platform supports virtually contiguous
// non-contiguous physical page mapping.
check_contiguity(pages)?;

if pages.is_empty() {
return Err(PhysPointerError::InvalidPhysicalAddress(0));
}
let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64);
let phys_end = x86_64::PhysAddr::new(
pages
.last()
.unwrap()
.as_usize()
.checked_add(ALIGN)
.ok_or(PhysPointerError::Overflow)? as u64,
);
let frame_range = if ALIGN == PAGE_SIZE {
PhysFrame::range(
PhysFrame::<Size4KiB>::containing_address(phys_start),
PhysFrame::<Size4KiB>::containing_address(phys_end),
)
} else {
unimplemented!("ALIGN other than 4KiB is not supported yet")
};

if ALIGN != PAGE_SIZE {
unimplemented!("ALIGN other than 4KiB is not supported yet");
}

Review comment (Member): Nit: check alignment before the more expensive loop?

// TODO: Add `PageTableFlags::NO_EXECUTE` once DEP is enabled.
let mut flags = PageTableFlags::PRESENT;
if perms.contains(PhysPageMapPermissions::WRITE) {
flags |= PageTableFlags::WRITABLE;
}

if let Ok(page_addr) = self
.page_table_manager
.current_page_table()
.map_phys_frame_range(frame_range, flags)
{
Ok(PhysPageMapInfo {
base: page_addr,
size: pages.len() * ALIGN,
})
// If pages are contiguous, use `map_phys_frame_range`, which is efficient and doesn't require vmap VA space.
if is_contiguous(pages) {
let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64);
let phys_end = x86_64::PhysAddr::new(
pages
.last()
.unwrap()
.as_usize()
.checked_add(ALIGN)
.ok_or(PhysPointerError::Overflow)? as u64,
);
let frame_range = PhysFrame::range(
PhysFrame::<Size4KiB>::containing_address(phys_start),
PhysFrame::<Size4KiB>::containing_address(phys_end),
);

match self
.page_table_manager
.current_page_table()
.map_phys_frame_range(frame_range, flags)
{
Ok(page_addr) => Ok(PhysPageMapInfo {
base: page_addr,
size: pages.len() * ALIGN,
}),
Err(MapToError::PageAlreadyMapped(_)) => {
Err(PhysPointerError::AlreadyMapped(pages[0].as_usize()))
}
Err(MapToError::FrameAllocationFailed) => {
Err(PhysPointerError::FrameAllocationFailed)
}
Err(MapToError::ParentEntryHugePage) => Err(
PhysPointerError::InvalidPhysicalAddress(pages[0].as_usize()),
),
}
} else {
Err(PhysPointerError::InvalidPhysicalAddress(
pages[0].as_usize(),
))
// Reject duplicate page addresses
{
let mut seen = hashbrown::HashSet::with_capacity(pages.len());
for page in pages {
if !seen.insert(page.as_usize()) {
return Err(PhysPointerError::DuplicatePhysicalAddress(page.as_usize()));
}
}
}

let frames: alloc::vec::Vec<PhysFrame<Size4KiB>> = pages
.iter()
.map(|p| PhysFrame::containing_address(x86_64::PhysAddr::new(p.as_usize() as u64)))
.collect();

let base_va = vmap_allocator()
.allocate_va_and_register_map(&frames)
.map_err(|e| match e {
crate::mm::vmap::VmapAllocError::EmptyInput => {
PhysPointerError::InvalidPhysicalAddress(0)
}
crate::mm::vmap::VmapAllocError::DuplicateMapping => {
PhysPointerError::AlreadyMapped(pages[0].as_usize())
}
crate::mm::vmap::VmapAllocError::VaSpaceExhausted => {
PhysPointerError::VaSpaceExhausted
}
})?;

match self
.page_table_manager
.current_page_table()
.map_non_contiguous_phys_frames(&frames, base_va, flags)
{
Ok(page_addr) => Ok(PhysPageMapInfo {
base: page_addr,
size: pages.len() * ALIGN,
}),
Err(e) => {
let _ = vmap_allocator().unregister_allocation(base_va);
match e {
MapToError::PageAlreadyMapped(_) => {
Err(PhysPointerError::AlreadyMapped(pages[0].as_usize()))
}
MapToError::FrameAllocationFailed => {
Err(PhysPointerError::FrameAllocationFailed)
}
MapToError::ParentEntryHugePage => Err(
PhysPointerError::InvalidPhysicalAddress(pages[0].as_usize()),
),
}
}
}
}
}
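
`map_non_contiguous_phys_frames` comes from the collapsed paging.rs diff. A sketch of what it plausibly does over the x86_64 crate's `Mapper` API (the real signature, return type, and TLB-flush strategy may differ):

```rust
use x86_64::{
    VirtAddr,
    structures::paging::{
        FrameAllocator, MapToError, Mapper, Page, PageTableFlags, PhysFrame, Size4KiB,
    },
};

// Sketch: map each frame at consecutive 4 KiB virtual pages from `base_va`.
// A production version would also unmap already-mapped pages on mid-loop failure.
fn map_non_contiguous_phys_frames<M, A>(
    mapper: &mut M,
    frame_allocator: &mut A,
    frames: &[PhysFrame<Size4KiB>],
    base_va: VirtAddr,
    flags: PageTableFlags,
) -> Result<*mut u8, MapToError<Size4KiB>>
where
    M: Mapper<Size4KiB>,
    A: FrameAllocator<Size4KiB>,
{
    for (i, frame) in frames.iter().enumerate() {
        let page = Page::containing_address(base_va + (i as u64) * 4096);
        // SAFETY: caller guarantees exclusive ownership of the frames; elided here.
        unsafe { mapper.map_to(page, *frame, flags, frame_allocator)?.flush() };
    }
    Ok(base_va.as_mut_ptr())
}
```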

unsafe fn vunmap(&self, vmap_info: PhysPageMapInfo<ALIGN>) -> Result<(), PhysPointerError> {
if ALIGN == PAGE_SIZE {
let Some(page_range) = PageRange::<PAGE_SIZE>::new(
vmap_info.base as usize,
vmap_info.base.wrapping_add(vmap_info.size) as usize,
) else {
return Err(PhysPointerError::UnalignedPhysicalAddress(
vmap_info.base as usize,
ALIGN,
));
};
unsafe {
self.page_table_manager
.current_page_table()
.unmap_pages(page_range, false, true)
.map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize))
}
} else {
unimplemented!("ALIGN other than 4KiB is not supported yet")
if ALIGN != PAGE_SIZE {
unimplemented!("ALIGN other than 4KiB is not supported yet");
}

let base_va = x86_64::VirtAddr::new(vmap_info.base as u64);

// Unmap the page-table entries first. Only release the VA range back
// to the allocator when unmapping succeeds; if it fails, stale PTEs
// remain and recycling the VA would cause collisions.
self.unmap_vtl0_pages(vmap_info.base, vmap_info.size)
.map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize))?;

if crate::mm::vmap::is_vmap_address(base_va) {
crate::mm::vmap::vmap_allocator()
.unregister_allocation(base_va)
.ok_or(PhysPointerError::Unmapped(vmap_info.base as usize))?;
}

Ok(())
}
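
A hypothetical round trip through the trait, for orientation (error handling elided; `kernel` and `pages` stand in for real values):

```rust
// `kernel: LinuxKernel<_>`, `pages: PhysPageAddrArray<4096>` — illustrative only.
let info = kernel.vmap(&pages, PhysPageMapPermissions::WRITE)?;
// ... write through the mapping at info.base for info.size bytes ...
unsafe { kernel.vunmap(info)? };
```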

fn validate_unowned(&self, pages: &PhysPageAddrArray<ALIGN>) -> Result<(), PhysPointerError> {
@@ -1284,23 +1344,20 @@ impl<Host: HostInterface, const ALIGN: usize> VmapManager<ALIGN> for LinuxKernel
pages: &PhysPageAddrArray<ALIGN>,
perms: PhysPageMapPermissions,
) -> Result<(), PhysPointerError> {
let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64);
let phys_end = x86_64::PhysAddr::new(
pages
.last()
.unwrap()
.as_usize()
.checked_add(ALIGN)
.ok_or(PhysPointerError::Overflow)? as u64,
);
let frame_range = if ALIGN == PAGE_SIZE {
PhysFrame::range(
PhysFrame::<Size4KiB>::containing_address(phys_start),
PhysFrame::<Size4KiB>::containing_address(phys_end),
)
} else {
unimplemented!("ALIGN other than 4KiB is not supported yet")
};
if ALIGN != PAGE_SIZE {
unimplemented!("ALIGN other than 4KiB is not supported yet");
}

// Build a RangeSet so that adjacent pages are coalesced into contiguous
// ranges, minimizing the number of hypercalls.
let mut range_set = rangemap::RangeSet::new();
for page in pages {
let start = page.as_usize() as u64;
let end = start
.checked_add(ALIGN as u64)
.ok_or(PhysPointerError::Overflow)?;
range_set.insert(start..end);
}

let mem_attr = if perms.contains(PhysPageMapPermissions::WRITE) {
// VTL1 wants to write data to the pages, preventing VTL0 from reading/executing the pages.
Expand All @@ -1312,8 +1369,17 @@ impl<Host: HostInterface, const ALIGN: usize> VmapManager<ALIGN> for LinuxKernel
// VTL1 no longer protects the pages.
crate::mshv::heki::MemAttr::all()
};
crate::mshv::vsm::protect_physical_memory_range(frame_range, mem_attr)
.map_err(|_| PhysPointerError::UnsupportedPermissions(perms.bits()))

for range in range_set.iter() {
let frame_range = PhysFrame::range(
PhysFrame::<Size4KiB>::containing_address(x86_64::PhysAddr::new(range.start)),
PhysFrame::<Size4KiB>::containing_address(x86_64::PhysAddr::new(range.end)),
);
crate::mshv::vsm::protect_physical_memory_range(frame_range, mem_attr)
.map_err(|_| PhysPointerError::UnsupportedPermissions(perms.bits()))?;
}

Ok(())
}
}
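
The coalescing the comment above relies on is rangemap's documented behavior: inserting a range that overlaps or touches an existing one merges them. A small self-contained demonstration (addresses arbitrary):

```rust
use rangemap::RangeSet;

fn main() {
    let mut set: RangeSet<u64> = RangeSet::new();
    // Two adjacent 4 KiB pages plus one distant page.
    for start in [0x1000u64, 0x2000, 0x8000] {
        set.insert(start..start + 0x1000);
    }
    // Adjacent ranges merged: two protect hypercalls instead of three.
    let ranges: Vec<_> = set.iter().cloned().collect();
    assert_eq!(ranges, [0x1000..0x3000, 0x8000..0x9000]);
}
```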
