23 changes: 23 additions & 0 deletions .github/workflows/rust.yml
@@ -31,3 +31,26 @@ jobs:
      - uses: actions/checkout@v6
      - name: Format Rust code
        run: cargo fmt --all -- --check

  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
      - name: Install aarch64 toolchain
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: stable
          targets: aarch64-unknown-none
          components: llvm-tools
      - name: Install QEMU
        run: |
          sudo apt-get update && \
          sudo apt-get install -y --no-install-recommends qemu-system-arm ipxe-qemu
      - name: Install cargo-binutils
        uses: taiki-e/install-action@v2
        with:
          tool: cargo-binutils
      - name: Prepare QEMU
        run: sudo chown $(whoami):$(whoami) /dev/vhost-vsock && sudo chmod g+rw /dev/vhost-vsock
      - name: Run the tests
        run: make test
22 changes: 16 additions & 6 deletions Cargo.lock

Some generated files are not rendered by default.

9 changes: 8 additions & 1 deletion Cargo.toml
@@ -1,3 +1,9 @@
[workspace]
members = [
".",
"tests/isolation_test",
]

[package]
name = "ritm"
version = "0.1.0"
@@ -10,7 +16,7 @@ keywords = ["arm", "aarch64", "cortex-a", "osdev"]
categories = ["embedded", "no-std"]

[dependencies]
aarch64-paging = { version = "0.11", default-features = false }
aarch64-paging = "0.11"
aarch64-rt = { version = "0.4", features = ["el2", "exceptions", "initial-pagetable", "psci"], default-features = false }
arm-pl011-uart = "0.4"
arm-psci = "0.2"
@@ -26,6 +32,7 @@ spin = { version = "0.10", features = ["lazy", "once", "spin_mutex"], default-features = false }

[patch.crates-io]
aarch64-rt = { git = "https://github.com/m4tx/aarch64-rt.git", rev = "4b70c951fa2263ca00bcd029dfc7b68ec74f69e7" }
aarch64-paging = { git = "https://github.com/m4tx/aarch64-paging.git", rev = "7c788ce47730e895edd6ff3ce7979c231b98dcc3" }

[lints.rust]
deprecated-safe = "warn"
5 changes: 4 additions & 1 deletion Makefile
@@ -13,7 +13,7 @@ PAYLOAD ?= payload.bin
QEMU_BIN := target/ritm.qemu.bin
QEMU_RUSTFLAGS := "--cfg platform=\"qemu\""

.PHONY: all build.qemu clean clippy qemu
.PHONY: all build.qemu clean clippy qemu test

all: $(QEMU_BIN)

@@ -37,6 +37,9 @@ qemu: $(QEMU_BIN)
	-device virtconsole,chardev=char0 \
	-device vhost-vsock-device,id=virtiosocket0,guest-cid=102

test:
	tests/isolation_test.py

clean:
	cargo clean
	rm -f target/*.bin
8 changes: 8 additions & 0 deletions src/arch.rs
@@ -38,6 +38,14 @@ pub fn isb() {
}
}

/// TLBI VMALLS12E1 - VMID-based Stage-1/Stage-2 combined invalidation for the EL1&0 regime.
pub fn tlbi_vmalls12e1() {
// SAFETY: TLBI VMALLS12E1 is always safe.
unsafe {
asm!("tlbi vmalls12e1", options(nostack, preserves_flags));
}
}
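
Note that a TLBI only takes effect once it is ordered by barriers. A minimal usage sketch, pairing this helper with the dsb() and isb() helpers that already live in this module:

/// Sketch: flush all combined Stage 1/Stage 2 TLB entries for the current
/// VMID after the guest's Stage 2 mappings change.
fn flush_stage2_tlb() {
    tlbi_vmalls12e1();
    dsb(); // wait for the invalidation to complete
    isb(); // resynchronize the pipeline with the new translations
}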

/// Disables MMU and caches.
///
/// # Safety
112 changes: 101 additions & 11 deletions src/hypervisor.rs
@@ -6,23 +6,30 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use core::arch::naked_asm;

use crate::{
arch,
platform::{Platform, PlatformImpl},
simple_map::SimpleMap,
};
use aarch64_paging::descriptor::Stage2Attributes;
use aarch64_paging::idmap::IdMap;
use aarch64_rt::{RegisterStateRef, Stack};
use arm_sysregs::{
CnthctlEl2, CntvoffEl2, ElrEl2, HcrEl2, MpidrEl1, SpsrEl2, read_cnthctl_el2, read_esr_el2,
read_far_el2, read_hcr_el2, read_mpidr_el1, read_spsr_el2, write_cnthctl_el2,
write_cntvoff_el2, write_elr_el2, write_hcr_el2, write_spsr_el2,
CnthctlEl2, CntvoffEl2, ElrEl1, ElrEl2, EsrEl1, FarEl1, HcrEl2, MpidrEl1, SpsrEl1, SpsrEl2,
VtcrEl2, read_cnthctl_el2, read_esr_el2, read_far_el2, read_hcr_el2, read_mpidr_el1,
read_spsr_el2, read_vbar_el1, write_cnthctl_el2, write_cntvoff_el2, write_elr_el1,
write_elr_el2, write_esr_el1, write_far_el1, write_hcr_el2, write_spsr_el1, write_spsr_el2,
write_vtcr_el2,
};
use core::arch::naked_asm;
use log::debug;
use spin::Once;
use spin::mutex::SpinMutex;

use crate::{
platform::{Platform, PlatformImpl},
simple_map::SimpleMap,
};
static STAGE2_MAP: Once<SpinMutex<IdMap<Stage2Attributes>>> = Once::new();

const SPSR_EL1H: u8 = 5;
const T0SZ_MAX_SIZE: u8 = 64;

/// Entry point for EL1 execution.
///
@@ -35,6 +42,7 @@ const SPSR_EL1H: u8 = 5;
/// address for EL1 execution that never returns.
/// This function must be called in EL2.
pub unsafe fn entry_point_el1(arg0: u64, arg1: u64, arg2: u64, arg3: u64, entry_point: u64) -> ! {
setup_stage2();
// Setup EL1
let mut hcr = read_hcr_el2();
hcr |= HcrEl2::RW;
@@ -84,6 +92,39 @@ pub unsafe fn entry_point_el1(arg0: u64, arg1: u64, arg2: u64, arg3: u64, entry_point: u64) -> ! {
}
}
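
Worth noting: Stage 2 translation only takes effect once HCR_EL2.VM is set, and the elided portion of this hunk presumably does that alongside the RW bit above. A hedged sketch, where HcrEl2::VM is an assumed flag name mirroring the HcrEl2::RW flag used above; verify it against the arm_sysregs crate:

// Assumed flag name: enable Stage 2 translation for the EL1&0 regime.
hcr |= HcrEl2::VM;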

fn setup_stage2() {
debug!("Setting up stage 2 page table");
let mut idmap = STAGE2_MAP
.call_once(|| SpinMutex::new(PlatformImpl::make_stage2_pagetable()))
.lock();

let root_pa = idmap.root_address().0;
debug!("Root PA: {root_pa:#x}");

// Activate the page table
// SAFETY: We are initializing the Stage 2 translation. The guest is not running yet.
let ttbr = unsafe { idmap.activate() };
debug!("idmap.activate() returned ttbr={ttbr:#x}");

let mut vtcr = VtcrEl2::default();
vtcr.set_ps(2); // 40-bit physical address size
vtcr.set_tg0(0); // 4 KiB granule size
vtcr.set_sh0(3); // Inner Shareable memory
vtcr.set_orgn0(1); // Outer Write-Back Read-Allocate Write-Allocate Cacheable
vtcr.set_irgn0(1); // Inner Write-Back Read-Allocate Write-Allocate Cacheable
vtcr.set_sl0(2); // Start Stage 2 walks at level 0
vtcr.set_t0sz(T0SZ_MAX_SIZE - 40); // 40-bit IPA space (T0SZ = 64 - 40 = 24)
debug!("Writing VTCR_EL2={vtcr:#x}...");
// SAFETY: We are initializing the Stage 2 translation. The guest is not running yet.
unsafe {
write_vtcr_el2(vtcr);
}
arch::tlbi_vmalls12e1();
arch::dsb();
arch::isb();
debug!("Stage 2 activation complete.");
}
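
The t0sz value encodes the size of the guest's intermediate physical address (IPA) space: the Stage 2 input region covers 2^(64 - T0SZ) bytes, so a 40-bit IPA space (matching ps = 2 above) needs T0SZ = 24. A worked sketch of the arithmetic, reusing the T0SZ_MAX_SIZE constant defined earlier:

// The Stage 2 input region covers 2^(64 - T0SZ) bytes.
const IPA_BITS: u8 = 40; // must agree with the 40-bit PS setting above
const GUEST_T0SZ: u8 = T0SZ_MAX_SIZE - IPA_BITS; // 64 - 40 = 24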

/// Returns to EL1.
///
/// This function executes the `eret` instruction to return to EL1 with the provided arguments.
@@ -180,15 +221,61 @@ pub fn handle_sync_lower(mut register_state: RegisterStateRef) {
}
}
}
ExceptionClass::Unknown(_) => {
ExceptionClass::DataAbortLowerEL => {
inject_data_abort(&mut register_state);
}
ExceptionClass::Unknown(val) => {
panic!(
"Unexpected sync_lower, esr={esr_el2:#x}, far={:#x}, register_state={register_state:?}",
"Unexpected sync_lower, esr={esr_el2:#x}, ec={val:#x}, far={:#x}, register_state={register_state:?}",
read_far_el2(),
);
}
}
}

fn inject_data_abort(register_state: &mut RegisterStateRef) {
// SAFETY: We are modifying the saved register state to redirect execution.
let regs = unsafe { register_state.get_mut() };
let fault_addr = read_far_el2();
let esr = read_esr_el2();

debug!("Injecting data abort to guest: fault_addr={fault_addr:#x}, esr={esr:#x}");

// Read guest VBAR
let vbar = read_vbar_el1().bits();
assert_ne!(
vbar, 0,
"Guest VBAR_EL1 is 0, cannot inject data abort. Fault addr: {fault_addr:#x}"
);
let handler = vbar + 0x200; // Current EL with SPx Sync

// Save current context to guest EL1 regs
// SAFETY: We are accessing EL1 system registers to inject exception.
unsafe {
write_elr_el1(ElrEl1::from_bits_retain(regs.elr as u64));
write_spsr_el1(SpsrEl1::from_bits_retain(regs.spsr));
write_far_el1(FarEl1::from_bits_retain(fault_addr.va()));
}
write_esr_el1(EsrEl1::from_bits_retain(esr.bits()));

// Redirect execution
#[expect(
clippy::cast_possible_truncation,
reason = "only 64-bit target is supported"
)]
{
regs.elr = handler as usize;
}
// Mask all interrupts (DAIF) and set mode to EL1h
let mut spsr = SpsrEl1::default();
spsr.set_m_3_0(SPSR_EL1H);
spsr |= SpsrEl1::D;
spsr |= SpsrEl1::A;
spsr |= SpsrEl1::I;
spsr |= SpsrEl1::F;
regs.spsr = spsr.bits();
}
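
The vbar + 0x200 offset follows from the fixed AArch64 vector table layout: sixteen 0x80-byte entries in four groups of four, where 0x200 is the synchronous entry for "current EL with SPx". That matches the EL1h mode written into SPSR_EL1 above. For reference, the four synchronous entry offsets:

// AArch64 vector table offsets from VBAR_EL1 (synchronous entries only).
const SYNC_CURRENT_EL_SP0: u64 = 0x000; // current EL, using SP_EL0
const SYNC_CURRENT_EL_SPX: u64 = 0x200; // current EL, using SP_ELx (used above)
const SYNC_LOWER_EL_AARCH64: u64 = 0x400; // exception from a lower EL, AArch64
const SYNC_LOWER_EL_AARCH32: u64 = 0x600; // exception from a lower EL, AArch32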

const AARCH64_INSTRUCTION_LENGTH: usize = 4;

fn try_handle_psci(register_state: &mut RegisterStateRef) -> Result<(), arm_psci::Error> {
@@ -370,6 +457,8 @@ enum ExceptionClass {
HvcTrappedInAArch64,
/// SMC instruction execution in `AArch64` state.
SmcTrappedInAArch64,
/// Data Abort from a lower Exception Level.
DataAbortLowerEL,
#[allow(unused)]
/// Unknown exception class.
Unknown(u8),
@@ -380,6 +469,7 @@ impl From<u8> for ExceptionClass {
match value {
0x16 => Self::HvcTrappedInAArch64,
0x17 => Self::SmcTrappedInAArch64,
0x24 => Self::DataAbortLowerEL,
_ => Self::Unknown(value),
}
}
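
For context, the u8 fed into this conversion is the EC field from bits [31:26] of ESR_EL2; the elided caller presumably extracts it along these lines. A sketch, not code from this PR:

// Derive the exception class from a raw ESR_EL2 value.
fn exception_class(esr_bits: u64) -> ExceptionClass {
    ExceptionClass::from(((esr_bits >> 26) & 0x3f) as u8)
}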
29 changes: 28 additions & 1 deletion src/main.rs
@@ -27,6 +27,7 @@ mod payload_constants {
use aarch64_paging::paging::PAGE_SIZE;
use aarch64_rt::{entry, exception_handlers};
use buddy_system_allocator::{Heap, LockedHeap};
use core::alloc::Layout;
use core::arch::naked_asm;
use core::ops::DerefMut;
use dtoolkit::fdt::Fdt;
@@ -39,14 +40,20 @@ use crate::{
platform::{BootMode, Platform, PlatformImpl},
};

const LOG_LEVEL: LevelFilter = LevelFilter::Info;
const LOG_LEVEL: LevelFilter = LevelFilter::Debug;

const HEAP_SIZE: usize = 40 * PAGE_SIZE;
static HEAP: SpinMutex<[u8; HEAP_SIZE]> = SpinMutex::new([0; HEAP_SIZE]);

static SHARED_HEAP: SpinMutex<[u8; PlatformImpl::SHARED_HEAP_SIZE]> =
SpinMutex::new([0; PlatformImpl::SHARED_HEAP_SIZE]);

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::new();

/// Heap allocator for data that needs to be shared between RITM and the guest running in EL1.
pub static SHARED_HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::new();

#[repr(align(0x200000))] // Linux requires 2MB alignment
struct AlignImage<T>(T);

@@ -74,6 +81,12 @@ fn main(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
SpinMutexGuard::leak(HEAP.try_lock().expect("failed to lock heap")).as_mut_slice(),
);

add_to_heap(
SHARED_HEAP_ALLOCATOR.lock().deref_mut(),
SpinMutexGuard::leak(SHARED_HEAP.try_lock().expect("failed to lock shared heap"))
.as_mut_slice(),
);

let fdt_address = x0 as *const u8;
// SAFETY: We trust that the FDT pointer we were given is valid, and this is the only time we
// use it.
@@ -138,3 +151,17 @@ unsafe fn run_payload_el1(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
hypervisor::entry_point_el1(x0, x1, x2, x3, &raw const NEXT_IMAGE.0 as u64);
}
}

/// Allocates a buffer from the shared heap.
///
/// # Panics
///
/// Panics if the requested size is invalid or if the allocation fails.
pub fn shared_alloc(layout: Layout) -> &'static mut [u8] {
let ptr = SHARED_HEAP_ALLOCATOR
.lock()
.alloc(layout)
.expect("failed to allocate from shared heap");
// SAFETY: The pointer is valid and represents the requested size.
unsafe { core::slice::from_raw_parts_mut(ptr.as_ptr(), layout.size()) }
}
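
A hypothetical usage sketch; the size and alignment below are illustrative, not values taken from this PR:

// Carve a zeroed 512-byte, 16-byte-aligned buffer out of the shared heap
// for data exchanged with the EL1 guest.
let layout = Layout::from_size_align(512, 16).expect("valid layout");
let buf = shared_alloc(layout);
buf.fill(0);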