
Commit dadf701

Run the payload at EL2

1 parent 3f03472 commit dadf701

3 files changed: +251 -3 lines

linker/qemu.ld

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 MEMORY
 {
-    image : ORIGIN = 0x40080000, LENGTH = 2M
+    image : ORIGIN = 0x40080000, LENGTH = 64M
 }
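
The `image` region grows from 2M to 64M so that the roughly 18 MiB kernel Image embedded in src/main.rs, plus the loader itself, still fits in the region. A minimal compile-time guard for that sizing assumption (the constants mirror this diff; the guard itself is illustrative, not part of the commit):

// Illustrative guard, assuming the embedded payload and the loader's own code
// and data must together fit the 64 MiB `image` region.
const IMAGE_REGION_LEN: usize = 64 * 1024 * 1024; // LENGTH = 64M in linker/qemu.ld
const PAYLOAD_LEN: usize = 18_815_488; // size of NEXT_IMAGE in src/main.rs
const _: () = assert!(PAYLOAD_LEN < IMAGE_REGION_LEN);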

src/arch.rs

Lines changed: 207 additions & 0 deletions

@@ -0,0 +1,207 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Architecture-specific code.
+
+use core::arch::asm;
+
+/// Data Synchronization Barrier.
+pub fn dsb() {
+    // SAFETY: Data Synchronization Barrier is always safe.
+    unsafe {
+        asm!("dsb sy", options(nostack, preserves_flags));
+    }
+}
+
+/// Data Memory Barrier.
+pub fn dmb() {
+    // SAFETY: Data Memory Barrier is always safe.
+    unsafe {
+        asm!("dmb sy", options(nostack, preserves_flags));
+    }
+}
+
+/// Instruction Synchronization Barrier.
+pub fn isb() {
+    // SAFETY: Instruction Synchronization Barrier is always safe.
+    unsafe {
+        asm!("isb", options(nostack, preserves_flags));
+    }
+}
+
+/// Invalidate all instruction caches.
+pub fn ic_iallu() {
+    // SAFETY: `ic iallu` is always safe.
+    unsafe {
+        asm!("ic iallu", options(nostack, preserves_flags));
+    }
+}
+
+/// Invalidate all TLB entries for EL2.
+pub fn tlbi_alle2is() {
+    // SAFETY: `tlbi alle2is` is always safe.
+    unsafe {
+        asm!("tlbi alle2is", options(nostack, preserves_flags));
+    }
+}
+
+macro_rules! sys_reg {
+    ($name:ident, {$($const_name:ident: $const_val:expr),*}) => {
+        pub mod $name {
+            use core::arch::asm;
+            $(pub const $const_name: u64 = $const_val;)*
+
+            #[doc = concat!("Read the `", stringify!($name), "` system register.")]
+            ///
+            /// # Safety
+            ///
+            /// This function emits a raw `MRS` instruction. The caller must guarantee that:
+            ///
+            /// * The register is readable at the current Exception Level.
+            /// * Reading the register does not destructively alter hardware state (e.g.,
+            ///   acknowledging an interrupt by reading `ICC_IAR1_EL1`).
+            #[allow(unused)]
+            pub unsafe fn read() -> u64 {
+                let val: u64;
+                // SAFETY: The caller must ensure that the register is safely readable.
+                unsafe {
+                    asm!(concat!("mrs {}, ", stringify!($name)), out(reg) val, options(nostack, preserves_flags));
+                }
+                val
+            }
+
+            #[doc = concat!("Write the `", stringify!($name), "` system register.")]
+            ///
+            /// # Safety
+            ///
+            /// This function allows fundamental changes to the CPU state. To avoid Undefined
+            /// Behavior, the caller must guarantee:
+            ///
+            /// * The register is writable at the current Exception Level.
+            /// * The write must not invalidate the stack, the heap, or any active Rust references
+            ///   (e.g., by disabling the MMU).
+            /// * This function emits a raw `MSR`. The caller is responsible for issuing context
+            ///   synchronization (e.g., `ISB`) or memory barriers (`DSB`) if required.
+            #[allow(unused)]
+            pub unsafe fn write(val: u64) {
+                // SAFETY: The caller must ensure that the register is safely writable.
+                unsafe {
+                    asm!(concat!("msr ", stringify!($name), ", {}"), in(reg) val, options(nostack, preserves_flags));
+                }
+            }
+        }
+    };
+    ($name:ident) => {
+        sys_reg!($name, {});
+    };
+}
+
+sys_reg!(sctlr_el2, {
+    M: 1 << 0,
+    C: 1 << 2,
+    I: 1 << 12
+});
+sys_reg!(clidr_el1);
+sys_reg!(csselr_el1);
+sys_reg!(ccsidr_el1);
+sys_reg!(hcr_el2);
+sys_reg!(cntvoff_el2);
+sys_reg!(cnthctl_el2);
+sys_reg!(spsr_el2);
+sys_reg!(elr_el2);
+sys_reg!(sp_el1);
+
+pub(super) fn disable_mmu_and_caches() {
+    invalidate_dcache();
+
+    // Disable the MMU and caches.
+    let mut sctlr: u64;
+    // SAFETY: We are reading a non-destructive register at our current Exception Level.
+    unsafe {
+        sctlr = sctlr_el2::read();
+    }
+    sctlr &= !sctlr_el2::M; // MMU enable
+    sctlr &= !sctlr_el2::C; // Data cache enable
+    sctlr &= !sctlr_el2::I; // Instruction cache enable
+    // SAFETY: We assume we have identity-mapped pagetables for the currently running
+    // code, so disabling the MMU is safe.
+    unsafe {
+        sctlr_el2::write(sctlr);
+    }
+    dsb();
+    isb();
+
+    // Invalidate the I-cache and TLB.
+    ic_iallu();
+    tlbi_alle2is();
+
+    // Final synchronization.
+    dsb();
+    isb();
+}
+
+/// Invalidate the D-cache by set/way to the point of coherency.
+pub fn invalidate_dcache() {
+    dmb();
+
+    // Cache Level ID Register.
+    let clidr: u64;
+    // SAFETY: We are reading a non-destructive register from a higher Exception Level.
+    unsafe {
+        clidr = clidr_el1::read();
+    }
+
+    // Level of Coherence (LoC) - bits [26:24].
+    let loc = (clidr >> 24) & 0x7;
+
+    for level in 0..loc {
+        let cache_type = (clidr >> (level * 3)) & 0x7;
+
+        // Cache types: 0 = none, 1 = instruction only, 2 = data only, 3 = split, 4 = unified.
+        // Skip levels with no cache or an instruction-only cache.
+        if cache_type < 2 {
+            continue;
+        }
+
+        // Select the cache level in CSSELR (Cache Size Selection Register).
+        // SAFETY: Writing to `csselr_el1` is always safe, assuming the cache level exists.
+        unsafe {
+            csselr_el1::write(level << 1);
+        }
+
+        // Barrier to ensure the CSSELR write completes before reading CCSIDR.
+        isb();
+
+        // Cache Size ID Register (CCSIDR).
+        let ccsidr: u64;
+        // SAFETY: We are reading a non-destructive register from a higher Exception Level.
+        unsafe {
+            ccsidr = ccsidr_el1::read();
+        }
+
+        let line_power = (ccsidr & 0x7) + 4;
+        let ways = (ccsidr >> 3) & 0x3FF;
+        let sets = (ccsidr >> 13) & 0x7FFF;
+
+        let way_shift = (ways as u32).leading_zeros();
+
+        for set in 0..=sets {
+            for way in 0..=ways {
+                let dc_val = (way << way_shift) | (set << line_power) | (level << 1);
+
+                // SAFETY: `dc cisw` is always safe, assuming the cache line exists.
+                unsafe {
+                    asm!("dc cisw, {0}", in(reg) dc_val);
+                }
+            }
+        }
+    }
+
+    dsb();
+    isb();
+}
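
For reference, a minimal usage sketch of a `sys_reg!`-generated module, assuming execution at EL2; the helper function and the `HCR_RW` constant below are illustrative and not defined in this file:

// Sketch: read-modify-write of HCR_EL2 followed by an ISB, using the module
// generated by `sys_reg!(hcr_el2)` above.
fn example_set_el1_aarch64() {
    const HCR_RW: u64 = 1 << 31; // HCR_EL2.RW: lower Exception Levels run AArch64.
    // SAFETY: HCR_EL2 is readable and writable at EL2, and setting RW does not
    // invalidate any memory that the currently running code depends on.
    unsafe {
        hcr_el2::write(hcr_el2::read() | HCR_RW);
    }
    isb();
}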

src/main.rs

Lines changed: 43 additions & 2 deletions

@@ -11,6 +11,7 @@
 
 extern crate alloc;
 
+mod arch;
 mod console;
 mod exceptions;
 mod logger;
@@ -20,11 +21,15 @@ mod platform;
 use aarch64_paging::paging::PAGE_SIZE;
 use aarch64_rt::entry;
 use buddy_system_allocator::{Heap, LockedHeap};
+use core::arch::naked_asm;
 use core::ops::DerefMut;
 use log::{LevelFilter, info};
 use spin::mutex::{SpinMutex, SpinMutexGuard};
 
-use crate::platform::{Platform, PlatformImpl};
+use crate::{
+    arch::disable_mmu_and_caches,
+    platform::{Platform, PlatformImpl},
+};
 
 const LOG_LEVEL: LevelFilter = LevelFilter::Info;
 
@@ -34,6 +39,13 @@ static HEAP: SpinMutex<[u8; HEAP_SIZE]> = SpinMutex::new([0; HEAP_SIZE]);
 #[global_allocator]
 static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::new();
 
+#[repr(align(0x200000))] // Linux requires 2MB alignment
+struct AlignImage<T>(T);
+
+static NEXT_IMAGE: AlignImage<[u8; 18_815_488]> = AlignImage(*include_bytes!(
+    "/usr/local/google/home/mmac/code/linux/arch/arm64/boot/Image"
+));
+
 entry!(main);
 fn main(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
     // SAFETY: We only call `PlatformImpl::create` here, once on boot.
@@ -52,7 +64,10 @@ fn main(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
         SpinMutexGuard::leak(HEAP.try_lock().expect("failed to lock heap")).as_mut_slice(),
     );
 
-    todo!();
+    // SAFETY: We assume that the payload at `NEXT_IMAGE` is a valid executable piece of code.
+    unsafe {
+        run_payload_el2(x0, x1, x2, x3);
+    }
 }
 
 /// Adds the given memory range to the given heap.
@@ -63,3 +78,29 @@ fn add_to_heap<const ORDER: usize>(heap: &mut Heap<ORDER>, range: &'static mut [
         heap.init(range.as_mut_ptr() as usize, range.len());
     }
 }
+
+/// Run the payload at EL2.
+///
+/// # Safety
+///
+/// `NEXT_IMAGE` must point to a valid executable piece of code which never returns.
+unsafe fn run_payload_el2(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
+    disable_mmu_and_caches();
+    // SAFETY: The caller guarantees that `NEXT_IMAGE` points to a valid executable piece of code which never returns.
+    unsafe {
+        jump_to_payload(x0, x1, x2, x3);
+    }
+}
+
+/// Jumps to the payload.
+///
+/// # Safety
+///
+/// `NEXT_IMAGE` must point to a valid executable piece of code which never returns.
+#[unsafe(naked)]
+unsafe extern "C" fn jump_to_payload(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
    naked_asm!(
        "b {next_image}",
        next_image = sym NEXT_IMAGE,
    );
+}
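
For context, forwarding `main`'s x0..x3 unchanged preserves the register state the arm64 Linux boot protocol expects (x0 holds the physical address of the device tree blob, x1..x3 are zero), and the kernel must also be entered with the MMU off, which is why `disable_mmu_and_caches` runs first. A sketch of an explicit hand-off; `boot_linux` and `dtb_addr` are illustrative names, not part of this diff:

/// Sketch: hand control to the embedded kernel with an explicit DTB pointer.
///
/// # Safety
///
/// Same contract as `run_payload_el2`, and `dtb_addr` must be the physical
/// address of a valid device tree blob.
unsafe fn boot_linux(dtb_addr: u64) -> ! {
    // SAFETY: Upheld by the caller; x1..x3 are zero as the boot protocol requires.
    unsafe { run_payload_el2(dtb_addr, 0, 0, 0) }
}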
