Skip to content

Commit b370dba

Browse files
committed
move MMU disable code to assembly
1 parent 728cd2b commit b370dba

File tree

3 files changed

+45
-36
lines changed

3 files changed

+45
-36
lines changed

src/arch.rs

Lines changed: 31 additions & 16 deletions
Original file line number · Diff line number · Diff line change
@@ -8,7 +8,7 @@
88

99
//! Architecture-specific code.
1010
11-
use core::arch::asm;
11+
use core::arch::{asm, naked_asm};
1212

1313
/// Data Synchronization Barrier.
1414
pub fn dsb() {
@@ -35,6 +35,7 @@ pub fn isb() {
3535
}
3636

3737
/// Invalidate all instruction caches.
38+
#[allow(dead_code)]
3839
pub fn ic_iallu() {
3940
// SAFETY: `ic iallu` is always safe.
4041
unsafe {
@@ -43,6 +44,7 @@ pub fn ic_iallu() {
4344
}
4445

4546
/// Invalidate all TLB entries for EL2.
47+
#[allow(dead_code)]
4648
pub fn tlbi_alle2is() {
4749
// SAFETY: `tlbi alle2is` is always safe.
4850
unsafe {
@@ -153,7 +155,33 @@ sys_reg!(elr_el2);
153155
sys_reg!(sp_el1);
154156
sys_reg!(mpidr_el1);
155157

156-
pub(super) fn disable_mmu_and_caches() {
158+
/// Disable MMU and caches.
159+
///
160+
/// # Safety
161+
///
162+
/// It is not sound to execute arbitrary Rust code after disabling the data cache, so this
163+
/// should only ever be called from assembly.
164+
/// This disables the MMU, so the caller must ensure that the code remains executable at the
165+
/// same address.
166+
#[unsafe(naked)]
167+
pub unsafe extern "C" fn disable_mmu_and_caches() {
168+
naked_asm!(
169+
"mov x28, x30",
170+
"bl {invalidate_caches}",
171+
"mov x30, x28",
172+
"msr sctlr_el2, x0",
173+
"dsb sy",
174+
"isb",
175+
"ic iallu",
176+
"tlbi alle2is",
177+
"dsb sy",
178+
"isb",
179+
"ret",
180+
invalidate_caches = sym invalidate_caches,
181+
);
182+
}
183+
184+
unsafe extern "C" fn invalidate_caches() -> u64 {
157185
invalidate_dcache();
158186

159187
// Disable MMU and caches
@@ -165,21 +193,8 @@ pub(super) fn disable_mmu_and_caches() {
165193
sctlr &= !sctlr_el2::M; // MMU Enable
166194
sctlr &= !sctlr_el2::C; // Data Cache Enable
167195
sctlr &= !sctlr_el2::I; // Instruction Cache Enable
168-
// SAFETY: We assume we have an identity mapped pagetables for the currently running
169-
// code, so disabling MMU is safe.
170-
unsafe {
171-
sctlr_el2::write(sctlr);
172-
}
173-
dsb();
174-
isb();
175-
176-
// Invalidate I-cache
177-
ic_iallu();
178-
tlbi_alle2is();
179196

180-
// Final synchronization
181-
dsb();
182-
isb();
197+
sctlr
183198
}
184199

185200
/// Invalidate D-cache by set/way to the point of coherency.

src/hypervisor.rs

Lines changed: 9 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -9,7 +9,7 @@
99
use core::arch::naked_asm;
1010

1111
use aarch64_rt::{RegisterStateRef, Stack};
12-
use alloc::{boxed::Box, collections::btree_map::BTreeMap};
12+
use alloc::boxed::Box;
1313
use log::debug;
1414
use spin::mutex::SpinMutex;
1515

@@ -26,8 +26,6 @@ use crate::{arch::{self, esr, far}, platform::{Platform, PlatformImpl}, simple_m
2626
/// address for EL1 execution that never returns.
2727
/// This function must be called in EL2.
2828
pub unsafe fn entry_point_el1(arg0: u64, arg1: u64, arg2: u64, arg3: u64, entry_point: u64) -> ! {
29-
arch::disable_mmu_and_caches();
30-
3129
// Setup EL1
3230
// SAFETY: We are configuring HCR_EL2 to allow EL1 execution.
3331
unsafe {
@@ -98,7 +96,13 @@ pub unsafe fn entry_point_el1(arg0: u64, arg1: u64, arg2: u64, arg3: u64, entry_
9896
/// This function must be called in EL2.
9997
#[unsafe(naked)]
10098
pub unsafe extern "C" fn eret_to_el1(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
101-
naked_asm!("eret");
99+
naked_asm!(
100+
"mov x19, x0",
101+
"bl {disable_mmu_and_caches}",
102+
"mov x0, x19",
103+
"eret",
104+
disable_mmu_and_caches = sym arch::disable_mmu_and_caches,
105+
);
102106
}
103107

104108
pub fn handle_sync_lower(mut register_state: RegisterStateRef) {
@@ -349,6 +353,6 @@ fn get_secondary_stack(mpidr: u64) -> *mut Stack<SECONDARY_STACK_PAGE_COUNT> {
349353
if let Some(stack) = stack_map.get_mut(&mpidr) {
350354
&raw mut **stack
351355
} else {
352-
&raw mut **stack_map.insert(mpidr, Default::default())
356+
&raw mut **stack_map.insert(mpidr, Box::default())
353357
}
354358
}

src/main.rs

Lines changed: 5 additions & 15 deletions
Original file line number · Diff line number · Diff line change
@@ -30,7 +30,6 @@ use log::{LevelFilter, info};
3030
use spin::mutex::{SpinMutex, SpinMutexGuard};
3131

3232
use crate::{
33-
arch::disable_mmu_and_caches,
3433
exceptions::Exceptions,
3534
platform::{BootMode, Platform, PlatformImpl},
3635
};
@@ -108,23 +107,14 @@ fn add_to_heap<const ORDER: usize>(heap: &mut Heap<ORDER>, range: &'static mut [
108107
/// # Safety
109108
///
110109
/// `NEXT_IMAGE` must point to a valid executable piece of code which never returns.
111-
unsafe fn run_payload_el2(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
112-
disable_mmu_and_caches();
113-
// SAFETY: The caller guarantees that `NEXT_IMAGE` points to a valid executable piece of code which never returns.
114-
unsafe {
115-
jump_to_payload(x0, x1, x2, x3);
116-
}
117-
}
118-
119-
/// Jumps to the payload.
120-
///
121-
/// # Safety
122-
///
123-
/// `NEXT_IMAGE` must point to a valid executable piece of code which never returns.
124110
#[unsafe(naked)]
125-
unsafe extern "C" fn jump_to_payload(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
111+
unsafe extern "C" fn run_payload_el2(x0: u64, x1: u64, x2: u64, x3: u64) -> ! {
126112
naked_asm!(
113+
"mov x19, x0",
114+
"bl {disable_mmu_and_caches}",
115+
"mov x0, x19",
127116
"b {next_image}",
117+
disable_mmu_and_caches = sym arch::disable_mmu_and_caches,
128118
next_image = sym NEXT_IMAGE,
129119
);
130120
}

0 commit comments

Comments (0)