Commit 63fbc02
[feat] separate return_run_guest, introduce dispatch_irq (#19)
* [feat] separate return_run_guest and dispatch_irq, introduce AxVCpuHal trait
* [refactor] delete unused fields in Aarch64PerCpu
* [fix] add doc for irq_handler to fix cargo doc error
* [refactor] remove crate_interface
1 parent 9a6c258 commit 63fbc02

File tree: 8 files changed, +239 -209 lines changed


Cargo.toml

Lines changed: 0 additions & 2 deletions
@@ -6,9 +6,7 @@ edition = "2021"
 [dependencies]
 log = "0.4.21"
 
-cortex-a = "8.1.1"
 aarch64-cpu = "9.3"
-smccc = "0.1.1"
 tock-registers = "0.8"
 numeric-enum-macro = "0.2"
 
src/context_frame.rs

Lines changed: 18 additions & 54 deletions
@@ -5,21 +5,22 @@ use aarch64_cpu::registers::*;
 
 /// A struct representing the AArch64 CPU context frame.
 ///
-/// This context frame includes the general-purpose registers (GPRs),
-/// the stack pointer (SP), the exception link register (ELR), and
-/// the saved program status register (SPSR).
+/// This context frame includes
+/// * the general-purpose registers (GPRs),
+/// * the stack pointer associated with EL0 (SP_EL0),
+/// * the exception link register (ELR),
+/// * the saved program status register (SPSR).
 ///
 /// The `#[repr(C)]` attribute ensures that the struct has a C-compatible
 /// memory layout, which is important when interfacing with hardware or
 /// other low-level components.
 #[repr(C)]
-#[derive(Copy, Clone, Debug, Default)]
+#[derive(Copy, Clone, Debug)]
 pub struct Aarch64ContextFrame {
     /// An array of 31 `u64` values representing the general-purpose registers.
     pub gpr: [u64; 31],
-    /// The stack pointer.
-    /// The value currently stored in this field is meaningless (the base address of Aarch64ContextFrame).
-    pub sp: u64,
+    /// The stack pointer associated with EL0 (SP_EL0).
+    pub sp_el0: u64,
     /// The exception link register, which stores the return address after an exception.
     pub elr: u64,
     /// The saved program status register, which holds the state of the program at the time of an exception.
@@ -37,16 +38,16 @@ impl core::fmt::Display for Aarch64ContextFrame {
         }
         writeln!(f, "spsr:{:016x}", self.spsr)?;
         write!(f, "elr: {:016x}", self.elr)?;
-        writeln!(f, " sp: {:016x}", self.sp)?;
+        writeln!(f, " sp_el0: {:016x}", self.sp_el0)?;
         Ok(())
     }
 }
 
-impl Aarch64ContextFrame {
+impl Default for Aarch64ContextFrame {
     /// Returns the default context frame.
     ///
     /// The default state sets the SPSR to mask all exceptions and sets the mode to EL1h.
-    pub fn default() -> Aarch64ContextFrame {
+    fn default() -> Self {
         Aarch64ContextFrame {
             gpr: [0; 31],
             spsr: (SPSR_EL1::M::EL1h
@@ -56,34 +57,12 @@ impl Aarch64ContextFrame {
                 + SPSR_EL1::D::Masked)
                 .value,
             elr: 0,
-            sp: 0,
+            sp_el0: 0,
         }
     }
+}
 
-    /// Creates a new context frame with a specific program counter, stack pointer, and argument.
-    ///
-    /// Sets the SPSR to mask all exceptions and sets the mode to EL1h by default.
-    /// # Arguments
-    ///
-    /// * `pc` - The initial program counter (PC).
-    /// * `sp` - The initial stack pointer (SP).
-    /// * `arg` - The argument to be passed in register x0.
-    pub fn new(pc: usize, sp: usize, arg: usize) -> Self {
-        let mut r = Aarch64ContextFrame {
-            gpr: [0; 31],
-            spsr: (SPSR_EL1::M::EL1h
-                + SPSR_EL1::I::Masked
-                + SPSR_EL1::F::Masked
-                + SPSR_EL1::A::Masked
-                + SPSR_EL1::D::Masked)
-                .value,
-            elr: pc as u64,
-            sp: sp as u64,
-        };
-        r.set_argument(arg);
-        r
-    }
-
+impl Aarch64ContextFrame {
     /// Returns the exception program counter (ELR).
     pub fn exception_pc(&self) -> usize {
         self.elr as usize
@@ -98,23 +77,6 @@ impl Aarch64ContextFrame {
         self.elr = pc as u64;
     }
 
-    /// Returns the stack pointer (SP).
-    /// Note: currently the returned value is meaningless.
-    pub fn stack_pointer(&self) -> usize {
-        self.sp as usize
-    }
-
-    /// Sets the stack pointer (SP).
-    ///
-    /// Note: currently useless.
-    ///
-    /// # Arguments
-    ///
-    /// * `sp` - The new stack pointer value.
-    pub fn set_stack_pointer(&mut self, sp: usize) {
-        self.sp = sp as u64;
-    }
-
     /// Sets the argument in register x0.
     ///
     /// # Arguments
@@ -200,7 +162,7 @@ pub struct GuestSystemRegisters {
     pub vmpidr_el2: u64,
 
     // 64bit EL1/EL0 register
-    sp_el0: u64,
+    pub sp_el0: u64,
     sp_el1: u64,
     elr_el1: u64,
     spsr_el1: u32,
@@ -300,7 +262,9 @@ impl GuestSystemRegisters {
         asm!("msr CNTV_CVAL_EL0, {0}", in(reg) self.cntv_cval_el0);
         asm!("msr CNTKCTL_EL1, {0:x}", in (reg) self.cntkctl_el1);
         asm!("msr CNTV_CTL_EL0, {0:x}", in (reg) self.cntv_ctl_el0);
-        asm!("msr SP_EL0, {0}", in(reg) self.sp_el0);
+        // The restoration of SP_EL0 is done in `exception_return_el2`,
+        // which moves the value from `self.ctx.sp_el0` into `SP_EL0`.
+        // asm!("msr SP_EL0, {0}", in(reg) self.sp_el0);
         asm!("msr SP_EL1, {0}", in(reg) self.sp_el1);
         asm!("msr ELR_EL1, {0}", in(reg) self.elr_el1);
         asm!("msr SPSR_EL1, {0:x}", in(reg) self.spsr_el1);

src/entry.S

Lines changed: 0 additions & 32 deletions
This file was deleted.

src/exception.S

Lines changed: 17 additions & 11 deletions
@@ -1,6 +1,8 @@
 .macro SAVE_REGS_FROM_EL1
+    # Currently `sp` points to the address of `Aarch64VCpu.host_stack_top`.
+    sub sp, sp, 34 * 8
     # Currently `sp` points to the base address of `Aarch64VCpu.ctx`, which stores the guest's `TrapFrame`.
-
+
     # Save general purpose registers into `Aarch64VCpu.ctx`
     stp x0, x1, [sp]
     stp x2, x3, [sp, 2 * 8]
@@ -18,9 +20,7 @@
     stp x26, x27, [sp, 26 * 8]
     stp x28, x29, [sp, 28 * 8]
 
-    # The value currently stored in `sp` is unused (the base address of Aarch64ContextFrame).
-    mov x9, sp
-    add x9, x9, #(0x110)
+    mrs x9, sp_el0
     stp x30, x9, [sp, 30 * 8]
 
     # Save `elr_el2` and `spsr_el2` into `Aarch64VCpu.ctx`
@@ -29,17 +29,13 @@
     stp x10, x11, [sp, 32 * 8]
 .endm
 
-# Same as `context_vm_entry` in `src/entry.S`
 .macro RESTORE_REGS_INTO_EL1
     ldp x10, x11, [sp, 32 * 8]
+    ldp x30, x9, [sp, 30 * 8]
+    msr sp_el0, x9
     msr elr_el2, x10
     msr spsr_el2, x11
 
-    ldr x10, [sp, 31 * 8]
-    mrs x11, sp_el0
-    msr sp_el0, x10
-    str x11, [sp, 31 * 8]
-
     ldr x30, [sp, 30 * 8]
     ldp x28, x29, [sp, 28 * 8]
     ldp x26, x27, [sp, 26 * 8]
@@ -56,7 +52,9 @@
     ldp x4, x5, [sp, 4 * 8]
     ldp x2, x3, [sp, 2 * 8]
     ldp x0, x1, [sp]
-    # Keep `sp` as the base address of `Aarch64VCpu.ctx`
+    # Currently `sp` points to the base address of `Aarch64VCpu.ctx`.
+    add sp, sp, 34 * 8
+    # Currently `sp` points to the address of `Aarch64VCpu.host_stack_top`.
 .endm
 
 
@@ -75,13 +73,15 @@
     SAVE_REGS_FROM_EL1
     mov x0, {exception_irq}
     bl vmexit_trampoline
+    # `b .Lexception_return_el2` is performed by `vmexit_trampoline`
 .endm
 
 .macro HANDLE_LOWER_SYNC_VCPU
     .p2align 7
     SAVE_REGS_FROM_EL1
     mov x0, {exception_sync}
     bl vmexit_trampoline
+    # `b .Lexception_return_el2` is performed by `vmexit_trampoline`
 .endm
 
 
@@ -113,6 +113,12 @@ exception_vector_base_vcpu:
     INVALID_EXCP_EL2 2 3
     INVALID_EXCP_EL2 3 3
 
+.global context_vm_entry
+context_vm_entry:
+    # Currently `x0` points to the address of `Aarch64VCpu.host_stack_top`.
+    mov sp, x0
+    sub sp, sp, 34 * 8
+    # Currently `sp` points to the base address of `Aarch64VCpu.ctx`, which stores the guest's `TrapFrame`.
 .Lexception_return_el2:
     RESTORE_REGS_INTO_EL1
     eret
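The recurring `34 * 8` offset is the size of `Aarch64ContextFrame`: x0 through x30 occupy slots 0 to 30, SP_EL0 slot 31, ELR slot 32, and SPSR slot 33. The self-contained sketch below only illustrates that layout assumption with a mirror struct (not the crate's own type); the `spsr: u64` field width is inferred from the 34-slot frame.

// Layout sketch: a mirror of the frame the assembly carves out on `sp`.
#[repr(C)]
struct Frame {
    gpr: [u64; 31], // x0..x30 -> slots 0..30
    sp_el0: u64,    // slot 31, saved via `mrs x9, sp_el0`
    elr: u64,       // slot 32 (ELR_EL2)
    spsr: u64,      // slot 33 (SPSR_EL2)
}

// `sub sp, sp, 34 * 8` in SAVE_REGS_FROM_EL1 reserves exactly this much space.
const _: () = assert!(core::mem::size_of::<Frame>() == 34 * 8);

fn main() {
    println!("frame size = {} bytes", core::mem::size_of::<Frame>());
}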

src/exception.rs

Lines changed: 52 additions & 35 deletions
@@ -45,35 +45,6 @@ core::arch::global_asm!(
     exception_irq = const EXCEPTION_IRQ,
 );
 
-/// Handles IRQ (Interrupt Request) exceptions that occur during the execution of a guest VM.
-///
-/// This function is responsible for processing external interrupts.
-///
-/// # Arguments
-///
-/// * `_ctx` - A mutable reference to the `TrapFrame`, which contains the saved state of the
-///   guest VM's CPU registers at the time of the exception. This is currently unused
-///   but included for future expansion.
-///
-/// # Returns
-///
-/// An `AxResult` containing an `AxVCpuExitReason` with the reason for the VM exit.
-///
-/// # TODO
-///
-/// - Implement proper handling of both current and lower EL IRQs.
-/// - Replace the temporary vector `33` with the actual interrupt vector once the
-///   full implementation is complete.
-///
-/// # Notes
-///
-/// This function is a placeholder and should be expanded to fully support IRQ handling
-/// in future iterations.
-///
-pub fn handle_exception_irq(_ctx: &mut TrapFrame) -> AxResult<AxVCpuExitReason> {
-    Ok(AxVCpuExitReason::ExternalInterrupt { vector: 33 })
-}
-
 /// Handles synchronous exceptions that occur during the execution of a guest VM.
 ///
 /// This function examines the exception class (EC) to determine the cause of the exception
@@ -235,7 +206,53 @@ fn handle_psci_call(ctx: &mut TrapFrame) -> Option<AxResult<AxVCpuExitReason>> {
     })
 }
 
-/// A trampoline function for sp switching during handling VM exits.
+/// Dispatches IRQs to the appropriate handler provided by the underlying host OS,
+/// which is registered at [`crate::pcpu::IRQ_HANDLER`] during `Aarch64PerCpu::new()`.
+fn dispatch_irq() {
+    unsafe { crate::pcpu::IRQ_HANDLER.current_ref_raw() }
+        .get()
+        .unwrap()()
+}
+
+/// A trampoline function for handling exceptions (VM exits) in EL2.
+///
+/// Functionality:
+///
+/// 1. **Check whether a VCPU is running:**
+///    - The `vcpu_running` function is called to check whether a VCPU is currently running.
+///      If it is, control is transferred to the `return_run_guest` function.
+///
+/// 2. **Dispatch the IRQ:**
+///    - If there is no active VCPU running, the `dispatch_irq` function is called to handle
+///      the IRQ, forwarding it to the underlying host OS.
+///    - When the IRQ handling routine returns, execution falls through to `exception_return_el2` here.
+///
+/// Note that `return_run_guest` never returns.
#[naked]
#[no_mangle]
+unsafe extern "C" fn vmexit_trampoline() {
+    core::arch::asm!(
+        "bl {vcpu_running}", // Check whether a VCPU is running.
+        // If vcpu_running returns true, jump to `return_run_guest`,
+        // after which control is handed back to Aarch64VCpu.run(),
+        // simulating the normal return of the `run_guest` function.
+        "cbnz x0, {return_run_guest}",
+        // If vcpu_running returns false, there is no active VCPU running;
+        // jump to `dispatch_irq`.
+        "bl {dispatch_irq}",
+        // Return from the exception.
+        "b .Lexception_return_el2",
+        vcpu_running = sym crate::vcpu::vcpu_running,
+        return_run_guest = sym return_run_guest,
+        dispatch_irq = sym dispatch_irq,
+        options(noreturn),
+    )
+}
+
+/// A trampoline function for sp switching while handling VM exits,
+/// used when **there is an active VCPU running**, which means the host context has been
+/// stored onto the host stack by the `run_guest` function.
+///
 /// # Functionality
 ///
 /// 1. **Restore the Previous Host Stack Pointer:**
@@ -269,13 +286,13 @@
 /// invoked as part of the low-level hypervisor or VM exit handling routines.
 #[naked]
 #[no_mangle]
-unsafe extern "C" fn vmexit_trampoline() {
-    // Note: Currently `sp` points to `&Aarch64VCpu.ctx`, which is just the base address of the Aarch64VCpu struct.
+unsafe extern "C" fn return_run_guest() -> ! {
     core::arch::asm!(
-        "add sp, sp, 34 * 8", // Skip the exception frame.
-        "mov x9, sp", // Currently `sp` points to `&Aarch64VCpu.host_stack_top`, see `run_guest()` in vcpu.rs.
+        // Currently `sp` points to the base address of `Aarch64VCpu.ctx`, which stores the guest's `TrapFrame`.
+        "add x9, sp, 34 * 8", // Skip the exception frame.
+        // Currently `x9` points to `&Aarch64VCpu.host_stack_top`, see `run_guest()` in vcpu.rs.
         "ldr x10, [x9]", // Get `host_stack_top` value from `&Aarch64VCpu.host_stack_top`.
-        "mov sp, x10", // Set `sp` as the host stack top.
+        "mov sp, x10",   // Set `sp` as the host stack top.
         restore_regs_from_stack!(), // Restore host function context frame.
         "ret", // Control flow is handed back to Aarch64VCpu.run(), simulating the normal return of the `run_guest` function.
         options(noreturn),
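For orientation, the register-then-dispatch flow around `IRQ_HANDLER` can be pictured with the simplified, single-core stand-in below. It is not the crate's API: the real handler cell is a per-CPU static accessed through `current_ref_raw()` and filled in `Aarch64PerCpu::new()`; `OnceLock` and the function names here are assumptions made purely for illustration.

// Simplified stand-in for the per-CPU IRQ handler cell (illustrative only).
use std::sync::OnceLock;

static IRQ_HANDLER: OnceLock<fn()> = OnceLock::new();

/// What the host OS would do once per CPU (cf. `Aarch64PerCpu::new()`).
fn register_irq_handler(handler: fn()) {
    IRQ_HANDLER.set(handler).expect("handler already registered");
}

/// What `dispatch_irq()` boils down to: forward the EL2 IRQ to the host handler.
fn dispatch_irq() {
    (IRQ_HANDLER.get().expect("no IRQ handler registered"))();
}

fn main() {
    register_irq_handler(|| println!("host OS handles the IRQ"));
    dispatch_irq();
}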

src/exception_utils.rs

Lines changed: 1 addition & 1 deletion
@@ -272,7 +272,7 @@ macro_rules! save_regs_to_stack {
 ///
 /// ## Note
 ///
-/// This macro is called in `vmexit_trampoline()` in exception.rs,
+/// This macro is called in `return_run_guest()` in exception.rs,
 /// it should only be used after `save_regs_to_stack!` to correctly restore the control flow of `Aarch64VCpu.run()`.
 macro_rules! restore_regs_from_stack {
     () => {
