59 changes: 46 additions & 13 deletions src/exception.rs
@@ -71,7 +71,12 @@ core::arch::global_asm!(
///
pub fn handle_exception_sync(ctx: &mut TrapFrame) -> AxResult<AxVCpuExitReason> {
match exception_class() {
Some(ESR_EL2::EC::Value::DataAbortLowerEL) => handle_data_abort(ctx),
Some(ESR_EL2::EC::Value::DataAbortLowerEL) => {
let elr = ctx.exception_pc();
let val = elr + exception_next_instruction_step();
ctx.set_exception_pc(val);
handle_data_abort(ctx)
}
Some(ESR_EL2::EC::Value::HVC64) => {
// The `#imm` argument used when triggering an hvc call; currently not used.
let _hvc_arg_imm16 = ESR_EL2.read(ESR_EL2::ISS);
@@ -96,6 +101,12 @@ pub fn handle_exception_sync(ctx: &mut TrapFrame) -> AxResult<AxVCpuExitReason>
})
}
Some(ESR_EL2::EC::Value::TrappedMsrMrs) => handle_system_register(ctx),
Some(ESR_EL2::EC::Value::SMC64) => {
let elr = ctx.exception_pc();
let val = elr + exception_next_instruction_step();
ctx.set_exception_pc(val);
handle_smc64_exception(ctx)
}
_ => {
panic!(
"handler not presents for EC_{} @ipa 0x{:x}, @pc 0x{:x}, @esr 0x{:x},
@@ -116,17 +127,18 @@ pub fn handle_exception_sync(ctx: &mut TrapFrame) -> AxResult<AxVCpuExitReason>

fn handle_data_abort(context_frame: &mut TrapFrame) -> AxResult<AxVCpuExitReason> {
let addr = exception_fault_addr()?;
debug!("data fault addr {:?}, esr: 0x{:x}", addr, exception_esr());

let access_width = exception_data_abort_access_width();
let is_write = exception_data_abort_access_is_write();
//let sign_ext = exception_data_abort_access_is_sign_ext();
let reg = exception_data_abort_access_reg();
let reg_width = exception_data_abort_access_reg_width();

let elr = context_frame.exception_pc();
let val = elr + exception_next_instruction_step();
context_frame.set_exception_pc(val);
trace!(
"Data fault @{:?}, ELR {:#x}, esr: 0x{:x}",
addr,
context_frame.exception_pc(),
exception_esr(),
);

let width = match AccessWidth::try_from(access_width) {
Ok(access_width) => access_width,
@@ -209,12 +221,14 @@ fn handle_psci_call(ctx: &mut TrapFrame) -> Option<AxResult<AxVCpuExitReason>> {
const PSCI_FN_RANGE_32: core::ops::RangeInclusive<u64> = 0x8400_0000..=0x8400_001F;
const PSCI_FN_RANGE_64: core::ops::RangeInclusive<u64> = 0xC400_0000..=0xC400_001F;

const PSCI_FN_VERSION: u64 = 0x0;
const _PSCI_FN_CPU_SUSPEND: u64 = 0x1;
const PSCI_FN_CPU_OFF: u64 = 0x2;
const PSCI_FN_CPU_ON: u64 = 0x3;
const _PSCI_FN_MIGRATE: u64 = 0x5;
const PSCI_FN_SYSTEM_OFF: u64 = 0x8;
const _PSCI_FN_SYSTEM_RESET: u64 = 0x9;
const PSCI_FN_END: u64 = 0x1f;

let fn_ = ctx.gpr[0];
let fn_offset = if PSCI_FN_RANGE_32.contains(&fn_) {
@@ -225,16 +239,35 @@ fn handle_psci_call(ctx: &mut TrapFrame) -> Option<AxResult<AxVCpuExitReason>> {
None
};

fn_offset.map(|fn_offset| match fn_offset {
PSCI_FN_CPU_OFF => Ok(AxVCpuExitReason::CpuDown { _state: ctx.gpr[1] }),
PSCI_FN_CPU_ON => Ok(AxVCpuExitReason::CpuUp {
match fn_offset {
Some(PSCI_FN_CPU_OFF) => Some(Ok(AxVCpuExitReason::CpuDown { _state: ctx.gpr[1] })),
Some(PSCI_FN_CPU_ON) => Some(Ok(AxVCpuExitReason::CpuUp {
target_cpu: ctx.gpr[1],
entry_point: GuestPhysAddr::from(ctx.gpr[2] as usize),
arg: ctx.gpr[3],
}),
PSCI_FN_SYSTEM_OFF => Ok(AxVCpuExitReason::SystemDown),
_ => Err(AxError::Unsupported),
})
})),
Some(PSCI_FN_SYSTEM_OFF) => Some(Ok(AxVCpuExitReason::SystemDown)),
// We just forward these requests to the ATF directly.
Some(PSCI_FN_VERSION..PSCI_FN_END) => None,
_ => None,
}
}

/// Handles SMC (Secure Monitor Call) exceptions.
///
/// This function checks whether the SMC call is a PSCI call and, if so, handles it as one.
/// Otherwise, it forwards the SMC call to the ATF directly.
fn handle_smc64_exception(ctx: &mut TrapFrame) -> AxResult<AxVCpuExitReason> {
// Is this a PSCI call?
if let Some(result) = handle_psci_call(ctx) {
return result;
} else {
// Forward the SMC call to the ATF directly.
// The arguments come from a lower EL, so passing them on to the ATF is safe.
(ctx.gpr[0], ctx.gpr[1], ctx.gpr[2], ctx.gpr[3]) =
unsafe { crate::smc::smc_call(ctx.gpr[0], ctx.gpr[1], ctx.gpr[2], ctx.gpr[3]) };
Ok(AxVCpuExitReason::Nothing)
}
}

/// Dispatches IRQs to the appropriate handler provided by the underlying host OS,
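For context, a minimal sketch of how a hosting VMM might consume the exit reasons produced by the handlers above. This is not part of the PR: `vcpu.run()`, `power_on_cpu`, `park_current_vcpu`, and `shutdown` are hypothetical placeholders; only the variant names come from the code in this diff.

    loop {
        match vcpu.run()? {
            // Produced by the PSCI CPU_ON path in handle_psci_call.
            AxVCpuExitReason::CpuUp { target_cpu, entry_point, arg } => {
                power_on_cpu(target_cpu, entry_point, arg)?;
            }
            // Produced by PSCI CPU_OFF.
            AxVCpuExitReason::CpuDown { _state } => park_current_vcpu(),
            // Produced by PSCI SYSTEM_OFF.
            AxVCpuExitReason::SystemDown => shutdown(),
            // SMCs that were forwarded to the ATF in place come back as Nothing.
            AxVCpuExitReason::Nothing => continue,
            _ => { /* MMIO, system-register traps, ... */ }
        }
    }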
2 changes: 2 additions & 0 deletions src/lib.rs
@@ -2,6 +2,7 @@
#![feature(naked_functions)]
#![feature(doc_cfg)]
#![feature(asm_const)]
#![feature(exclusive_range_pattern)]
#![doc = include_str!("../README.md")]

#[macro_use]
@@ -12,6 +13,7 @@ mod context_frame;
mod exception_utils;
mod exception;
mod pcpu;
mod smc;
mod vcpu;

pub use self::pcpu::Aarch64PerCpu;
24 changes: 24 additions & 0 deletions src/smc.rs
@@ -0,0 +1,24 @@
use core::arch::asm;

#[inline(never)]
/// Invoke a secure monitor call.
///
/// # Safety
///
/// It is unsafe to call this function directly.
/// The caller must ensure that `x0` is a valid SMC function number as defined in the
/// SMC Calling Convention, and that the remaining arguments are valid for that function.
pub unsafe fn smc_call(x0: u64, x1: u64, x2: u64, x3: u64) -> (u64, u64, u64, u64) {
let r0;
let r1;
let r2;
let r3;
asm!(
"smc #0",
inout("x0") x0 => r0,
inout("x1") x1 => r1,
inout("x2") x2 => r2,
inout("x3") x3 => r3,
options(nomem, nostack)
);
(r0, r1, r2, r3)
}
13 changes: 9 additions & 4 deletions src/vcpu.rs
@@ -75,6 +75,8 @@ pub struct Aarch64VCpuCreateConfig {
/// which is used to identify the CPU in a multiprocessor system.
/// Note: mind CPU cluster.
pub mpidr_el1: u64,
/// The address of the device tree blob.
pub dtb_addr: usize,
}

impl<H: AxVCpuHal> axvcpu::AxArchVCpu for Aarch64VCpu<H> {
@@ -83,8 +85,11 @@ impl<H: AxVCpuHal> axvcpu::AxArchVCpu for Aarch64VCpu<H> {
type SetupConfig = ();

fn new(config: Self::CreateConfig) -> AxResult<Self> {
let mut ctx = TrapFrame::default();
ctx.set_argument(config.dtb_addr);

Ok(Self {
ctx: TrapFrame::default(),
ctx,
host_stack_top: 0,
guest_system_regs: GuestSystemRegisters::default(),
mpidr: config.mpidr_el1,
@@ -168,11 +173,11 @@ impl<H: AxVCpuHal> Aarch64VCpu<H> {
+ VTCR_EL2::SL0.val(0b01)
+ VTCR_EL2::T0SZ.val(64 - 39))
.into();
self.guest_system_regs.hcr_el2 = (HCR_EL2::VM::Enable + HCR_EL2::RW::EL1IsAarch64).into();
self.guest_system_regs.hcr_el2 =
(HCR_EL2::VM::Enable + HCR_EL2::RW::EL1IsAarch64 + HCR_EL2::TSC::EnableTrapEl1SmcToEl2)
.into();
// self.system_regs.hcr_el2 |= 1<<27;
// + HCR_EL2::IMO::EnableVirtualIRQ).into();
// trap el1 smc to el2
// self.system_regs.hcr_el2 |= HCR_TSC_TRAP as u64;

// Set VMPIDR_EL2, which provides the value of the Virtualization Multiprocessor ID.
// This is the value returned by Non-secure EL1 reads of MPIDR.
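A small sketch of how a caller might use the new `dtb_addr` field (not part of the PR; `MyHal` and both addresses are made-up placeholders). `set_argument` presumably places the DTB address in the guest's first argument register (`x0`), which is what the AArch64 Linux boot protocol expects.

    let config = Aarch64VCpuCreateConfig {
        mpidr_el1: 0x8000_0000, // assumed MPIDR value for vCPU 0 (bit 31 is RES1)
        dtb_addr: 0x4800_0000,  // assumed guest-physical address of the DTB
    };
    let vcpu = <Aarch64VCpu<MyHal> as axvcpu::AxArchVCpu>::new(config)?;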