diff --git a/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h b/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h
index ba2f609e7..b4b3e63d3 100644
--- a/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h
+++ b/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h
@@ -99,6 +99,7 @@ SYSREG_GEN_ACCESSORS(icc_iar1_el1)
 SYSREG_GEN_ACCESSORS(icc_eoir1_el1)
 SYSREG_GEN_ACCESSORS(icc_dir_el1)
 SYSREG_GEN_ACCESSORS(ich_vtr_el2)
+SYSREG_GEN_ACCESSORS(ich_vmcr_el2)
 SYSREG_GEN_ACCESSORS(icc_sre_el2)
 SYSREG_GEN_ACCESSORS(icc_pmr_el1)
 SYSREG_GEN_ACCESSORS(icc_bpr1_el1)
diff --git a/src/arch/armv8/gic.c b/src/arch/armv8/gic.c
index 3867fd6d8..af74e1729 100644
--- a/src/arch/armv8/gic.c
+++ b/src/arch/armv8/gic.c
@@ -92,6 +92,8 @@ void gic_init()
 void gic_handle()
 {
     uint32_t ack = gicc_iar();
+    cpu()->is_handling_irq = true;
+    cpu()->arch.handling_irq_ack = ack;
     irqid_t id = bit32_extract(ack, GICC_IAR_ID_OFF, GICC_IAR_ID_LEN);

     if (id < GIC_FIRST_SPECIAL_INTID) {
@@ -101,6 +103,7 @@ void gic_handle()
             gicc_dir(ack);
         }
     }
+    cpu()->is_handling_irq = false;
 }

 uint8_t gicd_get_prio(irqid_t int_id)
diff --git a/src/arch/armv8/inc/arch/cpu.h b/src/arch/armv8/inc/arch/cpu.h
index 3c8040ca0..ffc79d4ef 100644
--- a/src/arch/armv8/inc/arch/cpu.h
+++ b/src/arch/armv8/inc/arch/cpu.h
@@ -14,6 +14,7 @@
 struct cpu_arch {
     struct cpu_arch_profile profile;
     unsigned long mpidr;
+    unsigned long handling_irq_ack;
 };

 unsigned long cpu_id_to_mpidr(cpuid_t id);
diff --git a/src/arch/armv8/inc/arch/gicv2.h b/src/arch/armv8/inc/arch/gicv2.h
index a1e6a85d1..d651f5a1c 100644
--- a/src/arch/armv8/inc/arch/gicv2.h
+++ b/src/arch/armv8/inc/arch/gicv2.h
@@ -59,6 +59,16 @@ static inline uint64_t gich_get_elrsr()
     return elsr;
 }

+static inline uint32_t gich_get_vmcr()
+{
+    return gich->VMCR;
+}
+
+static inline void gich_set_vmcr(uint32_t vmcr)
+{
+    gich->VMCR = vmcr;
+}
+
 static inline uint32_t gicc_iar()
 {
     return gicc->IAR;
diff --git a/src/arch/armv8/inc/arch/gicv3.h b/src/arch/armv8/inc/arch/gicv3.h
index 25812cc92..d7977e9a0 100644
--- a/src/arch/armv8/inc/arch/gicv3.h
+++ b/src/arch/armv8/inc/arch/gicv3.h
@@ -137,6 +137,16 @@ static inline uint64_t gich_get_elrsr()
     return sysreg_ich_elrsr_el2_read();
 }

+static inline uint32_t gich_get_vmcr()
+{
+    return (uint32_t)sysreg_ich_vmcr_el2_read();
+}
+
+static inline void gich_set_vmcr(uint32_t vmcr)
+{
+    sysreg_ich_vmcr_el2_write(vmcr);
+}
+
 static inline uint32_t gicc_iar()
 {
     return (uint32_t)sysreg_icc_iar1_el1_read();
diff --git a/src/arch/armv8/inc/arch/psci.h b/src/arch/armv8/inc/arch/psci.h
index 29e5ad1d6..27e0c8eca 100644
--- a/src/arch/armv8/inc/arch/psci.h
+++ b/src/arch/armv8/inc/arch/psci.h
@@ -21,6 +21,7 @@
 #define PSCI_AFFINITY_INFO_SMC64 (0xc4000004)
 #define PSCI_FEATURES (0x8400000A)
 #define PSCI_MIG_INFO_TYPE (0x84000006)
+#define PSCI_SYSTEM_RESET (0x84000009)

 #ifdef AARCH32
 #define PSCI_CPU_SUSPEND PSCI_CPU_SUSPEND_SMC32
diff --git a/src/arch/armv8/inc/arch/vgic.h b/src/arch/armv8/inc/arch/vgic.h
index cc623866c..37546278b 100644
--- a/src/arch/armv8/inc/arch/vgic.h
+++ b/src/arch/armv8/inc/arch/vgic.h
@@ -126,4 +126,7 @@ bool vgic_int_has_other_target(struct vcpu* vcpu, struct vgic_int* interrupt);
 uint8_t vgic_int_ptarget_mask(struct vcpu* vcpu, struct vgic_int* interrupt);
 void vgic_inject_sgi(struct vcpu* vcpu, struct vgic_int* interrupt, vcpuid_t source);

+void vgic_reset(struct vm* vm);
+void vgic_cpu_reset(struct vcpu* vcpu);
+
 #endif /* __VGIC_H__ */
diff --git a/src/arch/armv8/interrupts.c b/src/arch/armv8/interrupts.c
index 049e741f0..6e631362a 100644
--- a/src/arch/armv8/interrupts.c
+++ b/src/arch/armv8/interrupts.c
@@ -9,14 +9,17 @@
 #include
 #include
 #include
+#if (GIC_VERSION == GICV2)
+#include <arch/gicv2.h>
+#elif (GIC_VERSION == GICV3)
+#include <arch/gicv3.h>
+#else
+#error "unknown GIC version " GIC_VERSION
+#endif
 #include
 #include
 #include

-#ifndef GIC_VERSION
-#error "GIC_VERSION not defined for this platform"
-#endif
-
 void interrupts_arch_init()
 {
     gic_init();
@@ -61,3 +64,12 @@ void interrupts_arch_vm_assign(struct vm* vm, irqid_t id)
 {
     vgic_set_hw(vm, id);
 }
+
+void interrupts_arch_finish()
+{
+    if (cpu()->is_handling_irq) {
+        gicc_eoir((uint32_t)cpu()->arch.handling_irq_ack);
+        gicc_dir((uint32_t)cpu()->arch.handling_irq_ack);
+        cpu()->is_handling_irq = false;
+    }
+}
diff --git a/src/arch/armv8/psci.c b/src/arch/armv8/psci.c
index 7479bc68a..a0ec4c475 100644
--- a/src/arch/armv8/psci.c
+++ b/src/arch/armv8/psci.c
@@ -177,6 +177,7 @@ static int32_t psci_features_handler(uint32_t feature_id)
         case PSCI_AFFINITY_INFO_SMC32:
         case PSCI_AFFINITY_INFO_SMC64:
         case PSCI_FEATURES:
+        case PSCI_SYSTEM_RESET:
             ret = PSCI_E_SUCCESS;
             break;
         default:
@@ -187,6 +188,21 @@ static int32_t psci_features_handler(uint32_t feature_id)
     return ret;
 }

+static int32_t psci_reset()
+{
+    // Although the spec mandates this call cannot fail, it might actually do so if Bao did
+    // not keep a copy of the original guest image around
+    int32_t res = PSCI_E_DENIED;
+
+    if (vm_reset(cpu()->vcpu->vm)) {
+        // Note that, if successful, this PSCI call does not actually return. We still "return"
+        // success to keep the compiler happy.
+        res = PSCI_E_SUCCESS;
+    }
+
+    return res;
+}
+
 int32_t psci_smc_handler(uint32_t smc_fid, unsigned long x1, unsigned long x2, unsigned long x3)
 {
     int32_t ret = PSCI_E_NOT_SUPPORTED;
@@ -223,6 +239,9 @@ int32_t psci_smc_handler(uint32_t smc_fid, unsigned long x1, unsigned long x2, u
             ret = PSCI_TOS_NOT_PRESENT_MP;
             break;

+        case PSCI_SYSTEM_RESET:
+            ret = psci_reset();
+            break;
         default:
             INFO("unkown psci smc_fid 0x%lx", smc_fid);
     }
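Note: a guest reaches the PSCI_SYSTEM_RESET path above through the standard SMCCC convention (function ID in x0, then `smc #0`). The sketch below is a hypothetical guest-side call and is not part of this patch; the function name is illustrative, and only the function ID value comes from the PSCI_SYSTEM_RESET define added in psci.h.

/* Hypothetical guest-side sketch (not part of this patch): ask the hypervisor for a
 * reboot via PSCI SYSTEM_RESET (0x84000009, the PSCI_SYSTEM_RESET value above). */
static void guest_psci_system_reset(void)
{
    register unsigned long x0 asm("x0") = 0x84000009UL; /* PSCI_SYSTEM_RESET */
    asm volatile("smc #0" : "+r"(x0) : : "memory");
    /* On success this call never returns: the vCPUs restart at the VM entry point. */
}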
diff --git a/src/arch/armv8/vgicv2.c b/src/arch/armv8/vgicv2.c
index 860572d84..4f55a89a4 100644
--- a/src/arch/armv8/vgicv2.c
+++ b/src/arch/armv8/vgicv2.c
@@ -6,6 +6,8 @@
 #include
 #include

+#include
+
 #include
 #include
 #include
@@ -140,6 +142,69 @@ void vgic_inject_sgi(struct vcpu* vcpu, struct vgic_int* interrupt, vcpuid_t sou
     spin_unlock(&interrupt->lock);
 }

+void vgic_cpu_reset(struct vcpu* vcpu)
+{
+    for (irqid_t i = 0; i < GIC_CPU_PRIV; i++) {
+        vcpu->arch.vgic_priv.interrupts[i].owner = vcpu;
+        vcpu->arch.vgic_priv.interrupts[i].lock = SPINLOCK_INITVAL;
+        vcpu->arch.vgic_priv.interrupts[i].state = INV;
+        vcpu->arch.vgic_priv.interrupts[i].prio = GIC_LOWEST_PRIO;
+        vcpu->arch.vgic_priv.interrupts[i].cfg = 0;
+        vcpu->arch.vgic_priv.interrupts[i].sgi.act = 0;
+        vcpu->arch.vgic_priv.interrupts[i].sgi.pend = 0;
+        vcpu->arch.vgic_priv.interrupts[i].in_lr = false;
+        vcpu->arch.vgic_priv.interrupts[i].enabled = false;
+
+        if (vcpu->arch.vgic_priv.interrupts[i].hw) {
+            gic_set_enable(i, false);
+            gic_set_prio(i, GIC_LOWEST_PRIO);
+            gic_set_act(i, false);
+            gic_set_pend(i, false);
+        }
+    }
+
+    for (irqid_t i = 0; i < GIC_MAX_SGIS; i++) {
+        vcpu->arch.vgic_priv.interrupts[i].enabled = true;
+    }
+
+    for (size_t i = 0; i < gich_num_lrs(); i++) {
+        gich_write_lr(i, 0);
+    }
+
+    // gich_set_hcr(0);
+    gich_set_vmcr(0);
+    // TODO: reset gich apr registers
+
+    list_init(&vcpu->arch.vgic_spilled);
+}
+
+void vgic_reset(struct vm* vm)
+{
+    for (irqid_t i = 0; i < vm->arch.vgicd.int_num; i++) {
+        vm->arch.vgicd.interrupts[i].owner = NULL;
+        vm->arch.vgicd.interrupts[i].lock = SPINLOCK_INITVAL;
+        vm->arch.vgicd.interrupts[i].state = INV;
+        vm->arch.vgicd.interrupts[i].prio = GIC_LOWEST_PRIO;
+        vm->arch.vgicd.interrupts[i].cfg = 0;
+        vm->arch.vgicd.interrupts[i].targets = 0;
+        vm->arch.vgicd.interrupts[i].in_lr = false;
+        vm->arch.vgicd.interrupts[i].enabled = false;
+
+        if (vm->arch.vgicd.interrupts[i].hw) {
+            irqid_t id = vm->arch.vgicd.interrupts[i].id;
+            gic_set_enable(id, false);
+            gic_set_prio(id, GIC_LOWEST_PRIO);
+            gic_set_act(id, false);
+            gic_set_pend(id, false);
+        }
+    }
+
+    vm->arch.vgicd.CTLR = 0;
+
+    list_init(&vm->arch.vgic_spilled);
+    vm->arch.vgic_spilled_lock = SPINLOCK_INITVAL;
+}
+
 void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
 {
     vm->arch.vgicd.CTLR = 0;
@@ -160,16 +225,8 @@ void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
     }

     for (irqid_t i = 0; i < vm->arch.vgicd.int_num; i++) {
-        vm->arch.vgicd.interrupts[i].owner = NULL;
-        vm->arch.vgicd.interrupts[i].lock = SPINLOCK_INITVAL;
         vm->arch.vgicd.interrupts[i].id = i + GIC_CPU_PRIV;
-        vm->arch.vgicd.interrupts[i].state = INV;
-        vm->arch.vgicd.interrupts[i].prio = GIC_LOWEST_PRIO;
-        vm->arch.vgicd.interrupts[i].cfg = 0;
-        vm->arch.vgicd.interrupts[i].targets = 0;
         vm->arch.vgicd.interrupts[i].hw = false;
-        vm->arch.vgicd.interrupts[i].in_lr = false;
-        vm->arch.vgicd.interrupts[i].enabled = false;
     }
     vm->arch.vgicd_emul = (struct emul_mem){ .va_base = vgic_dscrp->gicd_addr,
@@ -177,29 +234,20 @@ void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
         .size = ALIGN(sizeof(struct gicd_hw), 0x1000), .handler = vgicd_emul_handler };
     vm_emul_add_mem(vm, &vm->arch.vgicd_emul);
-    list_init(&vm->arch.vgic_spilled);
-    vm->arch.vgic_spilled_lock = SPINLOCK_INITVAL;
+    vgic_reset(vm);
 }

 void vgic_cpu_init(struct vcpu* vcpu)
 {
     for (irqid_t i = 0; i < GIC_CPU_PRIV; i++) {
-        vcpu->arch.vgic_priv.interrupts[i].owner = vcpu;
-        vcpu->arch.vgic_priv.interrupts[i].lock = SPINLOCK_INITVAL;
         vcpu->arch.vgic_priv.interrupts[i].id = i;
-        vcpu->arch.vgic_priv.interrupts[i].state = INV;
-        vcpu->arch.vgic_priv.interrupts[i].prio = GIC_LOWEST_PRIO;
-        vcpu->arch.vgic_priv.interrupts[i].cfg = 0;
-        vcpu->arch.vgic_priv.interrupts[i].sgi.act = 0;
-        vcpu->arch.vgic_priv.interrupts[i].sgi.pend = 0;
         vcpu->arch.vgic_priv.interrupts[i].hw = false;
-        vcpu->arch.vgic_priv.interrupts[i].in_lr = false;
-        vcpu->arch.vgic_priv.interrupts[i].enabled = false;
+        vcpu->arch.vgic_priv.interrupts[i].targets = (uint8_t)(1UL << cpu()->id);
     }
     for (size_t i = 0; i < GIC_MAX_SGIS; i++) {
         vcpu->arch.vgic_priv.interrupts[i].enabled = true;
     }
-    list_init(&vcpu->arch.vgic_spilled);
+    vgic_cpu_reset(vcpu);
 }
diff --git a/src/arch/armv8/vgicv3.c b/src/arch/armv8/vgicv3.c
index 8ae0e6472..b8f46aa6d 100644
--- a/src/arch/armv8/vgicv3.c
+++ b/src/arch/armv8/vgicv3.c
@@ -6,6 +6,8 @@
 #include
 #include

+#include
+
 #include
 #include
 #include
@@ -318,10 +320,75 @@ static bool vgic_icc_sre_handler(struct emul_access* acc)
     return true;
 }

+void vgic_cpu_reset(struct vcpu* vcpu)
+{
+    for (irqid_t i = 0; i < GIC_CPU_PRIV; i++) {
+        vcpu->arch.vgic_priv.interrupts[i].owner = NULL;
+        vcpu->arch.vgic_priv.interrupts[i].lock = SPINLOCK_INITVAL;
+        vcpu->arch.vgic_priv.interrupts[i].state = INV;
+        vcpu->arch.vgic_priv.interrupts[i].prio = GIC_LOWEST_PRIO;
+        vcpu->arch.vgic_priv.interrupts[i].cfg = 0;
+        vcpu->arch.vgic_priv.interrupts[i].route = GICD_IROUTER_INV;
+        vcpu->arch.vgic_priv.interrupts[i].phys.redist = vcpu->phys_id;
+        vcpu->arch.vgic_priv.interrupts[i].in_lr = false;
+        vcpu->arch.vgic_priv.interrupts[i].enabled = false;
+
+        if (vcpu->arch.vgic_priv.interrupts[i].hw) {
+            gic_set_enable(i, false);
+            gic_set_prio(i, GIC_LOWEST_PRIO);
+            gic_set_act(i, false);
+            gic_set_pend(i, false);
+            // gicd_set_route(i, GICD_IROUTER_INV);
+        }
+    }
+
+    for (irqid_t i = 0; i < GIC_MAX_SGIS; i++) {
+        vcpu->arch.vgic_priv.interrupts[i].cfg = 0x2;
+    }
+
+    for (size_t i = 0; i < gich_num_lrs(); i++) {
+        gich_write_lr(i, 0);
+    }
+
+    // gich_set_hcr(0);
+    gich_set_vmcr(0);
+    // TODO: reset gich apr registers
+
+    list_init(&vcpu->arch.vgic_spilled);
+}
+
+void vgic_reset(struct vm* vm)
+{
+    for (irqid_t i = 0; i < vm->arch.vgicd.int_num; i++) {
+        vm->arch.vgicd.interrupts[i].owner = NULL;
+        vm->arch.vgicd.interrupts[i].lock = SPINLOCK_INITVAL;
+        vm->arch.vgicd.interrupts[i].state = INV;
+        vm->arch.vgicd.interrupts[i].prio = GIC_LOWEST_PRIO;
+        vm->arch.vgicd.interrupts[i].cfg = 0;
+        vm->arch.vgicd.interrupts[i].route = GICD_IROUTER_INV;
+        vm->arch.vgicd.interrupts[i].phys.route = GICD_IROUTER_INV;
+        vm->arch.vgicd.interrupts[i].in_lr = false;
+        vm->arch.vgicd.interrupts[i].enabled = false;
+
+        if (vm->arch.vgicd.interrupts[i].hw) {
+            irqid_t id = i + GIC_CPU_PRIV;
+            gic_set_enable(id, false);
+            gic_set_prio(id, GIC_LOWEST_PRIO);
+            gic_set_act(id, false);
+            gic_set_pend(id, false);
+            gicd_set_route(id, GICD_IROUTER_INV);
+        }
+    }
+
+    vm->arch.vgicd.CTLR = 0;
+
+    list_init(&vm->arch.vgic_spilled);
+    vm->arch.vgic_spilled_lock = SPINLOCK_INITVAL;
+}
+
 void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
 {
     vm->arch.vgicr_addr = vgic_dscrp->gicr_addr;
-    vm->arch.vgicd.CTLR = 0;
     size_t vtyper_itln = vgic_get_itln(vgic_dscrp);
     vm->arch.vgicd.int_num = 32 * (vtyper_itln + 1);
     vm->arch.vgicd.TYPER = ((vtyper_itln << GICD_TYPER_ITLN_OFF) & GICD_TYPER_ITLN_MSK) |
@@ -336,17 +403,8 @@ void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
     }

     for (irqid_t i = 0; i < vm->arch.vgicd.int_num; i++) {
-        vm->arch.vgicd.interrupts[i].owner = NULL;
-        vm->arch.vgicd.interrupts[i].lock = SPINLOCK_INITVAL;
         vm->arch.vgicd.interrupts[i].id = i + GIC_CPU_PRIV;
-        vm->arch.vgicd.interrupts[i].state = INV;
-        vm->arch.vgicd.interrupts[i].prio = GIC_LOWEST_PRIO;
-        vm->arch.vgicd.interrupts[i].cfg = 0;
-        vm->arch.vgicd.interrupts[i].route = GICD_IROUTER_INV;
-        vm->arch.vgicd.interrupts[i].phys.route = GICD_IROUTER_INV;
         vm->arch.vgicd.interrupts[i].hw = false;
-        vm->arch.vgicd.interrupts[i].in_lr = false;
-        vm->arch.vgicd.interrupts[i].enabled = false;
     }
     vm->arch.vgicd_emul = (struct emul_mem){ .va_base = vgic_dscrp->gicd_addr,
@@ -377,29 +435,14 @@ void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
         .handler = vgic_icc_sre_handler };
     vm_emul_add_reg(vm, &vm->arch.icc_sre_emul);
-    list_init(&vm->arch.vgic_spilled);
-    vm->arch.vgic_spilled_lock = SPINLOCK_INITVAL;
+    vgic_reset(vm);
 }

 void vgic_cpu_init(struct vcpu* vcpu)
 {
     for (irqid_t i = 0; i < GIC_CPU_PRIV; i++) {
-        vcpu->arch.vgic_priv.interrupts[i].owner = NULL;
-        vcpu->arch.vgic_priv.interrupts[i].lock = SPINLOCK_INITVAL;
         vcpu->arch.vgic_priv.interrupts[i].id = i;
-        vcpu->arch.vgic_priv.interrupts[i].state = INV;
-        vcpu->arch.vgic_priv.interrupts[i].prio = GIC_LOWEST_PRIO;
-        vcpu->arch.vgic_priv.interrupts[i].cfg = 0;
-        vcpu->arch.vgic_priv.interrupts[i].route = GICD_IROUTER_INV;
-        vcpu->arch.vgic_priv.interrupts[i].phys.redist = vcpu->phys_id;
-        vcpu->arch.vgic_priv.interrupts[i].hw = false;
-        vcpu->arch.vgic_priv.interrupts[i].in_lr = false;
-        vcpu->arch.vgic_priv.interrupts[i].enabled = false;
     }
-    for (irqid_t i = 0; i < GIC_MAX_SGIS; i++) {
-        vcpu->arch.vgic_priv.interrupts[i].cfg = 0x2;
-    }
-
-    list_init(&vcpu->arch.vgic_spilled);
+    vgic_cpu_reset(vcpu);
 }
diff --git a/src/arch/armv8/vm.c b/src/arch/armv8/vm.c
index 84cd2f5e9..d43a827d6 100644
--- a/src/arch/armv8/vm.c
+++ b/src/arch/armv8/vm.c
@@ -45,13 +45,16 @@ static unsigned long vm_cpuid_to_mpidr(struct vm* vm, vcpuid_t cpuid)
     return mpidr;
 }

+void vm_arch_reset(struct vm* vm)
+{
+    vgic_reset(vm);
+}
+
 void vcpu_arch_init(struct vcpu* vcpu, struct vm* vm)
 {
     vcpu->arch.vmpidr = vm_cpuid_to_mpidr(vm, vcpu->id);
     sysreg_vmpidr_el2_write(vcpu->arch.vmpidr);

-    vcpu->arch.psci_ctx.state = vcpu->id == 0 ? ON : OFF;
-
     vcpu_arch_profile_init(vcpu, vm);

     vgic_cpu_init(vcpu);
@@ -78,6 +81,10 @@ void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry)
      * TODO: ARMv8-A ARM mentions another implementation optional registers that reset to a known
      * value.
      */
+
+    vcpu->arch.psci_ctx.state = vcpu->id == 0 ? ON : OFF;
+
+    vgic_cpu_reset(vcpu);
 }

 static inline bool vcpu_psci_state_on(struct vcpu* vcpu)
diff --git a/src/arch/riscv/inc/arch/cpu.h b/src/arch/riscv/inc/arch/cpu.h
index 3a5ca07c4..c5a3d8969 100644
--- a/src/arch/riscv/inc/arch/cpu.h
+++ b/src/arch/riscv/inc/arch/cpu.h
@@ -15,6 +15,10 @@ extern cpuid_t CPU_MASTER;
 struct cpu_arch {
     unsigned hart_id;
     unsigned plic_cntxt;
+    struct {
+        unsigned long cause;
+        unsigned long external_id;
+    } handling_irq;
 };

 static inline struct cpu* cpu()
diff --git a/src/arch/riscv/interrupts.c b/src/arch/riscv/interrupts.c
index 05bab96d8..ed2fd0d7c 100644
--- a/src/arch/riscv/interrupts.c
+++ b/src/arch/riscv/interrupts.c
@@ -70,6 +70,9 @@ void interrupts_arch_handle(void)
 {
     unsigned long _scause = csrs_scause_read();

+    cpu()->is_handling_irq = true;
+    cpu()->arch.handling_irq.cause = _scause;
+
     switch (_scause) {
         case SCAUSE_CODE_SSI:
             csrs_sip_clear(SIP_SSIP);
@@ -92,6 +95,8 @@ void interrupts_arch_handle(void)
             // WARNING("unkown interrupt");
             break;
     }
+
+    cpu()->is_handling_irq = false;
 }

 bool interrupts_arch_check(irqid_t int_id)
@@ -128,3 +133,29 @@ void interrupts_arch_vm_assign(struct vm* vm, irqid_t id)
 {
     virqc_set_hw(vm, id);
 }
+
+void interrupts_arch_finish()
+{
+    if (cpu()->is_handling_irq) {
+        switch (cpu()->arch.handling_irq.cause) {
+            case SCAUSE_CODE_SSI:
+                csrs_sip_clear(SIP_SSIP);
+                break;
+            case SCAUSE_CODE_STI:
+                /**
+                 * Clearing the timer pending bit actually has no effect. We could re-program the
+                 * timer to "infinity" but we don't know if the handler itself re-programmed the
+                 * timer with a new event. Therefore, at this point, we must trust the handler
+                 * either correctly re-programs the timer or disables the interrupt so the cpu is
+                 * not starved by continuously triggering the timer interrupt (spoiler alert, it
+                 * does!)
+                 */
+                break;
+            case SCAUSE_CODE_SEI:
+                irqc_finish_interrupt((irqid_t)cpu()->arch.handling_irq.external_id);
+                break;
+            default:
+                break;
+        }
+    }
+}
diff --git a/src/arch/riscv/irqc/aia/aplic.c b/src/arch/riscv/irqc/aia/aplic.c
index e115c9246..c164d049d 100644
--- a/src/arch/riscv/irqc/aia/aplic.c
+++ b/src/arch/riscv/irqc/aia/aplic.c
@@ -167,6 +167,7 @@ void aplic_handle(void)
     irqid_t intp_identity = aplic_idc_get_claimi_intpid(idc_id);

     if (intp_identity != 0) {
+        cpu()->arch.handling_irq.external_id = intp_identity;
         interrupts_handle(intp_identity);
     }
 }
diff --git a/src/arch/riscv/irqc/aia/inc/irqc.h b/src/arch/riscv/irqc/aia/inc/irqc.h
index adbb5f0f6..26022f80c 100644
--- a/src/arch/riscv/irqc/aia/inc/irqc.h
+++ b/src/arch/riscv/irqc/aia/inc/irqc.h
@@ -61,4 +61,9 @@ static inline void virqc_set_hw(struct vm* vm, irqid_t id)
     vaplic_set_hw(vm, id);
 }

+static inline void irqc_finish_interrupt(irqid_t id)
+{
+    irqc_clr_pend(id);
+}
+
 #endif // IRQC_H
diff --git a/src/arch/riscv/irqc/aia/inc/vaplic.h b/src/arch/riscv/irqc/aia/inc/vaplic.h
index f8083f30b..0c13c70cc 100644
--- a/src/arch/riscv/irqc/aia/inc/vaplic.h
+++ b/src/arch/riscv/irqc/aia/inc/vaplic.h
@@ -86,4 +86,16 @@ static inline void virqc_inject(vcpu_t* vcpu, irqid_t id)
     vaplic_inject(vcpu, id);
 }

+/**
+ * @brief Resets the state of the virtual aplic
+ *
+ * @param vm Virtual Machine of the target vaplic
+ */
+void vaplic_reset(struct vm* vm);
+
+static inline void virqc_reset(struct vm* vm)
+{
+    vaplic_reset(vm);
+}
+
 #endif // VAPLIC_H
diff --git a/src/arch/riscv/irqc/aia/vaplic.c b/src/arch/riscv/irqc/aia/vaplic.c
index 061b03835..522c4acf7 100644
--- a/src/arch/riscv/irqc/aia/vaplic.c
+++ b/src/arch/riscv/irqc/aia/vaplic.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <string.h>

 #define APLIC_MIN_PRIO (0xFF)
 #define UPDATE_ALL_HARTS (~0U)
@@ -1282,6 +1283,28 @@ static bool vaplic_idc_emul_handler(struct emul_access* acc)
     return true;
 }

+void vaplic_reset(struct vm* vm)
+{
+    memset(vm->arch.vaplic.srccfg, 0, sizeof(vm->arch.vaplic.srccfg));
+    memset(vm->arch.vaplic.ip, 0, sizeof(vm->arch.vaplic.ip));
+    memset(vm->arch.vaplic.ie, 0, sizeof(vm->arch.vaplic.ie));
+    memset(vm->arch.vaplic.target, 0, sizeof(vm->arch.vaplic.target));
+    memset(vm->arch.vaplic.idelivery, 0, sizeof(vm->arch.vaplic.idelivery));
+    memset(vm->arch.vaplic.iforce, 0, sizeof(vm->arch.vaplic.iforce));
+    memset(vm->arch.vaplic.ithreshold, 0, sizeof(vm->arch.vaplic.ithreshold));
+    memset(vm->arch.vaplic.topi_claimi, 0, sizeof(vm->arch.vaplic.topi_claimi));
+
+    for (irqid_t id = 1; id <= APLIC_MAX_INTERRUPTS; id++) {
+        if (bitmap_get(vm->arch.vaplic.hw, id)) {
+            aplic_clr_enbl(id);
+            aplic_clr_pend(id);
+            aplic_set_sourcecfg(id, APLIC_SOURCECFG_SM_INACTIVE);
+            aplic_set_target_prio(id, APLIC_TARGET_MIN_PRIO);
+            aplic_set_target_hart(id, 0);
+        }
+    }
+}
+
 void vaplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp)
 {
     if (cpu()->id == vm->master) {
@@ -1301,5 +1324,7 @@ void vaplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp)
             .handler = vaplic_idc_emul_handler };
         vm_emul_add_mem(vm, &vm->arch.vaplic.aplic_idc_emul);
+
+        vaplic_reset(vm);
     }
 }
diff --git a/src/arch/riscv/irqc/plic/inc/irqc.h b/src/arch/riscv/irqc/plic/inc/irqc.h
index d0e3ccc2c..4c7494514 100644
--- a/src/arch/riscv/irqc/plic/inc/irqc.h
+++ b/src/arch/riscv/irqc/plic/inc/irqc.h
@@ -54,4 +54,9 @@ static inline void virqc_set_hw(struct vm* vm, irqid_t id)
     vplic_set_hw(vm, id);
 }

+static inline void irqc_finish_interrupt(irqid_t id)
+{
+    irqc_clr_pend(id);
+}
+
 #endif // IRQC_H
diff --git a/src/arch/riscv/irqc/plic/inc/vplic.h b/src/arch/riscv/irqc/plic/inc/vplic.h
index 0d3325984..90968efec 100644
--- a/src/arch/riscv/irqc/plic/inc/vplic.h
+++ b/src/arch/riscv/irqc/plic/inc/vplic.h
@@ -31,6 +31,7 @@ union vm_irqc_dscrp;
 void vplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp);
 void vplic_inject(struct vcpu* vcpu, irqid_t id);
 void vplic_set_hw(struct vm* vm, irqid_t id);
+void vplic_reset(struct vm* vm);

 static inline void virqc_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp)
 {
@@ -43,4 +44,9 @@ static inline void virqc_inject(vcpu_t* vcpu, irqid_t id)
     vplic_inject(vcpu, id);
 }

+static inline void virqc_reset(struct vm* vm)
+{
+    vplic_reset(vm);
+}
+
 #endif //__VPLIC_H__
diff --git a/src/arch/riscv/irqc/plic/plic.c b/src/arch/riscv/irqc/plic/plic.c
index 08b45c88f..e63b52596 100644
--- a/src/arch/riscv/irqc/plic/plic.c
+++ b/src/arch/riscv/irqc/plic/plic.c
@@ -142,6 +142,7 @@ void plic_handle(void)
     uint32_t id = plic_hart[cpu()->arch.plic_cntxt].claim;

     if (id != 0) {
+        cpu()->arch.handling_irq.external_id = id;
         enum irq_res res = interrupts_handle(id);
         if (res == HANDLED_BY_HYP) {
             plic_hart[cpu()->arch.plic_cntxt].complete = id;
diff --git a/src/arch/riscv/irqc/plic/vplic.c b/src/arch/riscv/irqc/plic/vplic.c
index 76311b7dc..21b608449 100644
--- a/src/arch/riscv/irqc/plic/vplic.c
+++ b/src/arch/riscv/irqc/plic/vplic.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <string.h>

 static ssize_t vplic_vcntxt_to_pcntxt(struct vcpu* vcpu, size_t vcntxt_id)
 {
@@ -369,6 +370,24 @@ static bool vplic_hart_emul_handler(struct emul_access* acc)
     return true;
 }

+void vplic_reset(struct vm* vm)
+{
+    memset(vm->arch.vplic.pend, 0, sizeof(vm->arch.vplic.pend));
+    memset(vm->arch.vplic.act, 0, sizeof(vm->arch.vplic.act));
+    memset(vm->arch.vplic.prio, 0, sizeof(vm->arch.vplic.prio));
+    memset(vm->arch.vplic.enbl, 0, sizeof(vm->arch.vplic.enbl));
+    memset(vm->arch.vplic.pend, 0, sizeof(vm->arch.vplic.pend));
+
+    for (irqid_t id = 1; id <= PLIC_IMPL_INTERRUPTS; id++) {
+        if (vm->arch.vplic.hw) {
+            plic_set_prio(id, 0);
+            for (size_t j = 1; j < PLIC_PLAT_CNTXT_NUM; j += 2) {
+                plic_set_enbl(j, id, false);
+            }
+        }
+    }
+}
+
 void vplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp)
 {
     if (cpu()->id == vm->master) {
@@ -387,5 +406,7 @@ void vplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp)
         /* assumes 2 contexts per hart */
         vm->arch.vplic.cntxt_num = vm->cpu_num * 2;
+
+        vplic_reset(vm);
     }
 }
diff --git a/src/arch/riscv/sbi.c b/src/arch/riscv/sbi.c
index 108880028..97650ca3f 100644
--- a/src/arch/riscv/sbi.c
+++ b/src/arch/riscv/sbi.c
@@ -40,6 +40,10 @@
 #define SBI_REMOTE_HFENCE_VVMA_FID (5)
 #define SBI_REMOTE_HFENCE_VVMA_ASID_FID (6)

+#define SBI_EXTID_SRST (0x53525354)
+#define SBI_SYSTEM_RESET_FID (0)
+#define SBI_RST_TYPE_COLD_REBOOT (0x00000001)
+
 /**
  * For now we're defining bao specific ecalls, ie, hypercall, under the experimental extension id
  * space.
@@ -166,7 +170,7 @@ struct sbiret sbi_hart_status(unsigned long hartid)
 }

 static unsigned long ext_table[] = { SBI_EXTID_BASE, SBI_EXTID_TIME, SBI_EXTID_IPI, SBI_EXTID_RFNC,
-    SBI_EXTID_HSM };
+    SBI_EXTID_HSM, SBI_EXTID_SRST };

 static const size_t NUM_EXT = sizeof(ext_table) / sizeof(unsigned long);
@@ -256,7 +260,7 @@ static struct sbiret sbi_base_handler(unsigned long fid)

     switch (fid) {
         case SBI_GET_SBI_SPEC_VERSION_FID:
-            ret.value = 2;
+            ret.value = 3;
             break;
         case SBI_PROBE_EXTENSION_FID:
             ret.value = 0;
@@ -391,6 +395,32 @@ static struct sbiret sbi_hsm_handler(unsigned long fid)
     return ret;
 }

+static struct sbiret sbi_srst_handler(unsigned long fid)
+{
+    struct sbiret ret;
+
+    uint32_t reset_type = (uint32_t)vcpu_readreg(cpu()->vcpu, REG_A0);
+
+    switch (fid) {
+        case SBI_SYSTEM_RESET_FID:
+            if (reset_type == SBI_RST_TYPE_COLD_REBOOT) {
+                if (vm_reset(cpu()->vcpu->vm)) {
+                    ret.error = SBI_SUCCESS;
+                } else {
+                    ret.error = SBI_ERR_FAILURE;
+                }
+            } else {
+                ret.error = SBI_ERR_INVALID_PARAM;
+            }
+            break;
+        default:
+            ret.error = SBI_ERR_NOT_SUPPORTED;
+            break;
+    }
+
+    return ret;
+}
+
 static struct sbiret sbi_bao_handler(unsigned long fid)
 {
     struct sbiret ret;
@@ -422,6 +452,9 @@ size_t sbi_vs_handler()
         case SBI_EXTID_HSM:
             ret = sbi_hsm_handler(fid);
             break;
+        case SBI_EXTID_SRST:
+            ret = sbi_srst_handler(fid);
+            break;
         case SBI_EXTID_BAO:
             ret = sbi_bao_handler(fid);
             break;
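Note: on RISC-V the guest-visible entry point for the same reset path is the SBI SRST extension handled above. The sketch below is a hypothetical guest-side call, not part of this patch; it follows the standard SBI calling convention (EID in a7, FID in a6, arguments in a0/a1) and reuses the SBI_EXTID_SRST, SBI_SYSTEM_RESET_FID and SBI_RST_TYPE_COLD_REBOOT values defined in sbi.c, while the function name is illustrative.

/* Hypothetical guest-side sketch (not part of this patch): request a cold reboot
 * through the SBI SRST extension (EID 0x53525354 "SRST", FID 0). */
static long guest_sbi_system_reset(void)
{
    register unsigned long a0 asm("a0") = 1UL;          /* SBI_RST_TYPE_COLD_REBOOT */
    register unsigned long a1 asm("a1") = 0UL;          /* reset reason: none */
    register unsigned long a6 asm("a6") = 0UL;          /* SBI_SYSTEM_RESET_FID */
    register unsigned long a7 asm("a7") = 0x53525354UL; /* SBI_EXTID_SRST */
    asm volatile("ecall" : "+r"(a0), "+r"(a1) : "r"(a6), "r"(a7) : "memory");
    return (long)a0; /* only reached if the reset was denied, e.g. for an in-place guest image */
}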
diff --git a/src/arch/riscv/vm.c b/src/arch/riscv/vm.c
index 6acc20266..b68747763 100644
--- a/src/arch/riscv/vm.c
+++ b/src/arch/riscv/vm.c
@@ -29,7 +29,6 @@ void vcpu_arch_init(struct vcpu* vcpu, struct vm* vm)
     UNUSED_ARG(vm);

     vcpu->arch.sbi_ctx.lock = SPINLOCK_INITVAL;
-    vcpu->arch.sbi_ctx.state = vcpu->id == 0 ? STARTED : STOPPED;
 }

 void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry)
@@ -59,6 +58,8 @@ void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry)
     csrs_vstval_write(0);
     csrs_hvip_write(0);
     csrs_vsatp_write(0);
+
+    vcpu->arch.sbi_ctx.state = vcpu->id == 0 ? STARTED : STOPPED;
 }

 unsigned long vcpu_readreg(struct vcpu* vcpu, unsigned long reg)
@@ -95,3 +96,8 @@ void vcpu_arch_run(struct vcpu* vcpu)
         cpu_idle();
     }
 }
+
+void vm_arch_reset(struct vm* vm)
+{
+    virqc_reset(vm);
+}
diff --git a/src/core/cpu.c b/src/core/cpu.c
index 68f30fa3c..f06bfc3b5 100644
--- a/src/core/cpu.c
+++ b/src/core/cpu.c
@@ -35,6 +35,7 @@ void cpu_init(cpuid_t cpu_id, paddr_t load_addr)
 {
     cpu()->id = cpu_id;
     cpu()->handling_msgs = false;
+    cpu()->is_handling_irq = false;
     cpu()->interface = cpu_if(cpu()->id);

     cpu_arch_init(cpu_id, load_addr);
@@ -90,6 +91,8 @@ void cpu_msg_handler(void)

 void cpu_idle(void)
 {
+    interrupts_arch_finish();
+
     cpu_arch_idle();

     /**
diff --git a/src/core/inc/cpu.h b/src/core/inc/cpu.h
index d14fac387..fa458e474 100644
--- a/src/core/inc/cpu.h
+++ b/src/core/inc/cpu.h
@@ -27,6 +27,8 @@ struct cpu {

     bool handling_msgs;

+    bool is_handling_irq;
+
     struct addr_space as;

     struct vcpu* vcpu;
diff --git a/src/core/inc/interrupts.h b/src/core/inc/interrupts.h
index a48efc527..208e1c8c3 100644
--- a/src/core/inc/interrupts.h
+++ b/src/core/inc/interrupts.h
@@ -38,5 +38,6 @@ void interrupts_arch_clear(irqid_t int_id);
 void interrupts_arch_ipi_send(cpuid_t cpu_target, irqid_t ipi_id);
 void interrupts_arch_vm_assign(struct vm* vm, irqid_t id);
 bool interrupts_arch_conflict(bitmap_t* interrupt_bitmap, irqid_t id);
+void interrupts_arch_finish(void);

 #endif /* __INTERRUPTS_H__ */
diff --git a/src/core/inc/vm.h b/src/core/inc/vm.h
index a2177cd94..c3ce3c4ce 100644
--- a/src/core/inc/vm.h
+++ b/src/core/inc/vm.h
@@ -117,6 +117,7 @@ void vcpu_init(struct vcpu* vcpu, struct vm* vm, vaddr_t entry);
 void vm_msg_broadcast(struct vm* vm, struct cpu_msg* msg);
 cpumap_t vm_translate_to_pcpu_mask(struct vm* vm, cpumap_t mask, size_t len);
 cpumap_t vm_translate_to_vcpu_mask(struct vm* vm, cpumap_t mask, size_t len);
+bool vm_reset(struct vm* vm);

 static inline struct vcpu* vm_get_vcpu(struct vm* vm, vcpuid_t vcpuid)
 {
@@ -176,5 +177,6 @@ unsigned long vcpu_readpc(struct vcpu* vcpu);
 void vcpu_writepc(struct vcpu* vcpu, unsigned long pc);
 void vcpu_arch_run(struct vcpu* vcpu);
 void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry);
+void vm_arch_reset(struct vm* vm);

 #endif /* __VM_H__ */
diff --git a/src/core/vm.c b/src/core/vm.c
index 7be37f1e8..c33e37d20 100644
--- a/src/core/vm.c
+++ b/src/core/vm.c
@@ -349,6 +349,73 @@ __attribute__((weak)) cpumap_t vm_translate_to_vcpu_mask(struct vm* vm, cpumap_t

 void vcpu_run(struct vcpu* vcpu)
 {
-    cpu()->vcpu->active = true;
     vcpu_arch_run(vcpu);
 }
+
+static void vm_vcpu_reset(struct vm* vm)
+{
+    struct vcpu* vcpu = cpu()->vcpu;
+
+    if (vcpu->vm->id != vm->id) {
+        ERROR("Trying to reset vm not hosted in this cpu");
+    }
+
+    cpu_sync_and_clear_msgs(&vm->sync);
+
+    if (vm->master == cpu()->id) {
+        vm_arch_reset(vm);
+        for (size_t i = 0; i < vm->config->platform.region_num; i++) {
+            struct vm_mem_region* reg = &vm->config->platform.regions[i];
+            bool img_is_in_rgn = range_in_range(vm->config->image.base_addr, vm->config->image.size,
+                reg->base, reg->size);
+            if (img_is_in_rgn) {
+                vm_install_image(vm, reg);
+                break;
+            }
+        }
+    }
+
+    cpu_sync_barrier(&vcpu->vm->sync);
+
+    vcpu_arch_reset(vcpu, vm->config->entry);
+
+    vcpu_arch_run(vcpu);
+}
+
+enum VM_EVENTS { VM_RESET };
+
+static void vm_msg_handler(uint32_t event, uint64_t data)
+{
+    UNUSED_ARG(data);
+
+    switch (event) {
+        case VM_RESET:
+            vm_vcpu_reset(cpu()->vcpu->vm);
+            break;
+        default:
+            break;
+    }
+}
+
+CPU_MSG_HANDLER(vm_msg_handler, VM_IPI_ID)
+
+bool vm_reset(struct vm* vm)
+{
+    bool res;
+
+    if (vm->config->image.inplace) {
+        res = false;
+    } else {
+        struct cpu_msg msg;
+        msg.handler = (uint32_t)VM_IPI_ID;
+        msg.event = VM_RESET;
+
+        vm_msg_broadcast(vm, &msg);
+
+        vm_vcpu_reset(vm);
+
+        res = true;
+    }
+
+    return res;
+}