diff --git a/Makefile b/Makefile
index 7b8eac42..bdf94c9e 100644
--- a/Makefile
+++ b/Makefile
@@ -204,8 +204,13 @@ endif
 ifeq ($(plat_mem),non_unified)
 build_macros+=-DMEM_NON_UNIFIED
 endif
-ifeq ($(phys_irqs_only),y)
-    build_macros+=-DPHYS_IRQS_ONLY
+
+ifeq ($(mmio_slave_side_prot),y)
+    build_macros+=-DMMIO_SLAVE_SIDE_PROT
+
+    ifneq ($(arch_mem_prot),mpu)
+        $(error mmio_slave_side_prot=y requires arch_mem_prot=mpu)
+    endif
 endif
 
 ifeq ($(CC_IS_GCC),y)
diff --git a/src/core/inc/platform.h b/src/core/inc/platform.h
index ac91015f..11d8adee 100644
--- a/src/core/inc/platform.h
+++ b/src/core/inc/platform.h
@@ -21,6 +21,8 @@ struct platform {
     size_t region_num;
     struct mem_region* regions;
 
+    size_t mmio_region_num;
+    struct mem_region* mmio_regions;
     struct {
         paddr_t base;
     } console;
diff --git a/src/core/inc/vm.h b/src/core/inc/vm.h
index 2d0686ed..1eabed97 100644
--- a/src/core/inc/vm.h
+++ b/src/core/inc/vm.h
@@ -183,5 +183,6 @@ unsigned long vcpu_readpc(struct vcpu* vcpu);
 void vcpu_writepc(struct vcpu* vcpu, unsigned long pc);
 void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry);
 bool vcpu_arch_is_on(struct vcpu* vcpu);
+void vm_arch_allow_mmio_access(struct vm* vm, struct vm_dev_region* dev);
 
 #endif /* __VM_H__ */
diff --git a/src/core/mpu/mem.c b/src/core/mpu/mem.c
index 73447e39..c63af79c 100644
--- a/src/core/mpu/mem.c
+++ b/src/core/mpu/mem.c
@@ -214,6 +214,15 @@ size_t mem_cpu_boot_alloc_size()
     return size;
 }
 
+static void mem_mmio_init_regions(struct addr_space* as)
+{
+    for (unsigned long i = 0; i < platform.mmio_region_num; i++) {
+        mem_alloc_map_dev(as, as->type == AS_VM ? SEC_VM_ANY : SEC_HYP_ANY,
+            platform.mmio_regions[i].base, platform.mmio_regions[i].base,
+            NUM_PAGES(platform.mmio_regions[i].size));
+    }
+}
+
 void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, colormap_t colors)
 {
     UNUSED_ARG(colors);
@@ -229,6 +238,12 @@ void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, colormap_t col
     for (size_t i = 0; i < VMPU_NUM_ENTRIES; i++) {
         mem_vmpu_free_entry(as, i);
     }
+
+    /* For architectures with slave-side MMIO protection, we map all the
+       MMIO regions so that they are accessible from every address space. */
+    if (DEFINED(MMIO_SLAVE_SIDE_PROT)) {
+        mem_mmio_init_regions(as);
+    }
 }
 
 static void mem_free_ppages(struct ppages* ppages)
diff --git a/src/core/vm.c b/src/core/vm.c
index fed31e1c..d94fee7e 100644
--- a/src/core/vm.c
+++ b/src/core/vm.c
@@ -20,8 +20,6 @@ static void vm_master_init(struct vm* vm, const struct vm_config* vm_config, vmi
     vm->lock = SPINLOCK_INITVAL;
 
     cpu_sync_init(&vm->sync, vm->cpu_num);
-
-    vm_mem_prot_init(vm, vm_config);
 }
 
 static void vm_cpu_init(struct vm* vm)
@@ -210,9 +208,10 @@ static void vm_init_dev(struct vm* vm, const struct vm_config* vm_config)
     for (size_t i = 0; i < vm_config->platform.dev_num; i++) {
         struct vm_dev_region* dev = &vm_config->platform.devs[i];
 
-        size_t n = ALIGN(dev->size, PAGE_SIZE) / PAGE_SIZE;
-
-        if (dev->va != INVALID_VA) {
+        if (DEFINED(MMIO_SLAVE_SIDE_PROT)) {
+            vm_arch_allow_mmio_access(vm, dev);
+        } else if (dev->va != INVALID_VA) {
+            size_t n = ALIGN(dev->size, PAGE_SIZE) / PAGE_SIZE;
             mem_alloc_map_dev(&vm->as, SEC_VM_ANY, (vaddr_t)dev->va, dev->pa, n);
         }
 
@@ -319,6 +318,12 @@ struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* vm_co
 
     cpu_sync_barrier(&vm->sync);
 
+    if (master) {
+        vm_mem_prot_init(vm, vm_config);
+    }
+
+    cpu_sync_barrier(&vm->sync);
+
     /**
      * Perform architecture dependent initializations. This includes, for example, setting the page
      * table pointer and other virtualization extensions specifics.
@@ -422,3 +427,11 @@ void vcpu_run(struct vcpu* vcpu)
         cpu_powerdown();
     }
 }
+
+__attribute__((weak)) void vm_arch_allow_mmio_access(struct vm* vm, struct vm_dev_region* dev)
+{
+    UNUSED_ARG(dev);
+    UNUSED_ARG(vm);
+    ERROR("vm_arch_allow_mmio_access must be implemented by the arch!");
+    return;
+}
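
Since the weak default above just aborts, every architecture built with mmio_slave_side_prot=y must supply its own vm_arch_allow_mmio_access. Below is a minimal sketch of what such an override could look like, assuming a hypothetical slave-side protection driver entry point (mmio_prot_grant) and a 1:1 mapping from VM id to bus master id; neither of these exists in this patch.

/* Hypothetical arch-level override of the weak symbol. Rather than mapping
 * the device into the VM's MPU-backed address space (already done globally
 * by mem_mmio_init_regions), it programs the platform's slave-side
 * protection unit so that only this VM's bus master may reach the device. */
void vm_arch_allow_mmio_access(struct vm* vm, struct vm_dev_region* dev)
{
    /* Assumption: the VM id doubles as the bus master id on this platform. */
    unsigned long master_id = (unsigned long)vm->id;

    /* mmio_prot_grant() is an assumed driver helper, not part of this diff:
     * it opens the range dev->pa .. dev->pa + dev->size for master_id. */
    mmio_prot_grant(master_id, dev->pa, dev->size);
}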
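
On the platform side, the new mmio_region_num/mmio_regions fields feed mem_mmio_init_regions() during as_init(). An illustrative fragment of a port's platform description follows; the base and size values are invented for the example.

/* Illustrative only: addresses and sizes are made up. */
struct mem_region plat_mmio_regions[] = {
    { .base = 0x40000000, .size = 0x10000000 }, /* hypothetical peripheral window */
};

struct platform platform = {
    /* ... existing fields (cpu_num, region_num, regions, console, ...) ... */
    .mmio_region_num = sizeof(plat_mmio_regions) / sizeof(plat_mmio_regions[0]),
    .mmio_regions = plat_mmio_regions,
};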