diff --git a/src/arch/armv8/armv8-a/smmuv2.c b/src/arch/armv8/armv8-a/smmuv2.c
index 9e6734d6c..232b7a0dc 100644
--- a/src/arch/armv8/armv8-a/smmuv2.c
+++ b/src/arch/armv8/armv8-a/smmuv2.c
@@ -182,7 +182,7 @@ ssize_t smmu_alloc_ctxbnk(void)
 {
     spin_lock(&smmu.ctx_lock);
     /* Find a free context bank. */
-    ssize_t nth = bitmap_find_nth(smmu.ctxbank_bitmap, smmu.ctx_num, 1, 0, false);
+    ssize_t nth = bitmap_find_nth(smmu.ctxbank_bitmap, smmu.ctx_num, 1, 0, BITMAP_NOT_SET);
     if (nth >= 0) {
         bitmap_set(smmu.ctxbank_bitmap, (size_t)nth);
     }
@@ -247,7 +247,7 @@ ssize_t smmu_alloc_sme(void)
 {
     spin_lock(&smmu.sme_lock);
     /* Find a free sme. */
-    ssize_t nth = bitmap_find_nth(smmu.sme_bitmap, smmu.sme_num, 1, 0, false);
+    ssize_t nth = bitmap_find_nth(smmu.sme_bitmap, smmu.sme_num, 1, 0, BITMAP_NOT_SET);
     if (nth >= 0) {
         bitmap_set(smmu.sme_bitmap, (size_t)nth);
     }
diff --git a/src/arch/armv8/armv8-r/mpu.c b/src/arch/armv8/armv8-r/mpu.c
index a43015a90..d47b316b1 100644
--- a/src/arch/armv8/armv8-r/mpu.c
+++ b/src/arch/armv8/armv8-r/mpu.c
@@ -76,7 +76,7 @@ static mpid_t mpu_entry_allocate(void)
 {
     mpid_t reg_num = INVALID_MPID;
     reg_num = (mpid_t)bitmap_find_nth(cpu()->arch.profile.mpu.allocated_entries,
-        MPU_ARCH_MAX_NUM_ENTRIES, 1, 0, false);
+        MPU_ARCH_MAX_NUM_ENTRIES, 1, 0, BITMAP_NOT_SET);
 
     bitmap_set(cpu()->arch.profile.mpu.allocated_entries, reg_num);
 
diff --git a/src/arch/armv8/armv8-r/vmm.c b/src/arch/armv8/armv8-r/vmm.c
index 0b87fb1c1..5e1502f4b 100644
--- a/src/arch/armv8/armv8-r/vmm.c
+++ b/src/arch/armv8/armv8-r/vmm.c
@@ -29,7 +29,8 @@ void vmm_arch_profile_init()
 
         timer_freq = timer_ctl->CNTDIF0;
 
-        mem_unmap(&cpu()->as, (vaddr_t)timer_ctl, sizeof(struct generic_timer_cntctrl), false);
+        mem_unmap(&cpu()->as, (vaddr_t)timer_ctl, sizeof(struct generic_timer_cntctrl),
+            MEM_DONT_FREE_PAGES);
     }
 
     cpu_sync_barrier(&cpu_glb_sync);
diff --git a/src/arch/armv8/inc/arch/vgic.h b/src/arch/armv8/inc/arch/vgic.h
index cc623866c..30c858128 100644
--- a/src/arch/armv8/inc/arch/vgic.h
+++ b/src/arch/armv8/inc/arch/vgic.h
@@ -71,6 +71,9 @@ struct vgic_priv {
     struct vgic_int interrupts[GIC_CPU_PRIV];
 };
 
+#define VGIC_GICR_ACCESS true
+#define VGIC_NO_GICR_ACCESS false
+
 void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp);
 void vgic_cpu_init(struct vcpu* vcpu);
 void vgic_set_hw(struct vm* vm, irqid_t id);
diff --git a/src/arch/armv8/vgic.c b/src/arch/armv8/vgic.c
index 8b96ff29b..37680e71e 100644
--- a/src/arch/armv8/vgic.c
+++ b/src/arch/armv8/vgic.c
@@ -968,7 +968,7 @@ bool vgicd_emul_handler(struct emul_access* acc)
 
     if (vgic_check_reg_alignment(acc, handler_info)) {
         spin_lock(&cpu()->vcpu->vm->arch.vgicd.lock);
-        handler_info->reg_access(acc, handler_info, false, cpu()->vcpu->id);
+        handler_info->reg_access(acc, handler_info, VGIC_NO_GICR_ACCESS, cpu()->vcpu->id);
         spin_unlock(&cpu()->vcpu->vm->arch.vgicd.lock);
         return true;
     } else {
diff --git a/src/arch/armv8/vgicv2.c b/src/arch/armv8/vgicv2.c
index b799dbf92..d78b38995 100644
--- a/src/arch/armv8/vgicv2.c
+++ b/src/arch/armv8/vgicv2.c
@@ -155,7 +155,8 @@ void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
         (vaddr_t)platform.arch.gic.gicv_addr, n);
 
     size_t vgic_int_size = vm->arch.vgicd.int_num * sizeof(struct vgic_int);
-    vm->arch.vgicd.interrupts = mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, false);
+    vm->arch.vgicd.interrupts =
+        mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, MEM_PPAGES_NOT_ALIGNED);
     if (vm->arch.vgicd.interrupts == NULL) {
         ERROR("failed to alloc vgic");
     }
diff --git a/src/arch/armv8/vgicv3.c b/src/arch/armv8/vgicv3.c
index 7d12583f6..46722817b 100644
--- a/src/arch/armv8/vgicv3.c
+++ b/src/arch/armv8/vgicv3.c
@@ -276,7 +276,7 @@ static bool vgicr_emul_handler(struct emul_access* acc)
         struct vcpu* vcpu = vgicr_id == cpu()->vcpu->id ? cpu()->vcpu :
                                                           vm_get_vcpu(cpu()->vcpu->vm, vgicr_id);
         spin_lock(&vcpu->arch.vgic_priv.vgicr.lock);
-        handler_info->reg_access(acc, handler_info, true, vgicr_id);
+        handler_info->reg_access(acc, handler_info, VGIC_GICR_ACCESS, vgicr_id);
         spin_unlock(&vcpu->arch.vgic_priv.vgicr.lock);
         return true;
     } else {
@@ -331,7 +331,8 @@ void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp)
     vm->arch.vgicd.lock = SPINLOCK_INITVAL;
 
     size_t vgic_int_size = vm->arch.vgicd.int_num * sizeof(struct vgic_int);
-    vm->arch.vgicd.interrupts = mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, false);
+    vm->arch.vgicd.interrupts =
+        mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, MEM_PPAGES_NOT_ALIGNED);
     if (vm->arch.vgicd.interrupts == NULL) {
         ERROR("failed to alloc vgic");
     }
diff --git a/src/arch/riscv/iommu.c b/src/arch/riscv/iommu.c
index b3d5b23e7..43845fd4b 100644
--- a/src/arch/riscv/iommu.c
+++ b/src/arch/riscv/iommu.c
@@ -274,7 +274,7 @@ static void rv_iommu_init(void)
 
     // Allocate memory for FQ (aligned to 4kiB)
     vaddr_t fq_vaddr = (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct fq_entry) * FQ_N_ENTRIES),
-        SEC_HYP_GLOBAL, true);
+        SEC_HYP_GLOBAL, MEM_PPAGES_ALIGNED);
     memset((void*)fq_vaddr, 0, sizeof(struct fq_entry) * FQ_N_ENTRIES);
     rv_iommu.hw.fq = (struct fq_entry*)fq_vaddr;
 
@@ -302,7 +302,7 @@ static void rv_iommu_init(void)
 
     // Allocate a page of memory (aligned) for the DDT
     vaddr_t ddt_vaddr = (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct ddt_entry) * DDT_N_ENTRIES),
-        SEC_HYP_GLOBAL, true);
+        SEC_HYP_GLOBAL, MEM_PPAGES_ALIGNED);
     // Clear entries
     memset((void*)ddt_vaddr, 0, sizeof(struct ddt_entry) * DDT_N_ENTRIES);
     rv_iommu.hw.ddt = (struct ddt_entry*)ddt_vaddr;
diff --git a/src/arch/riscv/irqc/aia/imsic.c b/src/arch/riscv/irqc/aia/imsic.c
index 06a9fa53f..591ade15c 100644
--- a/src/arch/riscv/irqc/aia/imsic.c
+++ b/src/arch/riscv/irqc/aia/imsic.c
@@ -105,7 +105,7 @@ irqid_t imsic_allocate_msi(void)
     irqid_t msi_id = INVALID_IRQID;
 
     spin_lock(&msi_alloc_lock);
-    ssize_t bit = bitmap_find_nth(msi_reserved, PLAT_IMSIC_MAX_INTERRUPTS, 1, 0, 0);
+    ssize_t bit = bitmap_find_nth(msi_reserved, PLAT_IMSIC_MAX_INTERRUPTS, 1, 0, BITMAP_NOT_SET);
     if (bit >= 0) {
         msi_id = (irqid_t)bit;
         bitmap_set(msi_reserved, msi_id);
diff --git a/src/core/inc/mem.h b/src/core/inc/mem.h
index 815e9cfa2..5992d6113 100644
--- a/src/core/inc/mem.h
+++ b/src/core/inc/mem.h
@@ -55,6 +55,12 @@ struct shmem {
     spinlock_t lock;
 };
 
+#define MEM_PPAGES_ALIGNED true
+#define MEM_PPAGES_NOT_ALIGNED false
+
+#define MEM_FREE_PAGES true
+#define MEM_DONT_FREE_PAGES false
+
 static inline struct ppages mem_ppages_get(paddr_t base, size_t num_pages)
 {
     return (struct ppages){ .colors = 0, .base = base, .num_pages = num_pages };
diff --git a/src/core/mem.c b/src/core/mem.c
index 36777906a..0590a468a 100644
--- a/src/core/mem.c
+++ b/src/core/mem.c
@@ -88,7 +88,8 @@ bool pp_alloc(struct page_pool* pool, size_t num_pages, bool aligned, struct ppa
      */
     for (size_t i = 0; i < 2 && !ok; i++) {
         while (pool->free != 0) {
-            ssize_t bit = bitmap_find_consec(pool->bitmap, pool->num_pages, curr, num_pages, false);
+            ssize_t bit =
+                bitmap_find_consec(pool->bitmap, pool->num_pages, curr, num_pages, BITMAP_NOT_SET);
 
             if (bit < 0) {
                 /**
diff --git a/src/core/mmu/mem.c b/src/core/mmu/mem.c
index 7baa8f6af..1d1e8fca3 100644
--- a/src/core/mmu/mem.c
+++ b/src/core/mmu/mem.c
@@ -16,6 +16,9 @@
 #include
 #include
 
+#define MEM_SEC_SHARED true
+#define MEM_SEC_NOT_SHARED false
+
 extern uint8_t _image_start, _image_load_end, _image_end, _dmem_phys_beg, _dmem_beg,
     _cpu_private_beg, _cpu_private_end, _vm_beg, _vm_end, _vm_image_start, _vm_image_end;
 
@@ -34,15 +37,16 @@ struct section {
 };
 
 struct section hyp_secs[] = {
-    [SEC_HYP_GLOBAL] = { (vaddr_t)&_dmem_beg, (vaddr_t)&_cpu_private_beg - 1, true,
+    [SEC_HYP_GLOBAL] = { (vaddr_t)&_dmem_beg, (vaddr_t)&_cpu_private_beg - 1, MEM_SEC_SHARED,
         SPINLOCK_INITVAL },
-    [SEC_HYP_IMAGE] = { (vaddr_t)&_image_start, (vaddr_t)&_image_end - 1, true, SPINLOCK_INITVAL },
-    [SEC_HYP_PRIVATE] = { (vaddr_t)&_cpu_private_beg, (vaddr_t)&_cpu_private_end - 1, false,
+    [SEC_HYP_IMAGE] = { (vaddr_t)&_image_start, (vaddr_t)&_image_end - 1, MEM_SEC_SHARED,
         SPINLOCK_INITVAL },
-    [SEC_HYP_VM] = { (vaddr_t)&_vm_beg, (vaddr_t)&_vm_end - 1, true, SPINLOCK_INITVAL },
+    [SEC_HYP_PRIVATE] = { (vaddr_t)&_cpu_private_beg, (vaddr_t)&_cpu_private_end - 1,
+        MEM_SEC_NOT_SHARED, SPINLOCK_INITVAL },
+    [SEC_HYP_VM] = { (vaddr_t)&_vm_beg, (vaddr_t)&_vm_end - 1, MEM_SEC_SHARED, SPINLOCK_INITVAL },
 };
 
-struct section vm_secs[] = { [SEC_VM_ANY] = { 0x0, MAX_VA, false, SPINLOCK_INITVAL } };
+struct section vm_secs[] = { [SEC_VM_ANY] = { 0x0, MAX_VA, MEM_SEC_NOT_SHARED, SPINLOCK_INITVAL } };
 
 struct {
     struct section* sec;
@@ -189,7 +193,8 @@ static inline pte_t* mem_alloc_pt(struct addr_space* as, pte_t* parent, size_t l
 {
     /* Must have lock on as and va section to call */
     size_t ptsize = NUM_PAGES(pt_size(&as->pt, lvl + 1));
-    struct ppages ppage = mem_alloc_ppages(as->colors, ptsize, ptsize > 1 ? true : false);
+    struct ppages ppage = mem_alloc_ppages(as->colors, ptsize,
+        ptsize > 1 ? MEM_PPAGES_ALIGNED : MEM_PPAGES_NOT_ALIGNED);
     if (ppage.num_pages == 0) {
         return NULL;
     }
@@ -504,7 +509,7 @@ static bool mem_map(struct addr_space* as, vaddr_t va, struct ppages* ppages, si
     struct ppages temp_ppages;
 
     if (ppages == NULL && !all_clrs(as->colors)) {
-        temp_ppages = mem_alloc_ppages(as->colors, num_pages, false);
+        temp_ppages = mem_alloc_ppages(as->colors, num_pages, MEM_PPAGES_NOT_ALIGNED);
         if (temp_ppages.num_pages < num_pages) {
             ERROR("failed to alloc colored physical pages");
         }
@@ -547,7 +552,8 @@ static bool mem_map(struct addr_space* as, vaddr_t va, struct ppages* ppages, si
         while ((entry < nentries) && (count < num_pages) &&
             (num_pages - count >= lvlsz / PAGE_SIZE)) {
             if (ppages == NULL) {
-                struct ppages temp = mem_alloc_ppages(as->colors, lvlsz / PAGE_SIZE, true);
+                struct ppages temp =
+                    mem_alloc_ppages(as->colors, lvlsz / PAGE_SIZE, MEM_PPAGES_ALIGNED);
                 if (temp.num_pages < lvlsz / PAGE_SIZE) {
                     if (lvl == (as->pt.dscr->lvls - 1)) {
                         // TODO: free previously allocated pages
@@ -612,7 +618,7 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz
     }
 
     vaddr_t reclrd_va_base = mem_alloc_vpage(&cpu()->as, SEC_HYP_VM, INVALID_VA, reclrd_num);
-    struct ppages reclrd_ppages = mem_alloc_ppages(as->colors, reclrd_num, false);
+    struct ppages reclrd_ppages = mem_alloc_ppages(as->colors, reclrd_num, MEM_PPAGES_NOT_ALIGNED);
     mem_map(&cpu()->as, reclrd_va_base, &reclrd_ppages, reclrd_num, PTE_HYP_FLAGS);
 
     /**
@@ -671,8 +677,8 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz
         .colors = ~as->colors };
     mem_free_ppages(&unused_pages);
 
-    mem_unmap(&cpu()->as, reclrd_va_base, reclrd_num, false);
-    mem_unmap(&cpu()->as, phys_va_base, num_pages, false);
+    mem_unmap(&cpu()->as, reclrd_va_base, reclrd_num, MEM_DONT_FREE_PAGES);
+    mem_unmap(&cpu()->as, phys_va_base, num_pages, MEM_DONT_FREE_PAGES);
 
     return true;
 }
@@ -712,7 +718,7 @@ vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas,
 
 static void* copy_space(void* base, const size_t size, struct ppages* pages)
 {
-    *pages = mem_alloc_ppages(cpu()->as.colors, NUM_PAGES(size), false);
+    *pages = mem_alloc_ppages(cpu()->as.colors, NUM_PAGES(size), MEM_PPAGES_NOT_ALIGNED);
     vaddr_t va = mem_alloc_vpage(&cpu()->as, SEC_HYP_PRIVATE, INVALID_VA, NUM_PAGES(size));
     mem_map(&cpu()->as, va, pages, NUM_PAGES(size), PTE_HYP_FLAGS);
     memcpy((void*)va, base, size);
@@ -873,14 +879,14 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio
     va = mem_alloc_vpage(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, p_image.num_pages);
     mem_map(&cpu()->as, va, &p_image, p_image.num_pages, PTE_HYP_FLAGS);
     memset((void*)va, 0, p_image.num_pages * PAGE_SIZE);
-    mem_unmap(&cpu()->as, va, p_image.num_pages, true);
+    mem_unmap(&cpu()->as, va, p_image.num_pages, MEM_FREE_PAGES);
 
     p_image = mem_ppages_get(load_addr + image_load_size + vm_image_size,
         NUM_PAGES(image_noload_size));
     va = mem_alloc_vpage(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, p_image.num_pages);
     mem_map(&cpu()->as, va, &p_image, p_image.num_pages, PTE_HYP_FLAGS);
     memset((void*)va, 0, p_image.num_pages * PAGE_SIZE);
-    mem_unmap(&cpu()->as, va, p_image.num_pages, true);
+    mem_unmap(&cpu()->as, va, p_image.num_pages, MEM_FREE_PAGES);
 
     p_bitmap = mem_ppages_get(load_addr + image_size + vm_image_size +
             (cpu_boot_size * platform.cpu_num),
@@ -889,7 +895,7 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio
     va = mem_alloc_vpage(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, p_bitmap.num_pages);
     mem_map(&cpu()->as, va, &p_bitmap, p_bitmap.num_pages, PTE_HYP_FLAGS);
     memset((void*)va, 0, p_bitmap.num_pages * PAGE_SIZE);
-    mem_unmap(&cpu()->as, va, p_bitmap.num_pages, true);
+    mem_unmap(&cpu()->as, va, p_bitmap.num_pages, MEM_FREE_PAGES);
     }
 
     p_cpu = mem_ppages_get(load_addr + image_size + vm_image_size + (cpu_boot_size * cpu()->id),
@@ -897,7 +903,7 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio
     va = mem_alloc_vpage(&cpu()->as, SEC_HYP_PRIVATE, INVALID_VA, p_cpu.num_pages);
     mem_map(&cpu()->as, va, &p_cpu, p_cpu.num_pages, PTE_HYP_FLAGS);
     memset((void*)va, 0, p_cpu.num_pages * PAGE_SIZE);
-    mem_unmap(&cpu()->as, va, p_cpu.num_pages, false);
+    mem_unmap(&cpu()->as, va, p_cpu.num_pages, MEM_DONT_FREE_PAGES);
 }
 
 void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt, colormap_t colors)
@@ -911,7 +917,8 @@ void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt
     if (root_pt == NULL) {
         size_t n = NUM_PAGES(pt_size(&as->pt, 0));
         root_pt = (pte_t*)mem_alloc_page(n,
-            type == AS_HYP || type == AS_HYP_CPY ? SEC_HYP_PRIVATE : SEC_HYP_VM, true);
+            type == AS_HYP || type == AS_HYP_CPY ? SEC_HYP_PRIVATE : SEC_HYP_VM,
+            MEM_PPAGES_ALIGNED);
         memset((void*)root_pt, 0, n * PAGE_SIZE);
     }
     as->pt.root = root_pt;
diff --git a/src/core/mpu/mem.c b/src/core/mpu/mem.c
index 73447e391..f8662a3cb 100644
--- a/src/core/mpu/mem.c
+++ b/src/core/mpu/mem.c
@@ -13,6 +13,12 @@
 #include
 #include
 
+#define MEM_BROADCAST true
+#define MEM_DONT_BROADCAST false
+
+#define MEM_LOCKED true
+#define MEM_UNLOCKED false
+
 struct shared_region {
     enum AS_TYPE as_type;
     asid_t asid;
@@ -174,7 +180,7 @@ static void mem_init_boot_regions(void)
 #endif
         .as_sec = SEC_HYP_IMAGE,
     };
-    mem_map(&cpu()->as, &mpr, false, true);
+    mem_map(&cpu()->as, &mpr, MEM_DONT_BROADCAST, MEM_LOCKED);
 
     if (separate_noload_region) {
         mpr = (struct mp_region){
@@ -188,7 +194,7 @@ static void mem_init_boot_regions(void)
             .mem_flags = PTE_HYP_FLAGS,
             .as_sec = SEC_HYP_IMAGE,
         };
-        mem_map(&cpu()->as, &mpr, false, true);
+        mem_map(&cpu()->as, &mpr, MEM_DONT_BROADCAST, MEM_LOCKED);
     }
 
     mpr = (struct mp_region){
@@ -197,7 +203,7 @@ static void mem_init_boot_regions(void)
         .mem_flags = PTE_HYP_FLAGS,
         .as_sec = SEC_HYP_PRIVATE,
     };
-    mem_map(&cpu()->as, &mpr, false, true);
+    mem_map(&cpu()->as, &mpr, MEM_DONT_BROADCAST, MEM_LOCKED);
 }
 
 void mem_prot_init()
@@ -377,7 +383,7 @@ static bool mem_vmpu_remove_region(struct addr_space* as, mpid_t mpid, bool broa
 static void mem_handle_broadcast_insert(struct addr_space* as, struct mp_region* mpr, bool locked)
 {
     if (as->type == AS_HYP) {
-        mem_map(&cpu()->as, mpr, false, locked);
+        mem_map(&cpu()->as, mpr, MEM_DONT_BROADCAST, locked);
     } else {
         mpu_map(as, mpr, locked);
     }
@@ -391,7 +397,7 @@ static void mem_handle_broadcast_remove(struct addr_space* as, struct mp_region*
            During the handle of a broadcast we don't want that, to avoid a chain of
            broadcasts */
-        mem_unmap_range(&cpu()->as, mpr->base, mpr->size, false);
+        mem_unmap_range(&cpu()->as, mpr->base, mpr->size, MEM_DONT_BROADCAST);
     } else {
         mpu_unmap(as, mpr);
     }
 }
@@ -415,7 +421,7 @@ static bool mem_update(struct addr_space* as, struct mp_region* mpr, bool broadc
 static void mem_handle_broadcast_update(struct addr_space* as, struct mp_region* mpr, bool locked)
 {
     if (as->type == AS_HYP) {
-        mem_update(&cpu()->as, mpr, false, locked);
+        mem_update(&cpu()->as, mpr, MEM_DONT_BROADCAST, locked);
     } else {
         mpu_update(as, mpr);
     }
@@ -616,7 +622,7 @@ bool mem_unmap_range(struct addr_space* as, vaddr_t vaddr, size_t size, bool bro
 
 void mem_unmap(struct addr_space* as, vaddr_t at, size_t num_pages, bool free_ppages)
 {
-    if (mem_unmap_range(as, at, num_pages * PAGE_SIZE, true) && free_ppages) {
+    if (mem_unmap_range(as, at, num_pages * PAGE_SIZE, MEM_BROADCAST) && free_ppages) {
         struct ppages ppages = mem_ppages_get(at, num_pages);
         mem_free_ppages(&ppages);
     }
@@ -647,8 +653,8 @@ vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas,
         va_res = INVALID_VA;
     } else {
         mpr.size = num_pages * PAGE_SIZE;
-        bool broadcast = mem_broadcast(asd, &mpr, true);
-        if (mem_map(asd, &mpr, broadcast, false)) {
+        bool broadcast = mem_broadcast(asd, &mpr, MEM_BROADCAST);
+        if (mem_map(asd, &mpr, broadcast, MEM_UNLOCKED)) {
             va_res = vas;
         } else {
             INFO("failed mem map on mem map cpy");
@@ -701,7 +707,7 @@ vaddr_t mem_alloc_map(struct addr_space* as, as_sec_t section, struct ppages* pp
         .mem_flags = flags,
     };
 
-    mem_map(as, &mpr, true, false);
+    mem_map(as, &mpr, MEM_BROADCAST, MEM_UNLOCKED);
 
     return at;
 }
diff --git a/src/core/objpool.c b/src/core/objpool.c
index f29d45fb1..c2f0e8522 100644
--- a/src/core/objpool.c
+++ b/src/core/objpool.c
@@ -17,7 +17,7 @@ void* objpool_alloc_with_id(struct objpool* objpool, objpool_id_t* id)
 {
     void* obj = NULL;
     spin_lock(&objpool->lock);
-    ssize_t n = bitmap_find_nth(objpool->bitmap, objpool->num, 1, 0, false);
+    ssize_t n = bitmap_find_nth(objpool->bitmap, objpool->num, 1, 0, BITMAP_NOT_SET);
     if (n >= 0) {
         bitmap_set(objpool->bitmap, (size_t)n);
         obj = (void*)((uintptr_t)objpool->pool + (objpool->objsize * (size_t)n));
diff --git a/src/core/shmem.c b/src/core/shmem.c
index 482cf96eb..a0241ffc9 100644
--- a/src/core/shmem.c
+++ b/src/core/shmem.c
@@ -16,7 +16,7 @@ static void shmem_alloc(void)
         shmem->lock = SPINLOCK_INITVAL;
         if (!shmem->place_phys) {
             size_t n_pg = NUM_PAGES(shmem->size);
-            struct ppages ppages = mem_alloc_ppages(shmem->colors, n_pg, false);
+            struct ppages ppages = mem_alloc_ppages(shmem->colors, n_pg, MEM_PPAGES_NOT_ALIGNED);
             if (ppages.num_pages < n_pg) {
                 ERROR("failed to allocate shared memory");
             }
diff --git a/src/core/vm.c b/src/core/vm.c
index fed31e1c3..becce21b3 100644
--- a/src/core/vm.c
+++ b/src/core/vm.c
@@ -135,8 +135,8 @@ static void vm_install_image(struct vm* vm, struct vm_mem_region* reg)
         mem_map_cpy(&vm->as, &cpu()->as, vm->config->image.base_addr, INVALID_VA, img_num_pages);
     memcpy((void*)dst_va, (void*)src_va, vm->config->image.size);
     cache_flush_range((vaddr_t)dst_va, vm->config->image.size);
-    mem_unmap(&cpu()->as, src_va, img_num_pages, false);
-    mem_unmap(&cpu()->as, dst_va, img_num_pages, false);
+    mem_unmap(&cpu()->as, src_va, img_num_pages, MEM_DONT_FREE_PAGES);
+    mem_unmap(&cpu()->as, dst_va, img_num_pages, MEM_DONT_FREE_PAGES);
 }
 
 static void vm_map_img_rgn(struct vm* vm, const struct vm_config* vm_config,
diff --git a/src/core/vmm.c b/src/core/vmm.c
index f607cb090..515bc2539 100644
--- a/src/core/vmm.c
+++ b/src/core/vmm.c
@@ -90,7 +90,7 @@ static bool vmm_alloc_vm(struct vm_allocation* vm_alloc, struct vm_config* vm_co
     total_size = vcpus_offset + (vm_config->platform.cpu_num * sizeof(struct vcpu));
     total_size = ALIGN(total_size, PAGE_SIZE);
 
-    void* allocation = mem_alloc_page(NUM_PAGES(total_size), SEC_HYP_VM, false);
+    void* allocation = mem_alloc_page(NUM_PAGES(total_size), SEC_HYP_VM, MEM_PPAGES_NOT_ALIGNED);
     if (allocation == NULL) {
         return false;
     }
diff --git a/src/lib/inc/bitmap.h b/src/lib/inc/bitmap.h
index 3c5aaaaa7..b0e8933b1 100644
--- a/src/lib/inc/bitmap.h
+++ b/src/lib/inc/bitmap.h
@@ -27,6 +27,9 @@ static const bitmap_granule_t ONE = 1;
 
 #define BITMAP_ALLOC_ARRAY(NAME, SIZE, NUM) bitmap_granule_t NAME[NUM][BITMAP_SIZE_IN_GRANULE(SIZE)]
 
+#define BITMAP_SET true
+#define BITMAP_NOT_SET false
+
 static inline void bitmap_set(bitmap_t* map, size_t bit)
 {
     map[bit / BITMAP_GRANULE_LEN] |= ONE << (bit % BITMAP_GRANULE_LEN);
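
The refactor above is mechanical: every bare true/false (and one bare 0) passed as a boolean argument is replaced by a named constant that states which parameter is being set, with no change in the generated code. Below is a minimal, self-contained sketch of the pattern; the find_first() helper and the sample bitmap are illustrative stand-ins, not Bao's bitmap_find_nth() implementation.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Named constants expand to plain bool literals, so they cost nothing;
     * the benefit is entirely at the call site, which now names the
     * parameter it is setting. */
    #define BITMAP_SET true
    #define BITMAP_NOT_SET false

    /* Toy stand-in for bitmap_find_nth(): returns the index of the first
     * bit whose value matches 'set', or -1 if no such bit exists. */
    static int find_first(const unsigned char* map, size_t len, bool set)
    {
        for (size_t i = 0; i < len * 8; i++) {
            bool bit = ((map[i / 8] >> (i % 8)) & 1) != 0;
            if (bit == set) {
                return (int)i;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned char map[2] = { 0x0f, 0x00 }; /* bits 0..3 set, 4..15 clear */

        /* A bare 'false' here would force the reader to recall the parameter
         * order; the named constant reads unambiguously at a glance. */
        int used_idx = find_first(map, sizeof(map), BITMAP_SET);     /* -> 0 */
        int free_idx = find_first(map, sizeof(map), BITMAP_NOT_SET); /* -> 4 */

        printf("first used bit: %d, first free bit: %d\n", used_idx, free_idx);
        return 0;
    }

The same reasoning applies to the MEM_* and VGIC_* constants introduced in mem.h, mmu/mem.c, mpu/mem.c, and vgic.h: a call such as mem_unmap(..., MEM_DONT_FREE_PAGES) documents itself, where mem_unmap(..., false) did not.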