23 changes: 23 additions & 0 deletions src/arch/armv8/aarch32/exceptions.S
@@ -50,11 +50,34 @@
push {r0-r12}
SAVE_ELR_SPSR

#ifdef MEM_PROT_MPU
mrc p15, 4, r0, c13, c0, 2 // Read HTPIDR (CPU base address)
add r0, r0, #CPU_AS_ARCH_MASK_OFF
ldr r0, [r0]
mcr p15, 4, r0, c6, c1, 1
#endif /* MEM_PROT_MPU */

.endm

.macro VM_ENTRY

mrc p15, 4, r0, c13, c0, 2 // Read HTPIDR (CPU base address)

#ifdef MEM_PROT_MPU
ldr r1, [r0, #CPU_VCPU_OFF]
mov r2, #VCPU_VM_OFF
add r1, r1, r2
ldr r1, [r1]
ldr r1, [r1, #VM_AS_ARCH_MASK_OFF]

mov r2, #CPU_ARCH_PROFILE_MPU_LOCKED_OFF
add r2, r2, r0
ldr r2, [r2]

orr r1, r1, r2
mcr p15, 4, r1, c6, c1, 1
#endif /* MEM_PROT_MPU */

ldr r0, [r0, #CPU_VCPU_OFF]
add r0, r0, #VCPU_REGS_OFF
mov sp, r0
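The two macros above keep the region-enable register in sync with the running context: VM_EXIT restores the hypervisor's own mask, while VM_ENTRY ORs the VM's address-space mask with the hypervisor's locked entries so that hypervisor-critical regions stay enabled while the guest runs. A minimal C sketch of that composition, assuming stand-in struct layouts that mirror the generated *_OFF offsets (the real Bao structures differ in detail):

    /* Assumed stand-ins for the structures the *_OFF constants index into. */
    struct addr_space_arch { unsigned long mpu_entry_mask; };
    struct vm   { struct { struct addr_space_arch arch; } as; };
    struct vcpu { struct vm* vm; };
    struct cpu  {
        struct vcpu* vcpu;
        unsigned long mpu_locked_entries; /* stand-in for the locked bitmap */
    };

    /* Region-enable mask written to HPRENR (aarch32) / PRENR_EL2 (aarch64)
     * on VM entry: the VM's own regions plus every hypervisor-locked one. */
    static unsigned long vm_entry_mpu_mask(const struct cpu* cpu)
    {
        return cpu->vcpu->vm->as.arch.mpu_entry_mask | cpu->mpu_locked_entries;
    }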
1 change: 1 addition & 0 deletions src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h
@@ -115,6 +115,7 @@ SYSREG_GEN_ACCESSORS(ich_hcr_el2, 4, c12, c11, 0)
SYSREG_GEN_ACCESSORS_64(icc_sgi1r_el1, 0, c12)

SYSREG_GEN_ACCESSORS(vsctlr_el2, 4, c2, c0, 0)
SYSREG_GEN_ACCESSORS(sctlr_el2, 4, c1, c0, 0)

#define SYSREG_GEN_GIC_LR(n, crn1, crn2, op2) \
SYSREG_GEN_ACCESSORS(ich_lr##n, 4, c12, crn1, op2) \
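SYSREG_GEN_ACCESSORS presumably expands each entry into inline MRC/MCR wrappers. A hedged sketch of what the new sctlr_el2 line generates, using the HSCTLR encoding (op1=4, c1, c0, op2=0) from its arguments; the exact macro body in Bao may differ:

    static inline unsigned long sysreg_sctlr_el2_read(void)
    {
        unsigned long val;
        __asm__ volatile("mrc p15, 4, %0, c1, c0, 0" : "=r"(val));
        return val;
    }

    static inline void sysreg_sctlr_el2_write(unsigned long val)
    {
        __asm__ volatile("mcr p15, 4, %0, c1, c0, 0" : : "r"(val));
    }

With this accessor (mirrored for aarch64 below), C code can read-modify-write SCTLR itself, which is what allows the boot code further down to stop enabling the MPU in assembly.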
24 changes: 24 additions & 0 deletions src/arch/armv8/aarch64/exceptions.S
@@ -64,11 +64,35 @@
add x0, x0, x1
mov sp, x0

#ifdef MEM_PROT_MPU
mrs x0, tpidr_el2
add x0, x0, #CPU_AS_ARCH_MASK_OFF
ldr x0, [x0]
msr prenr_el2, x0
#endif /* MEM_PROT_MPU */

.endm

.global vcpu_arch_entry
vcpu_arch_entry:
mrs x0, tpidr_el2

#ifdef MEM_PROT_MPU
ldr x1, [x0, #CPU_VCPU_OFF]
mov x2, #VCPU_VM_OFF
add x1, x1, x2
ldr x1, [x1]
ldr x1, [x1, #VM_AS_ARCH_MASK_OFF]

mov x2, #CPU_ARCH_PROFILE_MPU_LOCKED_OFF
add x2, x2, x0
ldr x2, [x2]

orr x1, x1, x2

msr prenr_el2, x1
#endif /* MEM_PROT_MPU */

ldr x0, [x0, #CPU_VCPU_OFF]
add x0, x0, #VCPU_REGS_OFF
mov sp, x0
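PRENR_EL2 is a plain bitmask in which bit n enables MPU region n, so swapping the whole active region set costs a single MSR instead of reprogramming regions one at a time. A worked example with made-up masks:

    #include <assert.h>

    int main(void)
    {
        /* Hypothetical layout: regions 0-1 locked for the hypervisor,
         * regions 3-4 owned by the current VM's address space. */
        unsigned long locked  = 0x03; /* 0b00011 */
        unsigned long vm_mask = 0x18; /* 0b11000 */
        /* One register write enables regions 0, 1, 3 and 4 together. */
        assert((vm_mask | locked) == 0x1b);
        return 0;
    }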
1 change: 1 addition & 0 deletions src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h
@@ -87,6 +87,7 @@ SYSREG_GEN_ACCESSORS(vttbr_el2)
SYSREG_GEN_ACCESSORS(id_aa64mmfr0_el1)
SYSREG_GEN_ACCESSORS(tpidr_el2)
SYSREG_GEN_ACCESSORS(vsctlr_el2)
SYSREG_GEN_ACCESSORS(sctlr_el2)
SYSREG_GEN_ACCESSORS(mpuir_el2)
SYSREG_GEN_ACCESSORS(prselr_el2)
SYSREG_GEN_ACCESSORS(prbar_el2)
62 changes: 1 addition & 61 deletions src/arch/armv8/armv8-r/aarch32/boot.S
@@ -50,68 +50,8 @@ boot_arch_profile_init:
/* r4 contains the id of the MPU entry being used */
mov r4, #(-1)

/**
* Map loadable image (and possibly non-loadable)
* If the vm image section is used and has built-in vm images, we need to map the loadable and
* non-loadable regions of the image separately. Otherwise we can map it as a single region.
*/
add r4, r4, #1
mcr p15, 4, r4, c6, c2, 1 // HPRSELR
ldr r3, =_image_start
and r3, r3, #PRBAR_BASE_MSK
orr r3, r3, #PRBAR_SH_IS
orr r3, r3, #PRBAR_AP_RW_EL2
mcr p15, 4, r3, c6, c3, 0 // HPRBAR
ldr r10, =_image_load_end
ldr r11, =_image_noload_start
cmp r10, r11
ldreq r3, =_image_end
ldrne r3, =_image_load_end
sub r3, r3, #1
and r3, r3, #PRLAR_LIMIT_MSK
orr r3, r3, #(PRLAR_ATTR(1) | PRLAR_EN)
mcr p15, 4, r3, c6, c3, 1 // HPRLAR

/* Map Image Non-loadable if needed */
ldr r10, =_image_load_end
ldr r11, =_image_noload_start
cmp r10, r11
beq skip_non_loadable
add r4, r4, #1
mcr p15, 4, r4, c6, c2, 1 // HPRSELR
ldr r3, =_image_noload_start
and r3, r3, #PRBAR_BASE_MSK
orr r3, r3, #PRBAR_SH_IS
orr r3, r3, #PRBAR_AP_RW_EL2
mcr p15, 4, r3, c6, c3, 0 // HPRBAR
ldr r3, =_image_end
sub r3, r3, #1
and r3, r3, #PRLAR_LIMIT_MSK
orr r3, r3, #(PRLAR_ATTR(1) | PRLAR_EN)
mcr p15, 4, r3, c6, c3, 1 // HPRLAR

skip_non_loadable:

/* Region 2 - CPU */
add r4, r4, #1
mcr p15, 4, r4, c6, c2, 1 // HPRSELR
mrc p15, 4, r3, c13, c0, 2 // HTPIDR (read CPU base addr)
and r3, r3, #PRBAR_BASE_MSK
orr r3, r3, #PRBAR_SH_IS
orr r3, r3, #PRBAR_AP_RW_EL2
mcr p15, 4, r3, c6, c3, 0 // HPRBAR
mrc p15, 4, r3, c13, c0, 2 // HTPIDR (read CPU base addr)
add r3, r3, #CPU_SIZE
sub r3, r3, #1
and r3, r3, #PRLAR_LIMIT_MSK
orr r3, #(PRLAR_ATTR(1) | PRLAR_EN)
mcr p15, 4, r3, c6, c3, 1 // HPRLAR

dsb
isb

/* Enable caches and MPU */
- ldr r4, =(SCTLR_RES1_AARCH32 | SCTLR_C | SCTLR_I | SCTLR_M)
+ ldr r4, =(SCTLR_RES1_AARCH32 | SCTLR_C | SCTLR_I)
mcr p15, 4, r4, c1, c0, 0 // HSCTLR

dsb
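The deleted block was the boot-time, hand-rolled MPU setup: it selected entries through HPRSELR and programmed HPRBAR/HPRLAR pairs for the hypervisor image (split into loadable and non-loadable parts when built-in VM images require it) and for the per-CPU structure, then enabled the MPU through SCTLR.M. That work presumably moves into the C MPU driver, so only the cache enables remain here; a sketch of the deferred MPU enable follows the matching aarch64 hunk below.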
68 changes: 1 addition & 67 deletions src/arch/armv8/armv8-r/aarch64/boot.S
@@ -45,76 +45,10 @@ boot_arch_profile_init:
ldr x3, =MAIR_EL2_DFLT
msr MAIR_EL2, x3

/* x4 contains the id of the MPU entry being used */
mov x4, 0

/**
* Map loadable image (and possibly non-loadable)
* If the vm image section is used and has built-in vm images, we need to map the loadable and
* non-loadable regions of the image separately. Otherwise we can map it as a single region.
*/
msr prselr_el2, x4
isb
ldr x3, =_image_start
and x3, x3, PRBAR_BASE_MSK
orr x3, x3, (PRBAR_SH_IS | PRBAR_AP_RW_EL2)
msr prbar_el2, x3
ldr x10, =_image_load_end
ldr x11, =_image_noload_start
cmp x10, x11
bne 1f
ldr x3, =_image_end
b 2f
1:
ldr x3, =_image_load_end
2:
sub x3, x3, 1
and x3, x3, PRLAR_LIMIT_MSK
orr x3, x3, (PRLAR_ATTR(1) | PRLAR_EN)
msr prlar_el2, x3

/* Map Image Non-loadable if needed */
ldr x10, =_image_load_end
ldr x11, =_image_noload_start
cmp x10, x11
beq skip_non_loadable

add x4, x4, 1
msr prselr_el2, x4
ldr x3, =_image_noload_start
and x3, x3, PRBAR_BASE_MSK
orr x3, x3, PRBAR_SH_IS
add x3, x3, PRBAR_AP_RW_EL2
msr prbar_el2, x3
isb
ldr x3, =_image_end
sub x3, x3, 1
and x3, x3, PRLAR_LIMIT_MSK
orr x3, x3, (PRLAR_ATTR(1) | PRLAR_EN)
msr prlar_el2, x3

skip_non_loadable:

/* Region 2 - CPU */
add x4, x4, 1
msr prselr_el2, x4
isb
mrs x3, tpidr_el2
and x3, x3, PRBAR_BASE_MSK
orr x3, x3, (PRBAR_SH_IS | PRBAR_AP_RW_EL2)
msr prbar_el2, x3
mrs x3, tpidr_el2
ldr x5, =CPU_SIZE
add x3, x3, x5
sub x3, x3, 1
and x3, x3, PRLAR_LIMIT_MSK
orr x3, x3, (PRLAR_ATTR(1) | PRLAR_EN)
msr prlar_el2, x3

isb

/* Enable caches and MPU */
- ldr x4, =(SCTLR_RES1 | SCTLR_C | SCTLR_I | SCTLR_M)
+ ldr x4, =(SCTLR_RES1 | SCTLR_C | SCTLR_I)
msr sctlr_el2, x4

dsb nsh
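A hedged sketch of the MPU enable that boot.S no longer performs, under the assumption that a C-side driver now programs the regions first; mpu_init(), SCTLR_M and ISB() are names assumed from context rather than confirmed API:

    void mpu_enable(void)
    {
        mpu_init(); /* hypothetical: program the PRBAR/PRLAR regions from C */
        /* Turn the MPU on only after the regions are in place, using the
         * accessor added in sysregs.h above. */
        sysreg_sctlr_el2_write(sysreg_sctlr_el2_read() | SCTLR_M);
        ISB(); /* make the new memory protection regime visible */
    }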
4 changes: 4 additions & 0 deletions src/arch/armv8/armv8-r/inc/arch/mem.h
@@ -36,6 +36,10 @@ typedef union {
};
} mem_flags_t;

struct addr_space_arch {
unsigned long mpu_entry_mask;
};

#define PTE_FLAGS(_prbar, _prlar) \
((mem_flags_t){ \
.prbar = (_prbar), \
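Each address space now carries the bitmask of MPU entries it owns, which is exactly the value the exception vectors above load and write into HPRENR/PRENR_EL2. A sketch of how the mask could be maintained as entries are granted to or reclaimed from an address space; mem_set_mpu_entry and mem_clear_mpu_entry are hypothetical helpers, not Bao API:

    static void mem_set_mpu_entry(struct addr_space* as, unsigned long entry)
    {
        as->arch.mpu_entry_mask |= (1UL << entry);
    }

    static void mem_clear_mpu_entry(struct addr_space* as, unsigned long entry)
    {
        as->arch.mpu_entry_mask &= ~(1UL << entry);
    }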
21 changes: 21 additions & 0 deletions src/arch/armv8/armv8-r/inc/arch/mpu.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
/**
* SPDX-License-Identifier: Apache-2.0
* Copyright (c) Bao Project and Contributors. All rights reserved.
*/

#ifndef __ARCH_MPU_H__
#define __ARCH_MPU_H__

#include <bao.h>
#include <arch/sysregs.h>
#include <bitmap.h>
#include <mem.h>

struct mpu_arch {
BITMAP_ALLOC(allocated_entries, MPU_ARCH_MAX_NUM_ENTRIES);
BITMAP_ALLOC(locked_entries, MPU_ARCH_MAX_NUM_ENTRIES);
};

bool mpu_perms_compatible(unsigned long perms1, unsigned long perms2);

#endif /* __ARCH_MPU_H__ */
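struct mpu_arch centralizes the per-CPU MPU bookkeeping that previously lived inside cpu_arch_profile: allocated_entries tracks which of the MPU_ARCH_MAX_NUM_ENTRIES hardware regions are in use, while locked_entries marks those that may never be evicted; these are the bits OR'd into the enable mask on every VM entry in the exception vectors above. mpu_perms_compatible presumably decides whether two permission sets can coexist in a single region, for instance when merging overlapping mappings.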
24 changes: 2 additions & 22 deletions src/arch/armv8/armv8-r/inc/arch/profile/cpu.h
@@ -8,34 +8,14 @@

#include <bao.h>
#include <arch/sysregs.h>
#include <arch/mpu.h>
#include <bitmap.h>
#include <list.h>
#include <mem.h>
#include <list.h>

struct cpu_arch_profile {
struct {
BITMAP_ALLOC(bitmap, MPU_ARCH_MAX_NUM_ENTRIES);
/**
* A locked region means that it can never be removed from the MPU. For example, the
* regions covering the hypervisor image and per-CPU structures must stay mapped at
* all times.
*/
BITMAP_ALLOC(locked, MPU_ARCH_MAX_NUM_ENTRIES);
struct mpu_perms {
perms_t el2;
perms_t el1;
} perms[MPU_ARCH_MAX_NUM_ENTRIES];
/**
* We maintain an ordered list of the regions currently in the MPU to simplify the merging
* algorithm when mapping an overlapping region.
*/
struct {
struct list list;
struct mpu_node {
node_t node;
mpid_t mpid;
} node[MPU_ARCH_MAX_NUM_ENTRIES];
} order;
} mpu;
struct mpu_arch mpu;
};

static inline struct cpu* cpu(void)
2 changes: 1 addition & 1 deletion src/arch/armv8/armv8-r/mem.c
@@ -7,5 +7,5 @@

void as_arch_init(struct addr_space* as)
{
- UNUSED_ARG(as);
+ as->arch.mpu_entry_mask = 0;
}
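Zeroing mpu_entry_mask (rather than ignoring the argument) means a freshly initialized address space starts with no MPU entries enabled; bits are presumably set as the MPU driver allocates regions for it, along the lines of the helpers sketched after mem.h above.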