diff --git a/Makefile b/Makefile index 7b8eac42c..695ef43cd 100644 --- a/Makefile +++ b/Makefile @@ -204,8 +204,13 @@ endif ifeq ($(plat_mem),non_unified) build_macros+=-DMEM_NON_UNIFIED endif -ifeq ($(phys_irqs_only),y) - build_macros+=-DPHYS_IRQS_ONLY + +ifeq ($(mmio_slave_side_prot),y) + build_macros+=-DMMIO_SLAVE_SIDE_PROT + + ifneq ($(arch_mem_prot),mpu) + $(error mmio_slave_side_prot=y requires arch_mem_prot=mpu) + endif endif ifeq ($(CC_IS_GCC),y) @@ -240,7 +245,8 @@ ifeq ($(CC_IS_GCC),y) -Wmissing-prototypes -Wmissing-declarations \ -Wswitch-default -Wshadow -Wshadow=global \ -Wcast-qual -Wunused-macros \ - -Wstrict-prototypes -Wunused-but-set-variable + -Wstrict-prototypes -Wunused-but-set-variable \ + -Wno-multistatement-macros override CFLAGS+=-Wno-unused-command-line-argument \ -pedantic -pedantic-errors diff --git a/scripts/arch/tricore/platform_defs_gen.c b/scripts/arch/tricore/platform_defs_gen.c new file mode 100644 index 000000000..54e685f81 --- /dev/null +++ b/scripts/arch/tricore/platform_defs_gen.c @@ -0,0 +1,36 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. 
All rights reserved + */ + +#include +#include + +extern uint32_t plat_ints[]; +extern uint32_t plat_int_size; + +void arch_platform_defs() { + unsigned int bitmap[64] = {0}; + unsigned int count = plat_int_size; + + for(int i = 0; i < count; i++){ + unsigned int irq = plat_ints[i]; + unsigned int index = irq / 32; + unsigned int bit = irq % 32; + + if(index < 64 && bit < 32) + bitmap[index] |= 1UL << bit; + } + + printf ("#define INTERRUPTS_COUNT %d\n", count); + printf("#define INTERRUPTS_BITMAP {\t"); + for(int i = 0; i < 64; i++) + { + if(i && i % 4 == 0) + printf(" \\\n\t\t\t\t\t\t"); + if(i != 63) + printf("0x%x, ", bitmap[i]); + else printf("0x%x }\n", bitmap[i]); + } + +} diff --git a/src/arch/tricore/arch.mk b/src/arch/tricore/arch.mk new file mode 100644 index 000000000..124e46447 --- /dev/null +++ b/src/arch/tricore/arch.mk @@ -0,0 +1,16 @@ +## SPDX-License-Identifier: Apache-2.0 +## Copyright (c) Bao Project and Contributors. All rights reserved. + +CROSS_COMPILE ?= tricore-elf- + +clang_arch_target:=tricore-unknown-unknown-elf + +arch-cppflags+= +#arch-cflags=-march=tc18 +#arch-asflags=-march=tc18 +arch-ldflags= + +arch_mem_prot:=mpu +plat_mem:=non_unified +PAGE_SIZE:=64 +mmio_slave_side_prot:=y \ No newline at end of file diff --git a/src/arch/tricore/asm_defs.c b/src/arch/tricore/asm_defs.c new file mode 100644 index 000000000..e6195bb86 --- /dev/null +++ b/src/arch/tricore/asm_defs.c @@ -0,0 +1,38 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#include +#include +#include +#include + +__attribute__((used)) static void cpu_defines(void) +{ + DEFINE_SIZE(CPU_SIZE, struct cpu); + + DEFINE_OFFSET(CPU_STACK_OFF, struct cpu, stack); + DEFINE_SIZE(CPU_STACK_SIZE, ((struct cpu*)NULL)->stack); + + DEFINE_OFFSET(CPU_VCPU_OFF, struct cpu, vcpu); +} + +__attribute__((used)) static void vcpu_defines(void) +{ + DEFINE_SIZE(VCPU_ARCH_SIZE, struct vcpu_arch); + DEFINE_OFFSET(VCPU_REGS_OFF, struct vcpu, regs); + DEFINE_OFFSET(VCPU_VM_OFF, struct vcpu, vm); + DEFINE_OFFSET(VCPU_REGS_LOWER_CTX_OFF, struct vcpu, regs.lower_ctx); + DEFINE_OFFSET(REGS_A0_OFF, struct arch_regs, a0); + DEFINE_OFFSET(REGS_A1_OFF, struct arch_regs, a1); + DEFINE_OFFSET(REGS_A8_OFF, struct arch_regs, a8); + DEFINE_OFFSET(REGS_A9_OFF, struct arch_regs, a9); + DEFINE_SIZE(VCPU_REGS_SIZE, struct arch_regs); +} + +__attribute__((used)) static void platform_defines(void) +{ + DEFINE_OFFSET(PLAT_CPUNUM_OFF, struct platform, cpu_num); + DEFINE_OFFSET(PLAT_ARCH_OFF, struct platform, arch); +} diff --git a/src/arch/tricore/boot.S b/src/arch/tricore/boot.S new file mode 100644 index 000000000..14ad0532b --- /dev/null +++ b/src/arch/tricore/boot.S @@ -0,0 +1,443 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +.macro get_phys_addr rd, ra, label + movh \rd, hi:\label + mov \rd, lo:\label + + movh \ra, hi:BAO_VAS_BASE + mov \ra, lo:BAO_VAS_BASE + + sub \rd, \rd, \ra + add \rd, \rd, %d11 +.endm + +.macro get_label_addr rd, label + movh \rd, hi:\label + addi \rd, \rd, lo:\label +.endm + +.data +.balign 4 +/** + * barrier is used to minimal synchronization in boot - other cores wait for bsp to set it. + */ +_barrier: .4byte 0 + +/** + * The following code MUST be at the base of the image, as this is bao's entry point. Therefore + * .boot section must also be the first in the linker script. 
DO NOT implement any code before the + * _reset_handler in this section. + */ + .section ".boot", "ax" +.globl _reset_handler +_reset_handler: + + /** + * The following registers are however reserved to be passed to main + * as arguments, these are calculated at the end: + * d0 -> contains cpu id + * d1 -> contains image base load address + * Internal logic: + * Register d14 is reserved to indicate if the current CPU is master (negated) + * a14 -> pointer to cpu struct + * Intended ABI: + * a12-a13 -> sub-routine use + * d10-d13 -> sub-routine use + * d2-d7 -> main flow + * a2-a7 -> main flow + */ + + /* Read core ID */ + mfcr %d0,$core_id + and %d0,%d0,7 + + + /* + * Install vector table physical address early, in case exception occurs during this + * initialization. + */ + + get_label_addr %d3, _trap_vector_table + mtcr $btv,%d3 + + get_label_addr %d3, _hyp_vector_table + mtcr 0xb010,%d3 + + get_label_addr %d3, _irq_vector + mtcr $biv,%d3 + isync + + /* Force MPU disable */ + mov %d3, 0 + mtcr 0xFE14, %d3 + + jl platform_init_mem + isync + + /* Setting r9 should if set !is_cpu_master */ + mov %d3, CPU_MASTER_FIXED + ne %d14, %d0, %d3 + jeq %d14, 1, 1f + + get_label_addr %d3, CPU_MASTER + mov.a %a4, %d3 + st.w [%a4], %d0 +1: + jl disable_watchdogs + isync + + /** + * TODO: bring the system to a well known state. This includes disabling the MPU, all caches, + * BP and others, and invalidating them. 
+ */ + + /* Clear stack pointer to avoid unaligned SP exceptions during boot */ + mov %d3, 0 + mov.a %sp, %d3 + + /* Invalidate Caches */ + mov %d3, 1 // invalidate data cache + mtcr csfr_dcon1, %d3 + + mov %d3, 3 // TODO invalidate program cache and program buffer */ + mtcr csfr_pcon1, %d3 + isync + + /** subarch specific **/ + /* CPU physical based address */ + get_label_addr %d3, _dmem_beg + + /* CPU_X physical base address */ + mov %d4, CPU_SIZE + madd %d3, %d3, %d0, %d4 + + mov.a %a8, %d3 + + /* Clear the CPU struct */ + mov.a %a12, %d3 //start of CPU struct + add %d3, %d4 //end of CPU struct + mov.a %a13, %d3 + + mov.aa %a14, %a8 /* a14 will contain pointer to CPU struct */ + +#ifdef MEM_NON_UNIFIED + jne %d14, 0, 1f + /* Copy data from RX to RWX */ + get_label_addr %d3, _data_lma_start // LMA start + get_label_addr %d4, _data_vma_start // VMA start + get_label_addr %d5, _image_load_end // LMA end + jl copy_data +#endif + /* disable memory protection and other properties... */ + mov %d3, 0 + mtcr csfr_corecon, %d3 + + isync + + + /** + * Get base image load address. 
+ */ + get_label_addr %d1, _reset_handler + get_label_addr %d2, img_addr + mov.a %a4, %d2 + st.w [%a4], %d1 + + get_label_addr %d1, _data_vma_start + get_label_addr %d2, data_addr + mov.a %a4, %d2 + st.w [%a4], %d1 + +1: + isync + + mov %d3, 0 + /* make sure no region can be accessed, executed */ + mtcr csfr_dpre_0, %d3 + mtcr csfr_dpre_1, %d3 + mtcr csfr_dpre_2, %d3 + mtcr csfr_dpre_3, %d3 + mtcr csfr_dpre_4, %d3 + mtcr csfr_dpre_5, %d3 + mtcr csfr_dpre_6, %d3 + mtcr csfr_dpre_7, %d3 + + mtcr csfr_dpwe_0, %d3 + mtcr csfr_dpwe_1, %d3 + mtcr csfr_dpwe_2, %d3 + mtcr csfr_dpwe_3, %d3 + mtcr csfr_dpwe_4, %d3 + mtcr csfr_dpwe_5, %d3 + mtcr csfr_dpwe_6, %d3 + mtcr csfr_dpwe_7, %d3 + + mtcr csfr_cpxe_0, %d3 + mtcr csfr_cpxe_1, %d3 + mtcr csfr_cpxe_2, %d3 + mtcr csfr_cpxe_3, %d3 + mtcr csfr_cpxe_4, %d3 + mtcr csfr_cpxe_5, %d3 + mtcr csfr_cpxe_6, %d3 + mtcr csfr_cpxe_7, %d3 + + isync + + /* END OF SUBARCH */ + + /* If this is the cpu master, clear bss */ + jne %d14, 0, 1f + movh.a %a12,hi:_bss_start + lea %a12,[%a12]lo:_bss_start + + movh.a %a13,hi:_bss_end + lea %a13,[%a13]lo:_bss_end + + mov.aa %a7, %a11 + jl boot_clear + mov.aa %a11, %a7 + + movh.a %a5,hi:_barrier + lea %a5,[%a5]lo:_barrier + mov %d7, 2 + st.w [%a5],%d7 + +1: + /* wait for bsp to finish clearing bss */ + movh.a %a5,hi:_barrier + lea %a5,[%a5]lo:_barrier +2: + ld.w %d8, [%a5] + jlt %d8, 2, 2b + + isync + + /* initialize context save areas */ + mov.aa %a6, %a7 + jl _init_csa + mov.aa %a7, %a6 + + isync + + mov %d3, 2 + /* Program cache bypass */ + mtcr csfr_pcon0, %d3 + /* Data cache bypass */ + mtcr csfr_dcon0, %d3 + + isync + + /* reset access to system global registers */ + mfcr %d3,$psw + or %d3,%d3,0x100 // clear GW bit + mtcr $psw,%d3 + isync + + /* Initialize stack pointer */ + mov.d %d4, %a14 + + movh %d5,hi:(CPU_STACK_OFF + CPU_STACK_SIZE) + mov %d5,lo:(CPU_STACK_OFF + CPU_STACK_SIZE) + add %d4, %d4, %d5 + + mov.a %sp, %d4 + + mov %d4, %d0 + mov %d5, %d1 + + j init + + /* This point should never 
be reached */
+oops:
+    j oops
+
+/***** Helper functions for boot code. ******/
+
+.global boot_clear
+/* A12 contains the start position and A13 the end position.
+   this function clears the memory between A12 and A13 */
+boot_clear:
+    mov %d10, 0 //zero
+    mov.d %d11, %a13 //d11 = end of loop
+    mov %d12, 4 //d12 = increment value
+    mov.d %d13, %a12 // d13 = current position
+2:
+    st.w [%a12],%d10 // NOTE(review): stores before the bound check, so the word AT the end address is also cleared — confirm _bss_end is meant to be inclusive
+    jge %d13, %d11, 1f
+    add %d13, %d13, %d12
+    mov.a %a12, %d13
+    j 2b
+1:
+    ji %a11
+
+
+/* Copies data from d3 to d4 up to the d5 limit */
+.global copy_data
+copy_data:
+    mov.a %a12, %d3
+    mov.a %a13, %d4
+1:
+    ld.w %d3, [%a12]
+    st.w [%a13], %d3
+    mov.d %d3, %a12
+    mov.d %d4, %a13
+    jge %d3, %d5, 2f // NOTE(review): like boot_clear, the word at the d5 limit is also copied — confirm the limit is inclusive
+    add %d3, %d3, 4
+    add %d4, %d4, 4
+    mov.a %a12 , %d3
+    mov.a %a13 , %d4
+    j 1b
+2:
+    ji %a11
+
+.global boot_cache_invalidate
+boot_cache_invalidate: /* NOTE(review): no body and no 'ji %a11' return — control falls through into _init_csa if this is ever called */
+    /* TODO */
+    /* cachei.i [a3]4 */
+    /* TODO: how about l2? "If the cache line at the index/way specified
+     * by the address register A[b] is present in the L1 data cache, then
+     * invalidate the line. Note that there is no writeback of any dirty data
+     * in the cache line prior to invalidation."
*/ + +.global _init_csa +_init_csa: + movh %d10,0 + mtcr csfr_pcxi,%d10 // previous context info is null + isync + + // %d10 = begin of CSA + get_label_addr %d10, csa_array + + mov %d11, CSA_ENTRIES + mul %d12, %d11, 64 + madd %d10, %d10, %d0, %d12 + + //addi %d10,%d10,0x3f // force alignment (2^6) + //andn %d10,%d10,0x3f + + /* Initialize first CSA */ + mov.a %a12,%d10 // %a12 = address of first CSA + extr.u %d10,%d10,28,4 // %d10 = segment (4 msb) + sh %d10,%d10,16 // %d10 = segment << 16 + + mov.aa %a13,%a12 // %a13 = current CSA + lea %a12,[%a12]64 // %a12 = %a12->nextCSA + + mov.d %d12,%a12 + extr.u %d12,%d12,6,16 // get CSA index + or %d12,%d12,%d10 // add segment number + mtcr $fcx,%d12 // initialize FCX + + add %d11,%d11,-2 // CSAs to initialize -= 2 + mov.a %a7,%d11 // %a7 = loop counter + +csa_loop: + add %d12, %d12, 1 + st.w [%a12],%d12 // store "nextCSA" pointer + mov.aa %a13,%a12 // %a13 = current CSA address + lea %a12,[%a12]64 // %a12 = %a3->nextCSA + loop %a7,csa_loop // repeat until done + + mov %d10, 0 + st.w [%a12], %d10 + add %d12, %d12, -1 + mtcr $lcx,%d12 // initialize LCX + + isync + ji %a11 + +#define WDTSYS_CRTLA 0xF00001A8 +#define WDTSYS_CRTLB 0xF00001AC +#define WDTCPUy_CTRLA 0xF000003C +#define WDTCPUy_CTRLB 0xF0000040 + +.global disable_watchdogs +disable_watchdogs: + jne %d14, 0, 1f + get_label_addr %d10, WDTSYS_CRTLA + mov.a %a13, %d10 + + mov %d11, 0xF8 //Password is 0x7C on reset. UNLOCK + st.w [%a13], %d11 + + get_label_addr %d12, WDTSYS_CRTLB + mov.a %a12, %d12 + ld.w %d12, [%a12] + or %d12, %d12, 1 + st.w [%a12], %d12 + + mov %d11, 0xF9 //Password is 0x7C on reset. LOCK + st.w [%a13], %d11 + +1: + get_label_addr %d10, WDTCPUy_CTRLA + madd %d10,%d10, %d0, 0x30 + mov.a %a13, %d10 + + mov %d11, 0xF8 //Password is 0x7C on reset. 
UNLOCK + st.w [%a13], %d11 + + get_label_addr %d12, WDTCPUy_CTRLB + madd %d12,%d12, %d0, 0x30 + mov.a %a12, %d12 + ld.w %d12, [%a12] + or %d12, %d12, 1 + st.w [%a12], %d12 + + mov %d11, 0xF9 //Password is 0x7C on reset. LOCK + st.w [%a13], %d11 + + ji %a11 + + +#define LMU0_SFR_BASE 0xFB000000 +#define ALL_CPUS_MASK 0x10000FFF +#define LMU_RGN0_WRA_OFFSET 0x300 +#define ACCENDLMU0_CFG_WRA 0xF880E060 +#define ACCENDLMU0_RNG0_WRA 0xF880E400 + +.global platform_init_mem +platform_init_mem: +init_lmus: + // Enable access to LMUs to all CPUs + //maybe only master? master not fixed yet? master always fixed on TC4 + get_label_addr %d10, LMU0_SFR_BASE + get_label_addr %d11, ALL_CPUS_MASK + get_label_addr %d12, LMU_RGN0_WRA_OFFSET + add %d10, %d10, %d12 + mov.a %a12, %d10 + get_label_addr %d12, 0x10000 + mov.a %a13, %d12 + mov %d12, 9 +1: + st.w [%a12], %d11 + isync + add.a %a12, %a12, %a13 + add %d12, %d12, -1 + jnz %d12, 1b + +init_dlmus: + //currently, each cpus enables access to all other cpus to their DLMU + get_label_addr %d10, ACCENDLMU0_CFG_WRA + get_label_addr %d11, ACCENDLMU0_RNG0_WRA + get_label_addr %d12, ALL_CPUS_MASK + get_label_addr %d8, 0x40000 + +1: + madd %d10, %d10, %d0, %d8 + madd %d11, %d11, %d0, %d8 + mov.a %a13, %d10 + st.w [%a13], %d12 + mov.a %a13, %d11 + st.w [%a13], %d12 + + ji %a11 diff --git a/src/arch/tricore/cache.c b/src/arch/tricore/cache.c new file mode 100644 index 000000000..778b916bd --- /dev/null +++ b/src/arch/tricore/cache.c @@ -0,0 +1,36 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include + +/** + * The riscv spec does not include cache maintenance. There are current efforts to define and + * standardize a set of cache management instructions, but for now this is platform dependent. 
+ */ + +void cache_arch_enumerate(struct cache* dscrp) +{ + /** + * Currently the typical of way for system software to discover cache topology is to read it of + * a dtb passed by the bootloader. As we are not implementing an fdt parser, a platform port + * must add it to the platform description. + */ + *dscrp = platform.cache; +} + +__attribute__((weak)) void cache_flush_range(vaddr_t base, size_t size) +{ + /** + * A platform must define its custom cache flush operation, otherwise certain mechanisms such + * as coloring and hypervisor relocation will most probably fail. + */ + + UNUSED_ARG(base); + UNUSED_ARG(size); + + // WARNING("trying to flush caches but the operation is not defined for this " + // "platform"); +} diff --git a/src/arch/tricore/cpu.c b/src/arch/tricore/cpu.c new file mode 100644 index 000000000..f2a92858e --- /dev/null +++ b/src/arch/tricore/cpu.c @@ -0,0 +1,51 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include +#include +#include +#include + +cpuid_t CPU_MASTER __attribute__((section(".data"))); + +/* Perform architecture dependent cpu cores initializations */ +void cpu_arch_init(cpuid_t cpuid, paddr_t load_addr) +{ + if (cpuid == CPU_MASTER) { + platform_cpu_init(cpuid, load_addr); + } +} + +void cpu_arch_idle(void) +{ + /* __asm volatile("wfi\n\t" ::: "memory"); */ + __asm__ volatile("mov.a %%sp, %[val]\n\r" + //"j cpu_idle_wakeup\n\r" + ::[val] "d"(&cpu()->stack[STACK_SIZE])); + ERROR("returned from idle wake up"); +} + +void cpu_arch_powerdown(void) +{ + /* Is this the right instruction? + should we switch modes in the SMM (system mode management) ?*/ + __asm__ volatile("wait"); +} + +void cpu_arch_standby(void) +{ + /* Is this the right instruction? 
+ should we switch modes in the SMM (system mode management) ?*/ + __asm__ volatile("wait"); +} + +/* The current GCC injects calls to abort() in some places, which + results in undefined references. + For this reason we define our own abort() for this architecture. */ +void abort(void) +{ + ERROR("abort() reached!"); +} diff --git a/src/arch/tricore/csa.c b/src/arch/tricore/csa.c new file mode 100644 index 000000000..f9845647a --- /dev/null +++ b/src/arch/tricore/csa.c @@ -0,0 +1,9 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include + +union csa csa_array[PLAT_CPU_NUM][CSA_ENTRIES] __attribute__((aligned(64))); diff --git a/src/arch/tricore/exceptions.S b/src/arch/tricore/exceptions.S new file mode 100644 index 000000000..34b349255 --- /dev/null +++ b/src/arch/tricore/exceptions.S @@ -0,0 +1,359 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */
+
+#include
+#include
+#include
+//#include
+
+#define ENTRY_SIZE (0x20)
+
+.macro get_label_addr rd, label
+    movh \rd, hi:\label
+    addi \rd, \rd, lo:\label
+.endm
+
+.text
+
+.macro SAVE_HYP_GPRS
+    st.w [%a10]0x0, %d0
+    st.w [%a10]0x4, %d1
+    st.w [%a10]0x8, %d2
+    st.w [%a10]0xc, %d3
+    st.w [%a10]0x10, %d4
+    st.w [%a10]0x14, %d5
+    st.w [%a10]0x18, %d6
+    st.w [%a10]0x1c, %d7
+    st.w [%a10]0x20, %d8
+    st.w [%a10]0x24, %d9
+    st.w [%a10]0x28, %d10
+    st.w [%a10]0x2c, %d11
+    st.w [%a10]0x30, %d12
+    st.w [%a10]0x34, %d13
+    st.w [%a10]0x38, %d14
+    st.w [%a10]0x3c, %d15
+
+    st.a [%a10]0x40, %a0
+    st.a [%a10]0x44, %a1
+    st.a [%a10]0x48, %a2
+    st.a [%a10]0x4c, %a3
+    st.a [%a10]0x50, %a4
+    st.a [%a10]0x54, %a5
+    st.a [%a10]0x58, %a6
+    st.a [%a10]0x5c, %a7
+    st.a [%a10]0x60, %a8
+    st.a [%a10]0x64, %a9
+    mov.d %d0, %a10
+    mov.a %a9, %d0
+    st.a [%a10]0x68, %a9
+    st.a [%a10]0x6c, %a11
+    st.a [%a10]0x70, %a12
+    st.a [%a10]0x74, %a13
+    st.a [%a10]0x78, %a14
+    st.a [%a10]0x7c, %a15
+.endm
+
+.macro GET_CPU_PTR
+    movh %d9,hi:_dmem_phys_beg
+    addi %d9,%d9,lo:_dmem_phys_beg
+
+    mfcr %d8,$core_id
+    and %d8,%d8,7
+    /* CPU_X physical base address */
+    mov %d10, CPU_SIZE
+    madd %d8, %d9, %d8, %d10
+    mov.a %a10, %d8 /* fix: madd result is in %d8 — was 'mov.a %a10, %d0', which loaded an unrelated register */
+.endm
+
+.macro GET_VCPU_REGS_PTR
+    mov.d %d8, %a8
+    mov %d10, CPU_VCPU_OFF
+    add %d8, %d8, %d10
+    mov.a %a12, %d8
+    ld.w %d8, [%a12]
+    mov %d11, VCPU_REGS_OFF
+    add %d8, %d8, %d11
+    mov.a %a10, %d8
+.endm
+
+.macro GET_VM_ID
+    mov.d %d8, %a8
+    mov %d10, CPU_VCPU_OFF
+    add %d8, %d8, %d10
+    mov.a %a12, %d8
+    ld.w %d8, [%a12]
+    mov %d11, VCPU_VM_OFF
+    add %d8, %d8, %d11
+    mov.a %a10, %d8
+    ld.a %a10, [%a10]
+    ld.w %d8, [%a10]
+    addi %d8, %d8, 1
+.endm
+
+.macro GET_VCPU_LOWER_CTX_PTR
+    mov.d %d8, %a8
+    mov %d10, CPU_VCPU_OFF
+    add %d8, %d8, %d10
+    mov.a %a12, %d8
+    ld.w %d8, [%a12]
+    mov %d11, VCPU_REGS_LOWER_CTX_OFF
+    add %d8, %d8, %d11
+    mov.a %a10, %d8
+.endm
+
+.macro VM_EXIT
+    /*Upper context was saved by hardware*/
+
+    /*Save lower context*/
+    svlcx
+
+    /* Save a0, a1,
a8, a9 */
+    GET_VCPU_REGS_PTR
+
+    mfcr %d0, 0xFF81
+    st.w [%a10] REGS_A0_OFF, %d0
+
+    mfcr %d0, 0xFF85
+    st.w [%a10] REGS_A1_OFF, %d0
+
+    mfcr %d0, 0xFFA1
+    st.w [%a10] REGS_A8_OFF, %d0
+
+    mfcr %d0, 0xFFA5
+    st.w [%a10] REGS_A9_OFF, %d0 /* fix: 0xFFA5 is hvhra_a9 — was REGS_A8_OFF, which clobbered the saved a8 and lost a9 (VM_ENTRY restores from REGS_A9_OFF) */
+
+
+.endm
+
+.macro VM_ENTRY
+    /* d8 and A10 contains the pointer to vcpu->regs (same as lower_ctx) */
+    GET_VCPU_REGS_PTR
+
+    /*restore a0, a1, a8, a9*/
+    ld.w %d0, [%a10] REGS_A0_OFF
+    mtcr csfr_hvhra_a0, %d0
+
+    ld.w %d0, [%a10] REGS_A1_OFF
+    mtcr csfr_hvhra_a1, %d0
+
+    ld.w %d0, [%a10] REGS_A8_OFF
+    mtcr csfr_hvhra_a8, %d0
+
+    ld.w %d0, [%a10] REGS_A9_OFF
+    mtcr csfr_hvhra_a9, %d0
+
+    isync
+    /* Restore lower context */
+    rslcx
+
+    /* Return from hypervisor (Restore upper context) */
+    rfh
+
+1:
+    j 1b
+.endm
+
+
+.balign 0x100
+.global _trap_vector_table
+_trap_vector_table:
+
+.balign ENTRY_SIZE
+mmu_trap:
+    j mmu_trap_handler
+
+.balign ENTRY_SIZE
+internal_protection_trap:
+    j internal_protection_trap_handler
+
+.balign ENTRY_SIZE
+instruction_error:
+    j instruction_error_handler
+
+.balign ENTRY_SIZE
+ctx_mgnt:
+    j ctx_mgnt_handler
+
+.balign ENTRY_SIZE
+sys_bus_errors:
+    j sys_bus_errors_handler
+    jz.t %d15, 31, 1f
+    rfh
+1:
+    rfe
+
+.balign ENTRY_SIZE
+assertion_trap:
+    j assertion_trap_handler
+
+.balign ENTRY_SIZE
+system_call:
+    j system_call_handler
+
+.balign ENTRY_SIZE
+non_mskbl_interrupt:
+    j non_mskbl_interrupt_handler
+
+
+
+.balign 0x100
+.global _irq_vector
+_irq_vector:
+    .rept 255
+    .balign ENTRY_SIZE
+    j _irq_handler
+    .endr
+
+
+
+.balign 0x100
+.global _hyp_vector_table
+_hyp_vector_table:
+
+.balign ENTRY_SIZE
+hyp_call:
+    j hyp_call_handler
+
+.balign ENTRY_SIZE
+hyp_interrupt_trap:
+    j hyp_interrupt_trap_handler
+
+.balign ENTRY_SIZE
+l2_data_mem_prot_trap:
+    j l2_data_mem_prot_trap_handler
+
+.balign ENTRY_SIZE
+l2_code_mem_prot_trap:
+    j l2_code_mem_prot_trap_handler
+
+.balign ENTRY_SIZE
+hyp_csfr_access_supp:
+    j hyp_csfr_access_supp_handler
+
+
+
+
+/* Internal */
+.global mmu_trap_handler
+mmu_trap_handler: + j mmu_trap_handler + +.global internal_protection_trap_handler +internal_protection_trap_handler: + j internal_protection_trap_handler + +.global instruction_error_handler +instruction_error_handler: + j instruction_error_handler + +.global ctx_mgnt_handler +ctx_mgnt_handler: + j ctx_mgnt_handler + +.global assertion_trap_handler +assertion_trap_handler: + j assertion_trap_handler + +.global system_call_handler +system_call_handler: + j system_call_handler + +.global non_mskbl_interrupt_handler +non_mskbl_interrupt_handler: + j non_mskbl_interrupt_handler + +.global _irq_handler +_irq_handler: + VM_EXIT + call ir_handle + VM_ENTRY + + +/* Virtualization Related */ +.global hyp_call_handler +hyp_call_handler: + VM_EXIT + call hvcall_handler + VM_ENTRY + +.global hyp_interrupt_trap_handler +hyp_interrupt_trap_handler: + VM_EXIT + j . + VM_ENTRY + +.global l2_data_mem_prot_trap_handler +l2_data_mem_prot_trap_handler: + VM_EXIT + mov.aa %a4, %a11 + mov %d4,%d15 + call l2_dmem_prot_trap_handler + VM_ENTRY + +.global l2_code_mem_prot_trap_handler +l2_code_mem_prot_trap_handler: + VM_EXIT + j . + VM_ENTRY + +.global hyp_csfr_access_supp_handler +hyp_csfr_access_supp_handler: + VM_EXIT + mov.aa %a4, %a11 + mov %d4,%d15 + call hyp_csfr_access_handler + VM_ENTRY + +.global vcpu_arch_entry +vcpu_arch_entry: +/* + The last in the CSA array is the 1st context used. + We are sure of this, because this code only executes in the boot sequence. + We need to re-establish the CSA list. To do it, we have to point the FCX + to the current PCXI, and point the 1st context to the current FCX. 
+*/ + mfcr %d3,$core_id + and %d3,%d3,7 + + mfcr %d0, $pcxi + mfcr %d1, $fcx + mtcr $fcx, %d0 + movh %d0, hi:csa_array + addi %d0, %d0, lo:csa_array + mov %d2, CSA_ARRAY_SIZE + madd %d0, %d0, %d3, %d2 + add %d0, %d0, 16*4 + mov.a %a2, %d0 + st.w [%a2], %d1 + + /* Invalidate the PCXI register */ + GET_VCPU_REGS_PTR + /* convert the vcpu->regs to pcx format */ + extr.u %d1, %d8, 28, 4 + extr.u %d2, %d8, 6, 16 + sh %d1, %d1, 16 + or %d2, %d2, %d1 + /* point pcxi to the vcpu->regs->lower_ctx*/ + mtcr $pcxi, %d2 + + /* Clear CDC before jumping to guest */ + mfcr %d0, $psw + movh %d1, hi:0xFFFFF80 + addi %d1, %d1, lo:0xFFFFF80 + and %d0, %d0, %d1 + mtcr $psw, %d0 + + /* Enable VCON2 l2_prs & VMn */ + GET_VM_ID + + movh %d0, lo:0 + or %d0, %d0, %d8 + sh %d8, %d8, 8 + or %d0, %d0, %d8 + mtcr 0xB008, %d0 + + isync + + VM_ENTRY diff --git a/src/arch/tricore/inc/arch/bao.h b/src/arch/tricore/inc/arch/bao.h new file mode 100644 index 000000000..beca5b5a0 --- /dev/null +++ b/src/arch/tricore/inc/arch/bao.h @@ -0,0 +1,23 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __ARCH_BAO_H__ +#define __ARCH_BAO_H__ + +#define BAO_VAS_BASE CONFIG_HYP_BASE_ADDR + +#define PAGE_SIZE (64) +#define STACK_SIZE (0x1000) + +#ifndef __ASSEMBLER__ + +/* The current GCC injects calls to abort() in some places, which + results in undefined references. + For this reason we define our own abort() for this architecture. */ +void abort(void); + +#endif /* !__ASSEMBLER__ */ + +#endif /* __ARCH_BAO_H__ */ diff --git a/src/arch/tricore/inc/arch/cache.h b/src/arch/tricore/inc/arch/cache.h new file mode 100644 index 000000000..096a19c72 --- /dev/null +++ b/src/arch/tricore/inc/arch/cache.h @@ -0,0 +1,13 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __ARCH_CACHE_H__ +#define __ARCH_CACHE_H__ + +#include + +#define CACHE_MAX_LVL 8 // Does this make sense in all architectures? + +#endif /* __ARCH_CACHE_H__ */ diff --git a/src/arch/tricore/inc/arch/cpu.h b/src/arch/tricore/inc/arch/cpu.h new file mode 100644 index 000000000..32a8a8777 --- /dev/null +++ b/src/arch/tricore/inc/arch/cpu.h @@ -0,0 +1,29 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __ARCH_CPU_H__ +#define __ARCH_CPU_H__ + +#include +#include +#include + +#define CPU_HAS_EXTENSION(EXT) (DEFINED(EXT)) + +extern cpuid_t CPU_MASTER; + +struct cpu_arch { + struct mpu_arch mpu; + unsigned long state; +}; + +static inline struct cpu* cpu(void) +{ + register unsigned long a8 __asm__("a8"); + return (struct cpu*)a8; +} +void cpu_arch_idle(void); + +#endif /* __ARCH_CPU_H__ */ diff --git a/src/arch/tricore/inc/arch/csa.h b/src/arch/tricore/inc/arch/csa.h new file mode 100644 index 000000000..6fdb46961 --- /dev/null +++ b/src/arch/tricore/inc/arch/csa.h @@ -0,0 +1,71 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __CSA_H__ +#define __CSA_H__ + +#include + +#define CSA_ENTRIES 32 +#define CSA_ARRAY_SIZE (CSA_ENTRIES * 16 * 4) + +#ifndef __ASSEMBLER__ + +#define PCXI_PCXO_OFF 0 +#define PCXI_PCXS_OFF 16 +#define PCXI_UL_OFF 20 +#define ADDR_PCXS_OFF 28 +#define ADDR_PCXS_LEN 4 +#define ADDR_PCXO_OFF 6 +#define ADDR_PCXO_LEN 16 + +struct lower_context { + unsigned long pcxi; + unsigned long a11; + unsigned long a2; + unsigned long a3; + unsigned long d0; + unsigned long d1; + unsigned long d2; + unsigned long d3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long d4; + unsigned long d5; + unsigned long d6; + unsigned long d7; +} __attribute__((aligned(64))); + +struct upper_context { + unsigned long pcxi; + unsigned long csa_psw; + unsigned long a10; + unsigned long a11; + unsigned long d8; + unsigned long d9; + unsigned long d10; + unsigned long d11; + unsigned long a12; + unsigned long a13; + unsigned long a14; + unsigned long a15; + unsigned long d12; + unsigned long d13; + unsigned long d14; + unsigned long d15; +} __attribute__((aligned(64))); + +union csa { + struct lower_context lower; + struct upper_context upper; +}; + +extern union csa csa_array[PLAT_CPU_NUM][CSA_ENTRIES]; + +#endif + +#endif //__CSA_H__ diff --git a/src/arch/tricore/inc/arch/csfrs.h b/src/arch/tricore/inc/arch/csfrs.h new file mode 100644 index 000000000..8c2ddd0e0 --- /dev/null +++ b/src/arch/tricore/inc/arch/csfrs.h @@ -0,0 +1,568 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __ARCH_CSFR_H__ +#define __ARCH_CSFR_H__ + +#include + +#define csfr_dcon1 0x9008 +#define csfr_pcon1 0x9204 +#define csfr_pcon0 0x920C +#define csfr_dcon0 0x9040 +#define csfr_corecon 0xFE14 +#define csfr_tccon 0xFE6C + +#define csfr_cpxe_0 0xE000 +#define csfr_cpxe_1 0xE004 +#define csfr_cpxe_2 0xE008 +#define csfr_cpxe_3 0xE00C +#define csfr_dpre_0 0xE010 +#define csfr_dpre_1 0xE014 +#define csfr_dpre_2 0xE018 +#define csfr_dpre_3 0xE01C +#define csfr_dpwe_0 0xE020 +#define csfr_dpwe_1 0xE024 +#define csfr_dpwe_2 0xE028 +#define csfr_dpwe_3 0xE02C + +#define csfr_cpxe_4 0xE040 +#define csfr_cpxe_5 0xE044 +#define csfr_cpxe_6 0xE048 +#define csfr_cpxe_7 0xE04C +#define csfr_dpre_4 0xE050 +#define csfr_dpre_5 0xE054 +#define csfr_dpre_6 0xE058 +#define csfr_dpre_7 0xE05C +#define csfr_dpwe_4 0xE060 +#define csfr_dpwe_5 0xE064 +#define csfr_dpwe_6 0xE068 +#define csfr_dpwe_7 0xE06C + +#define csfr_dpr0_l 0xC000 +#define csfr_dpr0_u 0xC004 +#define csfr_dpr1_l 0xC008 +#define csfr_dpr1_u 0xC00C +#define csfr_dpr2_l 0xC010 +#define csfr_dpr2_u 0xC014 +#define csfr_dpr3_l 0xC018 +#define csfr_dpr3_u 0xC01C +#define csfr_dpr4_l 0xC020 +#define csfr_dpr4_u 0xC024 +#define csfr_dpr5_l 0xC028 +#define csfr_dpr5_u 0xC02C +#define csfr_dpr6_l 0xC030 +#define csfr_dpr6_u 0xC034 +#define csfr_dpr7_l 0xC038 +#define csfr_dpr7_u 0xC03C +#define csfr_dpr8_l 0xC040 +#define csfr_dpr8_u 0xC044 +#define csfr_dpr9_l 0xC048 +#define csfr_dpr9_u 0xC04C +#define csfr_dpr10_l 0xC050 +#define csfr_dpr10_u 0xC054 +#define csfr_dpr11_l 0xC058 +#define csfr_dpr11_u 0xC05C +#define csfr_dpr12_l 0xC060 +#define csfr_dpr12_u 0xC064 +#define csfr_dpr13_l 0xC068 +#define csfr_dpr13_u 0xC06C +#define csfr_dpr14_l 0xC070 +#define csfr_dpr14_u 0xC074 +#define csfr_dpr15_l 0xC078 +#define csfr_dpr15_u 0xC07C + +#define csfr_cpr0_l 0xD000 +#define csfr_cpr0_u 0xD004 +#define csfr_cpr1_l 0xD008 +#define csfr_cpr1_u 0xD00C +#define csfr_cpr2_l 0xD010 +#define csfr_cpr2_u 0xD014 +#define 
csfr_cpr3_l 0xD018 +#define csfr_cpr3_u 0xD01C +#define csfr_cpr4_l 0xD020 +#define csfr_cpr4_u 0xD024 +#define csfr_cpr5_l 0xD028 +#define csfr_cpr5_u 0xD02C +#define csfr_cpr6_l 0xD030 +#define csfr_cpr6_u 0xD034 +#define csfr_cpr7_l 0xD038 +#define csfr_cpr7_u 0xD03C +#define csfr_cpr8_l 0xD040 +#define csfr_cpr8_u 0xD044 +#define csfr_cpr9_l 0xD048 +#define csfr_cpr9_u 0xD04C +#define csfr_cpr10_l 0xD050 +#define csfr_cpr10_u 0xD054 +#define csfr_cpr11_l 0xD058 +#define csfr_cpr11_u 0xD05C +#define csfr_cpr12_l 0xD060 +#define csfr_cpr12_u 0xD064 +#define csfr_cpr13_l 0xD068 +#define csfr_cpr13_u 0xD06C +#define csfr_cpr14_l 0xD070 +#define csfr_cpr14_u 0xD074 +#define csfr_cpr15_l 0xD078 +#define csfr_cpr15_u 0xD07C + +#define csfr_fcx 0xFE38 +#define csfr_lcx 0xFE3C +#define csfr_pcxi 0xFE00 +#define csfr_bhv 0xB010 +#define csfr_corecon 0xFE14 +#define csfr_pcx 0xFE00 + +#define csfr_dstr 0x9010 +#define csfr_datr 0x9018 +#define csfr_deadd 0x901C + +#define csfr_vm0_icr 0xB100 +#define csfr_vm1_icr 0xB104 +#define csfr_vm2_icr 0xB108 +#define csfr_vm3_icr 0xB10C +#define csfr_vm4_icr 0xB110 +#define csfr_vm5_icr 0xB114 +#define csfr_vm6_icr 0xB118 +#define csfr_vm7_icr 0xB11C + +#define csfr_base 0xF8830000 +#define csfr_bootcon 0xFE60 +#define csfr_pc 0xFE08 + +#define csfr_hvhra_a0 0xFF81 +#define csfr_hvhra_a1 0xFF85 +#define csfr_hvhra_a8 0xFFA1 +#define csfr_hvhra_a9 0xFFA5 + +#ifndef __ASSEMBLER__ + +#define CSFRS_GEN_ACCESSORS_NAMED(csfr_name, csfr_id) \ + static inline unsigned long csfr_##csfr_name##_read(void) \ + { \ + unsigned long csfr_value; \ + __asm__ volatile("mfcr %0, " XSTR(csfr_id) : "=r"(csfr_value)::"memory"); \ + return csfr_value; \ + } \ + static inline void csfr_##csfr_name##_write(unsigned long csfr_value) \ + { \ + __asm__ volatile("mtcr " XSTR(csfr_id) ", %0" ::"r"(csfr_value) : "memory"); \ + } + +CSFRS_GEN_ACCESSORS_NAMED(tccon, csfr_tccon) + +CSFRS_GEN_ACCESSORS_NAMED(dpr0_l, csfr_dpr0_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr1_l, 
csfr_dpr1_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr2_l, csfr_dpr2_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr3_l, csfr_dpr3_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr4_l, csfr_dpr4_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr5_l, csfr_dpr5_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr6_l, csfr_dpr6_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr7_l, csfr_dpr7_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr8_l, csfr_dpr8_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr9_l, csfr_dpr9_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr10_l, csfr_dpr10_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr11_l, csfr_dpr11_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr12_l, csfr_dpr12_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr13_l, csfr_dpr13_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr14_l, csfr_dpr14_l) +CSFRS_GEN_ACCESSORS_NAMED(dpr15_l, csfr_dpr15_l) + +CSFRS_GEN_ACCESSORS_NAMED(dpr0_u, csfr_dpr0_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr1_u, csfr_dpr1_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr2_u, csfr_dpr2_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr3_u, csfr_dpr3_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr4_u, csfr_dpr4_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr5_u, csfr_dpr5_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr6_u, csfr_dpr6_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr7_u, csfr_dpr7_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr8_u, csfr_dpr8_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr9_u, csfr_dpr9_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr10_u, csfr_dpr10_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr11_u, csfr_dpr11_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr12_u, csfr_dpr12_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr13_u, csfr_dpr13_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr14_u, csfr_dpr14_u) +CSFRS_GEN_ACCESSORS_NAMED(dpr15_u, csfr_dpr15_u) + +CSFRS_GEN_ACCESSORS_NAMED(cpr0_l, csfr_cpr0_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr1_l, csfr_cpr1_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr2_l, csfr_cpr2_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr3_l, csfr_cpr3_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr4_l, csfr_cpr4_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr5_l, csfr_cpr5_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr6_l, csfr_cpr6_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr7_l, csfr_cpr7_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr8_l, csfr_cpr8_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr9_l, csfr_cpr9_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr10_l, 
csfr_cpr10_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr11_l, csfr_cpr11_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr12_l, csfr_cpr12_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr13_l, csfr_cpr13_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr14_l, csfr_cpr14_l) +CSFRS_GEN_ACCESSORS_NAMED(cpr15_l, csfr_cpr15_l) + +CSFRS_GEN_ACCESSORS_NAMED(cpr0_u, csfr_cpr0_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr1_u, csfr_cpr1_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr2_u, csfr_cpr2_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr3_u, csfr_cpr3_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr4_u, csfr_cpr4_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr5_u, csfr_cpr5_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr6_u, csfr_cpr6_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr7_u, csfr_cpr7_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr8_u, csfr_cpr8_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr9_u, csfr_cpr9_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr10_u, csfr_cpr10_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr11_u, csfr_cpr11_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr12_u, csfr_cpr12_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr13_u, csfr_cpr13_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr14_u, csfr_cpr14_u) +CSFRS_GEN_ACCESSORS_NAMED(cpr15_u, csfr_cpr15_u) + +CSFRS_GEN_ACCESSORS_NAMED(dpre_0, csfr_dpre_0) +CSFRS_GEN_ACCESSORS_NAMED(dpre_1, csfr_dpre_1) +CSFRS_GEN_ACCESSORS_NAMED(dpre_2, csfr_dpre_2) +CSFRS_GEN_ACCESSORS_NAMED(dpre_3, csfr_dpre_3) +CSFRS_GEN_ACCESSORS_NAMED(dpre_4, csfr_dpre_4) +CSFRS_GEN_ACCESSORS_NAMED(dpre_5, csfr_dpre_5) +CSFRS_GEN_ACCESSORS_NAMED(dpre_6, csfr_dpre_6) +CSFRS_GEN_ACCESSORS_NAMED(dpre_7, csfr_dpre_7) + +CSFRS_GEN_ACCESSORS_NAMED(dpwe_0, csfr_dpwe_0) +CSFRS_GEN_ACCESSORS_NAMED(dpwe_1, csfr_dpwe_1) +CSFRS_GEN_ACCESSORS_NAMED(dpwe_2, csfr_dpwe_2) +CSFRS_GEN_ACCESSORS_NAMED(dpwe_3, csfr_dpwe_3) +CSFRS_GEN_ACCESSORS_NAMED(dpwe_4, csfr_dpwe_4) +CSFRS_GEN_ACCESSORS_NAMED(dpwe_5, csfr_dpwe_5) +CSFRS_GEN_ACCESSORS_NAMED(dpwe_6, csfr_dpwe_6) +CSFRS_GEN_ACCESSORS_NAMED(dpwe_7, csfr_dpwe_7) + +CSFRS_GEN_ACCESSORS_NAMED(cpxe_0, csfr_cpxe_0) +CSFRS_GEN_ACCESSORS_NAMED(cpxe_1, csfr_cpxe_1) +CSFRS_GEN_ACCESSORS_NAMED(cpxe_2, csfr_cpxe_2) +CSFRS_GEN_ACCESSORS_NAMED(cpxe_3, 
csfr_cpxe_3) +CSFRS_GEN_ACCESSORS_NAMED(cpxe_4, csfr_cpxe_4) +CSFRS_GEN_ACCESSORS_NAMED(cpxe_5, csfr_cpxe_5) +CSFRS_GEN_ACCESSORS_NAMED(cpxe_6, csfr_cpxe_6) +CSFRS_GEN_ACCESSORS_NAMED(cpxe_7, csfr_cpxe_7) + +CSFRS_GEN_ACCESSORS_NAMED(dstr, csfr_dstr) +CSFRS_GEN_ACCESSORS_NAMED(datr, csfr_datr) +CSFRS_GEN_ACCESSORS_NAMED(deadd, csfr_deadd) + +CSFRS_GEN_ACCESSORS_NAMED(vm0_icr, csfr_vm0_icr) +CSFRS_GEN_ACCESSORS_NAMED(vm1_icr, csfr_vm1_icr) +CSFRS_GEN_ACCESSORS_NAMED(vm2_icr, csfr_vm2_icr) +CSFRS_GEN_ACCESSORS_NAMED(vm3_icr, csfr_vm3_icr) +CSFRS_GEN_ACCESSORS_NAMED(vm4_icr, csfr_vm4_icr) +CSFRS_GEN_ACCESSORS_NAMED(vm5_icr, csfr_vm5_icr) +CSFRS_GEN_ACCESSORS_NAMED(vm6_icr, csfr_vm6_icr) +CSFRS_GEN_ACCESSORS_NAMED(vm7_icr, csfr_vm7_icr) + +CSFRS_GEN_ACCESSORS_NAMED(corecon, csfr_corecon) + +#define CSFRS_GEN_ACCESSORS_CPU_NAMED(csfr_name, csfr_base, csfr_offset) \ + static inline unsigned long csfr_cpu_##csfr_name##_read(unsigned long cpuid) \ + { \ + return *(unsigned long*)(csfr_base + (cpuid << 18) + csfr_offset); \ + } \ + static inline void csfr_cpu_##csfr_name##_write(unsigned long cpuid, unsigned long csfr_value) \ + { \ + *(unsigned long*)(csfr_base + (cpuid << 18) + csfr_offset) = csfr_value; \ + } + +CSFRS_GEN_ACCESSORS_CPU_NAMED(bootcon, csfr_base, csfr_bootcon) +CSFRS_GEN_ACCESSORS_CPU_NAMED(pc, csfr_base, csfr_pc) + +#define CSFR_GEN_PR_READ(m, r) \ + static inline unsigned long csfr_##m##pr_##r##_read(mpid_t id) \ + { \ + switch (id) { \ + case 0: \ + return csfr_##m##pr0_##r##_read(); \ + case 1: \ + return csfr_##m##pr1_##r##_read(); \ + case 2: \ + return csfr_##m##pr2_##r##_read(); \ + case 3: \ + return csfr_##m##pr3_##r##_read(); \ + case 4: \ + return csfr_##m##pr4_##r##_read(); \ + case 5: \ + return csfr_##m##pr5_##r##_read(); \ + case 6: \ + return csfr_##m##pr6_##r##_read(); \ + case 7: \ + return csfr_##m##pr7_##r##_read(); \ + case 8: \ + return csfr_##m##pr8_##r##_read(); \ + case 9: \ + return csfr_##m##pr9_##r##_read(); \ + case 10: \ + 
return csfr_##m##pr10_##r##_read(); \ + case 11: \ + return csfr_##m##pr11_##r##_read(); \ + case 12: \ + return csfr_##m##pr12_##r##_read(); \ + case 13: \ + return csfr_##m##pr13_##r##_read(); \ + case 14: \ + return csfr_##m##pr14_##r##_read(); \ + case 15: \ + return csfr_##m##pr15_##r##_read(); \ + default: \ + return 0; \ + } \ + } + +#define CSFR_GEN_PR_WRITE(m, r) \ + static inline void csfr_##m##pr_##r##_write(mpid_t id, unsigned long val) \ + { \ + switch (id) { \ + case 0: \ + csfr_##m##pr0_##r##_write(val); \ + break; \ + case 1: \ + csfr_##m##pr1_##r##_write(val); \ + break; \ + case 2: \ + csfr_##m##pr2_##r##_write(val); \ + break; \ + case 3: \ + csfr_##m##pr3_##r##_write(val); \ + break; \ + case 4: \ + csfr_##m##pr4_##r##_write(val); \ + break; \ + case 5: \ + csfr_##m##pr5_##r##_write(val); \ + break; \ + case 6: \ + csfr_##m##pr6_##r##_write(val); \ + break; \ + case 7: \ + csfr_##m##pr7_##r##_write(val); \ + break; \ + case 8: \ + csfr_##m##pr8_##r##_write(val); \ + break; \ + case 9: \ + csfr_##m##pr9_##r##_write(val); \ + break; \ + case 10: \ + csfr_##m##pr10_##r##_write(val); \ + break; \ + case 11: \ + csfr_##m##pr11_##r##_write(val); \ + break; \ + case 12: \ + csfr_##m##pr12_##r##_write(val); \ + break; \ + case 13: \ + csfr_##m##pr13_##r##_write(val); \ + break; \ + case 14: \ + csfr_##m##pr14_##r##_write(val); \ + break; \ + case 15: \ + csfr_##m##pr15_##r##_write(val); \ + break; \ + default: \ + break; \ + } \ + return; \ + } + +/* data */ +CSFR_GEN_PR_WRITE(d, u) +CSFR_GEN_PR_WRITE(d, l) + +CSFR_GEN_PR_READ(d, u) +CSFR_GEN_PR_READ(d, l) +/* code */ +CSFR_GEN_PR_READ(c, u) +CSFR_GEN_PR_READ(c, l) + +CSFR_GEN_PR_WRITE(c, u) +CSFR_GEN_PR_WRITE(c, l) + +#define GEN_P_ACCESSOR(m) \ + static inline bool get_##m##_bit(unsigned long psid, mpid_t mpid) \ + { \ + unsigned long val = 0; \ + switch (psid) { \ + case 0: \ + val = csfr_##m##_0_read(); \ + break; \ + case 1: \ + val = csfr_##m##_1_read(); \ + break; \ + case 2: \ + val = 
csfr_##m##_2_read(); \ + break; \ + case 3: \ + val = csfr_##m##_3_read(); \ + break; \ + case 4: \ + val = csfr_##m##_4_read(); \ + break; \ + case 5: \ + val = csfr_##m##_5_read(); \ + break; \ + case 6: \ + val = csfr_##m##_6_read(); \ + break; \ + case 7: \ + val = csfr_##m##_7_read(); \ + break; \ + default: \ + break; \ + } \ + return !!(val & (1UL << mpid)); \ + } \ + static inline void set_##m##_bit(unsigned long psid, mpid_t mpid, bool val) \ + { \ + unsigned long tmp = 0; \ + switch (psid) { \ + case 0: \ + tmp = csfr_##m##_0_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_0_write(tmp); \ + break; \ + case 1: \ + tmp = csfr_##m##_1_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_1_write(tmp); \ + break; \ + case 2: \ + tmp = csfr_##m##_2_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_2_write(tmp); \ + break; \ + case 3: \ + tmp = csfr_##m##_3_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_3_write(tmp); \ + break; \ + case 4: \ + tmp = csfr_##m##_4_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_4_write(tmp); \ + break; \ + case 5: \ + tmp = csfr_##m##_5_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_5_write(tmp); \ + break; \ + case 6: \ + tmp = csfr_##m##_6_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_6_write(tmp); \ + break; \ + case 7: \ + tmp = csfr_##m##_7_read(); \ + if (val) \ + tmp |= 1UL << mpid; \ + else \ + tmp &= ~(1UL << mpid); \ + csfr_##m##_7_write(tmp); \ + break; \ + default: \ + break; \ + } \ + return; \ + } \ + static inline unsigned long get_##m(unsigned long psid) \ + { \ + unsigned long val = 0; \ + switch (psid) { \ + case 0: \ + val = csfr_##m##_0_read(); \ + break; \ + case 1: \ + val = 
csfr_##m##_1_read(); \ + break; \ + case 2: \ + val = csfr_##m##_2_read(); \ + break; \ + case 3: \ + val = csfr_##m##_3_read(); \ + break; \ + case 4: \ + val = csfr_##m##_4_read(); \ + break; \ + case 5: \ + val = csfr_##m##_5_read(); \ + break; \ + case 6: \ + val = csfr_##m##_6_read(); \ + break; \ + case 7: \ + val = csfr_##m##_7_read(); \ + break; \ + default: \ + break; \ + } \ + return val; \ + } \ + static inline void set_##m(unsigned long psid, unsigned long val) \ + { \ + switch (psid) { \ + case 0: \ + csfr_##m##_0_write(val); \ + break; \ + case 1: \ + csfr_##m##_1_write(val); \ + break; \ + case 2: \ + csfr_##m##_2_write(val); \ + break; \ + case 3: \ + csfr_##m##_3_write(val); \ + break; \ + case 4: \ + csfr_##m##_4_write(val); \ + break; \ + case 5: \ + csfr_##m##_5_write(val); \ + break; \ + case 6: \ + csfr_##m##_6_write(val); \ + break; \ + case 7: \ + csfr_##m##_7_write(val); \ + break; \ + default: \ + break; \ + } \ + } + +GEN_P_ACCESSOR(dpre) +GEN_P_ACCESSOR(dpwe) +GEN_P_ACCESSOR(cpxe) + +#endif /* __ASSEMBLER__ */ + +#endif /* __ARCH_CSFRS_H__ */ diff --git a/src/arch/tricore/inc/arch/fences.h b/src/arch/tricore/inc/arch/fences.h new file mode 100644 index 000000000..ba1e32d37 --- /dev/null +++ b/src/arch/tricore/inc/arch/fences.h @@ -0,0 +1,38 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ +#ifndef __FENCES_ARCH_H__ +#define __FENCES_ARCH_H__ + +static inline void fence_ord_write(void) +{ + __asm__ volatile("isync\n\t" ::: "memory"); +} + +static inline void fence_ord_read(void) +{ + __asm__ volatile("isync\n\t" ::: "memory"); +} + +static inline void fence_ord(void) +{ + __asm__ volatile("isync\n\t" ::: "memory"); +} + +static inline void fence_sync_write(void) +{ + __asm__ volatile("isync\n\t" ::: "memory"); +} + +static inline void fence_sync_read(void) +{ + __asm__ volatile("isync\n\t" ::: "memory"); +} + +static inline void fence_sync(void) +{ + __asm__ volatile("isync\n\t" ::: "memory"); +} + +#endif /* __FENCES_ARCH_H__ */ diff --git a/src/arch/tricore/inc/arch/hypercall.h b/src/arch/tricore/inc/arch/hypercall.h new file mode 100644 index 000000000..e53b5e41d --- /dev/null +++ b/src/arch/tricore/inc/arch/hypercall.h @@ -0,0 +1,14 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef ARCH_HYPERCALL_H +#define ARCH_HYPERCALL_H + +#define HYPCALL_IN_ARG_REG(ARG) ((ARG) + 1 + 15 + 5) +#define HYPCALL_OUT_ARG_REG(ARG) (HYPCALL_IN_ARG_REG(ARG)) + +// #define HYPCALL_ARG_REG(ARG) ((ARG) + REG_A0) + +#endif /* ARCH_HYPERCALL_H */ diff --git a/src/arch/tricore/inc/arch/interrupts.h b/src/arch/tricore/inc/arch/interrupts.h new file mode 100644 index 000000000..59603247e --- /dev/null +++ b/src/arch/tricore/inc/arch/interrupts.h @@ -0,0 +1,25 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __ARCH_INTERRUPTS_H__ +#define __ARCH_INTERRUPTS_H__ + +#include + +#define ARCH_MAX_INTERRUPTS 2048 +#define MAX_INTERRUPTS ARCH_MAX_INTERRUPTS + +#define MAX_GUEST_INTERRUPTS (MAX_INTERRUPTS) +#define MAX_INTERRUPT_LINES MAX_INTERRUPTS +#define MAX_INTERRUPT_HANDLERS MAX_INTERRUPTS + +/* TODO platform dependent */ +#define IPI_CPU_MSG (0x1460 / 4) /* TODO this is the first GPSR in TC49 */ +#define GSPR_SRC_BASE (0x1460 / 4) /* TODO this is the first GPSR in TC49 */ + +void interrupts_arch_handle(void); +void ir_init_ipi(void); + +#endif /* __ARCH_INTERRUPTS_H__ */ diff --git a/src/arch/tricore/inc/arch/iommu.h b/src/arch/tricore/inc/arch/iommu.h new file mode 100644 index 000000000..63398004d --- /dev/null +++ b/src/arch/tricore/inc/arch/iommu.h @@ -0,0 +1,16 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __IOMMU_ARCH_H__ +#define __IOMMU_ARCH_H__ + +#include + +// VM-specific IOMMU data +struct iommu_vm_arch { + EMPTY_STRUCT_FIELDS +}; + +#endif /* __IOMMU_ARCH_H__ */ diff --git a/src/arch/tricore/inc/arch/ir.h b/src/arch/tricore/inc/arch/ir.h new file mode 100644 index 000000000..46419c266 --- /dev/null +++ b/src/arch/tricore/inc/arch/ir.h @@ -0,0 +1,265 @@ +/** + * SPDX-License-Identifier: Apache-2.0U + * Copyright (c) Bao Project and Contributors. All rights reserved. 
 + */ + +#ifndef __IR_H__ +#define __IR_H__ + +#include +#include +#include "prot.h" + +#define IR_MAX_INTERRUPTS (2048U) +#define IR_MAX_ISP (11U) +#define IR_MAX_VM (8U) +#define IR_MAX_GROUPS (8U) +#define IR_MAX_SW_INT (8U) +#define IR_MAX_PRIO (255U) +#define IR_MIN_PRIO (0U) +#define IR_TARGET_NONE (0xfU) + +#ifndef PLAT_IR_MAX_INTERRUPTS +#define PLAT_IR_MAX_INTERRUPTS IR_MAX_INTERRUPTS +#endif + +#ifndef PLAT_IR_MAX_ISP +#define PLAT_IR_MAX_ISP IR_MAX_ISP /* TODO might not make sense */ +#endif + +#ifndef PLAT_IR_MAX_VM +#define PLAT_IR_MAX_VM IR_MAX_VM +#endif + +#ifndef PLAT_IR_MAX_GROUPS +#define PLAT_IR_MAX_GROUPS IR_MAX_GROUPS +#endif + +#ifndef PLAT_IR_MAX_SW_INT +#define PLAT_IR_MAX_SW_INT IR_MAX_SW_INT +#endif + +// Define the register offset +#define REG_OFFSET 0x0004U + +// Define bit positions for each field +#define IR_SRC_IOVCLR_POS 28U +#define IR_SRC_IOV_POS 27U +#define IR_SRC_SETR_POS 26U +#define IR_SRC_CLRR_POS 25U +#define IR_SRC_SRR_POS 24U +#define IR_SRC_SRE_POS 23U +#define IR_SRC_TOS_POS 12U +#define IR_SRC_CS_POS 11U +#define IR_SRC_VM_POS 8U +#define IR_SRC_SRPN_POS 0U + +// Define IR_SRC masks for each field +#define IR_SRC_IOVCLR_MASK (0x1U << IR_SRC_IOVCLR_POS) +#define IR_SRC_IOV_MASK (0x1U << IR_SRC_IOV_POS) +#define IR_SRC_SETR_MASK (0x1U << IR_SRC_SETR_POS) +#define IR_SRC_CLRR_MASK (0x1U << IR_SRC_CLRR_POS) +#define IR_SRC_SRR_MASK (0x1U << IR_SRC_SRR_POS) +#define IR_SRC_SRE_MASK (0x1U << IR_SRC_SRE_POS) +#define IR_SRC_TOS_MASK (0xFU << IR_SRC_TOS_POS) +#define IR_SRC_CS_MASK (0x1U << IR_SRC_CS_POS) +#define IR_SRC_VM_MASK (0x7U << IR_SRC_VM_POS) +#define IR_SRC_SRPN_MASK (0xFFU << IR_SRC_SRPN_POS) + +// Macros to read the fields from the register value +#define IR_SRC_GET_IOVCLR(reg) (((reg) & IR_SRC_IOVCLR_MASK) >> IR_SRC_IOVCLR_POS) +#define IR_SRC_GET_IOV(reg) (((reg) & IR_SRC_IOV_MASK) >> IR_SRC_IOV_POS) +#define IR_SRC_GET_SETR(reg) (((reg) & IR_SRC_SETR_MASK) >> IR_SRC_SETR_POS) +#define IR_SRC_GET_CLRR(reg) (((reg) & 
IR_SRC_CLRR_MASK) >> IR_SRC_CLRR_POS) +#define IR_SRC_GET_SRR(reg) (((reg) & IR_SRC_SRR_MASK) >> IR_SRC_SRR_POS) +#define IR_SRC_GET_SRE(reg) (((reg) & IR_SRC_SRE_MASK) >> IR_SRC_SRE_POS) +#define IR_SRC_GET_TOS(reg) (((reg) & IR_SRC_TOS_MASK) >> IR_SRC_TOS_POS) +#define IR_SRC_GET_CS(reg) (((reg) & IR_SRC_CS_MASK) >> IR_SRC_CS_POS) +#define IR_SRC_GET_VM(reg) (((reg) & IR_SRC_VM_MASK) >> IR_SRC_VM_POS) +#define IR_SRC_GET_SRPN(reg) (((reg) & IR_SRC_SRPN_MASK) >> IR_SRC_SRPN_POS) + +// Macros to set the fields in the register value +#define IR_SRC_SET_IOVCLR(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_IOVCLR_MASK) | (((val) << IR_SRC_IOVCLR_POS) & IR_SRC_IOVCLR_MASK)) +#define IR_SRC_SET_IOV(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_IOV_MASK) | (((val) << IR_SRC_IOV_POS) & IR_SRC_IOV_MASK)) +#define IR_SRC_SET_SETR(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_SETR_MASK) | (((val) << IR_SRC_SETR_POS) & IR_SRC_SETR_MASK)) +#define IR_SRC_SET_CLRR(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_CLRR_MASK) | (((val) << IR_SRC_CLRR_POS) & IR_SRC_CLRR_MASK)) +#define IR_SRC_SET_SRR(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_SRR_MASK) | (((val) << IR_SRC_SRR_POS) & IR_SRC_SRR_MASK)) +#define IR_SRC_SET_SRE(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_SRE_MASK) | (((val) << IR_SRC_SRE_POS) & IR_SRC_SRE_MASK)) +#define IR_SRC_SET_TOS(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_TOS_MASK) | (((val) << IR_SRC_TOS_POS) & IR_SRC_TOS_MASK)) +#define IR_SRC_SET_CS(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_CS_MASK) | (((val) << IR_SRC_CS_POS) & IR_SRC_CS_MASK)) +#define IR_SRC_SET_VM(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_VM_MASK) | (((val) << IR_SRC_VM_POS) & IR_SRC_VM_MASK)) +#define IR_SRC_SET_SRPN(reg, val) \ + ((reg) = ((reg) & ~IR_SRC_SRPN_MASK) | (((val) << IR_SRC_SRPN_POS) & IR_SRC_SRPN_MASK)) + +#define GPSRG_SR_IOVCLR_POS 31U +#define GPSRG_SR_IOV_POS 30U +#define GPSRG_SR_SETR_POS 29U +#define GPSRG_SR_SRR_POS 28U +#define GPSRG_SR_BRDIS_POS 27U +#define GPSRG_SR_LOCKSTAT_POS 18U +#define 
GPSRG_SR_LOCKCLR_POS 17U +#define GPSRG_SR_LOCKSET_POS 16U +#define GPSRG_SR_DATA_POS 0U + +#define GPSRG_SR_IOVCLR_MASK (0x1U << GPSRG_SR_IOVCLR_POS) // 1 bit: 31U +#define GPSRG_SR_IOV_MASK (0x1U << GPSRG_SR_IOV_POS) // 1 bit: 30U +#define GPSRG_SR_SETR_MASK (0x1U << GPSRG_SR_SETR_POS) // 1 bit: 29U +#define GPSRG_SR_SRR_MASK (0x1U << GPSRG_SR_SRR_POS) // 1 bit: 28U +#define GPSRG_SR_BRDIS_MASK (0x1U << GPSRG_SR_BRDIS_POS) // 1 bit: 27U +#define GPSRG_SR_LOCKSTAT_MASK (0x1U << GPSRG_SR_LOCKSTAT_POS) // 1 bit: 18U +#define GPSRG_SR_LOCKCLR_MASK (0x1U << GPSRG_SR_LOCKCLR_POS) // 1 bit: 17U +#define GPSRG_SR_LOCKSET_MASK (0x1U << GPSRG_SR_LOCKSET_POS) // 1 bit: 16U +#define GPSRG_SR_DATA_MASK (0x1FFFFU << GPSRG_SR_DATA_POS) // 17 bits: 16-0U + +#define GET_GPSRG_SR_IOV(reg) (((reg) & GPSRG_SR_IOV_MASK) >> GPSRG_SR_IOV_POS) +#define GET_GPSRG_SR_SRR(reg) (((reg) & GPSRG_SR_SRR_MASK) >> GPSRG_SR_SRR_POS) +#define GET_GPSRG_SR_BRDIS(reg) (((reg) & GPSRG_SR_BRDIS_MASK) >> GPSRG_SR_BRDIS_POS) +#define GET_GPSRG_SR_LOCKSTAT(reg) (((reg) & GPSRG_SR_LOCKSTAT_MASK) >> GPSRG_SR_LOCKSTAT_POS) +#define GET_GPSRG_SR_DATA(reg) (((reg) & GPSRG_SR_DATA_MASK) >> GPSRG_SR_DATA_POS) + +#define SET_GPSRG_SR_IOVCLR(reg, val) \ + ((reg) = ((reg) & ~GPSRG_SR_IOVCLR_MASK) | \ + (((val) << GPSRG_SR_IOVCLR_POS) & GPSRG_SR_IOVCLR_MASK)) +#define SET_GPSRG_SR_SETR(reg, val) \ + ((reg) = ((reg) & ~GPSRG_SR_SETR_MASK) | (((val) << GPSRG_SR_SETR_POS) & GPSRG_SR_SETR_MASK)) +#define SET_GPSRG_SR_BRDIS(reg, val) \ + ((reg) = ((reg) & ~GPSRG_SR_BRDIS_MASK) | (((val) << GPSRG_SR_BRDIS_POS) & GPSRG_SR_BRDIS_MASK)) +#define SET_GPSRG_SR_LOCKCLR(reg, val) \ + ((reg) = ((reg) & ~GPSRG_SR_LOCKCLR_MASK) | \ + (((val) << GPSRG_SR_LOCKCLR_POS) & GPSRG_SR_LOCKCLR_MASK)) +#define SET_GPSRG_SR_LOCKSET(reg, val) \ + ((reg) = ((reg) & ~GPSRG_SR_LOCKSET_MASK) | \ + (((val) << GPSRG_SR_LOCKSET_POS) & GPSRG_SR_LOCKSET_MASK)) +#define SET_GPSRG_SR_DATA(reg, val) \ + ((reg) = ((reg) & ~GPSRG_SR_DATA_MASK) | (((val) 
<< GPSRG_SR_DATA_POS) & GPSRG_SR_DATA_MASK)) + +// Define bit positions for each field +#define IR_SR_STAT_POS 31U +#define IR_SR_CS_POS 27U +#define IR_SR_ID_POS 16U +#define IR_SR_INVALID_POS 13U +#define IR_SR_VALID_POS 12U +#define IR_SR_PN_POS 0U + +// Define masks for each field +#define IR_SR_STAT_MASK (0x1U << IR_SR_STAT_POS) // 1 bit: 31U +#define IR_SR_CS_MASK (0x1U << IR_SR_CS_POS) // 4 bits: 27U +#define IR_SR_ID_MASK (0x7FFU << IR_SR_ID_POS) // 11 bits: 26-16U +#define IR_SR_INVALID_MASK (0x1U << IR_SR_INVALID_POS) // 1 bit: 13U +#define IR_SR_VALID_MASK (0x1U << IR_SR_VALID_POS) // 1 bit: 12U +#define IR_SR_PN_MASK (0xFFU << IR_SR_PN_POS) // 4 bits: 7-0U + +// Macros to read the fields from the register value +#define GET_IR_SR_STAT(reg) (((reg) & IR_SR_STAT_MASK) >> IR_SR_STAT_POS) +#define GET_IR_SR_CS(reg) (((reg) & IR_SR_CS_MASK) >> IR_SR_CS_POS) +#define GET_IR_SR_ID(reg) (((reg) & IR_SR_ID_MASK) >> IR_SR_ID_POS) +#define GET_IR_SR_INVALID(reg) (((reg) & IR_SR_INVALID_MASK) >> IR_SR_INVALID_POS) +#define GET_IR_SR_VALID(reg) (((reg) & IR_SR_VALID_MASK) >> IR_SR_VALID_POS) +#define GET_IR_SR_PN(reg) (((reg) & IR_SR_PN_MASK) >> IR_SR_PN_POS) + +/* ACCESSEN Register */ + +struct ir_src_hw { + volatile uint32_t SRC[IR_MAX_INTERRUPTS]; +} __attribute__((__packed__, aligned(PAGE_SIZE))); + +struct ir_gpsr_hw { + volatile uint32_t SRC_GPSRG_SR[8]; +} __attribute__((__packed__, aligned(PAGE_SIZE))); + +struct ir_int_gpsrg_swc { + volatile uint32_t ACCEN; // 0x0700 + x*40H + y*4: GPRSGx_SWCy write access protection register + volatile uint32_t CR; // 0x0720 + x*40H + y*4: SW control register for GPSRGxSRy +}; + +struct ir_int_gpsrg { + // volatile struct ir_int_gpsrg_swc SWC[IR_MAX_SW_INT]; // 0x0700U + volatile uint32_t SWCACCEN[IR_MAX_SW_INT]; // 0x0700 + x*40H + y*4: GPRSGx_SWCy write access + // protection register + volatile uint32_t SWC[IR_MAX_SW_INT]; // 0x0720 + x*40H + y*4: SW control register for GPSRGxSRy +}; + +struct ir_int_tos { + volatile 
struct PROT_ACCESSEN ACCENSCFG; /* Configure access to SRC[0:15] */ + volatile struct PROT_ACCESSEN ACCENSCTRL; /* Configure access to SRC[16:31] */ +}; + +struct ir_int_icu { + volatile uint32_t VM[IR_MAX_VM]; // 0x0C00 + z*34H + y*4: ICU latest service request information + // signaled for VMy + volatile uint32_t LASR; // 0x0C20 + z*34H: ICU Last Acknowledged Service Request Register + volatile uint32_t ECR; // 0x0C24 + z*34H: ICU error capture register + volatile uint32_t ECTRL; // 0x0C28 + z*34H: ICU error control register + volatile uint32_t EGEN; // 0x0C2C + z*34H: ICU error generation register + volatile uint32_t VMEN; // 0x0C30 + z*34H: ICU VM control register +}; + +struct ir_int_hw { + volatile uint32_t RESERVED1; + volatile uint32_t OCS; // 0x0004: OCDS Control and Status Register + volatile uint32_t ID; // 0x0008: Module Identification Register + volatile uint32_t LCLTEST; // 0x000C: LCL Test Register + volatile uint32_t OIXMS; // 0x0010: OTGB IRQ MUX Missed IRQ Select + volatile uint32_t OIXS0; // 0x0014: OTGB IRQ MUX Select 0U + volatile uint32_t OIXS1; // 0x0018: OTGB IRQ MUX Select 1U + volatile uint32_t OIT; // 0x001C: OTGB IRQ Trace + volatile uint32_t PROTSE; // 0x0020: PROT Register safe endinit + volatile uint32_t PROTCSE; // 0x0024: PROT Register Cyber Security Endinit + volatile uint8_t RESERVED8[0x30 - 0x28]; + volatile uint32_t PROTTOS[IR_MAX_ISP]; // 0x0030 + z*4: PROT Register for TOS=z 0 <= z <= 10U + volatile uint8_t RESERVED7[0x80 - 0x5C]; + volatile struct PROT_ACCESSEN ACCENCS; + volatile uint32_t reserved2[2]; // Padding to 0x00A0U + volatile struct PROT_ACCESSEN ACCENDBG; + volatile uint8_t RESERVED3[0x100 - 0xB8]; // Padding to 0x0100U + volatile struct PROT_ACCESSEN ACCENSRB[IR_MAX_GROUPS]; + volatile uint8_t RESERVED4[0x300 - 0x1C0]; // Padding to 0x0300U + volatile struct ir_int_tos TOS[IR_MAX_ISP]; // Type Of Service + volatile uint8_t RESERVED5[0x700 - 0x510]; // Padding to 0x0700U + volatile struct ir_int_gpsrg 
GPSRG[IR_MAX_GROUPS]; /* General Purpose Service Request */ + volatile uint8_t RESERVED9[0xB00 - 0x900]; + volatile uint32_t SRB[IR_MAX_GROUPS]; // 0x0B00 + x*4: Service request broadcast register x + volatile uint8_t RESERVED6[0xC00 - 0xB20]; // Padding to 0x0c00U + volatile struct ir_int_icu ICU[IR_MAX_ISP]; // 0x0C00 + z*34H +} __attribute__((__packed__, aligned(PAGE_SIZE))); + +extern volatile struct ir_src_hw* ir_src; +extern volatile struct ir_int_hw* ir_int; +extern volatile struct ir_gpsr_hw* ir_gpsr; +extern size_t IR_IMPL_INTERRUPTS; + +void ir_init(void); +void ir_cpu_init(void); +void ir_handle(void); +void ir_set_enbl(irqid_t int_id, bool en); +bool ir_get_enbl(irqid_t int_id); +void ir_set_prio(irqid_t int_id, uint32_t prio); +uint32_t ir_get_prio(irqid_t int_id); +bool ir_get_pend(irqid_t int_id); +bool ir_set_pend(irqid_t int_id); +bool ir_clr_pend(irqid_t int_id); +void ir_send_ipi(cpuid_t target_cpu); +bool ir_id_valid(unsigned long id); + +struct vm; +void ir_assign_icu_to_vm(unsigned long id, struct vm* vm); +bool ir_src_config_irq(unsigned long id, unsigned long tos, unsigned long vm, unsigned long prio); +bool ir_src_config_tos(unsigned long id, unsigned long tos); +bool ir_src_config_vm(unsigned long id, unsigned long vm); +bool ir_src_config_priority(unsigned long id, unsigned long prio); +bool ir_src_enable(unsigned long id, bool en); +bool ir_src_set_node(unsigned long id, unsigned long val); +void ir_init_gspr_group(unsigned long id, struct vm* vm); +void ir_clear_gspr_group(unsigned long id); +unsigned long ir_src_get_node(unsigned long id); + +#endif /* __IR_H__ */ diff --git a/src/arch/tricore/inc/arch/mem.h b/src/arch/tricore/inc/arch/mem.h new file mode 100644 index 000000000..088364c17 --- /dev/null +++ b/src/arch/tricore/inc/arch/mem.h @@ -0,0 +1,42 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __ARCH_MEM_H__ +#define __ARCH_MEM_H__ + +#include +typedef union { + unsigned long raw; + struct { + bool write; + bool read; + bool exec; + + bool vm; + bool hyp; + }; +} mem_flags_t; + +struct addr_space_arch { + EMPTY_STRUCT_FIELDS +}; + +#define PTE_FLAGS(w, r, x, v, h) \ + ((mem_flags_t){ .write = (w), .read = (r), .exec = (x), .vm = (v), .hyp = (h) }) + +#define PTE_INVALID PTE_FLAGS(0, 0, 0, 0, 0) +#define PTE_HYP_FLAGS PTE_FLAGS(1, 1, 1, 0, 1) +#define PTE_HYP_DEV_FLAGS PTE_FLAGS(1, 1, 0, 0, 1) +#define PTE_VM_FLAGS PTE_FLAGS(1, 1, 1, 1, 1) +#define PTE_VM_DEV_FLAGS PTE_FLAGS(1, 1, 0, 1, 1) +#define PTE_HYP_FLAGS_CODE PTE_FLAGS(0, 1, 1, 0, 1) + +#define MPU_ARCH_MAX_NUM_ENTRIES (24) +#define MPU_CODE_MAX_NUM_ENTRIES (16) +#define MPU_DATA_MAX_NUM_ENTRIES (24) + +size_t mpu_granularity(void); + +#endif /* __ARCH_MEM_H__ */ diff --git a/src/arch/tricore/inc/arch/mpu.h b/src/arch/tricore/inc/arch/mpu.h new file mode 100644 index 000000000..79e710f82 --- /dev/null +++ b/src/arch/tricore/inc/arch/mpu.h @@ -0,0 +1,35 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __MPU_H__ +#define __MPU_H__ + +#include +#include + +struct mpu_node { + mpid_t mpid; + mpid_t link; + perms_t perms; + unsigned long ref_counter; +}; + +struct mpu_arch { + BITMAP_ALLOC(code_bitmap, MPU_CODE_MAX_NUM_ENTRIES); + BITMAP_ALLOC(data_bitmap, MPU_DATA_MAX_NUM_ENTRIES); + + BITMAP_ALLOC(code_locked, MPU_CODE_MAX_NUM_ENTRIES); + BITMAP_ALLOC(data_locked, MPU_DATA_MAX_NUM_ENTRIES); + + struct mpu_node code_entries[MPU_CODE_MAX_NUM_ENTRIES]; + struct mpu_node data_entries[MPU_DATA_MAX_NUM_ENTRIES]; +}; + +bool mpu_perms_compatible(unsigned long perms1, unsigned long perms2); +bool mpu_update(struct addr_space* as, struct mp_region* mpr); +void mpu_enable(void); +void mpu_disable(void); + +#endif //__MPU_H__ diff --git a/src/arch/tricore/inc/arch/platform.h b/src/arch/tricore/inc/arch/platform.h new file mode 100644 index 000000000..8d30d7e51 --- /dev/null +++ b/src/arch/tricore/inc/arch/platform.h @@ -0,0 +1,32 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __ARCH_PLATFORM_H__ +#define __ARCH_PLATFORM_H__ + +#include + +// Arch-specific platform data +struct plat_device { + unsigned long dev_base; + unsigned long apu_num; + unsigned long * apu_addr; + unsigned long prot_num; + unsigned long * prot_addr; +}; +struct arch_platform { + struct ir_descript { + paddr_t int_addr; + paddr_t src_addr; + // unsigned long num_interrupts; + // irqid_t * interrupts; + unsigned long GPSR_offset; + unsigned long GPSR_size; + } ir; + unsigned long device_num; + struct plat_device * devices; +}; + +#endif /* __ARCH_PLATFORM_H__ */ diff --git a/src/arch/tricore/inc/arch/prot.h b/src/arch/tricore/inc/arch/prot.h new file mode 100644 index 000000000..9c303ae5c --- /dev/null +++ b/src/arch/tricore/inc/arch/prot.h @@ -0,0 +1,131 @@ +/** + * SPDX-License-Identifier: Apache-2.0U + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __PROT_H__ +#define __PROT_H__ + +#include +#include + +typedef unsigned long prottos_t; + +struct PROT_ACCESSEN { + volatile uint32_t WRA; // write access enable register A + volatile uint32_t WRB; // write access enable register B + volatile uint32_t RDA; // read access enable register A + volatile uint32_t RDB; // read access enable register B + volatile uint32_t VM; // VM access enable register + volatile uint32_t PRS; // PRS access enable register +}; + +#define PROT_OWEN_POS 31 +#define PROT_ODEF_POS 30 +#define PROT_TAGID_POS 24 +#define PROT_PRSEN_POS 23 +#define PROT_PRS_POS 20 +#define PROT_VMEN_POS 19 +#define PROT_VM_POS 16 +#define PROT_SWEN_POS 3 +#define PROT_STATE_POS 0 + +#define PROT_OWEN_MASK (0x1U << PROT_OWEN_POS) +#define PROT_ODEF_MASK (0x1U << PROT_ODEF_POS) +#define PROT_TAGID_MASK (0x3F << PROT_TAGID_POS) +#define PROT_PRSEN_MASK (0x1U << PROT_PRSEN_POS) +#define PROT_PRS_MASK (0x7U << PROT_PRS_POS) +#define PROT_VMEN_MASK (0x1U << PROT_VMEN_POS) +#define PROT_VM_MASK (0x7U << PROT_VM_POS) +#define PROT_SWEN_MASK (0x1U << PROT_SWEN_POS) +#define PROT_STATE_MASK (0x7U << PROT_STATE_POS) + +#define SET_PROT_ODEF(reg, val) \ + ((reg) = ((reg) & ~PROT_ODEF_MASK) | (((val) << PROT_ODEF_POS) & PROT_ODEF_MASK) | \ + (1UL << PROT_OWEN_POS)) +#define SET_PROT_TAGID(reg, val) \ + ((reg) = ((reg) & ~PROT_TAGID_MASK) | (((val) << PROT_TAGID_POS) & PROT_TAGID_MASK) | \ + (1UL << PROT_OWEN_POS)) +#define SET_PROT_PRSEN(reg, val) \ + ((reg) = ((reg) & ~PROT_PRSEN_MASK) | (((val) << PROT_PRSEN_POS) & PROT_PRSEN_MASK) | \ + (1UL << PROT_OWEN_POS)) +#define SET_PROT_PRS(reg, val) \ + ((reg) = ((reg) & ~PROT_PRS_MASK) | (((val) << PROT_PRS_POS) & PROT_PRS_MASK) | \ + (1UL << PROT_OWEN_POS)) +#define SET_PROT_VMEN(reg, val) \ + ((reg) = ((reg) & ~PROT_VMEN_MASK) | (((val) << PROT_VMEN_POS) & PROT_VMEN_MASK) | \ + (1UL << PROT_OWEN_POS)) +#define SET_PROT_VM(reg, val) \ + ((reg) = ((reg) & ~PROT_VM_MASK) | (((val) << PROT_VM_POS) & 
PROT_VM_MASK) | \ + (1UL << PROT_OWEN_POS)) +#define SET_PROT_STATE(reg, val) \ + (reg) = ((((val) << PROT_STATE_POS) & PROT_STATE_MASK) | \ + (1UL << PROT_SWEN_POS)) + +#define GET_PROT_ODEF(reg) (((reg) & PROT_ODEF_MASK) >> PROT_ODEF_POS) +#define GET_PROT_TAGID(reg) (((reg) & PROT_TAGID_MASK) >> PROT_TAGID_POS) +#define GET_PROT_PRSEN(reg) (((reg) & PROT_PRSEN_MASK) >> PROT_PRSEN_POS) +#define GET_PROT_PRS(reg) (((reg) & PROT_PRS_MASK) >> PROT_PRS_POS) +#define GET_PROT_VMEN(reg) (((reg) & PROT_VMEN_MASK) >> PROT_VMEN_POS) +#define GET_PROT_VM(reg) (((reg) & PROT_VM_MASK) >> PROT_VM_POS) + +#define PROT_STATE_INIT 0 +#define PROT_STATE_CONFIG 1 +#define PROT_STATE_CONFIG_SEC 2 +#define PROT_STATE_CHECK 3 +#define PROT_STATE_RUN 4 +#define PROT_STATE_RUN_SEC 5 +#define PROT_STATE_RUN_LOCK 6 +#define PROT_STATE_RUN_LOCK_SEC 7 + +inline void apu_enable_access_all_cpus(volatile struct PROT_ACCESSEN* accessen) +{ + accessen->WRA |= 0x7FFUL; + accessen->RDA |= 0x7FFUL; +} + +inline void apu_enable_access_cpu(volatile struct PROT_ACCESSEN* accessen, unsigned long cpuid) +{ + accessen->WRA |= 0x3UL << (cpuid * 2); + accessen->RDA |= 0x3UL << (cpuid * 2); +} + +inline void apu_enable_access_vm(volatile struct PROT_ACCESSEN* accessen, unsigned long vmid) +{ + accessen->VM |= 0x1UL << vmid; +} + +inline void apu_clear_access_cpu(struct PROT_ACCESSEN* accessen, unsigned long cpuid) +{ + unsigned long val; + val = accessen->WRA; + val = val & ~(0x3UL << cpuid); + accessen->WRA = val; + + val = accessen->RDA; + val = val & ~(0x3UL << cpuid); + accessen->RDA = val; +} + +inline void prot_set_state(volatile prottos_t* prottos, unsigned long state) +{ + SET_PROT_STATE(*prottos, state); +} + +inline void prot_set_vm(volatile prottos_t* prottos, unsigned long vmid, bool vmen) +{ + SET_PROT_VM(*prottos, vmid); + SET_PROT_VMEN(*prottos, vmen ? 
1UL : 0); +} + +inline void prot_enable(volatile prottos_t* prottos) +{ + SET_PROT_ODEF(*prottos, 1); +} + +inline void prot_disable(volatile prottos_t* prottos) +{ + SET_PROT_ODEF(*prottos, 0); +} + +#endif //__PROT_H__ diff --git a/src/arch/tricore/inc/arch/spinlock.h b/src/arch/tricore/inc/arch/spinlock.h new file mode 100644 index 000000000..4649b8357 --- /dev/null +++ b/src/arch/tricore/inc/arch/spinlock.h @@ -0,0 +1,60 @@ +/** + * baohu separation kernel + * + * Copyright (c) Jose Martins, Sandro Pinto + * + * Authors: + * Jose Martins + * + * baohu is free software; you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 as published by the Free + * Software Foundation, with a special exception exempting guest code from such + * license. See the COPYING file in the top-level directory for details. + * + */ + +#ifndef __ARCH_SPINLOCK__ +#define __ARCH_SPINLOCK__ + +typedef struct { + uint32_t spinlock_t; +} spinlock_t; + +static const spinlock_t SPINLOCK_INITVAL = { 0 }; + +static inline unsigned int cmpAndSwap(unsigned int volatile* address, unsigned int value, + unsigned int condition) +{ + unsigned long long reg64; + + __asm__ volatile("mov %A[reg], %[cond], %[val]\n\t" + "cmpswap.w [%[addr]]0, %A[reg]" : [reg] "=d"(reg64) + : [addr] "a"(address), [cond] "d"(condition), [val] "d"(value) : "memory"); + __asm__ volatile("isync"); + return (unsigned long)reg64; +} + +/* TODO: ticket lock */ +static inline void spin_lock(spinlock_t* lock) +{ + volatile long unsigned spinLockVal; + + bool retVal = false; + + do { + spinLockVal = 1UL; + spinLockVal = cmpAndSwap(((unsigned int volatile*)lock), spinLockVal, 0); + + /* Check if the SpinLock WAS set before the attempt to acquire spinlock */ + if (spinLockVal == false) { + retVal = true; + } + } while (retVal == false); +} + +static inline void spin_unlock(spinlock_t* lock) +{ + *lock = SPINLOCK_INITVAL; +} + +#endif /* __ARCH_SPINLOCK__ */ diff --git 
a/src/arch/tricore/inc/arch/traps.h b/src/arch/tricore/inc/arch/traps.h new file mode 100644 index 000000000..51094d0fa --- /dev/null +++ b/src/arch/tricore/inc/arch/traps.h @@ -0,0 +1,77 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __TRAPS_H__ +#define __TRAPS_H__ + +/* Trap Identifcation numbers */ +/* Class 0 - MMU */ +#define TIN_MMU_VAF 1 /* Virtual Address Fill */ +#define TIN_MMU_VAP 2 /* Virtual Address Protection */ +/* Class 1 - Internal Protection Traps */ +#define TIN_IP_PRIV 1 /* Privileged Instruction */ +#define TIN_IP_MPR 2 /* Memory Protection Read */ +#define TIN_IP_MPW 3 /* Memory Protection Write */ +#define TIN_IP_MPX 4 /* Memory Protection Execute */ +#define TIN_IP_MPP 5 /* Memory Protection Peripheral Access */ +#define TIN_IP_MPN 6 /* Memory Protection Null Address */ +#define TIN_IP_GRWP 7 /* Global Register Write Protection */ +/* Class 2 - Instructon Errors */ +#define TIN_IE_IOPC 1 /* Illegal Opcode */ +#define TIN_IE_UOPC 2 /* Unimplemented Opcode */ +#define TIN_IE_OPD 3 /* Invalid Operand Specification */ +#define TIN_IE_ALN 4 /* Data Address Alignment */ +#define TIN_IE_MEM 5 /* Invalid Local Memory Address */ +#define TIN_IE_CSE 6 /* Coprocessor Trap Synchronous Error */ +/* Class 3 - Context Management */ +#define TIN_CTXM_FCD 1 /* Free Context List Depletion (FCX=LCX) */ +#define TIN_CTXM_CDO 2 /* Call Depth Overflow */ +#define TIN_CTXM_CDU 3 /* Call Depth Underflow */ +#define TIN_CTXM_FCU 4 /* Free Context List Underflow (FCX=0) */ +#define TIN_CTXM_CSU 5 /* Call Stack Underflow (PCX=0) */ +#define TIN_CTXM_CTYP 6 /* Context Type (PCXI.UL wrong) */ +#define TIN_CTXM_NEST 7 /* Nesting Error RFE with non-zero call depth */ +/* Class 4 - System Bus and Peripheral Errors*/ +#define TIN_BPE_PSE 1 /* Program Fetch Error */ +#define TIN_BPE_DSE 2 /* Data Access Synchronous Error */ +#define TIN_BPE_DAE 3 /* Data Access Asynchronous Error */ 
+#define TIN_BPE_CAE 4 /* Coprocessor Trap Asynchronous Error */ +#define TIN_BPE_PIE 5 /* Program Memory Integrity Error */ +#define TIN_BPE_DIE 6 /* Data Memory Integrity Error */ +#define TIN_BPE_TAE 7 /* Temporal Asynchronous Error */ +/* Class 5 - Assertion Traps */ +#define TIN_ASSERT_OVF 1 /* Arithmetic Overflow */ +#define TIN_ASSERT_SOVF 2 /* Sticky Arithmetic Overflow*/ +/* Class 6 - System Call */ +#define TIN_SYS_SYS 1 /* System Call */ +/* Class 7 - Non-Maskable Interrupt */ +#define TIN_NMI_NMI 0 /* Non-Maskable Interrupt */ + +/* Hypervisor Trap Identifcation numbers */ +/* Class 0 - Hypervisor Call */ +/* Class 1 - Hypervisor Interrupt Trap */ +/* Class 3 - Level 2 code memory protection trap */ +/* Class 4 - HV CSFR Access Support*/ +/* The previous trap classes only have a single trap source */ + +/* Class 2 - Level 2 data memory protection trap */ +#define TIN_HYP_L2MPR 0 /* Level 2 Memory Protection Read */ +#define TIN_HYP_L2MPW 1 /* Level 2 Memory Protection Write*/ + +/* CPUx HR specific data asynchronous trap register */ +#define DATR_SBE_BIT (1 << 3) +#define DATR_CWE_BIT (1 << 9) +#define DATR_CFE_BIT (1 << 10) +#define DATR_SOE_BIT (1 << 14) +#define DATR_E_PRS_EN_BIT (1 << 19) +#define DATR_E_VMN_EN_BIT (1 << 23) +#define DATR_BUS_S_BIT (1 << 24) + +void sys_bus_errors_handler(void); +void l2_dmem_prot_trap_handler(unsigned long* addr, unsigned long access); +void hyp_csfr_access_handler(unsigned long* addr, unsigned long hvtin); +void hvcall_handler(unsigned long function_id, unsigned long dev_id, unsigned long event_id); + +#endif //__TRAPS_H__ diff --git a/src/arch/tricore/inc/arch/vir.h b/src/arch/tricore/inc/arch/vir.h new file mode 100644 index 000000000..260d8fd0d --- /dev/null +++ b/src/arch/tricore/inc/arch/vir.h @@ -0,0 +1,37 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __VIR_H__ +#define __VIR_H__ + +#include +#include +#include +#include +#include + +#define IR_MAX_INTERRUPTS (2048U) /* TODO */ + +struct vir { + spinlock_t lock; + BITMAP_ALLOC(pend, IR_MAX_INTERRUPTS); + BITMAP_ALLOC(act, IR_MAX_INTERRUPTS); + uint32_t prio[IR_MAX_INTERRUPTS]; + BITMAP_ALLOC(enbl, IR_MAX_INTERRUPTS); + struct emul_mem ir_src_emul; +}; + +struct vir_reg_handler_info { + void (*reg_access)(struct emul_access*, cpuid_t vgicr_id); + size_t alignment; +}; + +struct vm; +struct vcpu; +void vir_init(struct vm* vm); //, const struct vir_dscrp* vm_vir_dscrp); +void vir_inject(struct vcpu* vcpu, irqid_t id); +void vir_set_hw(struct vm* vm, irqid_t id); + +#endif //__VIR_H__ diff --git a/src/arch/tricore/inc/arch/vm.h b/src/arch/tricore/inc/arch/vm.h new file mode 100644 index 000000000..ec3167cff --- /dev/null +++ b/src/arch/tricore/inc/arch/vm.h @@ -0,0 +1,121 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __ARCH_VM_H__ +#define __ARCH_VM_H__ + +#include +#include +#include +#include +// #include +#include +#include +#include +#include + +#define VM_TO_AS_ID(x) ((x) + 1) + +#define REG_D0 (1) +#define REG_D1 (2) +#define REG_D2 (3) +#define REG_D3 (4) +#define REG_D4 (5) +#define REG_D5 (6) +#define REG_D6 (7) +#define REG_D7 (8) +#define REG_D8 (9) +#define REG_D9 (10) +#define REG_D10 (11) +#define REG_D11 (12) +#define REG_D12 (13) +#define REG_D13 (14) +#define REG_D14 (15) +#define REG_A0 (16) +#define REG_A1 (17) +#define REG_A2 (18) +#define REG_A3 (19) +#define REG_A4 (20) +#define REG_A5 (21) +#define REG_A6 (22) +#define REG_A7 (23) +#define REG_A8 (24) +#define REG_A9 (25) +#define REG_A10 (26) +#define REG_SP (26) +#define REG_A11 (27) +#define REG_RA (27) +#define REG_A12 (28) +#define REG_A13 (29) +#define REG_A14 (30) +#define REG_A15 (31) + +struct arch_vm_platform { + unsigned long gspr_num; + unsigned long* gspr_groups; +}; + +struct vm_arch { + /* interrupt controller */ + /* TODO needed? 
struct vir_int vir_int; */ + struct vir_src* vir_src; + + paddr_t vir_int_addr; + spinlock_t vir_int_lock; + paddr_t vir_src_addr; + spinlock_t vir_src_lock; + + struct emul_mem vir_int_emul; + struct emul_mem vir_src_emul; +}; + +struct vcpu_arch { + vcpuid_t core_id; + /* TODO CPU power state ctx */ +}; + +struct arch_regs { + union { + struct lower_context lower_ctx; + unsigned long lower_array[16]; + }; + + union { + struct upper_context upper_ctx; + unsigned long upper_array[16]; + }; + unsigned long a0; /* System global register: not saved accross calls traps and irqs */ + unsigned long a1; /* System global register: not saved accross calls traps and irqs */ + unsigned long a8; /* System global register: not saved accross calls traps and irqs */ + unsigned long a9; /* System global register: not saved accross calls traps and irqs */ + +} __attribute__((__packed__, aligned(64))); + +void vcpu_arch_entry(void); + +static inline void vcpu_arch_inject_hw_irq(struct vcpu* vcpu, irqid_t id) +{ + (void)vcpu; + (void)id; + /* virqc_inject(vcpu, id); */ +} + +static inline void vcpu_arch_inject_irq(struct vcpu* vcpu, irqid_t id) +{ + vir_inject(vcpu, id); + /* virqc_inject(vcpu, id); */ +} + +struct vm; + +void vir_vcpu_init(struct vcpu* vcpu); + +void ir_config_irq(irqid_t int_id, bool en); + +void ir_assign_int_to_vm(struct vm* vm, irqid_t id); + +void vcpu_arch_run(struct vcpu* vcpu); + +#endif /* __ARCH_VM_H__ */ diff --git a/src/arch/tricore/inc/arch/vmm.h b/src/arch/tricore/inc/arch/vmm.h new file mode 100644 index 000000000..dbf17c5de --- /dev/null +++ b/src/arch/tricore/inc/arch/vmm.h @@ -0,0 +1,9 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef ARCH_VMM_H +#define ARCH_VMM_H + +#endif /* ARCH_VMM_H */ diff --git a/src/arch/tricore/interrupts.c b/src/arch/tricore/interrupts.c new file mode 100644 index 000000000..7b9a6cf90 --- /dev/null +++ b/src/arch/tricore/interrupts.c @@ -0,0 +1,78 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +void interrupts_arch_init() +{ + ir_init(); + + /* Wait for master hart to finish irqc initialization */ + cpu_sync_barrier(&cpu_glb_sync); + + ir_cpu_init(); +} + +void interrupts_arch_ipi_send(cpuid_t target_cpu) +{ + ir_send_ipi(target_cpu); +} + +void interrupts_arch_ipi_init(void) +{ + if (cpu_is_master()) { + ir_init_ipi(); + } + + for (int i = 0; i < PLAT_CPU_NUM; i++) { + if (!interrupts_reserve((irqid_t)(IPI_CPU_MSG + i), (irq_handler_t)cpu_msg_handler)) { + ERROR("Failed to reserve IPI_CPU_MSG interrupt"); + } + } +} + +void interrupts_arch_enable(irqid_t int_id, bool en) +{ + ir_src_enable(int_id, en); +} + +void interrupts_arch_handle(void) +{ + ir_handle(); +} + +bool interrupts_arch_check(irqid_t int_id) +{ + return ir_get_pend(int_id); +} + +void interrupts_arch_clear(irqid_t int_id) +{ + ir_clr_pend(int_id); +} + +inline bool interrupts_arch_conflict(bitmap_t* interrupt_bitmap, irqid_t int_id) +{ + return bitmap_get(interrupt_bitmap, int_id); +} + +void interrupts_arch_vm_assign(struct vm* vm, irqid_t id) +{ + ir_assign_int_to_vm(vm, id); +} + +inline irqid_t interrupts_arch_reserve(irqid_t pint_id) +{ + return pint_id; +} diff --git a/src/arch/tricore/ir.c b/src/arch/tricore/ir.c new file mode 100644 index 000000000..a3eb50947 --- /dev/null +++ b/src/arch/tricore/ir.c @@ -0,0 +1,404 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#include +#include "arch/interrupts.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +volatile struct ir_int_hw* ir_int; +volatile struct ir_src_hw* ir_src; + +bool ipi_initialized = false; + +bitmap_t valid[IR_MAX_INTERRUPTS / 4] = INTERRUPTS_BITMAP; + +spinlock_t src_lock = SPINLOCK_INITVAL; + +void ir_init_ipi(void) +{ + for (unsigned int i = 0; i < PLAT_CPU_NUM; i++) { + /* configure each GPSRG interrupt in this group for each CPU */ + /* We set the TOS of each Node in group 0 to the corresponding cpu*/ + + IR_SRC_SET_TOS(ir_src->SRC[IPI_CPU_MSG + i], i); + /* IPIs are the only interrupts for bao, we set them with the highest priority */ + IR_SRC_SET_SRPN(ir_src->SRC[IPI_CPU_MSG + i], 0xFF); + + prot_set_state(&ir_int->PROTTOS[i], PROT_STATE_CONFIG); + ir_int->GPSRG[0].SWCACCEN[i] = (0UL << 31) | (1UL << 16) | (0x3UL << i); + prot_set_state(&ir_int->PROTTOS[i], PROT_STATE_RUN); + + IR_SRC_SET_SRE(ir_src->SRC[IPI_CPU_MSG + i], 1UL); + /* TODO after enabling we can broadcast interrupts (through SRB) for all cpus simultaneously + * although care must be taken as the current cpu could also be interrupted. + * we could temporarily remove this cpu from broadcast. 
use the gpsr + * lock mechanism to do sync */ + } + + /* Bao (VM0) on any CPU can use the broadcast register */ + apu_enable_access_all_cpus(&ir_int->ACCENSRB[0]); + apu_enable_access_vm(&ir_int->ACCENSRB[0], 0); + + /* Enable VM0 in the arbitration process on all CPUs*/ + for (size_t i = 0; i < platform.cpu_num; i++) { + ir_int->ICU[i].VMEN |= 0x1; + } + + fence_sync(); + ipi_initialized = true; +} + +static void ir_assign_cpu_tos_access(void) +{ + /* In this function we only configure the TOS's related to each cpu */ + for (uint32_t tos = 0; tos < PLAT_CPU_NUM; tos++) { + /* By default all TOSs (CPUs) can read and write on all field of SRC */ + /* but only the VM0 (hypervisor)*/ + apu_enable_access_cpu(&ir_int->TOS[tos].ACCENSCFG, tos); + apu_enable_access_vm(&ir_int->TOS[tos].ACCENSCFG, 0); + apu_enable_access_cpu(&ir_int->TOS[tos].ACCENSCTRL, tos); + apu_enable_access_vm(&ir_int->TOS[tos].ACCENSCTRL, 0); + + /* We define VM0 on CPUx as the PROT owner of TOSx */ + /* Only bao on CPUx can reconfigure the acess to SRC for TOSx */ + prot_set_vm(&ir_int->PROTTOS[tos], 0, true); // set VM0 as the owner and enable VM as of the + // owner ID + prot_enable(&ir_int->PROTTOS[tos]); // enable protection + prot_set_state(&ir_int->PROTTOS[tos], PROT_STATE_RUN); + + /* TODO: MS: Maybe sacrifice readability for optimizations, all the previous + statements write on the same register */ + } +} + +void ir_init(void) +{ + /* Map IR and SRC */ + if(!DEFINED(MMIO_SLAVE_SIDE_PROT)){ + ir_int = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, platform.arch.ir.int_addr, + platform.arch.ir.int_addr, NUM_PAGES(sizeof(struct ir_int_hw))); + ir_src = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, platform.arch.ir.src_addr, + platform.arch.ir.src_addr, NUM_PAGES(sizeof(ir_src->SRC[0]) * PLAT_IR_MAX_INTERRUPTS)); + } + else { + ir_int = (struct ir_int_hw *)platform.arch.ir.int_addr; + ir_src = (struct ir_src_hw *)platform.arch.ir.src_addr; + } + /** Ensure that instructions after 
fence have the IR fully mapped */ + fence_sync(); + + if (cpu_is_master()) { + ir_assign_cpu_tos_access(); + } +} + +void ir_cpu_init(void) +{ + /* Nothing to do */ +} + +void ir_set_enbl(irqid_t int_id, bool en) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, int_id)) { + ERROR("%s Invalid interrupt %u", __func__, int_id); + } + IR_SRC_SET_TOS(ir_src->SRC[int_id], cpu()->id); /* TODO assumes current cpu + is requesting interrupt + to be active */ + IR_SRC_SET_SRE(ir_src->SRC[int_id], (unsigned long)en); + + spin_unlock(&src_lock); +} + +bool ir_get_enbl(irqid_t int_id) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, int_id)) { + ERROR("%s Invalid interrupt %u", __func__, int_id); + } + bool enabled = IR_SRC_GET_SRE(ir_src->SRC[int_id]); + + spin_unlock(&src_lock); + + return enabled; +} + +void ir_set_prio(irqid_t int_id, uint32_t prio) +{ + spin_lock(&src_lock); + + if (prio > IR_MAX_PRIO || !bitmap_get(valid, int_id)) { + ERROR("%s Invalid priority %u", __func__, prio); + } + + IR_SRC_SET_SRPN(ir_src->SRC[int_id], prio); + + spin_unlock(&src_lock); +} + +uint32_t ir_get_prio(irqid_t int_id) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, int_id)) { + ERROR("%s Invalid interrupt %u", __func__, int_id); + } + uint32_t prio = IR_SRC_GET_SRPN(ir_src->SRC[int_id]); + + spin_unlock(&src_lock); + + return prio; +} + +bool ir_get_pend(irqid_t int_id) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, int_id)) { + ERROR("%s Invalid interrupt %u", __func__, int_id); + } + bool pending = IR_SRC_GET_SRR(ir_src->SRC[int_id]) != 0; + + spin_unlock(&src_lock); + + return pending; +} + +bool ir_set_pend(irqid_t int_id) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, int_id)) { + ERROR("%s Invalid interrupt %u", __func__, int_id); + spin_unlock(&src_lock); + + return false; + } else { + IR_SRC_SET_SETR(ir_src->SRC[int_id], true); + spin_unlock(&src_lock); + + return true; + } +} + +bool ir_clr_pend(irqid_t int_id) +{ + spin_lock(&src_lock); + + if 
(!bitmap_get(valid, int_id)) { + ERROR("%s Invalid interrupt %u", __func__, int_id); + } + IR_SRC_SET_CLRR(ir_src->SRC[int_id], 1); + + spin_unlock(&src_lock); + + return true; +} + +void ir_handle(void) +{ + uint32_t cpuid = cpu()->id; + (void)cpuid; + + bool for_vm = true; + + unsigned long id = (ir_int->ICU[cpuid].VM[0] >> 16) & 0x7FF; + + if (for_vm) { + /* TODO: Currently not supported? */ + } + + enum irq_res res = interrupts_handle(id); + (void)res; +} + +void ir_send_ipi(cpuid_t target_cpu) +{ + if (ipi_initialized) { + if (target_cpu >= PLAT_CPU_NUM) { + ERROR("%s invalid cpu number %u", target_cpu, __func__); + } + /* We previously configure interrupts for each CPU */ + ir_int->SRB[0] = 0x1UL << target_cpu; + } +} + +void ir_assign_int_to_vm(struct vm* vm, irqid_t id) +{ + /* VM direct injection */ + uint32_t vmid = (vm->id) + 1; + if (vmid > 7) { + ERROR("Unsuported vm id %u > 7", vmid); + return; + } + + IR_SRC_SET_VM(ir_src->SRC[id], vmid); +} + +void ir_assign_icu_to_vm(unsigned long id, struct vm* vm) +{ + UNUSED_ARG(id); + /* VM direct injection */ + uint32_t vmid = (vm->id) + 1; + if (vmid > 7) { + ERROR("Unsuported vm id %u > 7", vmid); + return; + } + + ir_int->ICU[cpu()->id].VMEN |= 1UL << vmid; +} + +bool ir_id_valid(unsigned long id) +{ + return bitmap_get(valid, id); +} + +bool ir_src_config_irq(unsigned long id, unsigned long tos, unsigned long vm, unsigned long prio) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, id)) { + ERROR("%s Invalid interrupt %u", __func__, id); + return false; + } + + IR_SRC_SET_SRPN(ir_src->SRC[id], prio); + IR_SRC_SET_VM(ir_src->SRC[id], vm); + IR_SRC_SET_TOS(ir_src->SRC[id], tos); + + spin_unlock(&src_lock); + + return true; +} + +bool ir_src_config_tos(unsigned long id, unsigned long tos) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, id)) { + ERROR("%s Invalid interrupt %u", __func__, id); + return false; + } + + IR_SRC_SET_TOS(ir_src->SRC[id], tos); + + spin_unlock(&src_lock); + + return true; +} 
+ +bool ir_src_config_vm(unsigned long id, unsigned long vm) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, id)) { + ERROR("%s Invalid interrupt %u", __func__, id); + return false; + } + + IR_SRC_SET_VM(ir_src->SRC[id], vm); + + spin_unlock(&src_lock); + + return true; +} + +bool ir_src_config_priority(unsigned long id, unsigned long prio) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, id)) { + ERROR("%s Invalid interrupt %u", __func__, id); + return false; + } + + IR_SRC_SET_SRPN(ir_src->SRC[id], prio); + + spin_unlock(&src_lock); + + return true; +} + +bool ir_src_enable(unsigned long id, bool en) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, id)) { + ERROR("%s Invalid interrupt %u", __func__, id); + return false; + } + + IR_SRC_SET_SRE(ir_src->SRC[id], (unsigned long)en); + + spin_unlock(&src_lock); + + return true; +} + +unsigned long ir_src_get_node(unsigned long id) +{ + unsigned long val = 0; + spin_lock(&src_lock); + + if (!bitmap_get(valid, id)) { + ERROR("%s Invalid interrupt %u", __func__, id); + return (unsigned long)-1; + } + + val = ir_src->SRC[id]; + + spin_unlock(&src_lock); + + return val; +} + +bool ir_src_set_node(unsigned long id, unsigned long val) +{ + spin_lock(&src_lock); + + if (!bitmap_get(valid, id)) { + ERROR("%s Invalid interrupt %u", __func__, id); + return false; + } + + ir_src->SRC[id] = val; + + spin_unlock(&src_lock); + + return true; +} + +void ir_clear_gspr_group(unsigned long id) +{ + for (int node = 0; node < 8; node++) { + ir_int->GPSRG[id].SWCACCEN[node] = 0; + } +} + +void ir_init_gspr_group(unsigned long id, struct vm* vm) +{ + for (unsigned long int i = 0; i < platform.cpu_num; i++) { + if (vm->cpus & (1UL << i)) { + apu_enable_access_cpu(&ir_int->ACCENSRB[id], i); + + for (int node = 0; node < 8; node++) { + // ir_int->GPSRG[id].SWCACCEN[node] = 0; + ir_int->GPSRG[id].SWCACCEN[node] |= (unsigned long)0x3 << (i * 2); + ir_int->GPSRG[id].SWCACCEN[node] |= (unsigned long)0x1 << (vm->id + 16); + } + } + 
} +} diff --git a/src/arch/tricore/mem.c b/src/arch/tricore/mem.c new file mode 100644 index 000000000..26aa6e05a --- /dev/null +++ b/src/arch/tricore/mem.c @@ -0,0 +1,19 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include + +#include +#include + +void as_arch_init(struct addr_space* as) +{ + UNUSED_ARG(as); +} + +size_t mpu_granularity(void) +{ + return (size_t)PAGE_SIZE; +} diff --git a/src/arch/tricore/mpu.c b/src/arch/tricore/mpu.c new file mode 100644 index 000000000..bfb86750b --- /dev/null +++ b/src/arch/tricore/mpu.c @@ -0,0 +1,330 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include "types.h" +#include +#include +#include +#include + +#define CAT_RW_FLAGS(mem_flags) \ + (((unsigned long int)(mem_flags).write << 2) | ((unsigned long int)(mem_flags).read << 1)) + +static mpid_t mpu_code_entry_allocate(void) +{ + mpid_t reg_num = INVALID_MPID; + for (mpid_t i = 0; i < MPU_CODE_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.code_bitmap, i) == 0) { + bitmap_set(cpu()->arch.mpu.code_bitmap, i); + reg_num = i; + break; + } + } + return reg_num; +} + +static void mpu_code_entry_deallocate(mpid_t mpid) +{ + bitmap_clear(cpu()->arch.mpu.code_bitmap, mpid); +} + +static mpid_t mpu_data_entry_allocate(void) +{ + mpid_t reg_num = INVALID_MPID; + for (mpid_t i = 0; i < MPU_DATA_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.data_bitmap, i) == 0) { + bitmap_set(cpu()->arch.mpu.data_bitmap, i); + reg_num = i; + break; + } + } + return reg_num; +} + +static mpid_t mpu_data_find_region(struct mp_region* mpr) +{ + mpid_t reg_num = INVALID_MPID; + for (mpid_t i = 0; i < MPU_DATA_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.data_bitmap, i)) { + if (csfr_dpr_l_read(i) == mpr->base && csfr_dpr_u_read(i) == mpr->base + mpr->size) { + reg_num = i; + break; + } + } + } + return reg_num; +} 
+ +static mpid_t mpu_code_find_region(struct mp_region* mpr) +{ + mpid_t reg_num = INVALID_MPID; + for (mpid_t i = 0; i < MPU_CODE_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.code_bitmap, i)) { + if (csfr_cpr_l_read(i) == mpr->base && csfr_cpr_u_read(i) == mpr->base + mpr->size) { + reg_num = i; + break; + } + } + } + return reg_num; +} + +static void mpu_data_entry_deallocate(mpid_t mpid) +{ + bitmap_clear(cpu()->arch.mpu.data_bitmap, mpid); +} + +static void mpu_insert_data_entry(struct mp_region* mpr, mpid_t mpid) +{ + unsigned long lower_addr = mpr->base; + unsigned long upper_addr = mpr->base + mpr->size; + csfr_dpr_l_write(mpid, lower_addr); + csfr_dpr_u_write(mpid, upper_addr); +} + +static void mpu_insert_code_entry(struct mp_region* mpr, mpid_t mpid) +{ + unsigned long lower_addr = mpr->base; + unsigned long upper_addr = mpr->base + mpr->size; + csfr_cpr_l_write(mpid, lower_addr); + csfr_cpr_u_write(mpid, upper_addr); +} + +static void mpu_prs_enable_entry(unsigned long prs, mpid_t mpid, unsigned long perm) +{ + if (perm & PERM_W) { + set_dpwe_bit(prs, mpid, true); + } + + if (perm & PERM_R) { + set_dpre_bit(prs, mpid, true); + } + + if (perm & PERM_X) { + set_cpxe_bit(prs, mpid, true); + } +} + +static void mpu_prs_disable_entry(unsigned long prs, mpid_t mpid, unsigned long perm) +{ + if (perm & PERM_W) { + set_dpwe_bit(prs, mpid, false); + } + + if (perm & PERM_R) { + set_dpre_bit(prs, mpid, false); + } + + if (perm & PERM_X) { + set_cpxe_bit(prs, mpid, false); + } +} + +bool mpu_map(struct addr_space* as, struct mp_region* mpr, bool locked) +{ + bool entry_added = false; + unsigned long code_mpid = INVALID_MPID; + unsigned long data_mpid = INVALID_MPID; + unsigned long prs = as->id; // as->type ? 
1 : 0; + mpid_t existing_data = mpu_data_find_region(mpr); + mpid_t existing_code = mpu_code_find_region(mpr); + + if (mpr->size == 0) { + return false; + } + + if (mpr->mem_flags.write || mpr->mem_flags.read) { + if (existing_data != INVALID_MPID) { + // Should we increment the ref_counter regardless? + // Shouldn't we evaluate if regions is already enabled for the PRS? + cpu()->arch.mpu.data_entries[existing_data].ref_counter++; + mpu_prs_enable_entry(prs, existing_data, CAT_RW_FLAGS(mpr->mem_flags)); + entry_added = true; + + data_mpid = existing_data; + + /* Is there any circumstance where two regions with the same boundaries are + not linked? */ + if (existing_code != INVALID_MPID && mpr->mem_flags.exec) { // We assume they were + // linked beforehand + cpu()->arch.mpu.code_entries[existing_code].ref_counter++; + mpu_prs_enable_entry(prs, existing_code, PERM_X); + return true; + } + } else { + data_mpid = mpu_data_entry_allocate(); + if (data_mpid != INVALID_MPID) { + mpu_insert_data_entry(mpr, data_mpid); + if (locked) { + bitmap_set(cpu()->arch.mpu.data_locked, data_mpid); + } + mpu_prs_enable_entry(prs, data_mpid, + (unsigned long int)CAT_RW_FLAGS(mpr->mem_flags)); + + entry_added = true; + + cpu()->arch.mpu.data_entries[data_mpid].ref_counter++; + cpu()->arch.mpu.data_entries[data_mpid].perms |= + (unsigned long int)CAT_RW_FLAGS(mpr->mem_flags); + } + } + } + + if (mpr->mem_flags.exec) { + if (existing_code != INVALID_MPID) { + cpu()->arch.mpu.code_entries[existing_code].ref_counter++; + + if (entry_added) { + cpu()->arch.mpu.code_entries[existing_code].link = data_mpid; + cpu()->arch.mpu.data_entries[data_mpid].link = existing_code; + } + + } else { + code_mpid = mpu_code_entry_allocate(); + if (code_mpid != INVALID_MPID) { + mpu_insert_code_entry(mpr, code_mpid); + if (locked) { + bitmap_set(cpu()->arch.mpu.code_locked, code_mpid); + } + + mpu_prs_enable_entry(prs, code_mpid, PERM_X); + cpu()->arch.mpu.code_entries[code_mpid].ref_counter++; + 
cpu()->arch.mpu.code_entries[code_mpid].perms |= PERM_X; + + if (entry_added) { + cpu()->arch.mpu.code_entries[code_mpid].link = data_mpid; + cpu()->arch.mpu.data_entries[data_mpid].link = code_mpid; + } + } + } + } + return true; +} + +bool mpu_update(struct addr_space* as, struct mp_region* mpr) +{ + UNUSED_ARG(as); + + for (mpid_t i = 0; i < MPU_DATA_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.data_bitmap, i) == 1) { + if (csfr_dpr_l_read(i) == mpr->base) { + csfr_dpr_u_write(i, mpr->base + mpr->size); + break; + } + } + } + + /* Since we iterate both mpus it is not needed to check links, we just update */ + + for (mpid_t i = 0; i < MPU_CODE_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.code_bitmap, i) == 1) { + if (csfr_cpr_l_read(i) == mpr->base) { + csfr_cpr_u_write(i, mpr->base + mpr->size); + break; + } + } + } + + return true; +} + +bool mpu_unmap(struct addr_space* as, struct mp_region* mpr) +{ + unsigned long prs = as->id; + + for (mpid_t i = 0; i < MPU_CODE_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.code_bitmap, i) == 1) { + if (csfr_cpr_l_read(i) == mpr->base && csfr_cpr_u_read(i) == mpr->base + mpr->size) { + if (cpu()->arch.mpu.code_entries[i].ref_counter == 1) { + csfr_cpr_u_write(i, 0); + csfr_cpr_l_write(i, 0); + mpu_code_entry_deallocate(i); + } + + cpu()->arch.mpu.code_entries[i].ref_counter--; + mpu_prs_disable_entry(prs, i, PERM_X); + + if (cpu()->arch.mpu.code_entries[i].link != INVALID_MPID) { + cpu()->arch.mpu.code_entries[i].link = INVALID_MPID; + } + break; + } + } + } + + for (mpid_t i = 0; i < MPU_DATA_MAX_NUM_ENTRIES; i++) { + if (bitmap_get(cpu()->arch.mpu.data_bitmap, i) == 1) { + if (csfr_dpr_l_read(i) == mpr->base && csfr_dpr_u_read(i) == mpr->base + mpr->size) { + if (cpu()->arch.mpu.data_entries[i].ref_counter == 1) { + csfr_dpr_u_write(i, 0); + csfr_dpr_l_write(i, 0); + mpu_data_entry_deallocate(i); + } + + cpu()->arch.mpu.data_entries[i].ref_counter--; + mpu_prs_disable_entry(prs, i, 
PERM_RW); + + if (cpu()->arch.mpu.data_entries[i].link != INVALID_MPID) { + cpu()->arch.mpu.data_entries[i].link = INVALID_MPID; + } + break; + } + } + } + return true; +} + +void mpu_enable(void) +{ + csfr_corecon_write(2); +} + +void mpu_disable(void) +{ + csfr_corecon_write(0); +} + +bool mpu_perms_compatible(unsigned long perms1, unsigned long perms2) +{ + mem_flags_t p1; + mem_flags_t p2; + + /* -Wpedantic doesn't allow casts to union types */ + p1.raw = perms1; + p2.raw = perms2; + + if (p1.exec == p2.exec) { + return true; + } + + if ((p1.read || p1.write) && (p2.read || p2.write)) { + return true; + } + + return false; +} + +void mpu_init(void) +{ + bitmap_clear_consecutive(cpu()->arch.mpu.code_bitmap, 0, MPU_CODE_MAX_NUM_ENTRIES); + bitmap_clear_consecutive(cpu()->arch.mpu.data_bitmap, 0, MPU_DATA_MAX_NUM_ENTRIES); + + bitmap_clear_consecutive(cpu()->arch.mpu.code_locked, 0, MPU_CODE_MAX_NUM_ENTRIES); + bitmap_clear_consecutive(cpu()->arch.mpu.data_locked, 0, MPU_DATA_MAX_NUM_ENTRIES); + + for (mpid_t mpid = 0; mpid < MPU_CODE_MAX_NUM_ENTRIES; mpid++) { + cpu()->arch.mpu.code_entries[mpid].mpid = mpid; + cpu()->arch.mpu.code_entries[mpid].link = INVALID_MPID; + cpu()->arch.mpu.code_entries[mpid].perms = 0; + cpu()->arch.mpu.code_entries[mpid].ref_counter = 0; + } + + for (mpid_t mpid = 0; mpid < MPU_DATA_MAX_NUM_ENTRIES; mpid++) { + cpu()->arch.mpu.data_entries[mpid].mpid = mpid; + cpu()->arch.mpu.data_entries[mpid].link = INVALID_MPID; + cpu()->arch.mpu.data_entries[mpid].perms = 0; + cpu()->arch.mpu.data_entries[mpid].ref_counter = 0; + } +} diff --git a/src/arch/tricore/objects.mk b/src/arch/tricore/objects.mk new file mode 100644 index 000000000..b8f5ed719 --- /dev/null +++ b/src/arch/tricore/objects.mk @@ -0,0 +1,17 @@ +## SPDX-License-Identifier: Apache-2.0 +## Copyright (c) Bao Project and Contributors. All rights reserved. 
+ +cpu-objs-y+=boot.o +cpu-objs-y+=exceptions.o +cpu-objs-y+=mem.o +cpu-objs-y+=ir.o +cpu-objs-y+=vm.o +cpu-objs-y+=vmm.o +cpu-objs-y+=interrupts.o +cpu-objs-y+=cpu.o +cpu-objs-y+=cache.o +cpu-objs-y+=mpu.o +cpu-objs-y+=csa.o +cpu-objs-y+=traps.o +cpu-objs-y+=prot.o +cpu-objs-y+=vir.o \ No newline at end of file diff --git a/src/arch/tricore/prot.c b/src/arch/tricore/prot.c new file mode 100644 index 000000000..83d747b9a --- /dev/null +++ b/src/arch/tricore/prot.c @@ -0,0 +1,12 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include +#include +#include +#include + +// TODO:MS WIP diff --git a/src/arch/tricore/traps.c b/src/arch/tricore/traps.c new file mode 100644 index 000000000..d75acab63 --- /dev/null +++ b/src/arch/tricore/traps.c @@ -0,0 +1,435 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +typedef void (*trap_handler_t)(unsigned long, unsigned long); + +static void bpe_dae_handler(unsigned long datr, unsigned long deadd) +{ + UNUSED_ARG(deadd); + if (!(datr & DATR_SBE_BIT)) { + ERROR("Not a store."); + } +} + +static void bpe_pse_handler(unsigned long datr, unsigned long deadd) +{ + /* This is a synchronous trap, the inputs will be different */ + UNUSED_ARG(datr); + UNUSED_ARG(deadd); + return; +} +static void bpe_dse_handler(unsigned long datr, unsigned long deadd) +{ + /* This is a synchronous trap, the inputs will be different */ + UNUSED_ARG(datr); + UNUSED_ARG(deadd); + return; +} +static void bpe_cae_handler(unsigned long datr, unsigned long deadd) +{ + UNUSED_ARG(datr); + UNUSED_ARG(deadd); + return; +} +static void bpe_pie_handler(unsigned long datr, unsigned long deadd) +{ + /* This is a synchronous trap, the inputs will be different */ + UNUSED_ARG(datr); + UNUSED_ARG(deadd); + return; +} +static void 
bpe_die_handler(unsigned long datr, unsigned long deadd) +{ + UNUSED_ARG(datr); + UNUSED_ARG(deadd); + return; +} +static void bpe_tae_handler(unsigned long datr, unsigned long deadd) +{ + UNUSED_ARG(datr); + UNUSED_ARG(deadd); + return; +} + +trap_handler_t bus_periph_error_handlers[8] = { [TIN_BPE_PSE] = bpe_pse_handler, + [TIN_BPE_DSE] = bpe_dse_handler, + [TIN_BPE_DAE] = bpe_dae_handler, + [TIN_BPE_CAE] = bpe_cae_handler, + [TIN_BPE_PIE] = bpe_pie_handler, + [TIN_BPE_DIE] = bpe_die_handler, + [TIN_BPE_TAE] = bpe_tae_handler }; + +void sys_bus_errors_handler(void) { } + +/* We assume that D regs are (16+reg_num)*/ +static bool decode_16b_access(unsigned long ins, struct emul_access* emul) +{ + unsigned long opcode = bit32_extract(ins, 0, 8); + bool ret = false; + + switch (opcode) { + case 0xD8: + case 0xCC: + /* LD.A */ + case 0xF8: + case 0xEC: + /* ST.A */ + emul->reg = 15; + emul->reg_width = 4; + ret = true; + break; + + case 0xC4: + case 0xD4: + case 0xC8: + /* LD.A */ + case 0xE4: + case 0xF4: + case 0xE8: + /* ST.A */ + emul->reg = bit32_extract(ins, 8, 4); + emul->reg_width = 4; + ret = true; + break; + + case 0x0C: + /* LD.BU */ + case 0x2C: + /* ST.B */ + emul->reg = 15 + 16; + emul->reg_width = 1; + break; + + case 0x04: + case 0x14: + case 0x08: + /* LD.BU */ + case 0x24: + case 0x34: + case 0x28: + /* ST.B */ + emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = 1; + ret = true; + break; + + case 0x8C: + /* LD.H */ + case 0xAC: + /* ST.H */ + emul->reg_width = 2; + ret = true; + break; + case 0x84: + case 0x94: + case 0x88: + /* LD.H */ + case 0xA4: + case 0xB4: + case 0xA8: + /* ST.H */ + emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = 2; + ret = true; + break; + + case 0x58: + case 0x4C: + /* LD.W */ + case 0x78: + case 0x6C: + /* ST.W */ + emul->reg = 15 + 16; + emul->reg_width = 4; + ret = true; + break; + + case 0x44: + case 0x54: + case 0x48: + /* LD.W */ + case 0x64: + case 0x74: + case 0x68: + /* ST.W */ + 
emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = 4; + ret = true; + break; + default: + break; + } + return ret; +} + +static bool decode_32b_access(unsigned long ins, struct emul_access* emul) +{ + bool ret = 0; + unsigned long opcode = bit32_extract(ins, 0, 8); + /* The first sub_opcode is 6 bits, but only the least significant 4 + are used to identify which instruction. The other two are used to identify + the addressing mode. */ + unsigned long sub_opc = bit32_extract(ins, 22, 4); + unsigned long sub_opc2 = bit32_extract(ins, 26, 2); + + switch (opcode) { + case 0x85: + /* LD.A / LD.W / LD.D / LD.DA */ + case 0xA5: + /* ST.A / ST.W / ST.D / ST.DA */ + emul->reg = ((sub_opc2 > 1) ? bit32_extract(ins, 8, 4) : bit32_extract(ins, 8, 4) + 16); + emul->reg_width = (sub_opc2 % 2 == 0 ? 4 : 8); + ret = true; + break; + + case 0x09: + case 0x29: + /* LSB(sub_opc) + LD.B = 0x0 D 1 + LD.BU = 0x1 D 1 + LD.H = 0x2 D 2 + LD.HU = 0x3 D 2 + LD.W = 0x4 D 4 + LD.D = 0x5 D 8 + LD.A = 0x6 A 4 + LD.DA = 0x7 A 8 + LD.Q = 0x8 D 2 + LD.DD = 0x9 D 16 + + */ + case 0xA9: + case 0x89: + /* LSB(sub_opc) + ST.B = 0x0 D 1 + ST.H = 0x2 D 2 + ST.W = 0x4 D 4 + ST.D = 0x5 D 8 + ST.A = 0x6 A 4 + ST.DA = 0x7 A 8 + ST.Q = 0x8 D 2 + ST.DD = 0x9 D 16 + */ + emul->reg = ((sub_opc == 0x6 || sub_opc == 0x7) ? bit32_extract(ins, 8, 4) : + bit32_extract(ins, 8, 4) + 16); + emul->reg_width = (sub_opc == 0x0) ? 1 : + (sub_opc == 0x9) ? 16 : + (sub_opc == 0x2 || sub_opc == 0x8) ? 2 : + (sub_opc == 0x4 || sub_opc == 0x6) ? 4 : + 8; + ret = true; + break; + case 0xB5: + /* ST.A */ + case 0x99: + /* LD.A */ + emul->reg = bit32_extract(ins, 8, 4); + emul->reg_width = 4; + ret = true; + break; + + case 0x25: + /* ST.B / ST.H */ + case 0x05: + /* LD.B / LD.H / LD.BU / LD.HU */ + emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = (sub_opc2 < 2) ? 
1 : 2; + ret = true; + break; + + case 0xE9: + /* ST.B */ + case 0x79: + case 0x39: + /* LD.B / LD.BU */ + emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = 1; + ret = true; + break; + + case 0xF9: + /* ST.H */ + case 0xB9: + case 0xC9: + /* LD.H / LD.HU */ + emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = 2; + ret = true; + break; + + case 0x59: + /* ST.W */ + case 0x19: + /* LD.W */ + emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = 4; + ret = true; + break; + + case 0x65: + /* ST.Q */ + case 0x45: + /* LD.Q */ + /* TODO: Verify*/ + emul->reg = bit32_extract(ins, 8, 4) + 16; + emul->reg_width = 2; + ret = true; + break; + + case 0xD5: + /* ST.T */ + /* Store bit, how to deal with it? */ + break; + + default: + break; + } + + return ret; +} + +static bool decode_cfr_access(unsigned long ins, struct emul_access* emul) +{ + unsigned long opcode = bit32_extract(ins, 0, 8); + bool ret = false; + + switch (opcode) { + case 0x4D: + /* MFCR */ + emul->write = false; + emul->reg = bit32_extract(ins, 28, 31) + 16; + emul->reg_width = 4; + break; + + case 0x4F: + /* MFDCR*/ + emul->write = false; + emul->reg = bit32_extract(ins, 28, 31) + 16; + emul->reg_width = 8; + break; + + case 0xCD: + /* MTCR */ + emul->write = true; + emul->reg = bit32_extract(ins, 28, 31) + 16; + emul->reg_width = 4; + break; + + case 0xCF: + /* MTDCR*/ + emul->write = true; + emul->reg = bit32_extract(ins, 28, 31) + 16; + emul->reg_width = 8; + break; + + default: + break; + } + return ret; +} + +void l2_dmem_prot_trap_handler(unsigned long* addr, unsigned long access) +{ + struct emul_access emul; + /* Give bao the same read permissions on the mpu */ + /* We save the bao prs bitmap, and we OR it with the guest prs */ + volatile unsigned long hyp_d_r_entries = csfr_dpre_0_read(); + unsigned long vmid = cpu()->vcpu->vm->id; + + volatile unsigned long vm_d_r_entries = get_dpre(vmid + 1); + + volatile unsigned long perms = hyp_d_r_entries | vm_d_r_entries; + 
set_dpre(0, perms); + + fence_sync(); + + unsigned long ins = *(unsigned long*)addr; + + unsigned long opcode = bit32_extract(ins, 0, 8); + + volatile bool reg = 0; + + unsigned long access_addr = csfr_deadd_read(); + + emul_handler_t handler = vm_emul_get_mem(cpu()->vcpu->vm, access_addr); + + if (handler != NULL) { + // Only adjust the return addr if there is an emul_handler. + if (opcode % 2 == 0) { + reg = decode_16b_access(ins, &emul); + cpu()->vcpu->regs.lower_ctx.a11 += 2; + } else { + reg = decode_32b_access(ins, &emul); + cpu()->vcpu->regs.lower_ctx.a11 += 4; + } + + if (reg == false) { + return; + } + + emul.addr = access_addr; + emul.width = emul.reg_width; + emul.write = (access) ? true : false; + emul.sign_ext = false; + + handler(&emul); + } + + set_dpre(0, hyp_d_r_entries); +} + +void hvcall_handler(unsigned long function_id, unsigned long dev_id, unsigned long event_id) +{ + UNUSED_ARG(dev_id); + UNUSED_ARG(event_id); + + if ((function_id & 0xFFFF0000) == 0xDEAD0000) { + hypercall(function_id & 0x3); + } +} + +static void csfr_emul_handler(struct emul_access * emul, unsigned long csfr) +{ + UNUSED_ARG(emul); + switch (csfr) { + default: + break; + } + return; +} + +void hyp_csfr_access_handler(unsigned long* addr, unsigned long hvtin) +{ + struct emul_access emul; + /* Give bao the same read permissions on the mpu */ + /* We save the bao prs bitmap, and we OR it with the guest prs */ + volatile unsigned long hyp_d_r_entries = csfr_dpre_0_read(); + unsigned long vmid = cpu()->vcpu->vm->id; + + volatile unsigned long vm_d_r_entries = get_dpre(vmid + 1); + + volatile unsigned long perms = hyp_d_r_entries | vm_d_r_entries; + set_dpre(0, perms); + + fence_sync(); + + unsigned long ins = *(unsigned long*)addr; + + decode_cfr_access(ins, &emul); + + csfr_emul_handler(&emul, (hvtin & 0xFFFF)); + + cpu()->vcpu->regs.lower_ctx.a11 += 4; + + set_dpre(0, hyp_d_r_entries); +} diff --git a/src/arch/tricore/vir.c b/src/arch/tricore/vir.c new file mode 100644 index 
000000000..00e041875 --- /dev/null +++ b/src/arch/tricore/vir.c @@ -0,0 +1,125 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern volatile const size_t VIR_IPI_ID; +extern volatile bitmap_granule_t* valid; + +#define VIR_MSG_VM(DATA) ((DATA) >> 48) +#define VIR_MSG_VIRRID(DATA) (((DATA) >> 32) & 0xffff) +#define VIR_MSG_INTID(DATA) (((DATA) >> 16) & 0x7fff) +#define VIR_MSG_VAL(DATA) ((DATA) & 0xff) + +void vir_ipi_handler(uint32_t event, uint64_t data); +CPU_MSG_HANDLER(vir_ipi_handler, VIR_IPI_ID) + +static void vir_emul_src_access(struct emul_access* acc, struct vcpu* vcpu, unsigned long irqid) +{ + /* TODO consider 16bit access */ + if (acc->write) { + uint32_t orig = (unsigned long)ir_src_get_node(irqid); + uint32_t val = vcpu_readreg(vcpu, acc->reg); + uint32_t tos = IR_SRC_GET_TOS(val); + uint32_t orig_tos = IR_SRC_GET_TOS(orig); + uint32_t vm = IR_SRC_GET_VM(orig); + + /* TODO: Validate is the tos belongs to the VM or the DMA belongs to BM */ + if (tos > PLAT_CPU_NUM) { + WARNING("Other TOS receiving the interrupt is currently not supported!"); + return; + } + + if (orig_tos != 0xF && orig_tos != cpu()->id) { + // WARNING("TOS is already set up (%d)!", orig_tos); + return; + } + + uint32_t out = ((val & (unsigned long)~(0x7 << 8)) | vm << 8); + + ir_src_set_node(irqid, out); + } else { + uint32_t val = ir_src->SRC[irqid]; + + vcpu_writereg(vcpu, acc->reg, val); + } +} + +static bool ir_src_emul_handler(struct emul_access* acc) +{ + uint32_t addr = acc->addr; + + uint32_t irqid = (addr - platform.arch.ir.src_addr) / sizeof(ir_src->SRC[0]); + + if (!vm_has_interrupt(cpu()->vcpu->vm, irqid)) { + ERROR("Access to unsigned interrupt %u", irqid); + return false; + } + + vir_emul_src_access(acc, cpu()->vcpu, irqid); + return true; +} + +void vir_inject(struct vcpu* vcpu, irqid_t id) 
+{ + struct vm* vm = vcpu->vm; + + if (!vm_has_interrupt(vm, id)) { + ERROR("VM tried to access unassigned interrupt"); + } + + ir_set_pend(id); +} + +void vir_ipi_handler(uint32_t event, uint64_t data) +{ + uint16_t vm_id = (uint16_t)VIR_MSG_VM(data); + uint16_t virr_id = (uint16_t)VIR_MSG_VIRRID(data); + irqid_t int_id = VIR_MSG_INTID(data); + uint64_t val = VIR_MSG_VAL(data); + + /* TODO do we need this? */ + (void)event; + (void)vm_id; + (void)virr_id; + (void)int_id; + (void)val; +} + +void vir_init(struct vm* vm) +{ + if (DEFINED(MMIO_SLAVE_SIDE_PROT)) { + // Make sure the SRC is unmapped from the MMIO regions + mem_unmap(&vm->as, (vaddr_t)platform.arch.ir.src_addr, + NUM_PAGES(sizeof(struct ir_src_hw)), false ); + } + else { + // Map access to the GPSR nodes in INT. + mem_alloc_map_dev(&vm->as, SEC_VM_ANY, + (vaddr_t)(platform.arch.ir.GPSR_offset + platform.arch.ir.int_addr), + (vaddr_t)(platform.arch.ir.GPSR_offset + platform.arch.ir.int_addr), + NUM_PAGES(platform.arch.ir.GPSR_size)); + } + + // Install emul handler for accesses to the SRC + vm->arch.vir_src_emul = (struct emul_mem){ .va_base = (vaddr_t)platform.arch.ir.src_addr, + .size = ALIGN(sizeof(struct ir_src_hw), PAGE_SIZE), + .handler = ir_src_emul_handler }; + vm_emul_add_mem(vm, &vm->arch.vir_src_emul); +} + +void vir_vcpu_init(struct vcpu* vcpu) +{ + ir_assign_icu_to_vm(cpu()->id, vcpu->vm); +} diff --git a/src/arch/tricore/vm.c b/src/arch/tricore/vm.c new file mode 100644 index 000000000..26e1c4b42 --- /dev/null +++ b/src/arch/tricore/vm.c @@ -0,0 +1,192 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* We assume that D regs are (16+reg_num) */ +long int a_lut[16] = { -1, -1, 2, 3, 8, 9, 10, 11, -1, -1, 2, 3, 8, 9, 10, 11 }; +long int d_lut[16] = { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 }; + +static void vm_ipi_init(struct vm* vm, const struct vm_config* vm_config) +{ + // Return if there are no groups to be assigned + if (vm_config->platform.arch.gspr_num == 0) { + return; + } + + for (unsigned long int i = 0; i < vm_config->platform.arch.gspr_num; i++) { + unsigned long group = (unsigned long)vm_config->platform.arch.gspr_groups[i]; + unsigned long gspr_src_node = GSPR_SRC_BASE + (8 * (group)); + if (group == 0) { + ERROR("GSPR group 0 is reserved for Bao internal use.") + } + // We need to clear the group 1st because it allows access to everyone by default + ir_clear_gspr_group(group); + ir_init_gspr_group(group, vm); + + for (unsigned long int node = gspr_src_node; node < gspr_src_node + 8; + node++) { // interrupt assign all src nodes from each group + interrupts_vm_assign(vm, (irqid_t)node); + } + } +} + +void vm_arch_init(struct vm* vm, const struct vm_config* vm_config) +{ + if (vm->master == cpu()->id) { + vir_init(vm); + vm_ipi_init(vm, vm_config); + } + cpu_sync_and_clear_msgs(&vm->sync); +} + +void vcpu_arch_init(struct vcpu* vcpu, struct vm* vm) +{ + unsigned long pcxo = + bit32_extract((uint32_t) & (vcpu->regs.upper_ctx), ADDR_PCXO_OFF, ADDR_PCXO_LEN); + unsigned long pcxs = + bit32_extract((uint32_t) & (vcpu->regs.upper_ctx), ADDR_PCXS_OFF, ADDR_PCXS_LEN); + + vcpu->regs.lower_ctx.pcxi = (1 << 21) | (1 << PCXI_UL_OFF) | (pcxs << PCXI_PCXS_OFF) | pcxo; + vcpu->regs.lower_ctx.a11 = vm->config->entry; + vcpu->regs.lower_ctx.a2 = 0; + vcpu->regs.lower_ctx.a3 = 0; + vcpu->regs.lower_ctx.d0 = 0; + vcpu->regs.lower_ctx.d1 = 0; + vcpu->regs.lower_ctx.d2 = 0; + vcpu->regs.lower_ctx.d3 = 0; + vcpu->regs.lower_ctx.a4 = 0; + vcpu->regs.lower_ctx.a5 = 0; 
+ vcpu->regs.lower_ctx.a6 = 0; + vcpu->regs.lower_ctx.a7 = 0; + vcpu->regs.lower_ctx.d4 = 0; + vcpu->regs.lower_ctx.d5 = 0; + vcpu->regs.lower_ctx.d6 = 0; + vcpu->regs.lower_ctx.d7 = 0; + + vcpu->regs.upper_ctx.pcxi = 0; + vcpu->regs.upper_ctx.csa_psw = 2 << 10; + vcpu->regs.upper_ctx.a10 = 0; + vcpu->regs.upper_ctx.a11 = 0; + vcpu->regs.upper_ctx.d8 = 0; + vcpu->regs.upper_ctx.d9 = 0; + vcpu->regs.upper_ctx.d10 = 0; + vcpu->regs.upper_ctx.d11 = 0; + vcpu->regs.upper_ctx.a12 = 0; + vcpu->regs.upper_ctx.a13 = 0; + vcpu->regs.upper_ctx.a14 = 0; + vcpu->regs.upper_ctx.a15 = 0; + vcpu->regs.upper_ctx.d12 = 0; + vcpu->regs.upper_ctx.d13 = 0; + vcpu->regs.upper_ctx.d14 = 0; + vcpu->regs.upper_ctx.d15 = 0; + + vcpu->regs.a0 = 0xFFFFFFFF; + vcpu->regs.a8 = 0xFFFFFFFF; + + vir_vcpu_init(vcpu); +} + +void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry) +{ + vcpu->regs.lower_ctx.a11 = entry; +} + +unsigned long vcpu_readreg(struct vcpu* vcpu, unsigned long reg) +{ + bool a_reg = (reg < 15) ? true : false; + unsigned long reg_num = a_reg ? reg : reg - 16; + if (reg_num < 8) { + return (a_reg) ? vcpu->regs.lower_array[a_lut[reg_num]] : + vcpu->regs.lower_array[d_lut[reg_num]]; + } else { + return (a_reg) ? vcpu->regs.upper_array[a_lut[reg_num]] : + vcpu->regs.upper_array[d_lut[reg_num]]; + } + + return 0; +} + +void vcpu_writereg(struct vcpu* vcpu, unsigned long reg, unsigned long val) +{ + bool a_reg = (reg < 15) ? true : false; + unsigned long reg_num = a_reg ? 
reg : reg - 16; + if (reg_num < 8) { + if (a_reg) { + vcpu->regs.lower_array[a_lut[reg_num]] = val; + } else { + vcpu->regs.lower_array[d_lut[reg_num]] = val; + } + } else { + if (a_reg) { + vcpu->regs.upper_array[a_lut[reg_num]] = val; + } else { + vcpu->regs.upper_array[d_lut[reg_num]] = val; + } + } +} + +unsigned long vcpu_readpc(struct vcpu* vcpu) +{ + return vcpu->regs.lower_ctx.a11; +} + +void vcpu_writepc(struct vcpu* vcpu, unsigned long pc) +{ + vcpu->regs.lower_ctx.a11 = pc; +} + +void vcpu_arch_run(struct vcpu* vcpu) +{ + (void)vcpu; + /* if (vcpu->arch.scr_ctx.state == STARTED) { */ + // TODO: All vms start running at "same" time? + if (1) { + vcpu_arch_entry(); + } /*else { + cpu_idle(); + }*/ +} + +bool vcpu_arch_is_on(struct vcpu* vcpu) +{ + UNUSED_ARG(vcpu); + return true; +} + +void vm_arch_allow_mmio_access (struct vm* vm, struct vm_dev_region * dev) +{ + for (unsigned long i = 0; i < platform.arch.device_num; i++) + { + if (dev->pa == platform.arch.devices[i].dev_base) + { + //Consider checking is vm cpu master + console_printk("MATCH\n"); + for (unsigned long apu = 0; apu < platform.arch.devices[i].apu_num; apu++){ + console_printk("APU[%d] - 0x%x\n", apu, platform.arch.devices[i].apu_addr[apu]); + apu_enable_access_vm((struct PROT_ACCESSEN*)(platform.arch.devices[i].apu_addr[apu]+platform.arch.devices[i].dev_base),vm->as.id); + for(unsigned long cpu = 0; cpu < platform.cpu_num; cpu++){ + if(vm->cpus & (1UL << cpu)) + apu_enable_access_cpu((struct PROT_ACCESSEN*)(platform.arch.devices[i].apu_addr[apu]+platform.arch.devices[i].dev_base),cpu); + } + } + for (unsigned long j = 0; j < platform.arch.devices[i].prot_num; j++){ + console_printk("PROT[%d] - 0x%x\n", j, platform.arch.devices[i].prot_addr[j]); + //prot_set_state((volatile prottos_t*)(platform.arch.devices[j].prot_addr[j]+platform.arch.devices[i].dev_base), PROT_STATE_CONFIG); + //prot_set_vm((volatile prottos_t*)(platform.arch.devices[i].prot_addr[j]+platform.arch.devices[i].dev_base), 0, 
true); + //prot_enable((volatile prottos_t*)(platform.arch.devices[i].prot_addr[j]+platform.arch.devices[i].dev_base)); + //prot_set_state((volatile prottos_t*)(platform.arch.devices[i].prot_addr[j]+platform.arch.devices[i].dev_base), PROT_STATE_RUN); + *(unsigned long *)(platform.arch.devices[i].prot_addr[j]+platform.arch.devices[i].dev_base) = 0xC009000C; + } + } + } +} diff --git a/src/arch/tricore/vmm.c b/src/arch/tricore/vmm.c new file mode 100644 index 000000000..29a7f1dc5 --- /dev/null +++ b/src/arch/tricore/vmm.c @@ -0,0 +1,9 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include +#include + +void vmm_arch_init() { } diff --git a/src/core/inc/platform.h b/src/core/inc/platform.h index ac91015f1..11d8adee3 100644 --- a/src/core/inc/platform.h +++ b/src/core/inc/platform.h @@ -21,6 +21,8 @@ struct platform { size_t region_num; struct mem_region* regions; + size_t mmio_region_num; + struct mem_region* mmio_regions; struct { paddr_t base; } console; diff --git a/src/core/inc/vm.h b/src/core/inc/vm.h index 2d0686ed8..1eabed978 100644 --- a/src/core/inc/vm.h +++ b/src/core/inc/vm.h @@ -183,5 +183,6 @@ unsigned long vcpu_readpc(struct vcpu* vcpu); void vcpu_writepc(struct vcpu* vcpu, unsigned long pc); void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry); bool vcpu_arch_is_on(struct vcpu* vcpu); +void vm_arch_allow_mmio_access(struct vm* vm, struct vm_dev_region* dev); #endif /* __VM_H__ */ diff --git a/src/core/mmu/inc/mem_prot/mem.h b/src/core/mmu/inc/mem_prot/mem.h index 489852851..b52107b79 100644 --- a/src/core/mmu/inc/mem_prot/mem.h +++ b/src/core/mmu/inc/mem_prot/mem.h @@ -10,8 +10,6 @@ #include #include #include - -#define HYP_ASID 0 struct addr_space { struct page_table pt; enum AS_TYPE type; @@ -23,7 +21,7 @@ struct addr_space { typedef pte_t mem_flags_t; -void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt, colormap_t colors); +void 
as_init(struct addr_space* as, enum AS_TYPE type, pte_t* root_pt, colormap_t colors); vaddr_t mem_alloc_vpage(struct addr_space* as, enum AS_SEC section, vaddr_t at, size_t n); #endif /* __MEM_PROT_H__ */ diff --git a/src/core/mmu/mem.c b/src/core/mmu/mem.c index 7baa8f6af..1de68db02 100644 --- a/src/core/mmu/mem.c +++ b/src/core/mmu/mem.c @@ -764,7 +764,7 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio * the new CPU region is created, cleaned, prepared and finally mapped. */ cpu_new = copy_space((void*)BAO_CPU_BASE, sizeof(struct cpu), &p_cpu); - as_init(&cpu_new->as, AS_HYP_CPY, HYP_ASID, NULL, colors); + as_init(&cpu_new->as, AS_HYP_CPY, NULL, colors); va = mem_alloc_vpage(&cpu_new->as, SEC_HYP_PRIVATE, (vaddr_t)BAO_CPU_BASE, NUM_PAGES(sizeof(struct cpu))); if (va != (vaddr_t)BAO_CPU_BASE) { @@ -860,7 +860,7 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio while (shared_pte != 0) { } } - as_init(&cpu()->as, AS_HYP, HYP_ASID, (void*)v_root_pt_addr, colors); + as_init(&cpu()->as, AS_HYP, (void*)v_root_pt_addr, colors); /* * Clear the old region that have been copied. @@ -900,13 +900,29 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio mem_unmap(&cpu()->as, va, p_cpu.num_pages, false); } -void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt, colormap_t colors) +static unsigned long as_id_alloc(struct addr_space* as) +{ + static spinlock_t as_id_alloc_lock = SPINLOCK_INITVAL; + static asid_t asid_counter = 1; + unsigned long ret = 0; + + spin_lock(&as_id_alloc_lock); + if (as->type != AS_HYP) { + ret = asid_counter; + asid_counter++; + } + spin_unlock(&as_id_alloc_lock); + + return ret; +} + +void as_init(struct addr_space* as, enum AS_TYPE type, pte_t* root_pt, colormap_t colors) { as->type = type; as->pt.dscr = type == AS_HYP || type == AS_HYP_CPY ? 
hyp_pt_dscr : vm_pt_dscr; + as->id = as_id_alloc(as); as->colors = colors; as->lock = SPINLOCK_INITVAL; - as->id = id; if (root_pt == NULL) { size_t n = NUM_PAGES(pt_size(&as->pt, 0)); @@ -922,7 +938,7 @@ void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt void mem_prot_init(void) { pte_t* root_pt = (pte_t*)ALIGN(((vaddr_t)cpu()) + sizeof(struct cpu), PAGE_SIZE); - as_init(&cpu()->as, AS_HYP, HYP_ASID, root_pt, config.hyp.colors); + as_init(&cpu()->as, AS_HYP, root_pt, config.hyp.colors); } vaddr_t mem_alloc_map(struct addr_space* as, enum AS_SEC section, struct ppages* page, vaddr_t at, diff --git a/src/core/mmu/vm.c b/src/core/mmu/vm.c index ccf610fe0..44c3f4388 100644 --- a/src/core/mmu/vm.c +++ b/src/core/mmu/vm.c @@ -10,5 +10,5 @@ void vm_mem_prot_init(struct vm* vm, const struct vm_config* vm_config) { - as_init(&vm->as, AS_VM, vm->id, NULL, vm_config->colors); + as_init(&vm->as, AS_VM, NULL, vm_config->colors); } diff --git a/src/core/mpu/inc/mem_prot/mem.h b/src/core/mpu/inc/mem_prot/mem.h index 2df86d1a3..d0cfd2169 100644 --- a/src/core/mpu/inc/mem_prot/mem.h +++ b/src/core/mpu/inc/mem_prot/mem.h @@ -12,9 +12,7 @@ #include #include -#define HYP_ASID 0 #define VMPU_NUM_ENTRIES 64 - struct mp_region { vaddr_t base; size_t size; @@ -25,7 +23,6 @@ struct mp_region { struct addr_space { asid_t id; enum AS_TYPE type; - cpumap_t cpus; colormap_t colors; struct addr_space_arch arch; struct { @@ -41,7 +38,7 @@ struct addr_space { spinlock_t lock; }; -void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, cpumap_t cpus, colormap_t colors); +void as_init(struct addr_space* as, enum AS_TYPE type, colormap_t colors); static inline bool mem_regions_overlap(struct mp_region* reg1, struct mp_region* reg2) { diff --git a/src/core/mpu/mem.c b/src/core/mpu/mem.c index 9a202d10b..840404944 100644 --- a/src/core/mpu/mem.c +++ b/src/core/mpu/mem.c @@ -203,7 +203,7 @@ static void mem_init_boot_regions(void) void mem_prot_init() { 
mpu_init(); - as_init(&cpu()->as, AS_HYP, HYP_ASID, BIT_MASK(0, PLAT_CPU_NUM), 0); + as_init(&cpu()->as, AS_HYP, 0); mem_init_boot_regions(); mpu_enable(); } @@ -214,14 +214,38 @@ size_t mem_cpu_boot_alloc_size() return size; } -void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, cpumap_t cpus, colormap_t colors) +static void mem_mmio_init_regions(struct addr_space* as) +{ + for (unsigned long i = 0; i < platform.mmio_region_num; i++) { + mem_alloc_map_dev(as, as->type == AS_VM ? SEC_VM_ANY : SEC_HYP_ANY, + platform.mmio_regions[i].base, platform.mmio_regions[i].base, + NUM_PAGES(platform.mmio_regions[i].size)); + } +} + +static unsigned long as_id_alloc(struct addr_space* as) +{ + static spinlock_t as_id_alloc_lock = SPINLOCK_INITVAL; + static asid_t asid_counter = 1; + unsigned long ret = 0; + + spin_lock(&as_id_alloc_lock); + if (as->type != AS_HYP) { + ret = asid_counter; + asid_counter++; + } + spin_unlock(&as_id_alloc_lock); + + return ret; +} + +void as_init(struct addr_space* as, enum AS_TYPE type, colormap_t colors) { UNUSED_ARG(colors); as->type = type; as->colors = 0; - as->id = id; - as->cpus = cpus; + as->id = as_id_alloc(as); as->lock = SPINLOCK_INITVAL; as_arch_init(as); @@ -230,6 +254,12 @@ void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, cpumap_t cpus, for (size_t i = 0; i < VMPU_NUM_ENTRIES; i++) { mem_vmpu_free_entry(as, i); } + + /* For architectures with slave-side mmio protection, we map all the + mmio regions to be accessible to all address spaces */ + if (DEFINED(MMIO_SLAVE_SIDE_PROT)) { + mem_mmio_init_regions(as); + } } static void mem_free_ppages(struct ppages* ppages) diff --git a/src/core/mpu/vm.c b/src/core/mpu/vm.c index 5371f7d74..4aac457b7 100644 --- a/src/core/mpu/vm.c +++ b/src/core/mpu/vm.c @@ -9,5 +9,5 @@ void vm_mem_prot_init(struct vm* vm, const struct vm_config* config) { UNUSED_ARG(config); - as_init(&vm->as, AS_VM, vm->id, vm->cpus, 0); + as_init(&vm->as, AS_VM, 0); } diff --git 
a/src/core/vm.c b/src/core/vm.c index e3bbadc8c..e14b01183 100644 --- a/src/core/vm.c +++ b/src/core/vm.c @@ -20,15 +20,12 @@ static void vm_master_init(struct vm* vm, const struct vm_config* vm_config, vmi vm->lock = SPINLOCK_INITVAL; cpu_sync_init(&vm->sync, vm->cpu_num); - - vm_mem_prot_init(vm, vm_config); } static void vm_cpu_init(struct vm* vm) { spin_lock(&vm->lock); vm->cpus |= (1UL << cpu()->id); - vm->as.cpus |= (1UL << cpu()->id); spin_unlock(&vm->lock); } @@ -211,9 +208,10 @@ static void vm_init_dev(struct vm* vm, const struct vm_config* vm_config) for (size_t i = 0; i < vm_config->platform.dev_num; i++) { struct vm_dev_region* dev = &vm_config->platform.devs[i]; - size_t n = ALIGN(dev->size, PAGE_SIZE) / PAGE_SIZE; - - if (dev->va != INVALID_VA) { + if (DEFINED(MMIO_SLAVE_SIDE_PROT)) { + vm_arch_allow_mmio_access(vm, dev); + } else if (dev->va != INVALID_VA) { + size_t n = ALIGN(dev->size, PAGE_SIZE) / PAGE_SIZE; mem_alloc_map_dev(&vm->as, SEC_VM_ANY, (vaddr_t)dev->va, dev->pa, n); } @@ -320,6 +318,12 @@ struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* vm_co cpu_sync_barrier(&vm->sync); + if (master) { + vm_mem_prot_init(vm, vm_config); + } + + cpu_sync_barrier(&vm->sync); + /** * Perform architecture dependent initializations. This includes, for example, setting the page * table pointer and other virtualization extensions specifics. 
@@ -423,3 +427,11 @@ void vcpu_run(struct vcpu* vcpu) cpu_powerdown(); } } + +__attribute__((weak)) void vm_arch_allow_mmio_access(struct vm* vm, struct vm_dev_region* dev) +{ + UNUSED_ARG(dev); + UNUSED_ARG(vm); + //ERROR("vm_arch_allow_mmio_access must be implemented by the arch!") + return; +} diff --git a/src/platform/drivers/tricore_uart/inc/drivers/tricore_uart.h b/src/platform/drivers/tricore_uart/inc/drivers/tricore_uart.h new file mode 100644 index 000000000..dccc122ce --- /dev/null +++ b/src/platform/drivers/tricore_uart/inc/drivers/tricore_uart.h @@ -0,0 +1,67 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __UART_TRICORE_H +#define __UART_TRICORE_H + +#include +#include +#include + +struct asclin0_hw { + volatile uint8_t RESERVED[0x200]; +}; + +struct rst { + volatile uint32_t CTRLA; + volatile uint32_t CTRLB; + volatile uint32_t STAT; +}; + +struct asclin_hw { + volatile uint32_t CLC; + volatile uint32_t OCS; + volatile uint32_t ID; + struct rst RST; + volatile uint8_t RESERVED[0x20 - 0x14 - 0x4]; + volatile uint32_t PROTE; + volatile uint32_t PROTSE; + volatile uint8_t RESERVED1[0x40 - 0x24 - 0x4]; + struct PROT_ACCESSEN ACCEN; + volatile uint8_t RESERVED2[0x100 - 0x54 - 0x4]; + volatile uint32_t IOCR; + volatile uint32_t TXFIFOCON; + volatile uint32_t RXFIFOCON; + volatile uint32_t BITCON; + volatile uint32_t FRAMECON; + volatile uint32_t DATCON; + volatile uint32_t BRG; + volatile uint32_t BRD; + volatile uint32_t LINCON; + volatile uint32_t LINBTIMER; + volatile uint32_t LINHTIMER; + volatile uint32_t FLAGS; + volatile uint32_t FLAGSSET; + volatile uint32_t FLAGSCLEAR; + volatile uint32_t FLAGSENABLE; + volatile uint32_t CSR; + volatile uint32_t TXDATA[8]; + volatile uint32_t RXDATA[8]; + volatile uint32_t RXDATAD; + volatile uint8_t RESERVED3[0x200 - 0x180 - 0x4]; +}; + +typedef struct asclin_hw bao_uart_t; + +/** Public Tricore UART interfaces */ + +bool 
uart_init(volatile struct asclin_hw* uart); +void uart_enable(volatile struct asclin_hw* uart); +void uart_disable(volatile struct asclin_hw* uart); +bool uart_set_baud_rate(volatile struct asclin_hw* uart, uint32_t baud_rate); +uint32_t uart_getc(volatile struct asclin_hw* uart); +void uart_putc(volatile struct asclin_hw* uart, int8_t c); + +#endif /* __UART_TRICORE_H */ diff --git a/src/platform/drivers/tricore_uart/objects.mk b/src/platform/drivers/tricore_uart/objects.mk new file mode 100644 index 000000000..a84f63f59 --- /dev/null +++ b/src/platform/drivers/tricore_uart/objects.mk @@ -0,0 +1,4 @@ +## SPDX-License-Identifier: Apache-2.0 +## Copyright (c) Bao Project and Contributors. All rights reserved. + +drivers-objs-y+=tricore_uart/tricore_uart.o diff --git a/src/platform/drivers/tricore_uart/tricore_uart.c b/src/platform/drivers/tricore_uart/tricore_uart.c new file mode 100644 index 000000000..5488286b6 --- /dev/null +++ b/src/platform/drivers/tricore_uart/tricore_uart.c @@ -0,0 +1,103 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#include +#include + +#define NUMERATOR 2 * 4 +#define DENOMINATOR 0xD9 + +static inline void uart_enable_clock(volatile struct asclin_hw* uart, uint32_t clk_mode) +{ + uart->CSR = clk_mode; + while ((uart->CSR) >> 31) + ; +} + +static inline void uart_disable_clock(volatile struct asclin_hw* uart) +{ + uart->CSR = 0; + while ((uart->CSR) >> 31) + ; +} + +bool uart_init(volatile struct asclin_hw* uart) +{ + // Enable module + uart->CLC = 0x0; + + uart_disable_clock(uart); + + // set module to initilise mode + uart->FRAMECON = 0; + // set prescaler to 1 + uart->BITCON = 1; + + uart_enable_clock(uart, 2); + uart_disable_clock(uart); + + // Set baudrate to 115200 + uart->BITCON |= 0x880F0000; + uart->BRG = NUMERATOR << 16 | DENOMINATOR; + + uart_enable_clock(uart, 2); + uart_disable_clock(uart); + + uart->RXFIFOCON = 1 << 6; // outlet width = 1 + uart->TXFIFOCON = 1 << 6; // inlet width 1 + uart->DATCON = 0x7; + uart->FRAMECON |= 0x1 << 16 | 1 << 9; + + uart_enable_clock(uart, 2); + + uart->FLAGSENABLE = 0; + uart->FLAGSCLEAR = 0xFFFFFFFF; + + uart->FLAGSENABLE = 1 << 16 | 1 << 18 | 1 << 26 | 1 << 27 | 1 << 30; // parity error PEE, frame + // error, rxfifooverflow, + // rxfifounderflow, + // txfifooverflow + + // Enable fifo outlet + uart->TXFIFOCON |= 0x2; + uart->RXFIFOCON |= 0x2; + + // Flush TxFIFO + uart->TXFIFOCON |= 1; + uart->RXFIFOCON |= 1; + + return true; +} + +void uart_enable(volatile struct asclin_hw* uart) +{ + UNUSED_ARG(uart); +} + +void uart_disable(volatile struct asclin_hw* uart) +{ + UNUSED_ARG(uart); +} + +bool uart_set_baud_rate(volatile struct asclin_hw* uart, uint32_t baud_rate) +{ + UNUSED_ARG(uart); + UNUSED_ARG(baud_rate); + return true; +} + +uint32_t uart_getc(volatile struct asclin_hw* uart) +{ + UNUSED_ARG(uart); + return 0; +} + +void uart_putc(volatile struct asclin_hw* uart, int8_t c) +{ + uart->TXDATA[0] = (uint32_t)c; + while (!(uart->FLAGS & (1UL << 31))) + ; + uart->FLAGSCLEAR = 0xFFFFFFFF; +} diff --git 
a/src/platform/tc4dx/inc/plat/platform.h b/src/platform/tc4dx/inc/plat/platform.h new file mode 100644 index 000000000..3d09b06cd --- /dev/null +++ b/src/platform/tc4dx/inc/plat/platform.h @@ -0,0 +1,16 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __PLAT_PLATFORM_H__ +#define __PLAT_PLATFORM_H__ + +#define UART8250_REG_WIDTH (4) +#define UART8250_PAGE_OFFSET (0x40) + +#include + +void platform_cpu_init(cpuid_t cpuid, paddr_t load_addr); + +#endif diff --git a/src/platform/tc4dx/objects.mk b/src/platform/tc4dx/objects.mk new file mode 100644 index 000000000..ba75047ec --- /dev/null +++ b/src/platform/tc4dx/objects.mk @@ -0,0 +1,6 @@ +## SPDX-License-Identifier: Apache-2.0 +## Copyright (c) Bao Project and Contributors. All rights reserved. + +boards-objs-y+=tc4dx_desc.o platform.o +drivers = tricore_uart + diff --git a/src/platform/tc4dx/platform.c b/src/platform/tc4dx/platform.c new file mode 100644 index 000000000..d4fd53762 --- /dev/null +++ b/src/platform/tc4dx/platform.c @@ -0,0 +1,44 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#include + +#include +#include +#include + +#define PORT14 0xF003D800UL + +void platform_default_init(void) +{ + mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, 0xF003D400, 0x800); + + unsigned long* P14_0_DRVCFG = (unsigned long*)(PORT14 + 0x304); + unsigned long* P14_1_DRVCFG = (unsigned long*)(PORT14 + 0x304 + 0x10); + + // UART PINS P14.1 Rx and P14.0 Tx + *P14_0_DRVCFG = 0x21; + *P14_1_DRVCFG = 0; + + struct PROT_ACCESSEN* gpio_accessen = (struct PROT_ACCESSEN*)(PORT14 + 0x90); + + for (unsigned long i = 0; i < platform.cpu_num; i++) { + apu_enable_access_cpu(gpio_accessen, i); + } + + mem_unmap(&cpu()->as, 0xF003D400, 0x800, 0); +} + +void platform_cpu_init(cpuid_t cpuid, paddr_t load_addr) +{ + for (size_t coreid = 0; coreid < platform.cpu_num; coreid++) { + if (coreid == cpuid) { + continue; + } + + csfr_cpu_pc_write(coreid, load_addr); + csfr_cpu_bootcon_write(coreid, 0); + } +} diff --git a/src/platform/tc4dx/platform.mk b/src/platform/tc4dx/platform.mk new file mode 100644 index 000000000..848058506 --- /dev/null +++ b/src/platform/tc4dx/platform.mk @@ -0,0 +1,17 @@ +## SPDX-License-Identifier: Apache-2.0 +## Copyright (c) Bao Project and Contributors. All rights reserved. + +# Architecture definition +ARCH:=tricore + +drivers = tricore_uart + +platform_description:=tc4dx_desc.c + +TRICORE_MCPU=tc4DAx +#TRICORE_MCPU=tc4xx + +platform-cppflags = +platform-cflags=-mcpu=$(TRICORE_MCPU) -gdwarf-4 +platform-asflags=-mcpu=$(TRICORE_MCPU) +platform-ldflags=--mcpu=tc18 --no-warn-rwx-segments \ No newline at end of file diff --git a/src/platform/tc4dx/tc4dx_desc.c b/src/platform/tc4dx/tc4dx_desc.c new file mode 100644 index 000000000..6b54c9694 --- /dev/null +++ b/src/platform/tc4dx/tc4dx_desc.c @@ -0,0 +1,175 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#include + +#ifdef GENERATING_DEFS +uint32_t plat_ints[] = { 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 106, 107, 120, 121, 122, 126, 127, 128, 132, 133, + 135, 136, 140, 141, 142, 143, 144, 145, 146, 147, 148, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 260, + 261, 262, 263, 264, 265, 266, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, + 312, 313, 314, 315, 316, 317, 318, 319, 336, 337, 340, 341, 342, 343, 344, 345, 346, 347, 348, + 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, + 368, 369, 370, 371, 372, 373, 376, 377, 378, 380, 381, 382, 384, 385, 386, 388, 389, 390, 391, + 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 436, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, + 456, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, + 497, 
498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, + 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, + 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, + 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, + 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, + 592, 593, 594, 595, 604, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, + 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, + 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 736, 737, 740, 741, 742, 743, 744, + 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 764, 765, 766, + 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 788, + 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, + 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, + 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, + 865, 866, 867, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 900, 901, 902, 903, + 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, + 923, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 1076, 1077, 1078, 1079, 1080, + 1081, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1144, 1145, 1146, 1147, 1148, + 1149, 1150, 1151, 1152, 1153, 1156, 1157, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, + 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 
1179, 1180, 1181, 1182, 1183, 1184, + 1185, 1188, 1190, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1204, 1205, 1206, 1207, 1208, + 1209, 1210, 1211, 1212, 1213, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1224, 1225, 1226, 1227, + 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1248, 1249, 1250, 1251, + 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, + 1268, 1269, 1270, 1271, 1272, 1273, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, + 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1300, 1301, 1304, 1305, + 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, + 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, + 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, + 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1924, 1927, + 1928, 1929, 1930, 1940, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, + 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1992, 1993, 1994, 1995, + 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, + 2012, 2013, 2014, 2015, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, + 2035 }; +const uint32_t plat_int_size = sizeof(plat_ints) / sizeof(plat_ints[0]); +#endif + +struct platform platform = { + + .cpu_num = 6, + .cpu_master_fixed = true, + .cpu_master = 0, + + .region_num = 3, + .regions = (struct mem_region[]) { + +/* { //FLASH + .base = 0xA0000000, + .size = 0x20000, + .perms = RX, + },*/ +/*{ + // DRAM, 0GB-2GB + .base = 0x90080000, + .size = 0x80000, + .perms = RWX, +},*/ + +#ifdef MEM_NON_UNIFIED + { + .base = 0xA0000000, + .size = 0x200000, + .perms = MEM_RX, + }, +#endif + 
{ + // DRAM, 0GB-2GB + .base = 0x90000000, + .size = 0x80000, + .perms = MEM_RWX, + }, +#ifndef MEM_NON_UNIFIED + { + // DRAM, 0GB-2GB + .base = 0x90080000, + .size = 0x80000, + .perms = MEM_RWX, + },/* + { + // DRAM, 0GB-2GB + .base = 0x70000000, + .size = 0x100000, + .perms = MEM_RWX, + },*/ +#endif + { + // DRAM, 0GB-2GB + .base = 0x90100000, + .size = 0x100000, + .perms = MEM_RWX, + }, + /*{ + // DRAM, 0GB-2GB + .base = 0x90100000, + .size = 0x80000, + .perms = MEM_RWX, + },*/ + + /*{ + // DRAM, 0GB-2GB + .base = 0x70000000, + .size = 0x40000, + .perms = MEM_RWX, + },*/ + + + }, + .mmio_region_num = 1, + .mmio_regions = (struct mem_region[]) { + { + .base = 0xF0000000, + .size = 0xFFF0000, + }, + }, + .arch = { + .ir = { + .int_addr = 0xF4430000, + .src_addr = 0xF4432000, + //.interrupts = plat_ints, + //.num_interrupts = sizeof(plat_ints)/sizeof(plat_ints[0]), + .GPSR_offset = 0x700, + .GPSR_size = (0xB00 + (8*4)) - 0x700, + }, + .device_num = 1, + .devices = (struct plat_device []) { + /*{ + .dev_base = 0xF003D400, + .apu_num = 1, + .apu_addr = (unsigned long[]) {0x90}, + },*/ + { + .dev_base = 0xF46C0000, + .prot_num = 1, + .prot_addr = (unsigned long[]) {0x20}, + .apu_num = 1, + .apu_addr = (unsigned long[]) {0x40}, + }, + }, + }, + + .console = { + .base = 0xF46C0000, //ASCLIN0 + }, +};