diff --git a/boards/nordic/nrf54h20dk/nrf54h20dk_nrf54h20-memory_map.dtsi b/boards/nordic/nrf54h20dk/nrf54h20dk_nrf54h20-memory_map.dtsi index 834448f33e4..1e3bc7ba9bc 100644 --- a/boards/nordic/nrf54h20dk/nrf54h20dk_nrf54h20-memory_map.dtsi +++ b/boards/nordic/nrf54h20dk/nrf54h20dk_nrf54h20-memory_map.dtsi @@ -26,8 +26,17 @@ reg = <0x800 DT_SIZE_K(2)>; }; - cpuapp_data: memory@1000 { - reg = <0x1000 DT_SIZE_K(256)>; + /* todo: remove once cpuapp_ram0x_region can be used by cache helpers */ + cpuapp_cpusec_misc_shm: memory@1000 { + compatible = "zephyr,memory-region"; + reg = <0x1000 DT_SIZE_K(16)>; + #memory-region-cells = <0>; + zephyr,memory-region = "CPUAPP_CPUSEC_MISC_SHM"; + zephyr,memory-attr = <( DT_MEM_CACHEABLE )>; + }; + + cpuapp_data: memory@5000 { + reg = <0x5000 DT_SIZE_K(240)>; }; }; @@ -110,7 +119,7 @@ status = "disabled"; #memory-region-cells = <0>; zephyr,memory-region = "DMA_RAM21"; - zephyr,memory-attr = <( DT_MEM_CACHEABLE )>; + zephyr,memory-attr = <( DT_MEM_DMA | DT_MEM_CACHEABLE )>; }; }; @@ -154,6 +163,7 @@ status = "disabled"; #memory-region-cells = <0>; zephyr,memory-region = "DMA_RAM3x_APP"; + zephyr,memory-attr = <( DT_MEM_DMA )>; }; cpurad_dma_region: memory@1e80 { @@ -162,6 +172,7 @@ status = "disabled"; #memory-region-cells = <0>; zephyr,memory-region = "DMA_RAM3x_RAD"; + zephyr,memory-attr = <( DT_MEM_DMA )>; }; }; }; diff --git a/soc/nordic/common/CMakeLists.txt b/soc/nordic/common/CMakeLists.txt index 805113f53d6..abf8b80d3fa 100644 --- a/soc/nordic/common/CMakeLists.txt +++ b/soc/nordic/common/CMakeLists.txt @@ -9,6 +9,10 @@ zephyr_library_sources_ifdef(CONFIG_POWEROFF poweroff.c) zephyr_include_directories(.) 
+if(CONFIG_HAS_NORDIC_DMM) + zephyr_library_sources(dmm.c) +endif() + if(CONFIG_TFM_PARTITION_PLATFORM) zephyr_library_sources(soc_secure.c) zephyr_library_include_directories( diff --git a/soc/nordic/common/Kconfig b/soc/nordic/common/Kconfig index 54e2356c6af..8de20c37dd4 100644 --- a/soc/nordic/common/Kconfig +++ b/soc/nordic/common/Kconfig @@ -1,4 +1,7 @@ # Copyright (c) 2024 Nordic Semiconductor ASA # SPDX-License-Identifier: Apache-2.0 +config HAS_NORDIC_DMM + bool + rsource "vpr/Kconfig" diff --git a/soc/nordic/common/dmm.c b/soc/nordic/common/dmm.c new file mode 100644 index 00000000000..85f18dfa7e8 --- /dev/null +++ b/soc/nordic/common/dmm.c @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2024 Nordic Semiconductor ASA + * SPDX-License-Identifier: Apache-2.0 + */ + +#include <string.h> +#include <zephyr/cache.h> +#include <zephyr/devicetree.h> +#include <zephyr/kernel.h> +#include <zephyr/mem_mgmt/mem_attr.h> +#include "dmm.h" + +#define _FILTER_MEM(node_id, fn) \ + COND_CODE_1(DT_NODE_HAS_PROP(node_id, zephyr_memory_attr), (fn(node_id)), ()) +#define DT_MEMORY_REGION_FOREACH_STATUS_OKAY_NODE(fn) \ + DT_FOREACH_STATUS_OKAY_NODE_VARGS(_FILTER_MEM, fn) + +#define __BUILD_LINKER_END_VAR(_name) DT_CAT3(__, _name, _end) +#define _BUILD_LINKER_END_VAR(node_id) \ + __BUILD_LINKER_END_VAR(DT_STRING_UNQUOTED(node_id, zephyr_memory_region)) + +#define _BUILD_MEM_REGION(node_id) \ + {.dt_addr = DT_REG_ADDR(node_id), \ + .dt_size = DT_REG_SIZE(node_id), \ + .dt_attr = DT_PROP(node_id, zephyr_memory_attr), \ + .dt_allc = &_BUILD_LINKER_END_VAR(node_id)}, + +/* Generate declarations of linker variables used to determine size of preallocated variables + * stored in memory sections spanning over memory regions. + * These are used to determine memory left for dynamic bounce buffer allocator to work with.
+ */ +#define _DECLARE_LINKER_VARS(node_id) extern uint32_t _BUILD_LINKER_END_VAR(node_id); +DT_MEMORY_REGION_FOREACH_STATUS_OKAY_NODE(_DECLARE_LINKER_VARS); + +struct dmm_region { + uintptr_t dt_addr; + size_t dt_size; + uint32_t dt_attr; + void *dt_allc; +}; + +struct dmm_heap { + struct sys_heap heap; + const struct dmm_region *region; +}; + +static const struct dmm_region dmm_regions[] = { + DT_MEMORY_REGION_FOREACH_STATUS_OKAY_NODE(_BUILD_MEM_REGION) +}; + +struct { + struct dmm_heap dmm_heaps[ARRAY_SIZE(dmm_regions)]; +} dmm_heaps_data; + +static struct dmm_heap *dmm_heap_find(void *region) +{ + struct dmm_heap *dh; + + for (size_t idx = 0; idx < ARRAY_SIZE(dmm_heaps_data.dmm_heaps); idx++) { + dh = &dmm_heaps_data.dmm_heaps[idx]; + if (dh->region->dt_addr == (uintptr_t)region) { + return dh; + } + } + + return NULL; +} + +static bool is_region_cacheable(const struct dmm_region *region) +{ + return (IS_ENABLED(CONFIG_DCACHE) && (region->dt_attr & DT_MEM_CACHEABLE)); +} + +static bool is_buffer_within_region(uintptr_t start, size_t size, + uintptr_t reg_start, size_t reg_size) +{ + return ((start >= reg_start) && ((start + size) <= (reg_start + reg_size))); +} + +static bool is_user_buffer_correctly_preallocated(void const *user_buffer, size_t user_length, + const struct dmm_region *region) +{ + uintptr_t addr = (uintptr_t)user_buffer; + + if (!is_buffer_within_region(addr, user_length, region->dt_addr, region->dt_size)) { + return false; + } + + if (!is_region_cacheable(region)) { + /* Buffer is contained within non-cacheable region - use it as it is. */ + return true; + } + + if (IS_ALIGNED(addr, DMM_DCACHE_LINE_SIZE)) { + /* If buffer is in cacheable region it must be aligned to data cache line size. 
*/ + return true; + } + + return false; +} + +static size_t dmm_heap_start_get(struct dmm_heap *dh) +{ + return ROUND_UP(dh->region->dt_allc, DMM_DCACHE_LINE_SIZE); +} + +static size_t dmm_heap_size_get(struct dmm_heap *dh) +{ + return (dh->region->dt_size - (dmm_heap_start_get(dh) - dh->region->dt_addr)); +} + +static void *dmm_buffer_alloc(struct dmm_heap *dh, size_t length) +{ + length = ROUND_UP(length, DMM_DCACHE_LINE_SIZE); + return sys_heap_aligned_alloc(&dh->heap, DMM_DCACHE_LINE_SIZE, length); +} + +static void dmm_buffer_free(struct dmm_heap *dh, void *buffer) +{ + sys_heap_free(&dh->heap, buffer); +} + +int dmm_buffer_out_prepare(void *region, void const *user_buffer, size_t user_length, + void **buffer_out) +{ + struct dmm_heap *dh; + + if (user_length == 0) { + /* Assume that zero-length buffers are correct as they are. */ + *buffer_out = (void *)user_buffer; + return 0; + } + + /* Get memory region that specified device can perform DMA transfers from */ + dh = dmm_heap_find(region); + if (dh == NULL) { + return -EINVAL; + } + + /* Check if: + * - provided user buffer is already in correct memory region, + * - provided user buffer is aligned and padded to cache line, + * if it is located in cacheable region. 
+ */ + if (is_user_buffer_correctly_preallocated(user_buffer, user_length, dh->region)) { + /* If yes, assign buffer_out to user_buffer*/ + *buffer_out = (void *)user_buffer; + } else { + /* If no: + * - dynamically allocate buffer in correct memory region that respects cache line + * alignment and padding + */ + *buffer_out = dmm_buffer_alloc(dh, user_length); + /* Return error if dynamic allocation fails */ + if (*buffer_out == NULL) { + return -ENOMEM; + } + /* - copy user buffer contents into allocated buffer */ + memcpy(*buffer_out, user_buffer, user_length); + } + + /* Check if device memory region is cacheable + * If yes, writeback all cache lines associated with output buffer + * (either user or allocated) + */ + if (is_region_cacheable(dh->region)) { + sys_cache_data_flush_range(*buffer_out, user_length); + } + /* If no, no action is needed */ + + return 0; +} + +int dmm_buffer_out_release(void *region, void *buffer_out) +{ + struct dmm_heap *dh; + uintptr_t addr = (uintptr_t)buffer_out; + + /* Get memory region that specified device can perform DMA transfers from */ + dh = dmm_heap_find(region); + if (dh == NULL) { + return -EINVAL; + } + + /* Check if output buffer is contained within memory area + * managed by dynamic memory allocator + */ + if (is_buffer_within_region(addr, 0, dmm_heap_start_get(dh), dmm_heap_size_get(dh))) { + /* If yes, free the buffer */ + dmm_buffer_free(dh, buffer_out); + } + /* If no, no action is needed */ + + return 0; +} + +int dmm_buffer_in_prepare(void *region, void *user_buffer, size_t user_length, void **buffer_in) +{ + struct dmm_heap *dh; + + if (user_length == 0) { + /* Assume that zero-length buffers are correct as they are. 
*/ + *buffer_in = (void *)user_buffer; + return 0; + } + + /* Get memory region that specified device can perform DMA transfers to */ + dh = dmm_heap_find(region); + if (dh == NULL) { + return -EINVAL; + } + + /* Check if: + * - provided user buffer is already in correct memory region, + * - provided user buffer is aligned and padded to cache line, + * if it is located in cacheable region. + */ + if (is_user_buffer_correctly_preallocated(user_buffer, user_length, dh->region)) { + /* If yes, assign buffer_in to user_buffer */ + *buffer_in = user_buffer; + } else { + /* If no, dynamically allocate buffer in correct memory region that respects cache + * line alignment and padding + */ + *buffer_in = dmm_buffer_alloc(dh, user_length); + /* Return error if dynamic allocation fails */ + if (*buffer_in == NULL) { + return -ENOMEM; + } + } + + /* Check if device memory region is cacheable + * If yes, invalidate all cache lines associated with input buffer + * (either user or allocated) to clear potential dirty bits. 
+ */ + if (is_region_cacheable(dh->region)) { + sys_cache_data_invd_range(*buffer_in, user_length); + } + /* If no, no action is needed */ + + return 0; +} + +int dmm_buffer_in_release(void *region, void *user_buffer, size_t user_length, void *buffer_in) +{ + struct dmm_heap *dh; + uintptr_t addr = (uintptr_t)buffer_in; + + /* Get memory region that specified device can perform DMA transfers to, using devicetree */ + dh = dmm_heap_find(region); + if (dh == NULL) { + return -EINVAL; + } + + /* Check if device memory region is cacheable + * If yes, invalidate all cache lines associated with input buffer + * (either user or allocated) + */ + if (is_region_cacheable(dh->region)) { + sys_cache_data_invd_range(buffer_in, user_length); + } + /* If no, no action is needed */ + + /* Check if user buffer and allocated buffer points to the same memory location + * If no, copy allocated buffer to the user buffer + */ + if (buffer_in != user_buffer) { + memcpy(user_buffer, buffer_in, user_length); + } + /* If yes, no action is needed */ + + /* Check if input buffer is contained within memory area + * managed by dynamic memory allocator + */ + if (is_buffer_within_region(addr, 0, dmm_heap_start_get(dh), dmm_heap_size_get(dh))) { + /* If yes, free the buffer */ + dmm_buffer_free(dh, buffer_in); + } + /* If no, no action is needed */ + + return 0; +} + +int dmm_init(void) +{ + struct dmm_heap *dh; + + for (size_t idx = 0; idx < ARRAY_SIZE(dmm_regions); idx++) { + dh = &dmm_heaps_data.dmm_heaps[idx]; + dh->region = &dmm_regions[idx]; + sys_heap_init(&dh->heap, (void *)dmm_heap_start_get(dh), dmm_heap_size_get(dh)); + } + + return 0; +} + +SYS_INIT(dmm_init, POST_KERNEL, 0); diff --git a/soc/nordic/common/dmm.h b/soc/nordic/common/dmm.h new file mode 100644 index 00000000000..03780f37239 --- /dev/null +++ b/soc/nordic/common/dmm.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2024 Nordic Semiconductor ASA + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * nRF SoC specific 
public APIs for Device Memory Management (dmm) subsystem + */ + +#ifndef SOC_NORDIC_COMMON_DMM_H_ +#define SOC_NORDIC_COMMON_DMM_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** @cond INTERNAL_HIDDEN */ + +#define DMM_DCACHE_LINE_SIZE \ + COND_CODE_1(IS_ENABLED(CONFIG_DCACHE), (CONFIG_DCACHE_LINE_SIZE), (sizeof(uint8_t))) + +/** + * @brief Get reference to memory region associated with the specified device node + * + * @param node_id Device node. + * + * @return Reference to memory region. NULL if not defined for given device node. + */ +#define DMM_DEV_TO_REG(node_id) \ + COND_CODE_1(DT_NODE_HAS_PROP(node_id, memory_regions), \ + ((void *)DT_REG_ADDR(DT_PHANDLE(node_id, memory_regions))), (NULL)) + +/** + * @brief Preallocate buffer in memory region associated with the specified device node + * + * @param node_id Device node. + */ +#define DMM_MEMORY_SECTION(node_id) \ + COND_CODE_1(DT_NODE_HAS_PROP(node_id, memory_regions), \ + (__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \ + DT_PHANDLE(node_id, memory_regions))))) \ + __aligned(DMM_DCACHE_LINE_SIZE)), \ + ()) + +#ifdef CONFIG_HAS_NORDIC_DMM + +/** + * @brief Prepare a DMA output buffer for the specified device + * + * Allocate an output buffer in memory region that given device can perform DMA transfers from. + * Copy @p user_buffer contents into it. + * Writeback data cache lines associated with output buffer, if needed. + * + * @note Depending on provided user buffer parameters and SoC architecture, + * dynamic allocation and cache operations might be skipped. + * + * @note @p buffer_out can be released using @ref dmm_buffer_in_release() + * to support transmitting and receiving data to the same buffer. 
+ * + * @warning It is prohibited to read or write @p user_buffer or @p buffer_out contents + * from the time this function is called until @ref dmm_buffer_out_release() + * or @ref dmm_buffer_in_release is called on the same buffer + * or until this function returns with an error. + * + * @param region Memory region associated with device to prepare the buffer for. + * @param user_buffer CPU address (virtual if applicable) of the buffer containing data + * to be processed by the given device. + * @param user_length Length of the buffer containing data to be processed by the given device. + * @param buffer_out Pointer to a bus address of a buffer containing the prepared DMA buffer. + * + * @retval 0 If succeeded. + * @retval -ENOMEM If output buffer could not be allocated. + * @retval -errno Negative errno for other failures. + */ +int dmm_buffer_out_prepare(void *region, void const *user_buffer, size_t user_length, + void **buffer_out); + +/** + * @brief Release the previously prepared DMA output buffer + * + * @param region Memory region associated with device to release the buffer for. + * @param buffer_out Bus address of the DMA output buffer previously prepared + * with @ref dmm_buffer_out_prepare(). + * + * @retval 0 If succeeded. + * @retval -errno Negative errno code on failure. + */ +int dmm_buffer_out_release(void *region, void *buffer_out); + +/** + * @brief Prepare a DMA input buffer for the specified device + * + * Allocate an input buffer in memory region that given device can perform DMA transfers to. + * + * @note Depending on provided user buffer parameters and SoC architecture, + * dynamic allocation might be skipped. + * + * @warning It is prohibited to read or write @p user_buffer or @p buffer_in contents + * from the time this function is called until @ref dmm_buffer_in_release() + * is called on the same buffer or until this function returns with an error. + * + * @param region Memory region associated with device to prepare the buffer for. 
+ * @param user_buffer CPU address (virtual if applicable) of the buffer to be filled with data + * from the given device. + * @param user_length Length of the buffer to be filled with data from the given device. + * @param buffer_in Pointer to a bus address of a buffer containing the prepared DMA buffer. + * + * @retval 0 If succeeded. + * @retval -ENOMEM If input buffer could not be allocated. + * @retval -errno Negative errno for other failures. + */ +int dmm_buffer_in_prepare(void *region, void *user_buffer, size_t user_length, void **buffer_in); + +/** + * @brief Release the previously prepared DMA input buffer + * + * Invalidate data cache lines associated with input buffer, if needed. + * Copy @p buffer_in contents into @p user_buffer, if needed. + * + * @param region Memory region associated with device to release the buffer for. + * @param user_buffer CPU address (virtual if applicable) of the buffer to be filled with data + * from the given device. + * @param user_length Length of the buffer to be filled with data from the given device. + * @param buffer_in Bus address of the DMA input buffer previously prepared + * with @ref dmm_buffer_in_prepare(). + * + * @note @p user_buffer and @p buffer_in arguments pair provided in this function call must match + * the arguments pair provided in prior call to @ref dmm_buffer_out_prepare() + * or @ref dmm_buffer_in_prepare(). + * + * @retval 0 If succeeded. + * @retval -errno Negative errno code on failure. 
+ */ +int dmm_buffer_in_release(void *region, void *user_buffer, size_t user_length, void *buffer_in); + +/** @endcond */ + +#else + +static ALWAYS_INLINE int dmm_buffer_out_prepare(void *region, void const *user_buffer, + size_t user_length, void **buffer_out) +{ + ARG_UNUSED(region); + ARG_UNUSED(user_length); + *buffer_out = (void *)user_buffer; + return 0; +} + +static ALWAYS_INLINE int dmm_buffer_out_release(void *region, void *buffer_out) +{ + ARG_UNUSED(region); + ARG_UNUSED(buffer_out); + return 0; +} + +static ALWAYS_INLINE int dmm_buffer_in_prepare(void *region, void *user_buffer, size_t user_length, + void **buffer_in) +{ + ARG_UNUSED(region); + ARG_UNUSED(user_length); + *buffer_in = user_buffer; + return 0; +} + +static ALWAYS_INLINE int dmm_buffer_in_release(void *region, void *user_buffer, size_t user_length, + void *buffer_in) +{ + ARG_UNUSED(region); + ARG_UNUSED(user_buffer); + ARG_UNUSED(user_length); + ARG_UNUSED(buffer_in); + return 0; +} + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* SOC_NORDIC_COMMON_DMM_H_ */ diff --git a/soc/nordic/nrf54h/Kconfig b/soc/nordic/nrf54h/Kconfig index a9fe5f9863e..4a812b879d0 100644 --- a/soc/nordic/nrf54h/Kconfig +++ b/soc/nordic/nrf54h/Kconfig @@ -7,6 +7,7 @@ config SOC_SERIES_NRF54HX select HAS_NRFS select HAS_NRFX select HAS_NORDIC_DRIVERS + select HAS_NORDIC_DMM config SOC_NRF54H20_CPUAPP select ARM diff --git a/tests/boards/nrf/dmm/CMakeLists.txt b/tests/boards/nrf/dmm/CMakeLists.txt new file mode 100644 index 00000000000..3d047c94898 --- /dev/null +++ b/tests/boards/nrf/dmm/CMakeLists.txt @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) + +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(dmm) + +FILE(GLOB app_sources src/*.c) + +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/boards/nrf/dmm/boards/nrf5340dk_nrf5340_cpuapp.overlay b/tests/boards/nrf/dmm/boards/nrf5340dk_nrf5340_cpuapp.overlay new file mode 100644 index 
00000000000..9d2eceba667 --- /dev/null +++ b/tests/boards/nrf/dmm/boards/nrf5340dk_nrf5340_cpuapp.overlay @@ -0,0 +1,56 @@ +/ { + aliases { + dut-cache = &spi1; + dut-nocache= &spi3; + }; +}; + +&pinctrl { + spi1_default_alt: spi1_default_alt { + group1 { + psels = , + ; + }; + }; + + spi1_sleep_alt: spi1_sleep_alt { + group1 { + psels = , + ; + low-power-enable; + }; + }; + + spi3_default_alt: spi3_default_alt { + group1 { + psels = , + ; + }; + }; + + spi3_sleep_alt: spi3_sleep_alt { + group1 { + psels = , + ; + low-power-enable; + }; + }; +}; + +&spi1 +{ + compatible = "nordic,nrf-spim"; + status = "okay"; + pinctrl-0 = <&spi1_default_alt>; + pinctrl-1 = <&spi1_sleep_alt>; + pinctrl-names = "default", "sleep"; +}; + +&spi3 +{ + compatible = "nordic,nrf-spim"; + status = "okay"; + pinctrl-0 = <&spi3_default_alt>; + pinctrl-1 = <&spi3_sleep_alt>; + pinctrl-names = "default", "sleep"; +}; diff --git a/tests/boards/nrf/dmm/boards/nrf54h20dk_nrf54h20_cpuapp.overlay b/tests/boards/nrf/dmm/boards/nrf54h20dk_nrf54h20_cpuapp.overlay new file mode 100644 index 00000000000..513ef21776f --- /dev/null +++ b/tests/boards/nrf/dmm/boards/nrf54h20dk_nrf54h20_cpuapp.overlay @@ -0,0 +1,58 @@ +/ { + aliases { + dut-cache = &spi120; + dut-nocache = &spi130; + }; +}; + +&pinctrl { + spi130_default_alt: spi130_default_alt { + group1 { + psels = , + ; + }; + }; + + spi130_sleep_alt: spi130_sleep_alt { + group1 { + psels = , + ; + low-power-enable; + }; + }; + + spi120_default_alt: spi120_default_alt { + group1 { + psels = , + ; + }; + }; + + spi120_sleep_alt: spi120_sleep_alt { + group1 { + psels = , + ; + low-power-enable; + }; + }; +}; + +&spi130 +{ + compatible = "nordic,nrf-spim"; + status = "okay"; + pinctrl-0 = <&spi130_default_alt>; + pinctrl-1 = <&spi130_sleep_alt>; + pinctrl-names = "default", "sleep"; + memory-regions = <&cpuapp_dma_region>; +}; + +&spi120 +{ + compatible = "nordic,nrf-spim"; + status = "okay"; + pinctrl-0 = <&spi120_default_alt>; + pinctrl-1 = 
<&spi120_sleep_alt>; + pinctrl-names = "default", "sleep"; + memory-regions = <&dma_fast_region>; +}; diff --git a/tests/boards/nrf/dmm/prj.conf b/tests/boards/nrf/dmm/prj.conf new file mode 100644 index 00000000000..9467c292689 --- /dev/null +++ b/tests/boards/nrf/dmm/prj.conf @@ -0,0 +1 @@ +CONFIG_ZTEST=y diff --git a/tests/boards/nrf/dmm/src/main.c b/tests/boards/nrf/dmm/src/main.c new file mode 100644 index 00000000000..5ddbebfb838 --- /dev/null +++ b/tests/boards/nrf/dmm/src/main.c @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2024 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include + +#include + +#define DUT_CACHE DT_ALIAS(dut_cache) +#define DUT_NOCACHE DT_ALIAS(dut_nocache) + +#define DMM_TEST_GET_REG_START(node_id) \ + COND_CODE_1(DT_NODE_HAS_PROP(node_id, memory_regions), \ + (DT_REG_ADDR(DT_PHANDLE(node_id, memory_regions))), (0)) + +#define DMM_TEST_GET_REG_SIZE(node_id) \ + COND_CODE_1(DT_NODE_HAS_PROP(node_id, memory_regions), \ + (DT_REG_SIZE(DT_PHANDLE(node_id, memory_regions))), (0)) + +struct dmm_test_region { + void *mem_reg; + uintptr_t start; + size_t size; +}; + +enum { + DMM_TEST_REGION_CACHE, + DMM_TEST_REGION_NOCACHE, + DMM_TEST_REGION_COUNT +}; + +struct dmm_fixture { + struct dmm_test_region regions[DMM_TEST_REGION_COUNT]; + uint32_t fill_value; +}; + +static const struct dmm_test_region dmm_test_regions[DMM_TEST_REGION_COUNT] = { + [DMM_TEST_REGION_CACHE] = { + .mem_reg = DMM_DEV_TO_REG(DUT_CACHE), + .start = DMM_TEST_GET_REG_START(DUT_CACHE), + .size = DMM_TEST_GET_REG_SIZE(DUT_CACHE) + }, + [DMM_TEST_REGION_NOCACHE] = { + .mem_reg = DMM_DEV_TO_REG(DUT_NOCACHE), + .start = DMM_TEST_GET_REG_START(DUT_NOCACHE), + .size = DMM_TEST_GET_REG_SIZE(DUT_NOCACHE) + }, +}; + +static void *test_setup(void) +{ + static struct dmm_fixture fixture; + + memcpy(fixture.regions, dmm_test_regions, sizeof(dmm_test_regions)); + fixture.fill_value = 0x1; + return &fixture; +} + +static void 
test_cleanup(void *argc) +{ +} + +static bool dmm_buffer_in_region_check(struct dmm_test_region *dtr, void *buf, size_t size) +{ + uintptr_t start = (uintptr_t)buf; + + return ((start >= dtr->start) && ((start + size) <= (dtr->start + dtr->size))); +} + +static void dmm_check_output_buffer(struct dmm_test_region *dtr, uint32_t *fill_value, + void *data, size_t size, bool was_prealloc) +{ + void *buf; + int retval; + + memset(data, (*fill_value)++, size); + retval = dmm_buffer_out_prepare(dtr->mem_reg, data, size, &buf); + zassert_ok(retval); + zassert_true(IS_ALIGNED(buf, DMM_DCACHE_LINE_SIZE)); + + if (IS_ENABLED(CONFIG_HAS_NORDIC_DMM)) { + if (was_prealloc) { + zassert_equal(data, buf); + } else { + zassert_not_equal(data, buf); + } + zassert_true(dmm_buffer_in_region_check(dtr, buf, size)); + } else { + zassert_equal(data, buf); + } + sys_cache_data_invd_range(buf, size); + zassert_mem_equal(buf, data, size); + + retval = dmm_buffer_out_release(dtr->mem_reg, buf); + zassert_ok(retval); +} + +static void dmm_check_input_buffer(struct dmm_test_region *dtr, uint32_t *fill_value, + void *data, size_t size, bool was_prealloc, bool is_cached) +{ + void *buf; + int retval; + uint8_t intermediate_buf[128]; + + zassert_true(size < sizeof(intermediate_buf)); + + retval = dmm_buffer_in_prepare(dtr->mem_reg, data, size, &buf); + zassert_ok(retval); + zassert_true(IS_ALIGNED(buf, DMM_DCACHE_LINE_SIZE)); + + if (IS_ENABLED(CONFIG_HAS_NORDIC_DMM)) { + if (was_prealloc) { + zassert_equal(data, buf); + } else { + zassert_not_equal(data, buf); + } + zassert_true(dmm_buffer_in_region_check(dtr, buf, size)); + } else { + zassert_equal(data, buf); + } + + /* Simulate external bus master writing to memory region */ + memset(buf, (*fill_value)++, size); + sys_cache_data_flush_range(buf, size); + /* Preserve actual memory region contents before polluting the cache */ + memcpy(intermediate_buf, buf, size); + if (IS_ENABLED(CONFIG_DCACHE) && is_cached) { + /* Purposefully pollute the 
cache to make sure library manages cache properly */ + memset(buf, (*fill_value)++, size); + } + + retval = dmm_buffer_in_release(dtr->mem_reg, data, size, buf); + zassert_ok(retval); + + zassert_mem_equal(data, intermediate_buf, size); +} + +ZTEST_USER_F(dmm, test_check_dev_cache_in_allocate) +{ + uint8_t user_data[16]; + + dmm_check_input_buffer(&fixture->regions[DMM_TEST_REGION_CACHE], &fixture->fill_value, + user_data, sizeof(user_data), false, true); +} + +ZTEST_USER_F(dmm, test_check_dev_cache_in_preallocate) +{ + static uint8_t user_data[16] DMM_MEMORY_SECTION(DUT_CACHE); + + dmm_check_input_buffer(&fixture->regions[DMM_TEST_REGION_CACHE], &fixture->fill_value, + user_data, sizeof(user_data), true, true); +} + +ZTEST_USER_F(dmm, test_check_dev_cache_out_allocate) +{ + uint8_t user_data[16]; + + dmm_check_output_buffer(&fixture->regions[DMM_TEST_REGION_CACHE], &fixture->fill_value, + user_data, sizeof(user_data), false); +} + +ZTEST_USER_F(dmm, test_check_dev_cache_out_preallocate) +{ + static uint8_t user_data[16] DMM_MEMORY_SECTION(DUT_CACHE); + + dmm_check_output_buffer(&fixture->regions[DMM_TEST_REGION_CACHE], &fixture->fill_value, + user_data, sizeof(user_data), true); +} + +ZTEST_USER_F(dmm, test_check_dev_nocache_in_allocate) +{ + uint8_t user_data[16]; + + dmm_check_input_buffer(&fixture->regions[DMM_TEST_REGION_NOCACHE], &fixture->fill_value, + user_data, sizeof(user_data), false, false); +} + +ZTEST_USER_F(dmm, test_check_dev_nocache_in_preallocate) +{ + static uint8_t user_data[16] DMM_MEMORY_SECTION(DUT_NOCACHE); + + dmm_check_input_buffer(&fixture->regions[DMM_TEST_REGION_NOCACHE], &fixture->fill_value, + user_data, sizeof(user_data), true, false); +} + +ZTEST_USER_F(dmm, test_check_dev_nocache_out_allocate) +{ + uint8_t user_data[16]; + + dmm_check_output_buffer(&fixture->regions[DMM_TEST_REGION_NOCACHE], &fixture->fill_value, + user_data, sizeof(user_data), false); +} + +ZTEST_USER_F(dmm, test_check_dev_nocache_out_preallocate) +{ + static 
uint8_t user_data[16] DMM_MEMORY_SECTION(DUT_NOCACHE); + + dmm_check_output_buffer(&fixture->regions[DMM_TEST_REGION_NOCACHE], &fixture->fill_value, + user_data, sizeof(user_data), true); +} + +ZTEST_SUITE(dmm, NULL, test_setup, NULL, test_cleanup, NULL); + +int dmm_test_prepare(void) +{ + const struct dmm_test_region *dtr; + + for (size_t i = 0; i < ARRAY_SIZE(dmm_test_regions); i++) { + dtr = &dmm_test_regions[i]; + memset((void *)dtr->start, 0x00, dtr->size); + } + + return 0; +} + +SYS_INIT(dmm_test_prepare, PRE_KERNEL_1, 0); diff --git a/tests/boards/nrf/dmm/testcase.yaml b/tests/boards/nrf/dmm/testcase.yaml new file mode 100644 index 00000000000..b5f41f281a5 --- /dev/null +++ b/tests/boards/nrf/dmm/testcase.yaml @@ -0,0 +1,18 @@ +common: + tags: drivers + harness: ztest + +tests: + boards.nrf.dmm: + platform_allow: + - nrf54h20dk/nrf54h20/cpuapp + - nrf5340dk/nrf5340/cpuapp + integration_platforms: + - nrf5340dk/nrf5340/cpuapp + - nrf54h20dk/nrf54h20/cpuapp + + boards.nrf.dmm.cache_disabled: + extra_configs: + - CONFIG_DCACHE=n + platform_allow: + - nrf54h20dk/nrf54h20/cpuapp