diff --git a/doc/services/mem_mgmt/index.rst b/doc/services/mem_mgmt/index.rst index eb689c409d1dd..d42b72ebe51be 100644 --- a/doc/services/mem_mgmt/index.rst +++ b/doc/services/mem_mgmt/index.rst @@ -84,7 +84,112 @@ one by renaming the property and changing its value according to the following l "IO" -> <( DT_ARM_MPU(ATTR_MPU_IO) )> "EXTMEM" -> <( DT_ARM_MPU(ATTR_MPU_EXTMEM) )> +Memory Attributes Heap Allocator +******************************** + +It is possible to leverage the memory attribute property ``zephyr,memory-attr`` +to define and create a set of memory heaps from which the user can allocate +memory with certain attributes / capabilities. + +When the :kconfig:option:`CONFIG_MEM_ATTR_HEAP` is set, every region marked +with one of the memory attributes listed in +:zephyr_file:`include/zephyr/dt-bindings/memory-attr/memory-attr-sw.h` is added +to a pool of memory heaps used for dynamic allocation of memory buffers with +certain attributes. + +Here is a non-exhaustive list of possible attributes: + +.. code-block:: none + + DT_MEM_SW_ALLOC_CACHE + DT_MEM_SW_ALLOC_NON_CACHE + DT_MEM_SW_ALLOC_DMA + +For example we can define several memory regions with different attributes and +use the appropriate attribute to indicate that it is possible to dynamically +allocate memory from those regions: + +.. 
code-block:: devicetree + + mem_cacheable: memory@10000000 { + compatible = "mmio-sram"; + reg = <0x10000000 0x1000>; + zephyr,memory-attr = <( DT_MEM_CACHEABLE | DT_MEM_SW_ALLOC_CACHE )>; + }; + + mem_non_cacheable: memory@20000000 { + compatible = "mmio-sram"; + reg = <0x20000000 0x1000>; + zephyr,memory-attr = <( DT_MEM_NON_CACHEABLE | DT_MEM_SW_ALLOC_NON_CACHE )>; + }; + + mem_cacheable_big: memory@30000000 { + compatible = "mmio-sram"; + reg = <0x30000000 0x10000>; + zephyr,memory-attr = <( DT_MEM_CACHEABLE | DT_MEM_OOO | DT_MEM_SW_ALLOC_CACHE )>; + }; + + mem_cacheable_dma: memory@40000000 { + compatible = "mmio-sram"; + reg = <0x40000000 0x10000>; + zephyr,memory-attr = <( DT_MEM_CACHEABLE | DT_MEM_DMA | + DT_MEM_SW_ALLOC_CACHE | DT_MEM_SW_ALLOC_DMA )>; + }; + +The user can then dynamically carve memory out of those regions using the +provided functions, the library will take care of allocating memory from the +correct heap depending on the provided attribute and size: + +.. code-block:: c + + // Init the pool + mem_attr_heap_pool_init(); + + // Allocate 0x100 bytes of cacheable memory from `mem_cacheable` + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x100); + + // Allocate 0x100 bytes of non-cacheable memory aligned to 32 bytes + // from `mem_non_cacheable` + block = mem_attr_heap_aligned_alloc(DT_MEM_SW_ALLOC_NON_CACHE, 32, 0x100); + + // Allocate 0x100 bytes of cacheable and dma-able memory from `mem_cacheable_dma` + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE | DT_MEM_SW_ALLOC_DMA, 0x100); + +When several regions are marked with the same attributes, the memory is allocated: + +1. From the regions where the ``zephyr,memory-attr`` property has the requested + property (or properties). + +2. Among the regions as at point 1, from the smallest region if there is any + unallocated space left for the requested size + +3. 
If there is not enough space, from the next bigger region able to + accommodate the requested size + +The following example shows the point 3: + +.. code-block:: c + + // This memory is allocated from `mem_cacheable` + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x100); + + // This memory is allocated from `mem_cacheable_big` + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x5000); + +.. note:: + + The framework is assuming that the memory regions used to create the heaps + are usable by the code and available at init time. The user must take care + of initializing and setting the memory area before calling + :c:func:`mem_attr_heap_pool_init`. + + That means that the region must be correctly configured in terms of MPU / + MMU (if needed) and that an actual heap can be created out of it, for + example by leveraging the ``zephyr,memory-region`` property to create a + proper linker section to accommodate the heap. + API Reference ************* .. doxygengroup:: memory_attr_interface +.. doxygengroup:: memory_attr_heap diff --git a/include/zephyr/dt-bindings/memory-attr/memory-attr-sw.h b/include/zephyr/dt-bindings/memory-attr/memory-attr-sw.h new file mode 100644 index 0000000000000..ccd47addf1113 --- /dev/null +++ b/include/zephyr/dt-bindings/memory-attr/memory-attr-sw.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 Carlo Caione + * + * SPDX-License-Identifier: Apache-2.0 + */ +#ifndef ZEPHYR_INCLUDE_DT_BINDINGS_MEM_ATTR_SW_H_ +#define ZEPHYR_INCLUDE_DT_BINDINGS_MEM_ATTR_SW_H_ + +#include <zephyr/dt-bindings/dt-util.h> +#include <zephyr/dt-bindings/memory-attr/memory-attr.h> + +/* + * Software specific memory attributes. 
+ */ +#define DT_MEM_SW_MASK DT_MEM_SW_ATTR_MASK +#define DT_MEM_SW_GET(x) ((x) & DT_MEM_SW_ATTR_MASK) +#define DT_MEM_SW(x) ((x) << DT_MEM_SW_ATTR_SHIFT) + +#define ATTR_SW_ALLOC_CACHE BIT(0) +#define ATTR_SW_ALLOC_NON_CACHE BIT(1) +#define ATTR_SW_ALLOC_DMA BIT(2) + +#define DT_MEM_SW_ALLOC_CACHE DT_MEM_SW(ATTR_SW_ALLOC_CACHE) +#define DT_MEM_SW_ALLOC_NON_CACHE DT_MEM_SW(ATTR_SW_ALLOC_NON_CACHE) +#define DT_MEM_SW_ALLOC_DMA DT_MEM_SW(ATTR_SW_ALLOC_DMA) + +#endif /* ZEPHYR_INCLUDE_DT_BINDINGS_MEM_ATTR_SW_H_ */ diff --git a/include/zephyr/mem_mgmt/mem_attr_heap.h b/include/zephyr/mem_mgmt/mem_attr_heap.h new file mode 100644 index 0000000000000..60cf183dd49ba --- /dev/null +++ b/include/zephyr/mem_mgmt/mem_attr_heap.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2023 Carlo Caione, + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_MEM_ATTR_HEAP_H_ +#define ZEPHYR_INCLUDE_MEM_ATTR_HEAP_H_ + +/** + * @brief Memory heaps based on memory attributes + * @defgroup memory_attr_heap Memory heaps based on memory attributes + * @ingroup mem_mgmt + * @{ + */ + +#include <zephyr/mem_mgmt/mem_attr.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Init the memory pool + * + * This must be the first function to be called to initialize the memory pools + * from all the memory regions with a software attribute. + * + * @retval 0 on success. + * @retval -EALREADY if the pool was already initialized. + * @retval -ENOMEM too many regions already allocated. + */ +int mem_attr_heap_pool_init(void); + +/** + * @brief Allocate memory with a specified attribute and size. + * + * Allocates a block of memory of the specified size in bytes and with a + * specified capability / attribute. The attribute is used to select the + * correct memory heap to allocate memory from. + * + * @param attr capability / attribute requested for the memory block. + * @param bytes requested size of the allocation in bytes. + * + * @retval ptr a valid pointer to the allocated memory. 
+ * @retval NULL if no memory is available with that attribute and size. + */ +void *mem_attr_heap_alloc(uint32_t attr, size_t bytes); + +/** + * @brief Allocate aligned memory with a specified attribute, size and alignment. + * + * Allocates a block of memory of the specified size in bytes and with a + * specified capability / attribute. Takes an additional parameter specifying a + * power of two alignment in bytes. + * + * @param attr capability / attribute requested for the memory block. + * @param align power of two alignment for the returned pointer in bytes. + * @param bytes requested size of the allocation in bytes. + * + * @retval ptr a valid pointer to the allocated memory. + * @retval NULL if no memory is available with that attribute and size. + */ +void *mem_attr_heap_aligned_alloc(uint32_t attr, size_t align, size_t bytes); + +/** + * @brief Free the allocated memory + * + * Used to free the passed block of memory that must be the return value of a + * previous call to @ref mem_attr_heap_alloc or @ref + * mem_attr_heap_aligned_alloc. + * + * @param block block to free, must be a pointer to a block allocated by + * @ref mem_attr_heap_alloc or @ref mem_attr_heap_aligned_alloc. + */ +void mem_attr_heap_free(void *block); + +/** + * @brief Get a specific memory region descriptor for a provided address + * + * Finds the memory region descriptor struct controlling the provided pointer. + * + * @param addr address to be found, must be a pointer to a block allocated by + * @ref mem_attr_heap_alloc or @ref mem_attr_heap_aligned_alloc. + * + * @retval str pointer to a memory region structure the address belongs to. 
+ */ +const struct mem_attr_region_t *mem_attr_heap_get_region(void *addr); + +#ifdef __cplusplus +} +#endif + +/** + * @} + */ + +#endif /* ZEPHYR_INCLUDE_MEM_ATTR_HEAP_H_ */ diff --git a/subsys/mem_mgmt/CMakeLists.txt b/subsys/mem_mgmt/CMakeLists.txt index 62e61fb4d1f4e..d2fcd1a72f226 100644 --- a/subsys/mem_mgmt/CMakeLists.txt +++ b/subsys/mem_mgmt/CMakeLists.txt @@ -1,3 +1,4 @@ # SPDX-License-Identifier: Apache-2.0 zephyr_sources_ifdef(CONFIG_MEM_ATTR mem_attr.c) +zephyr_sources_ifdef(CONFIG_MEM_ATTR_HEAP mem_attr_heap.c) diff --git a/subsys/mem_mgmt/Kconfig b/subsys/mem_mgmt/Kconfig index dda1a404167e9..b381b3892e548 100644 --- a/subsys/mem_mgmt/Kconfig +++ b/subsys/mem_mgmt/Kconfig @@ -10,3 +10,10 @@ config MEM_ATTR time an array of the memory regions defined in the DT that can be probed at run-time using several helper functions. Set to `N` if unsure to save RODATA space. + +config MEM_ATTR_HEAP + bool "Memory Attributes heap allocator" + depends on MEM_ATTR + help + Enable a heap allocator based on memory attributes to dynamically + allocate memory from DeviceTree defined memory regions. 
diff --git a/subsys/mem_mgmt/mem_attr_heap.c b/subsys/mem_mgmt/mem_attr_heap.c new file mode 100644 index 0000000000000..e1b3578a4ed77 --- /dev/null +++ b/subsys/mem_mgmt/mem_attr_heap.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2023 Carlo Caione, + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include + +struct ma_heap { + struct sys_heap heap; + uint32_t attr; +}; + +struct { + struct ma_heap ma_heaps[MAX_MULTI_HEAPS]; + struct sys_multi_heap multi_heap; + int nheaps; +} mah_data; + +static void *mah_choice(struct sys_multi_heap *m_heap, void *cfg, size_t align, size_t size) +{ + uint32_t attr; + void *block; + + if (size == 0) { + return NULL; + } + + attr = (uint32_t)(long) cfg; + + /* Set in case the user requested a non-existing attr */ + block = NULL; + + for (size_t hdx = 0; hdx < mah_data.nheaps; hdx++) { + struct ma_heap *h; + + h = &mah_data.ma_heaps[hdx]; + + if (h->attr != attr) { + continue; + } + + block = sys_heap_aligned_alloc(&h->heap, align, size); + if (block != NULL) { + break; + } + } + + return block; +} + +void mem_attr_heap_free(void *block) +{ + sys_multi_heap_free(&mah_data.multi_heap, block); +} + +void *mem_attr_heap_alloc(uint32_t attr, size_t bytes) +{ + return sys_multi_heap_alloc(&mah_data.multi_heap, + (void *)(long) attr, bytes); +} + +void *mem_attr_heap_aligned_alloc(uint32_t attr, size_t align, size_t bytes) +{ + return sys_multi_heap_aligned_alloc(&mah_data.multi_heap, + (void *)(long) attr, align, bytes); +} + +const struct mem_attr_region_t *mem_attr_heap_get_region(void *addr) +{ + const struct sys_multi_heap_rec *heap_rec; + + heap_rec = sys_multi_heap_get_heap(&mah_data.multi_heap, addr); + + return (const struct mem_attr_region_t *) heap_rec->user_data; } + +static int ma_heap_add(const struct mem_attr_region_t *region, uint32_t attr) +{ + struct ma_heap *mh; + struct sys_heap *h; + + /* No more heaps available */ + if (mah_data.nheaps >= MAX_MULTI_HEAPS) { 
+ return -ENOMEM; + } + + mh = &mah_data.ma_heaps[mah_data.nheaps++]; + h = &mh->heap; + + mh->attr = attr; + + sys_heap_init(h, (void *) region->dt_addr, region->dt_size); + sys_multi_heap_add_heap(&mah_data.multi_heap, h, (void *) region); + + return 0; +} + + +int mem_attr_heap_pool_init(void) +{ + const struct mem_attr_region_t *regions; + static atomic_t state; + size_t num_regions; + + if (!atomic_cas(&state, 0, 1)) { + return -EALREADY; + } + + sys_multi_heap_init(&mah_data.multi_heap, mah_choice); + + num_regions = mem_attr_get_regions(®ions); + + for (size_t idx = 0; idx < num_regions; idx++) { + uint32_t sw_attr; + + sw_attr = DT_MEM_SW_ATTR_GET(regions[idx].dt_attr); + + /* No SW attribute is present */ + if (!sw_attr) { + continue; + } + + if (ma_heap_add(®ions[idx], sw_attr)) { + return -ENOMEM; + } + } + + return 0; +} diff --git a/tests/subsys/mem_mgmt/mem_attr_heap/CMakeLists.txt b/tests/subsys/mem_mgmt/mem_attr_heap/CMakeLists.txt new file mode 100644 index 0000000000000..bebaf8dd83b05 --- /dev/null +++ b/tests/subsys/mem_mgmt/mem_attr_heap/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) + +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(mem_attr_heap) + +target_sources(app PRIVATE src/main.c) diff --git a/tests/subsys/mem_mgmt/mem_attr_heap/boards/qemu_cortex_m3.overlay b/tests/subsys/mem_mgmt/mem_attr_heap/boards/qemu_cortex_m3.overlay new file mode 100644 index 0000000000000..dc2e25a4e0d99 --- /dev/null +++ b/tests/subsys/mem_mgmt/mem_attr_heap/boards/qemu_cortex_m3.overlay @@ -0,0 +1,52 @@ +#include +#include +#include + +/ { + mem_cache: memory@20008000 { + compatible = "zephyr,memory-region", "mmio-sram"; + reg = <0x20008000 0x1000>; + zephyr,memory-region = "MEM_CACHEABLE"; + zephyr,memory-attr = <( DT_MEM_CACHEABLE )>; + }; + + mem_cache_sw: memory@20009000 { + compatible = "zephyr,memory-region", "mmio-sram"; + reg = <0x20009000 0x1000>; + zephyr,memory-region = 
"MEM_CACHEABLE_SW"; + zephyr,memory-attr = <( DT_MEM_CACHEABLE | DT_MEM_SW_ALLOC_CACHE )>; + }; + + mem_noncache_sw: memory@2000A000 { + compatible = "zephyr,memory-region", "mmio-sram"; + reg = <0x2000A000 0x1000>; + zephyr,memory-region = "MEM_NON_CACHEABLE_SW"; + zephyr,memory-attr = <( DT_MEM_SW_ALLOC_NON_CACHE )>; + }; + + mem_dma_sw: memory@2000B000 { + compatible = "zephyr,memory-region", "mmio-sram"; + reg = <0x2000B000 0x1000>; + zephyr,memory-region = "MEM_DMA_SW"; + zephyr,memory-attr = <( DT_MEM_DMA | DT_MEM_SW_ALLOC_DMA )>; + }; + + mem_cache_sw_big: memory@2000C000 { + compatible = "zephyr,memory-region", "mmio-sram"; + reg = <0x2000C000 0x2000>; + zephyr,memory-region = "MEM_CACHEABLE_SW_BIG"; + zephyr,memory-attr = <( DT_MEM_CACHEABLE | DT_MEM_SW_ALLOC_CACHE )>; + }; + + mem_cache_cache_dma_multi: memory@2000E000 { + compatible = "zephyr,memory-region", "mmio-sram"; + reg = <0x2000E000 0x1000>; + zephyr,memory-region = "MEM_CACHEABLE_SW_MULTI_ATTR"; + zephyr,memory-attr = <( DT_MEM_CACHEABLE | DT_MEM_DMA | + DT_MEM_SW_ALLOC_CACHE | DT_MEM_SW_ALLOC_DMA)>; + }; +}; + +&sram0 { + reg = <0x20000000 DT_SIZE_K(32)>; +}; diff --git a/tests/subsys/mem_mgmt/mem_attr_heap/prj.conf b/tests/subsys/mem_mgmt/mem_attr_heap/prj.conf new file mode 100644 index 0000000000000..64c26116108b3 --- /dev/null +++ b/tests/subsys/mem_mgmt/mem_attr_heap/prj.conf @@ -0,0 +1,6 @@ +# Copyright 2023 Carlo Caione +# SPDX-License-Identifier: Apache-2.0 + +CONFIG_ZTEST=y +CONFIG_MEM_ATTR=y +CONFIG_MEM_ATTR_HEAP=y diff --git a/tests/subsys/mem_mgmt/mem_attr_heap/src/main.c b/tests/subsys/mem_mgmt/mem_attr_heap/src/main.c new file mode 100644 index 0000000000000..cf7d6c3bec2a5 --- /dev/null +++ b/tests/subsys/mem_mgmt/mem_attr_heap/src/main.c @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2023 Carlo Caione + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include + +#define ADDR_MEM_CACHE DT_REG_ADDR(DT_NODELABEL(mem_cache)) +#define ADDR_MEM_CACHE_SW 
DT_REG_ADDR(DT_NODELABEL(mem_cache_sw)) +#define ADDR_MEM_NON_CACHE_SW DT_REG_ADDR(DT_NODELABEL(mem_noncache_sw)) +#define ADDR_MEM_DMA_SW DT_REG_ADDR(DT_NODELABEL(mem_dma_sw)) +#define ADDR_MEM_CACHE_BIG_SW DT_REG_ADDR(DT_NODELABEL(mem_cache_sw_big)) +#define ADDR_MEM_CACHE_DMA_SW DT_REG_ADDR(DT_NODELABEL(mem_cache_cache_dma_multi)) + +ZTEST(mem_attr_heap, test_mem_attr_heap) +{ + const struct mem_attr_region_t *region; + void *block, *old_block; + int ret; + + /* + * Init the pool. + */ + ret = mem_attr_heap_pool_init(); + zassert_equal(0, ret, "Failed initialization"); + + /* + * Any subsequent initialization should fail. + */ + ret = mem_attr_heap_pool_init(); + zassert_equal(-EALREADY, ret, "Second init should be failing"); + + /* + * Allocate 0x100 bytes of cacheable memory. + */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x100); + zassert_not_null(block, "Failed to allocate memory"); + + /* + * Check that the just allocated memory was allocated from the correct + * memory region. + */ + region = mem_attr_heap_get_region(block); + zassert_equal(region->dt_addr, ADDR_MEM_CACHE_SW, + "Memory allocated from the wrong region"); + + /* + * Allocate 0x100 bytes of non-cacheable memory. + */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_NON_CACHE, 0x100); + zassert_not_null(block, "Failed to allocate memory"); + + /* + * Check that the just allocated memory was allocated from the correct + * memory region. + */ + region = mem_attr_heap_get_region(block); + zassert_equal(region->dt_addr, ADDR_MEM_NON_CACHE_SW, + "Memory allocated from the wrong region"); + + /* + * Allocate 0x100 bytes of DMA memory. + */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_DMA, 0x100); + zassert_not_null(block, "Failed to allocate memory"); + + /* + * Check that the just allocated memory was allocated from the correct + * memory region. 
+ */ + region = mem_attr_heap_get_region(block); + zassert_equal(region->dt_addr, ADDR_MEM_DMA_SW, + "Memory allocated from the wrong region"); + + /* + * Allocate 0x100 bytes of cacheable and DMA memory. + */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE | DT_MEM_SW_ALLOC_DMA, 0x100); + zassert_not_null(block, "Failed to allocate memory"); + + /* + * Check that the just allocated memory was allocated from the correct + * memory region (CACHE + DMA and not just CACHE or just DMA). + */ + region = mem_attr_heap_get_region(block); + zassert_equal(region->dt_addr, ADDR_MEM_CACHE_DMA_SW, + "Memory allocated from the wrong region"); + + /* + * Allocate memory with a non-existing attribute. + */ + block = mem_attr_heap_alloc(DT_MEM_SW(DT_MEM_SW_ATTR_UNKNOWN), 0x100); + zassert_is_null(block, "Memory allocated with non-existing attribute"); + + /* + * Allocate memory too big to fit into the first cacheable memory + * region. It should be allocated from the second bigger memory region. + */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x1500); + zassert_not_null(block, "Failed to allocate memory"); + + /* + * Check that the just allocated memory was allocated from the correct + * (bigger) cacheable memory region + */ + region = mem_attr_heap_get_region(block); + zassert_equal(region->dt_addr, ADDR_MEM_CACHE_BIG_SW, + "Memory allocated from the wrong region"); + + /* + * Try to allocate a buffer too big. 
+ */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x4000); + zassert_is_null(block, "Buffer too big for regions correctly allocated"); + + /* + * Check if the memory is correctly released and can be reused + */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x100); + old_block = block; + mem_attr_heap_free(block); + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_CACHE, 0x100); + zassert_equal_ptr(old_block, block, "Memory not correctly released"); + + /* + * Check if the memory is correctly aligned when requested + */ + block = mem_attr_heap_alloc(DT_MEM_SW_ALLOC_NON_CACHE, 0x100); + zassert_true(((uintptr_t) block % 32 != 0), ""); + mem_attr_heap_free(block); + block = mem_attr_heap_aligned_alloc(DT_MEM_SW_ALLOC_NON_CACHE, 32, 0x100); + zassert_true(((uintptr_t) block % 32 == 0), ""); + + /* + * Try with a different alignment + */ + block = mem_attr_heap_aligned_alloc(DT_MEM_SW_ALLOC_NON_CACHE, 64, 0x100); + zassert_true(((uintptr_t) block % 64 == 0), ""); +} + +ZTEST_SUITE(mem_attr_heap, NULL, NULL, NULL, NULL, NULL); diff --git a/tests/subsys/mem_mgmt/mem_attr_heap/testcase.yaml b/tests/subsys/mem_mgmt/mem_attr_heap/testcase.yaml new file mode 100644 index 0000000000000..903836ed3678a --- /dev/null +++ b/tests/subsys/mem_mgmt/mem_attr_heap/testcase.yaml @@ -0,0 +1,9 @@ +# Copyright 2023 Carlo Caione +# SPDX-License-Identifier: Apache-2.0 + +tests: + mem_mgmt.mem_attr_heap: + platform_allow: + - qemu_cortex_m3 + integration_platforms: + - qemu_cortex_m3