
Commit df8cb42

nika-nordic authored and jonathannilsen committed
[nrf fromtree] soc: nordic: add dmm component
DMM stands for Device Memory Management. Its role is to streamline the process of allocating DMA buffers in the correct memory region and managing the data cache.

Signed-off-by: Nikodem Kastelik <[email protected]>
(cherry picked from commit 37e511b)
1 parent f4a2714 commit df8cb42
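As a rough illustration of how a peripheral driver would consume the new API (not part of this commit), the sketch below wraps a DMA TX transfer. The `send_with_dmm()` wrapper and `mem_region` handle are hypothetical; the way a driver obtains the region pointer (the base address of the devicetree memory region the peripheral DMAs from) comes from dmm.h, which this page does not show.

/* Hypothetical TX-path usage sketch (not part of this commit).
 * Assumes `mem_region` is the base address of the devicetree memory
 * region the peripheral performs DMA from, obtained via dmm.h helpers
 * that are not shown in this diff.
 */
#include <zephyr/kernel.h>
#include "dmm.h"

int send_with_dmm(void *mem_region, const uint8_t *data, size_t len)
{
	void *dma_buf;
	int ret;

	/* Get a DMA-safe, cache-line aligned view of `data`; this either
	 * reuses `data` in place or copies it into a bounce buffer.
	 */
	ret = dmm_buffer_out_prepare(mem_region, data, len, &dma_buf);
	if (ret < 0) {
		return ret;
	}

	/* ... start the DMA transfer from dma_buf and wait for completion ... */

	/* Release the (possibly allocated) bounce buffer. */
	return dmm_buffer_out_release(mem_region, dma_buf);
}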


5 files changed, +492 -0 lines changed


soc/nordic/common/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -9,6 +9,10 @@ zephyr_library_sources_ifdef(CONFIG_POWEROFF poweroff.c)
 
 zephyr_include_directories(.)
 
+if(CONFIG_HAS_NORDIC_DMM)
+  zephyr_library_sources(dmm.c)
+endif()
+
 if(CONFIG_TFM_PARTITION_PLATFORM)
   zephyr_library_sources(soc_secure.c)
   zephyr_library_include_directories(

soc/nordic/common/Kconfig

Lines changed: 3 additions & 0 deletions
@@ -1,4 +1,7 @@
 # Copyright (c) 2024 Nordic Semiconductor ASA
 # SPDX-License-Identifier: Apache-2.0
 
+config HAS_NORDIC_DMM
+	bool
+
 rsource "vpr/Kconfig"

soc/nordic/common/dmm.c

Lines changed: 297 additions & 0 deletions
New file (297 added lines):

/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include <zephyr/cache.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/sys_heap.h>
#include <zephyr/mem_mgmt/mem_attr.h>
#include "dmm.h"

#define _FILTER_MEM(node_id, fn) \
	COND_CODE_1(DT_NODE_HAS_PROP(node_id, zephyr_memory_attr), (fn(node_id)), ())
#define DT_MEMORY_REGION_FOREACH_STATUS_OKAY_NODE(fn) \
	DT_FOREACH_STATUS_OKAY_NODE_VARGS(_FILTER_MEM, fn)

#define __BUILD_LINKER_END_VAR(_name) DT_CAT3(__, _name, _end)
#define _BUILD_LINKER_END_VAR(node_id) \
	__BUILD_LINKER_END_VAR(DT_STRING_UNQUOTED(node_id, zephyr_memory_region))

#define _BUILD_MEM_REGION(node_id) \
	{.dt_addr = DT_REG_ADDR(node_id), \
	 .dt_size = DT_REG_SIZE(node_id), \
	 .dt_attr = DT_PROP(node_id, zephyr_memory_attr), \
	 .dt_allc = &_BUILD_LINKER_END_VAR(node_id)},

/* Generate declarations of linker variables used to determine size of preallocated variables
 * stored in memory sections spanning over memory regions.
 * These are used to determine memory left for dynamic bounce buffer allocator to work with.
 */
#define _DECLARE_LINKER_VARS(node_id) extern uint32_t _BUILD_LINKER_END_VAR(node_id);
DT_MEMORY_REGION_FOREACH_STATUS_OKAY_NODE(_DECLARE_LINKER_VARS);

struct dmm_region {
	uintptr_t dt_addr;
	size_t dt_size;
	uint32_t dt_attr;
	void *dt_allc;
};

struct dmm_heap {
	struct sys_heap heap;
	const struct dmm_region *region;
};

static const struct dmm_region dmm_regions[] = {
	DT_MEMORY_REGION_FOREACH_STATUS_OKAY_NODE(_BUILD_MEM_REGION)
};

struct {
	struct dmm_heap dmm_heaps[ARRAY_SIZE(dmm_regions)];
} dmm_heaps_data;

static struct dmm_heap *dmm_heap_find(void *region)
{
	struct dmm_heap *dh;

	for (size_t idx = 0; idx < ARRAY_SIZE(dmm_heaps_data.dmm_heaps); idx++) {
		dh = &dmm_heaps_data.dmm_heaps[idx];
		if (dh->region->dt_addr == (uintptr_t)region) {
			return dh;
		}
	}

	return NULL;
}

static bool is_region_cacheable(const struct dmm_region *region)
{
	return (IS_ENABLED(CONFIG_DCACHE) && (region->dt_attr & DT_MEM_CACHEABLE));
}

static bool is_buffer_within_region(uintptr_t start, size_t size,
				    uintptr_t reg_start, size_t reg_size)
{
	return ((start >= reg_start) && ((start + size) <= (reg_start + reg_size)));
}

static bool is_user_buffer_correctly_preallocated(void const *user_buffer, size_t user_length,
						  const struct dmm_region *region)
{
	uintptr_t addr = (uintptr_t)user_buffer;

	if (!is_buffer_within_region(addr, user_length, region->dt_addr, region->dt_size)) {
		return false;
	}

	if (!is_region_cacheable(region)) {
		/* Buffer is contained within non-cacheable region - use it as it is. */
		return true;
	}

	if (IS_ALIGNED(addr, DMM_DCACHE_LINE_SIZE)) {
		/* If buffer is in cacheable region it must be aligned to data cache line size. */
		return true;
	}

	return false;
}

static size_t dmm_heap_start_get(struct dmm_heap *dh)
{
	return ROUND_UP(dh->region->dt_allc, DMM_DCACHE_LINE_SIZE);
}

static size_t dmm_heap_size_get(struct dmm_heap *dh)
{
	return (dh->region->dt_size - (dmm_heap_start_get(dh) - dh->region->dt_addr));
}

static void *dmm_buffer_alloc(struct dmm_heap *dh, size_t length)
{
	length = ROUND_UP(length, DMM_DCACHE_LINE_SIZE);
	return sys_heap_aligned_alloc(&dh->heap, DMM_DCACHE_LINE_SIZE, length);
}

static void dmm_buffer_free(struct dmm_heap *dh, void *buffer)
{
	sys_heap_free(&dh->heap, buffer);
}

int dmm_buffer_out_prepare(void *region, void const *user_buffer, size_t user_length,
			   void **buffer_out)
{
	struct dmm_heap *dh;

	if (user_length == 0) {
		/* Assume that zero-length buffers are correct as they are. */
		*buffer_out = (void *)user_buffer;
		return 0;
	}

	/* Get memory region that specified device can perform DMA transfers from */
	dh = dmm_heap_find(region);
	if (dh == NULL) {
		return -EINVAL;
	}

	/* Check if:
	 * - provided user buffer is already in correct memory region,
	 * - provided user buffer is aligned and padded to cache line,
	 *   if it is located in cacheable region.
	 */
	if (is_user_buffer_correctly_preallocated(user_buffer, user_length, dh->region)) {
		/* If yes, assign buffer_out to user_buffer */
		*buffer_out = (void *)user_buffer;
	} else {
		/* If no:
		 * - dynamically allocate buffer in correct memory region that respects cache line
		 *   alignment and padding
		 */
		*buffer_out = dmm_buffer_alloc(dh, user_length);
		/* Return error if dynamic allocation fails */
		if (*buffer_out == NULL) {
			return -ENOMEM;
		}
		/* - copy user buffer contents into allocated buffer */
		memcpy(*buffer_out, user_buffer, user_length);
	}

	/* Check if device memory region is cacheable
	 * If yes, writeback all cache lines associated with output buffer
	 * (either user or allocated)
	 */
	if (is_region_cacheable(dh->region)) {
		sys_cache_data_flush_range(*buffer_out, user_length);
	}
	/* If no, no action is needed */

	return 0;
}

int dmm_buffer_out_release(void *region, void *buffer_out)
{
	struct dmm_heap *dh;
	uintptr_t addr = (uintptr_t)buffer_out;

	/* Get memory region that specified device can perform DMA transfers from */
	dh = dmm_heap_find(region);
	if (dh == NULL) {
		return -EINVAL;
	}

	/* Check if output buffer is contained within memory area
	 * managed by dynamic memory allocator
	 */
	if (is_buffer_within_region(addr, 0, dmm_heap_start_get(dh), dmm_heap_size_get(dh))) {
		/* If yes, free the buffer */
		dmm_buffer_free(dh, buffer_out);
	}
	/* If no, no action is needed */

	return 0;
}

int dmm_buffer_in_prepare(void *region, void *user_buffer, size_t user_length, void **buffer_in)
{
	struct dmm_heap *dh;

	if (user_length == 0) {
		/* Assume that zero-length buffers are correct as they are. */
		*buffer_in = (void *)user_buffer;
		return 0;
	}

	/* Get memory region that specified device can perform DMA transfers to */
	dh = dmm_heap_find(region);
	if (dh == NULL) {
		return -EINVAL;
	}

	/* Check if:
	 * - provided user buffer is already in correct memory region,
	 * - provided user buffer is aligned and padded to cache line,
	 *   if it is located in cacheable region.
	 */
	if (is_user_buffer_correctly_preallocated(user_buffer, user_length, dh->region)) {
		/* If yes, assign buffer_in to user_buffer */
		*buffer_in = user_buffer;
	} else {
		/* If no, dynamically allocate buffer in correct memory region that respects cache
		 * line alignment and padding
		 */
		*buffer_in = dmm_buffer_alloc(dh, user_length);
		/* Return error if dynamic allocation fails */
		if (*buffer_in == NULL) {
			return -ENOMEM;
		}
	}

	/* Check if device memory region is cacheable
	 * If yes, invalidate all cache lines associated with input buffer
	 * (either user or allocated) to clear potential dirty bits.
	 */
	if (is_region_cacheable(dh->region)) {
		sys_cache_data_invd_range(*buffer_in, user_length);
	}
	/* If no, no action is needed */

	return 0;
}

int dmm_buffer_in_release(void *region, void *user_buffer, size_t user_length, void *buffer_in)
{
	struct dmm_heap *dh;
	uintptr_t addr = (uintptr_t)buffer_in;

	/* Get memory region that specified device can perform DMA transfers to, using devicetree */
	dh = dmm_heap_find(region);
	if (dh == NULL) {
		return -EINVAL;
	}

	/* Check if device memory region is cacheable
	 * If yes, invalidate all cache lines associated with input buffer
	 * (either user or allocated)
	 */
	if (is_region_cacheable(dh->region)) {
		sys_cache_data_invd_range(buffer_in, user_length);
	}
	/* If no, no action is needed */

	/* Check if user buffer and allocated buffer points to the same memory location
	 * If no, copy allocated buffer to the user buffer
	 */
	if (buffer_in != user_buffer) {
		memcpy(user_buffer, buffer_in, user_length);
	}
	/* If yes, no action is needed */

	/* Check if input buffer is contained within memory area
	 * managed by dynamic memory allocator
	 */
	if (is_buffer_within_region(addr, 0, dmm_heap_start_get(dh), dmm_heap_size_get(dh))) {
		/* If yes, free the buffer */
		dmm_buffer_free(dh, buffer_in);
	}
	/* If no, no action is needed */

	return 0;
}

int dmm_init(void)
{
	struct dmm_heap *dh;

	for (size_t idx = 0; idx < ARRAY_SIZE(dmm_regions); idx++) {
		dh = &dmm_heaps_data.dmm_heaps[idx];
		dh->region = &dmm_regions[idx];
		sys_heap_init(&dh->heap, (void *)dmm_heap_start_get(dh), dmm_heap_size_get(dh));
	}

	return 0;
}

SYS_INIT(dmm_init, POST_KERNEL, 0);
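For the receive direction, a corresponding sketch under the same assumptions as the TX example above (hypothetical wrapper, region handle obtained via dmm.h helpers not shown in this diff):

/* Hypothetical RX-path usage sketch (not part of this commit); same
 * assumptions as the TX example above.
 */
int receive_with_dmm(void *mem_region, uint8_t *data, size_t len)
{
	void *dma_buf;
	int ret;

	/* Get a DMA-safe buffer for reception; the covering cache lines are
	 * invalidated if the region is cacheable.
	 */
	ret = dmm_buffer_in_prepare(mem_region, data, len, &dma_buf);
	if (ret < 0) {
		return ret;
	}

	/* ... start the DMA transfer into dma_buf and wait for completion ... */

	/* Copy back into `data` if a bounce buffer was used, then free it. */
	return dmm_buffer_in_release(mem_region, data, len, dma_buf);
}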
