Skip to content

Commit 4cf95b1

Browse files
dcpleung
authored and nashif committed
mm: common: reset mappings during simple mapping failures
For mapping operations in sys_mm_drv_simple_map_region() and sys_mm_drv_simple_map_array(), when there was an error with mapping any pages, it would simply return an error, leaving already mapped pages still mapped. This modifies the behavior where any failure during the whole mapping operation will unmap the already mapped pages. This also modifies the remapping and moving functions to reset mappings when error is encountered. Fixes #73121 Signed-off-by: Daniel Leung <[email protected]>
1 parent 2c496c0 commit 4cf95b1

File tree

1 file changed

+150
-84
lines changed

1 file changed

+150
-84
lines changed

drivers/mm/mm_drv_common.c

Lines changed: 150 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,41 @@ bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size)
7878
return ret;
7979
}
8080

81+
/**
82+
* Unmap a memory region with synchronization already locked.
83+
*
84+
* @param virt Page-aligned base virtual address to un-map
85+
* @param size Page-aligned region size
86+
* @param is_reset True if resetting the mappings
87+
*
88+
* @retval 0 if successful
89+
* @retval -EINVAL if invalid arguments are provided
90+
* @retval -EFAULT if virtual address is not mapped
91+
*/
92+
static int unmap_locked(void *virt, size_t size, bool is_reset)
93+
{
94+
int ret = 0;
95+
size_t offset;
96+
97+
for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
98+
uint8_t *va = (uint8_t *)virt + offset;
99+
100+
int ret2 = sys_mm_drv_unmap_page(va);
101+
102+
if (ret2 != 0) {
103+
if (is_reset) {
104+
__ASSERT(false, "cannot reset mapping %p\n", va);
105+
} else {
106+
__ASSERT(false, "cannot unmap %p\n", va);
107+
}
108+
109+
ret = ret2;
110+
}
111+
}
112+
113+
return ret;
114+
}
115+
81116
int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys,
82117
size_t size, uint32_t flags)
83118
{
@@ -98,12 +133,19 @@ int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys,
98133
uint8_t *va = (uint8_t *)virt + offset;
99134
uintptr_t pa = phys + offset;
100135

101-
int ret2 = sys_mm_drv_map_page(va, pa, flags);
136+
ret = sys_mm_drv_map_page(va, pa, flags);
102137

103-
if (ret2 != 0) {
138+
if (ret != 0) {
104139
__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va);
105140

106-
ret = ret2;
141+
/*
142+
* Reset the already mapped virtual addresses.
143+
* Note the offset is at the current failed address
144+
* which will not be included during unmapping.
145+
*/
146+
(void)unmap_locked(virt, offset, true);
147+
148+
break;
107149
}
108150
}
109151

@@ -136,12 +178,19 @@ int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys,
136178
while (idx < cnt) {
137179
uint8_t *va = (uint8_t *)virt + offset;
138180

139-
int ret2 = sys_mm_drv_map_page(va, phys[idx], flags);
181+
ret = sys_mm_drv_map_page(va, phys[idx], flags);
140182

141-
if (ret2 != 0) {
183+
if (ret != 0) {
142184
__ASSERT(false, "cannot map 0x%lx to %p\n", phys[idx], va);
143185

144-
ret = ret2;
186+
/*
187+
* Reset the already mapped virtual addresses.
188+
* Note the offset is at the current failed address
189+
* which will not be included during unmapping.
190+
*/
191+
(void)unmap_locked(virt, offset, true);
192+
193+
break;
145194
}
146195

147196
offset += CONFIG_MM_DRV_PAGE_SIZE;
@@ -160,7 +209,6 @@ int sys_mm_drv_simple_unmap_region(void *virt, size_t size)
160209
{
161210
k_spinlock_key_t key;
162211
int ret = 0;
163-
size_t offset;
164212

165213
CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) ||
166214
!sys_mm_drv_is_size_aligned(size)) {
@@ -170,17 +218,7 @@ int sys_mm_drv_simple_unmap_region(void *virt, size_t size)
170218

171219
key = k_spin_lock(&sys_mm_drv_common_lock);
172220

173-
for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
174-
uint8_t *va = (uint8_t *)virt + offset;
175-
176-
int ret2 = sys_mm_drv_unmap_page(va);
177-
178-
if (ret2 != 0) {
179-
__ASSERT(false, "cannot unmap %p\n", va);
180-
181-
ret = ret2;
182-
}
183-
}
221+
ret = unmap_locked(virt, size, false);
184222

185223
k_spin_unlock(&sys_mm_drv_common_lock, key);
186224

@@ -224,48 +262,60 @@ int sys_mm_drv_simple_remap_region(void *virt_old, size_t size,
224262
uint8_t *va_new = (uint8_t *)virt_new + offset;
225263
uintptr_t pa;
226264
uint32_t flags;
227-
int ret2;
228-
bool to_map;
229265

230266
/*
231-
* va_old is mapped as checked above, so no need
232-
* to check for return value here.
267+
* Grab the physical address of the old mapped page
268+
* so the new page can map to the same physical address.
233269
*/
234-
(void)sys_mm_drv_page_phys_get(va_old, &pa);
270+
ret = sys_mm_drv_page_phys_get(va_old, &pa);
271+
if (ret != 0) {
272+
__ASSERT(false, "cannot query %p\n", va_old);
235273

236-
to_map = true;
237-
ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
238-
if (ret2 != 0) {
239-
__ASSERT(false, "cannot query page %p\n", va_old);
274+
/*
275+
* Reset the already mapped virtual addresses.
276+
* Note the offset is at the current failed address
277+
* which will not be included during unmapping.
278+
*/
279+
(void)unmap_locked(virt_new, offset, true);
240280

241-
ret = ret2;
242-
to_map = false;
281+
goto unlock_out;
243282
}
244283

245-
ret2 = sys_mm_drv_unmap_page(va_old);
246-
if (ret2 != 0) {
247-
__ASSERT(false, "cannot unmap %p\n", va_old);
248-
249-
ret = ret2;
250-
}
284+
/*
285+
* Grab the flags of the old mapped page
286+
* so the new page can map to the same flags.
287+
*/
288+
ret = sys_mm_drv_page_flag_get(va_old, &flags);
289+
if (ret != 0) {
290+
__ASSERT(false, "cannot query page %p\n", va_old);
251291

252-
if (!to_map) {
253292
/*
254-
* Cannot retrieve flags of mapped virtual memory.
255-
* Skip mapping this page as we don't want to map
256-
* with unknown random flags.
293+
* Reset the already mapped virtual addresses.
294+
* Note the offset is at the current failed address
295+
* which will not be included during unmapping.
257296
*/
258-
continue;
297+
(void)unmap_locked(virt_new, offset, true);
298+
299+
goto unlock_out;
259300
}
260301

261-
ret2 = sys_mm_drv_map_page(va_new, pa, flags);
262-
if (ret2 != 0) {
302+
ret = sys_mm_drv_map_page(va_new, pa, flags);
303+
if (ret != 0) {
263304
__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);
264305

265-
ret = ret2;
306+
/*
307+
* Reset the already mapped virtual addresses.
308+
* Note the offset is at the current failed address
309+
* which will not be included during unmapping.
310+
*/
311+
(void)unmap_locked(virt_new, offset, true);
312+
313+
goto unlock_out;
266314
}
267315
}
268316

317+
(void)unmap_locked(virt_old, size, false);
318+
269319
unlock_out:
270320
k_spin_unlock(&sys_mm_drv_common_lock, key);
271321

@@ -310,38 +360,46 @@ int sys_mm_drv_simple_move_region(void *virt_old, size_t size,
310360
uint8_t *va_new = (uint8_t *)virt_new + offset;
311361
uintptr_t pa = phys_new + offset;
312362
uint32_t flags;
313-
int ret2;
314363

315-
ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
316-
if (ret2 != 0) {
364+
ret = sys_mm_drv_page_flag_get(va_old, &flags);
365+
if (ret != 0) {
317366
__ASSERT(false, "cannot query page %p\n", va_old);
318367

319-
ret = ret2;
320-
} else {
321368
/*
322-
* Only map the new page when we can retrieve
323-
* flags of the old mapped page as We don't
324-
* want to map with unknown random flags.
369+
* Reset the already mapped virtual addresses.
370+
* Note the offset is at the current failed address
371+
* which will not be included during unmapping.
325372
*/
326-
ret2 = sys_mm_drv_map_page(va_new, pa, flags);
327-
if (ret2 != 0) {
328-
__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);
373+
(void)unmap_locked(virt_new, offset, true);
329374

330-
ret = ret2;
331-
} else {
332-
(void)memcpy(va_new, va_old,
333-
CONFIG_MM_DRV_PAGE_SIZE);
334-
}
375+
goto unlock_out;
335376
}
336377

337-
ret2 = sys_mm_drv_unmap_page(va_old);
338-
if (ret2 != 0) {
339-
__ASSERT(false, "cannot unmap %p\n", va_old);
378+
/*
379+
* Map the new page with flags of the old mapped page
380+
* so they both have the same properties.
381+
*/
382+
ret = sys_mm_drv_map_page(va_new, pa, flags);
383+
if (ret != 0) {
384+
__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);
340385

341-
ret = ret2;
386+
/*
387+
* Reset the already mapped virtual addresses.
388+
* Note the offset is at the current failed address
389+
* which will not be included during unmapping.
390+
*/
391+
(void)unmap_locked(virt_new, offset, true);
392+
393+
goto unlock_out;
342394
}
343395
}
344396

397+
/* Once new mappings are in place, copy the content over. */
398+
(void)memcpy(virt_new, virt_old, size);
399+
400+
/* Unmap old virtual memory region once the move is done. */
401+
(void)unmap_locked(virt_old, size, false);
402+
345403
unlock_out:
346404
k_spin_unlock(&sys_mm_drv_common_lock, key);
347405

@@ -388,43 +446,51 @@ int sys_mm_drv_simple_move_array(void *virt_old, size_t size,
388446
uint8_t *va_old = (uint8_t *)virt_old + offset;
389447
uint8_t *va_new = (uint8_t *)virt_new + offset;
390448
uint32_t flags;
391-
int ret2;
392449

393-
ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
394-
if (ret2 != 0) {
450+
ret = sys_mm_drv_page_flag_get(va_old, &flags);
451+
if (ret != 0) {
395452
__ASSERT(false, "cannot query page %p\n", va_old);
396453

397-
ret = ret2;
398-
} else {
399454
/*
400-
* Only map the new page when we can retrieve
401-
* flags of the old mapped page as We don't
402-
* want to map with unknown random flags.
455+
* Reset the already mapped virtual addresses.
456+
* Note the offset is at the current failed address
457+
* which will not be included during unmapping.
403458
*/
404-
ret2 = sys_mm_drv_map_page(va_new, phys_new[idx], flags);
405-
if (ret2 != 0) {
406-
__ASSERT(false, "cannot map 0x%lx to %p\n",
407-
phys_new[idx], va_new);
459+
(void)unmap_locked(virt_new, offset, true);
408460

409-
ret = ret2;
410-
} else {
411-
(void)memcpy(va_new, va_old,
412-
CONFIG_MM_DRV_PAGE_SIZE);
413-
}
461+
goto unlock_out;
414462
}
415463

416-
ret2 = sys_mm_drv_unmap_page(va_old);
464+
/*
465+
* Only map the new page when we can retrieve
466+
* flags of the old mapped page as We don't
467+
* want to map with unknown random flags.
468+
*/
469+
ret = sys_mm_drv_map_page(va_new, phys_new[idx], flags);
470+
if (ret != 0) {
471+
__ASSERT(false, "cannot map 0x%lx to %p\n",
472+
phys_new[idx], va_new);
417473

418-
if (ret2 != 0) {
419-
__ASSERT(false, "cannot unmap %p\n", va_old);
474+
/*
475+
* Reset the already mapped virtual addresses.
476+
* Note the offset is at the current failed address
477+
* which will not be included during unmapping.
478+
*/
479+
(void)unmap_locked(virt_new, offset, true);
420480

421-
ret = ret2;
481+
goto unlock_out;
422482
}
423483

424484
offset += CONFIG_MM_DRV_PAGE_SIZE;
425485
idx++;
426486
}
427487

488+
/* Once new mappings are in place, copy the content over. */
489+
(void)memcpy(virt_new, virt_old, size);
490+
491+
/* Unmap old virtual memory region once the move is done. */
492+
(void)unmap_locked(virt_old, size, false);
493+
428494
unlock_out:
429495
k_spin_unlock(&sys_mm_drv_common_lock, key);
430496

0 commit comments

Comments
 (0)