
Commit 8b59853

jxstelter authored and MaureenHelm committed
soc: ace_v1x: correct the move_region behavior
This patch introduces a sys_mm_drv_move_region implementation that stops relying on sys_mm_drv_simple_move_region. The simple implementation is unsuitable because it iterates through physical pages in a linear fashion. The new implementation instead queries the TLB for each virtual page of the requested region to find the physical page to be remapped.

Signed-off-by: Jaroslaw Stelter <[email protected]>
1 parent ff22268 commit 8b59853
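
For context, a minimal caller-side sketch of the updated call is shown below. It is illustrative only: the example_move() helper, the include path zephyr/drivers/mm/system_mm.h, and the two-page size are assumptions, not part of this commit; only the sys_mm_drv_move_region() signature and the "phys_new may be 0" behavior come from the patch itself.

/*
 * Hypothetical usage sketch: move an already-mapped two-page region to a
 * new virtual address. With this patch, phys_new may be passed as 0 so
 * the driver derives each physical page from the existing TLB entries of
 * virt_old instead of requiring the caller to supply it.
 */
#include <zephyr/drivers/mm/system_mm.h>   /* assumed header path */

int example_move(void *virt_old, void *virt_new)
{
        size_t size = 2 * CONFIG_MM_DRV_PAGE_SIZE; /* illustrative size */

        /* Per the patch, -EINVAL is returned for unaligned addresses or
         * size, overlapping regions, an unmapped virt_old, or an
         * already-mapped virt_new.
         */
        return sys_mm_drv_move_region(virt_old, size, virt_new, 0);
}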

1 file changed: +86 −6 lines


drivers/mm/mm_drv_intel_adsp_mtl_tlb.c

Lines changed: 86 additions & 6 deletions
@@ -517,19 +517,99 @@ int sys_mm_drv_remap_region(void *virt_old, size_t size,
 int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new,
                            uintptr_t phys_new)
 {
-        int ret;
+        k_spinlock_key_t key;
+        size_t offset;
+        int ret = 0;
 
-        void *va_new = z_soc_cached_ptr(virt_new);
-        void *va_old = z_soc_cached_ptr(virt_old);
+        virt_new = z_soc_cached_ptr(virt_new);
+        virt_old = z_soc_cached_ptr(virt_old);
 
-        ret = sys_mm_drv_simple_move_region(va_old, size, va_new, phys_new);
+        CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
+                !sys_mm_drv_is_virt_addr_aligned(virt_new) ||
+                !sys_mm_drv_is_size_aligned(size)) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
+            (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
+                ret = -EINVAL; /* overlaps */
+                goto out;
+        }
 
         /*
-         * Since memcpy() is done in virtual space, need to
+         * The function's behavior has been updated to accept
+         * phys_new == NULL and get the physical addresses from
+         * the actual TLB instead of from the caller.
+         */
+        if (phys_new != POINTER_TO_UINT(NULL) &&
+            !sys_mm_drv_is_addr_aligned(phys_new)) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        key = k_spin_lock(&sys_mm_drv_common_lock);
+
+        if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
+            !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
+                ret = -EINVAL;
+                goto unlock_out;
+        }
+
+        for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
+                uint8_t *va_old = (uint8_t *)virt_old + offset;
+                uint8_t *va_new = (uint8_t *)virt_new + offset;
+                uintptr_t pa;
+                uint32_t flags;
+                int ret2;
+
+                ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
+                if (ret2 != 0) {
+                        __ASSERT(false, "cannot query page flags %p\n", va_old);
+
+                        ret = ret2;
+                        goto unlock_out;
+                }
+
+                ret2 = sys_mm_drv_page_phys_get(va_old, &pa);
+                if (ret2 != 0) {
+                        __ASSERT(false, "cannot query page paddr %p\n", va_old);
+
+                        ret = ret2;
+                        goto unlock_out;
+                }
+
+                /*
+                 * Only map the new page when we can retrieve
+                 * flags and phys addr of the old mapped page as We don't
+                 * want to map with unknown random flags.
+                 */
+                ret2 = sys_mm_drv_map_page(va_new, pa, flags);
+                if (ret2 != 0) {
+                        __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);
+
+                        ret = ret2;
+                }
+
+                ret2 = sys_mm_drv_unmap_page(va_old);
+                if (ret2 != 0) {
+                        __ASSERT(false, "cannot unmap %p\n", va_old);
+
+                        ret = ret2;
+                }
+        }
+
+unlock_out:
+        k_spin_unlock(&sys_mm_drv_common_lock, key);
+
+out:
+        /*
+         * Since move is done in virtual space, need to
          * flush the cache to make sure the backing physical
          * pages have the new data.
          */
-        z_xtensa_cache_flush(va_new, size);
+        z_xtensa_cache_flush(virt_new, size);
+        z_xtensa_cache_flush_inv(virt_old, size);
 
         return ret;
 }
