
Commit 81ea436

dcpleung authored and carlescufi committed
xtensa: mmu: send IPI to invalidate TLBs on other CPUs
After changing the content of page table(s), the other CPUs need to be notified that the page table(s) have changed so they can take the necessary steps to use the updated version. Note that the actual way to send an IPI is SoC specific, as Xtensa does not have a common way to do this at the moment.

Signed-off-by: Daniel Leung <[email protected]>
1 parent eb546a8 commit 81ea436
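Since IPI delivery is SoC specific, the core code in this commit only calls a weak stub. Purely as an illustrative sketch, a SoC layer could override the weak z_xtensa_mmu_tlb_ipi() stub added below along these lines; soc_ipi_raise() is a made-up name standing in for whatever cross-core interrupt mechanism the hardware actually provides, and is not part of this commit. Each recipient CPU is then expected to call z_xtensa_mmu_tlb_shootdown() from its IPI handler (see the sketch after the header diff below).

#include <zephyr/kernel.h>

/* Hypothetical SoC-layer override of the weak z_xtensa_mmu_tlb_ipi()
 * stub added by this commit: interrupt every CPU except the caller.
 */
void z_xtensa_mmu_tlb_ipi(void)
{
	for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		if (i == _current_cpu->id) {
			/* The calling CPU already operates on the
			 * updated page tables; skip it.
			 */
			continue;
		}

		soc_ipi_raise(i); /* hypothetical: raise an IPI on CPU i */
	}
}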

File tree: 2 files changed, +99 -0 lines changed

arch/xtensa/core/xtensa_mmu.c

Lines changed: 79 additions & 0 deletions
@@ -659,6 +659,10 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 		pa += KB(4);
 	}
 
+#if CONFIG_MP_MAX_NUM_CPUS > 1
+	z_xtensa_mmu_tlb_ipi();
+#endif
+
 	k_spin_unlock(&xtensa_mmu_lock, key);
 }
 
@@ -791,9 +795,80 @@ void arch_mem_unmap(void *addr, size_t size)
 		va += KB(4);
 	}
 
+#if CONFIG_MP_MAX_NUM_CPUS > 1
+	z_xtensa_mmu_tlb_ipi();
+#endif
+
 	k_spin_unlock(&xtensa_mmu_lock, key);
 }
 
+/* This should be implemented in the SoC layer.
+ * This weak version is here to avoid build errors.
+ */
+void __weak z_xtensa_mmu_tlb_ipi(void)
+{
+}
+
+void z_xtensa_mmu_tlb_shootdown(void)
+{
+	unsigned int key;
+
+	/* Need to lock interrupts to prevent any context
+	 * switching until all the page tables are updated.
+	 * Otherwise, we could switch to another thread and
+	 * run it with incorrect page tables, which would
+	 * result in permission issues.
+	 */
+	key = arch_irq_lock();
+
+	/* We don't have information on which page tables have changed,
+	 * so we just invalidate the cache for all L1 page tables.
+	 */
+	sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table));
+	sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
+
+#ifdef CONFIG_USERSPACE
+	struct k_thread *thread = _current_cpu->current;
+
+	/* If the current thread is a user thread, we need to see if it
+	 * has been migrated to another memory domain, as its L1 page
+	 * table would then differ from the currently active one.
+	 */
+	if ((thread->base.user_options & K_USER) == K_USER) {
+		uint32_t ptevaddr_entry, ptevaddr, thread_ptables;
+
+		/* Need to read the currently used L1 page table.
+		 * We know that the L1 page table is always mapped at way
+		 * MMU_PTE_WAY, so we can skip the probing step by
+		 * generating the query entry directly.
+		 */
+		ptevaddr_entry = Z_XTENSA_PAGE_TABLE_VADDR | MMU_PTE_WAY;
+		ptevaddr = xtensa_dtlb_paddr_read(ptevaddr_entry);
+
+		thread_ptables = (uint32_t)thread->arch.ptables;
+
+		if (thread_ptables != ptevaddr) {
+			/* Need to remap the thread page tables if the ones
+			 * indicated by the current thread are different
+			 * from the currently mapped page table.
+			 */
+			switch_page_tables((uint32_t *)thread_ptables, false, false);
+		}
+	}
+#endif /* CONFIG_USERSPACE */
+
+	/* L2 entries are handled via auto-refill, so invalidating the
+	 * auto-refill TLBs refreshes the L2 page tables.
+	 *
+	 * L1 will be refreshed during context switch, so there is no
+	 * need to do anything here.
+	 */
+	xtensa_tlb_autorefill_invalidate();
+
+	arch_irq_unlock(key);
+}
+
 #ifdef CONFIG_USERSPACE
 
 static inline uint32_t *alloc_l1_table(void)
@@ -951,6 +1026,10 @@ static inline int update_region(uint32_t *ptables, uintptr_t start,
 	ret = region_map_update(ptables, start, size, ring, flags);
 #endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */
 
+#if CONFIG_MP_MAX_NUM_CPUS > 1
+	z_xtensa_mmu_tlb_ipi();
+#endif
+
 	k_spin_unlock(&xtensa_mmu_lock, key);
 
 	return ret;

include/zephyr/arch/xtensa/xtensa_mmu.h

Lines changed: 20 additions & 0 deletions
@@ -60,4 +60,24 @@ extern int xtensa_soc_mmu_ranges_num;
 
 void z_xtensa_mmu_init(void);
 
+/**
+ * @brief Tell other processors to flush their TLBs.
+ *
+ * This sends an IPI to the other processors, telling them to
+ * invalidate their cached page tables and flush their TLBs.
+ * This is needed when one processor is updating page tables
+ * that may affect threads running on other processors.
+ *
+ * @note This needs to be implemented in the SoC layer.
+ */
+void z_xtensa_mmu_tlb_ipi(void);
+
+/**
+ * @brief Invalidate cached page tables and flush TLBs.
+ *
+ * This invalidates the cache for all page tables and flushes
+ * the TLBs, as they may have been modified by other processors.
+ */
+void z_xtensa_mmu_tlb_shootdown(void);
+
 #endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H */
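On the receiving side, the SoC's IPI handler is expected to call z_xtensa_mmu_tlb_shootdown() so the interrupted CPU picks up the updated page tables. A minimal sketch of such a handler, assuming a hypothetical interrupt line SOC_MMU_IPI_IRQ and acknowledge helper soc_ipi_clear(), neither of which comes from this commit:

#include <zephyr/kernel.h>
#include <zephyr/irq.h>
#include <zephyr/arch/xtensa/xtensa_mmu.h>

#define SOC_MMU_IPI_IRQ 10 /* hypothetical IPI interrupt line */

static void soc_mmu_ipi_isr(const void *arg)
{
	ARG_UNUSED(arg);

	soc_ipi_clear(SOC_MMU_IPI_IRQ); /* hypothetical: acknowledge the IPI */

	/* Invalidate cached page tables and flush the TLBs on this CPU. */
	z_xtensa_mmu_tlb_shootdown();
}

static void soc_mmu_ipi_init(void)
{
	IRQ_CONNECT(SOC_MMU_IPI_IRQ, 0, soc_mmu_ipi_isr, NULL, 0);
	irq_enable(SOC_MMU_IPI_IRQ);
}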
