@@ -659,6 +659,10 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 		pa += KB(4);
 	}
 
+#if CONFIG_MP_MAX_NUM_CPUS > 1
+	z_xtensa_mmu_tlb_ipi();
+#endif
+
 	k_spin_unlock(&xtensa_mmu_lock, key);
 }
 
@@ -791,9 +795,80 @@ void arch_mem_unmap(void *addr, size_t size)
 		va += KB(4);
 	}
 
+#if CONFIG_MP_MAX_NUM_CPUS > 1
+	z_xtensa_mmu_tlb_ipi();
+#endif
+
 	k_spin_unlock(&xtensa_mmu_lock, key);
 }
 
+/* This should be implemented in the SoC layer.
+ * This weak version is here to avoid build errors.
+ */
+void __weak z_xtensa_mmu_tlb_ipi(void)
+{
+}
+
+void z_xtensa_mmu_tlb_shootdown(void)
+{
+	unsigned int key;
+
+	/* Need to lock interrupts to prevent any context
+	 * switching until all the page tables are updated.
+	 * Or else we would be switching to another thread
+	 * and running that with incorrect page tables
+	 * which would result in permission issues.
+	 */
+	key = arch_irq_lock();
+
+	/* We don't have information on which page tables have changed,
+	 * so we just invalidate the cache for all L1 page tables.
+	 */
+	sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table));
+	sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
+
+#ifdef CONFIG_USERSPACE
+	struct k_thread *thread = _current_cpu->current;
+
+	/* If current thread is a user thread, we need to see if it has
+	 * been migrated to another memory domain as the L1 page table
+	 * is different from the currently used one.
+	 */
+	if ((thread->base.user_options & K_USER) == K_USER) {
+		uint32_t ptevaddr_entry, ptevaddr, thread_ptables;
+
+		/* Need to read the currently used L1 page table.
+		 * We know that L1 page table is always mapped at way
+		 * MMU_PTE_WAY, so we can skip the probing step by
+		 * generating the query entry directly.
+		 */
+		ptevaddr_entry = Z_XTENSA_PAGE_TABLE_VADDR | MMU_PTE_WAY;
+		ptevaddr = xtensa_dtlb_paddr_read(ptevaddr_entry);
+
+		thread_ptables = (uint32_t)thread->arch.ptables;
+
+		if (thread_ptables != ptevaddr) {
+			/* Need to remap the thread page tables if the ones
+			 * indicated by the current thread are different
+			 * than the current mapped page table.
+			 */
+			switch_page_tables((uint32_t *)thread_ptables, false, false);
+		}
+
+	}
+#endif /* CONFIG_USERSPACE */
+
+	/* L2 are done via autofill, so invalidate autofill TLBs
+	 * would refresh the L2 page tables.
+	 *
+	 * L1 will be refreshed during context switch so no need
+	 * to do anything here.
+	 */
+	xtensa_tlb_autorefill_invalidate();
+
+	arch_irq_unlock(key);
+}
+
 #ifdef CONFIG_USERSPACE
 
 static inline uint32_t *alloc_l1_table(void)
@@ -951,6 +1026,10 @@ static inline int update_region(uint32_t *ptables, uintptr_t start,
 	ret = region_map_update(ptables, start, size, ring, flags);
 #endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */
 
+#if CONFIG_MP_MAX_NUM_CPUS > 1
+	z_xtensa_mmu_tlb_ipi();
+#endif
+
 	k_spin_unlock(&xtensa_mmu_lock, key);
 
 	return ret;
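
Note on the weak z_xtensa_mmu_tlb_ipi() stub above: the diff leaves the actual inter-processor interrupt to the SoC layer, with each remote CPU expected to call z_xtensa_mmu_tlb_shootdown() when the IPI arrives. A minimal sketch of that wiring is shown below; soc_broadcast_ipi() and SOC_TLB_IPI_IRQ are hypothetical placeholders for whatever IPI mechanism a given SoC provides, not part of this change.

/* Sketch only: SoC-side wiring for the TLB shootdown IPI. */
#include <zephyr/kernel.h>
#include <zephyr/irq.h>
#include <zephyr/init.h>

void z_xtensa_mmu_tlb_shootdown(void);

/* Hypothetical SoC helper that raises an IRQ on all other CPUs. */
extern void soc_broadcast_ipi(unsigned int irq);

/* Strong override of the weak stub: notify every other CPU that
 * the page tables changed.
 */
void z_xtensa_mmu_tlb_ipi(void)
{
	soc_broadcast_ipi(SOC_TLB_IPI_IRQ); /* hypothetical IRQ number */
}

/* Runs on each remote CPU when the IPI fires: drop its stale
 * TLB/page-table cache view.
 */
static void tlb_ipi_handler(const void *unused)
{
	ARG_UNUSED(unused);
	z_xtensa_mmu_tlb_shootdown();
}

static int tlb_ipi_init(void)
{
	IRQ_CONNECT(SOC_TLB_IPI_IRQ, 0, tlb_ipi_handler, NULL, 0);
	irq_enable(SOC_TLB_IPI_IRQ);
	return 0;
}
SYS_INIT(tlb_ipi_init, PRE_KERNEL_2, 0);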