@@ -16,6 +16,12 @@
 #include <kernel_arch_func.h>
 #include <mmu.h>
 
+/* Skip the TLB IPI when updating page tables.
+ * This allows us to send the IPI only after the last
+ * change of a series.
+ */
+#define OPTION_NO_TLB_IPI BIT(0)
+
 /* Level 1 contains page table entries
  * necessary to map the page table itself.
  */
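
Review note: the flag above lets a caller batch a series of page-table updates behind a single TLB shoot-down. Below is a minimal sketch of that pattern; map_region_series() and struct region are hypothetical illustrations, and only update_region() and OPTION_NO_TLB_IPI come from this patch.

/* Hypothetical illustration: apply several page-table updates but let
 * only the final one broadcast the TLB IPI to other CPUs.
 */
struct region {
	uintptr_t start;
	size_t size;
};

static int map_region_series(uint32_t *ptables, const struct region *regions,
			     size_t count, uint32_t ring, uint32_t flags)
{
	int ret = 0;

	for (size_t i = 0; i < count; i++) {
		/* Suppress the IPI for every update except the last. */
		uint32_t option = (i == count - 1U) ? 0 : OPTION_NO_TLB_IPI;

		ret = update_region(ptables, regions[i].start, regions[i].size,
				    ring, flags, option);
		if (ret != 0) {
			/* On an early failure a real caller would still
			 * need to flush remote TLBs; omitted for brevity.
			 */
			break;
		}
	}

	return ret;
}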
@@ -995,7 +1001,8 @@ static int region_map_update(uint32_t *ptables, uintptr_t start,
 }
 
 static inline int update_region(uint32_t *ptables, uintptr_t start,
-				size_t size, uint32_t ring, uint32_t flags)
+				size_t size, uint32_t ring, uint32_t flags,
+				uint32_t option)
 {
 	int ret;
 	k_spinlock_key_t key;
@@ -1027,28 +1034,19 @@ static inline int update_region(uint32_t *ptables, uintptr_t start,
 #endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */
 
 #if CONFIG_MP_MAX_NUM_CPUS > 1
-	z_xtensa_mmu_tlb_ipi();
+	if ((option & OPTION_NO_TLB_IPI) != OPTION_NO_TLB_IPI) {
+		z_xtensa_mmu_tlb_ipi();
+	}
 #endif
 
 	k_spin_unlock(&xtensa_mmu_lock, key);
 
 	return ret;
 }
 
-static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size)
+static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t option)
 {
-	return update_region(ptables, start, size, Z_XTENSA_KERNEL_RING, Z_XTENSA_MMU_W);
-}
-
-void xtensa_set_stack_perms(struct k_thread *thread)
-{
-	if ((thread->base.user_options & K_USER) == 0) {
-		return;
-	}
-
-	update_region(thread_page_tables_get(thread),
-		      thread->stack_info.start, thread->stack_info.size,
-		      Z_XTENSA_USER_RING, Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB);
+	return update_region(ptables, start, size, Z_XTENSA_KERNEL_RING, Z_XTENSA_MMU_W, option);
 }
 
 void xtensa_user_stack_perms(struct k_thread *thread)
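
Review note: the new conditional in update_region() uses the "all bits of the mask set" idiom; with a single-bit mask it behaves the same as testing whether the bit is absent. A standalone check of that equivalence, with BIT() redefined locally so the snippet compiles outside the Zephyr tree:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1UL << (n))
#define OPTION_NO_TLB_IPI BIT(0)

int main(void)
{
	uint32_t skip_ipi = OPTION_NO_TLB_IPI;
	uint32_t send_ipi = 0;

	/* Mirrors the test in update_region(): the IPI is sent only
	 * when the OPTION_NO_TLB_IPI bit is absent from "option".
	 */
	assert((skip_ipi & OPTION_NO_TLB_IPI) == OPTION_NO_TLB_IPI);
	assert((send_ipi & OPTION_NO_TLB_IPI) != OPTION_NO_TLB_IPI);

	return 0;
}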
@@ -1058,7 +1056,7 @@ void xtensa_user_stack_perms(struct k_thread *thread)
 
 	update_region(thread_page_tables_get(thread),
 		      thread->stack_info.start, thread->stack_info.size,
-		      Z_XTENSA_USER_RING, Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB);
+		      Z_XTENSA_USER_RING, Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB, 0);
 }
 
 int arch_mem_domain_max_partitions_get(void)
@@ -1073,7 +1071,7 @@ int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 
 	/* Reset the partition's region back to defaults */
 	return reset_region(domain->arch.ptables, partition->start,
-			    partition->size);
+			    partition->size, 0);
 }
 
 int arch_mem_domain_partition_add(struct k_mem_domain *domain,
@@ -1083,7 +1081,7 @@ int arch_mem_domain_partition_add(struct k_mem_domain *domain,
 	struct k_mem_partition *partition = &domain->partitions[partition_id];
 
 	return update_region(domain->arch.ptables, partition->start,
-			     partition->size, ring, partition->attr);
+			     partition->size, ring, partition->attr, 0);
 }
 
 /* These APIs don't need to do anything */
@@ -1101,19 +1099,42 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
 	is_user = (thread->base.user_options & K_USER) != 0;
 	is_migration = (old_ptables != NULL) && is_user;
 
-	/* Give access to the thread's stack in its new
-	 * memory domain if it is migrating.
-	 */
-	if (is_migration) {
-		xtensa_set_stack_perms(thread);
-	}
-
 	if (is_migration) {
+		/* Give access to the thread's stack in its new
+		 * memory domain if it is migrating.
+		 */
+		update_region(thread_page_tables_get(thread),
+			      thread->stack_info.start, thread->stack_info.size,
+			      Z_XTENSA_USER_RING,
+			      Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB,
+			      OPTION_NO_TLB_IPI);
+		/* ... and reset the thread's stack permissions in
+		 * the old page tables.
+		 */
 		ret = reset_region(old_ptables,
 				   thread->stack_info.start,
-				   thread->stack_info.size);
+				   thread->stack_info.size, 0);
+	}
+
+	/* Need to switch to the new page tables if this is
+	 * the currently running thread.
+	 */
+	if (thread == _current_cpu->current) {
+		switch_page_tables(thread->arch.ptables, true, true);
 	}
 
+#if CONFIG_MP_MAX_NUM_CPUS > 1
+	/* Need to tell other CPUs to switch to the new page table
+	 * in case the thread is running on one of them.
+	 *
+	 * Note that there is no need to send a TLB IPI if this is
+	 * a migration, as one was already sent above by reset_region().
+	 */
+	if ((thread != _current_cpu->current) && !is_migration) {
+		z_xtensa_mmu_tlb_ipi();
+	}
+#endif
+
 	return ret;
 }
 
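Review note: whichever branch runs in the hunk above, remote CPUs receive at most one TLB shoot-down per call. A toy model of that accounting (plain C, not kernel code; ipi_count() is a hypothetical stand-in for the paths in arch_mem_domain_thread_add()):

#include <assert.h>
#include <stdbool.h>

/* Hypothetical model: on SMP, an IPI reaches other CPUs either from
 * reset_region(old_ptables, ..., 0) during migration or from the
 * explicit z_xtensa_mmu_tlb_ipi() call, never both.
 */
static int ipi_count(bool is_migration, bool is_current)
{
	int ipis = 0;

	if (is_migration) {
		/* update_region(new, ..., OPTION_NO_TLB_IPI): no IPI.
		 * reset_region(old, ..., 0): one IPI.
		 */
		ipis++;
	}

	/* If is_current, the local CPU calls switch_page_tables() itself. */

	if (!is_current && !is_migration) {
		ipis++;	/* explicit z_xtensa_mmu_tlb_ipi() */
	}

	return ipis;
}

int main(void)
{
	/* Every combination broadcasts at most one IPI. */
	assert(ipi_count(true, true) == 1);
	assert(ipi_count(true, false) == 1);
	assert(ipi_count(false, false) == 1);
	assert(ipi_count(false, true) == 0);
	return 0;
}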
@@ -1136,10 +1157,15 @@ int arch_mem_domain_thread_remove(struct k_thread *thread)
 
 	/* Restore permissions on the thread's stack area since it is no
 	 * longer a member of the domain.
+	 *
+	 * Note that, since every thread must have an associated memory
+	 * domain, removing a thread from a domain will be followed by
+	 * adding it back to another. So there is no need to send a TLB
+	 * IPI at this point.
 	 */
 	return reset_region(domain->arch.ptables,
 			    thread->stack_info.start,
-			    thread->stack_info.size);
+			    thread->stack_info.size, OPTION_NO_TLB_IPI);
 }
 
 static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool write)
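
Review note: the shoot-down deferred here pairs with the one issued on the add side. A sketch of the sequence the new comment relies on, assuming the remove/add hooks are invoked back to back (as Zephyr's k_mem_domain_add_thread() does when moving a thread) and that the internal kernel_arch_interface.h declarations are in scope; move_thread_between_domains() is a hypothetical wrapper and error handling is omitted:

/* The IPI skipped via OPTION_NO_TLB_IPI in the remove path is delivered
 * once by the add path, via reset_region(old_ptables, ..., 0) or the
 * explicit z_xtensa_mmu_tlb_ipi() call above.
 */
static void move_thread_between_domains(struct k_thread *thread)
{
	arch_mem_domain_thread_remove(thread);	/* no IPI yet */
	arch_mem_domain_thread_add(thread);	/* one IPI for the whole move */
}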