|
44 | 44 | #define _CPUS_NR 1 |
45 | 45 | #endif /* RT_USING_SMP */ |
46 | 46 |
|
47 | | -extern rt_list_t rt_thread_defunct; |
| 47 | +static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct); |
48 | 48 |
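
The new static definition replaces the old extern declaration: RT_LIST_OBJECT_INIT points both links of the list head at the head itself, giving an empty circular doubly linked list at compile time. A minimal sketch of the equivalent expansion, simplified from RT-Thread's rtservice.h:

    /* simplified from rtservice.h: an rt_list_t node is a pair of links */
    struct rt_list_node
    {
        struct rt_list_node *next;
        struct rt_list_node *prev;
    };
    typedef struct rt_list_node rt_list_t;

    /* expands to { &(object), &(object) }, i.e. an empty circular list */
    #define RT_LIST_OBJECT_INIT(object) { &(object), &(object) }

    /* so the new definition is equivalent to writing: */
    static rt_list_t _rt_thread_defunct =
    {
        &_rt_thread_defunct,    /* next points back at the head */
        &_rt_thread_defunct     /* prev points back at the head */
    };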
|
49 | 49 | static struct rt_thread idle[_CPUS_NR]; |
50 | 50 | ALIGN(RT_ALIGN_SIZE) |
51 | 51 | static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE]; |
52 | 52 |
|
| 53 | +#ifdef RT_USING_SMP |
| 54 | +#ifndef SYSTEM_THREAD_STACK_SIZE |
| 55 | +#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE |
| 56 | +#endif |
| 57 | +static struct rt_thread rt_system_thread; |
| 58 | +ALIGN(RT_ALIGN_SIZE) |
| 59 | +static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE]; |
| 60 | +static struct rt_semaphore system_sem; |
| 61 | +#endif |
| 62 | + |
53 | 63 | #ifdef RT_USING_IDLE_HOOK |
54 | 64 | #ifndef RT_IDLE_HOOK_LIST_SIZE |
55 | 65 | #define RT_IDLE_HOOK_LIST_SIZE 4 |
@@ -127,62 +137,132 @@ rt_err_t rt_thread_idle_delhook(void (*hook)(void)) |
127 | 137 |
|
128 | 138 | #endif /* RT_USING_IDLE_HOOK */ |
129 | 139 |
|
130 | | -#ifdef RT_USING_HEAP |
| 140 | +#ifdef RT_USING_MODULE |
131 | 141 | /* Return whether there is a defunct thread to be deleted. */ |
132 | 142 | rt_inline int _has_defunct_thread(void) |
133 | 143 | { |
134 | 144 | /* rt_list_isempty has the prototype "int rt_list_isempty(const rt_list_t *l)". |
135 | | - * So the compiler has a good reason that the rt_thread_defunct list does |
136 | | - * not change within rt_thread_idle_excute thus optimize the "while" loop |
| 145 | + * So the compiler may assume that the _rt_thread_defunct list does |
| 146 | + * not change within rt_defunct_execute and optimize the "while" loop |
137 | 147 | * into an "if". |
138 | 148 | * |
139 | 149 | * So add the volatile qualifier here. */ |
140 | | - const volatile rt_list_t *l = (const volatile rt_list_t *)&rt_thread_defunct; |
| 150 | + const volatile rt_list_t *l = (const volatile rt_list_t *)&_rt_thread_defunct; |
141 | 151 |
|
142 | 152 | return l->next != l; |
143 | 153 | } |
144 | | -#endif /* RT_USING_HEAP */ |
| 154 | +#endif /* RT_USING_MODULE */ |
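
The volatile cast matters because other contexts append to _rt_thread_defunct while this code polls it; without the qualifier, a compiler may load l->next once and collapse the loop. A self-contained illustration of the hazard described in the comment (all names here are hypothetical):

    struct node { struct node *next; };
    static struct node head = { &head };    /* empty list: next points at itself */

    /* Without volatile, nothing in the loop body writes to head, so the
     * compiler may read head.next a single time and turn the "while"
     * into an "if". The volatile-qualified pointer forces a re-read on
     * every iteration. */
    static void wait_until_nonempty(void)
    {
        const volatile struct node *l = (const volatile struct node *)&head;

        while (l->next == l)
        {
            /* another thread or interrupt handler links a node in */
        }
    }
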
| 155 | + |
| 156 | +/* enqueue a thread into the defunct queue; |
| 157 | + * must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable |
| 158 | + */ |
| 159 | +void rt_thread_defunct_enqueue(rt_thread_t thread) |
| 160 | +{ |
| 161 | + rt_list_insert_after(&_rt_thread_defunct, &thread->tlist); |
| 162 | +#ifdef RT_USING_SMP |
| 163 | + rt_sem_release(&system_sem); |
| 164 | +#endif |
| 165 | +} |
| 166 | + |
| 167 | +/* dequeue a thread from the defunct queue; |
| 168 | + * must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable |
| 169 | + */ |
| 170 | +rt_thread_t rt_thread_defunct_dequeue(void) |
| 171 | +{ |
| 172 | + rt_thread_t thread = RT_NULL; |
| 173 | + rt_list_t *l = &_rt_thread_defunct; |
| 174 | + |
| 175 | + if (l->next != l) |
| 176 | + { |
| 177 | + thread = rt_list_entry(l->next, |
| 178 | + struct rt_thread, |
| 179 | + tlist); |
| 180 | + rt_list_remove(&(thread->tlist)); |
| 181 | + } |
| 182 | + return thread; |
| 183 | +} |
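
Both helpers leave synchronization to the caller, as their comments require. A hedged usage sketch; rt_hw_interrupt_disable/rt_hw_interrupt_enable are the real kernel primitives, while the wrapper function itself is hypothetical:

    /* hypothetical caller, e.g. the tail of a thread-exit path */
    static void example_retire_thread(rt_thread_t thread)
    {
        rt_base_t lock;

        /* the defunct queue has no lock of its own, so access is
         * serialized with the global interrupt lock */
        lock = rt_hw_interrupt_disable();
        rt_thread_defunct_enqueue(thread);
        rt_hw_interrupt_enable(lock);
    }
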
145 | 184 |
|
146 | 185 | /** |
147 | 186 | * @ingroup Thread |
148 | 187 | * |
149 | 188 | * This function will perform the system background job when the system is idle. |
150 | 189 | */ |
151 | | -void rt_thread_idle_excute(void) |
| 190 | +static void rt_defunct_execute(void) |
152 | 191 | { |
153 | | - /* Loop until there is no dead thread. So one call to rt_thread_idle_excute |
| 192 | + /* Loop until there is no dead thread. So one call to rt_defunct_execute |
154 | 193 | * will do all the cleanups. */ |
155 | | - /* disable interrupt */ |
156 | | - |
157 | | - RT_DEBUG_NOT_IN_INTERRUPT; |
158 | | - |
159 | | -#ifdef RT_USING_HEAP |
160 | 194 | while (1) |
161 | 195 | { |
162 | 196 | rt_base_t lock; |
163 | 197 | rt_thread_t thread; |
| 198 | + void (*cleanup)(struct rt_thread *tid); |
164 | 199 |
|
| 200 | +#ifdef RT_USING_MODULE |
| 201 | + struct rt_dlmodule *module = RT_NULL; |
| 202 | +#endif |
| 203 | + RT_DEBUG_NOT_IN_INTERRUPT; |
| 204 | + |
| 205 | + /* disable interrupt */ |
165 | 206 | lock = rt_hw_interrupt_disable(); |
166 | 207 |
|
| 208 | +#ifdef RT_USING_MODULE |
167 | 209 | /* check whether list is empty */ |
168 | 210 | if (!_has_defunct_thread()) |
169 | 211 | { |
170 | 212 | rt_hw_interrupt_enable(lock); |
171 | 213 | break; |
172 | 214 | } |
173 | 215 | /* get defunct thread */ |
174 | | - thread = rt_list_entry(rt_thread_defunct.next, |
| 216 | + thread = rt_list_entry(_rt_thread_defunct.next, |
175 | 217 | struct rt_thread, |
176 | 218 | tlist); |
| 219 | + module = (struct rt_dlmodule*)thread->module_id; |
| 220 | + if (module) |
| 221 | + { |
| 222 | + dlmodule_destroy(module); |
| 223 | + } |
177 | 224 | /* remove defunct thread */ |
178 | 225 | rt_list_remove(&(thread->tlist)); |
179 | | - /* release thread's stack */ |
180 | | - RT_KERNEL_FREE(thread->stack_addr); |
181 | | - /* delete thread object */ |
182 | | - rt_object_delete((rt_object_t)thread); |
183 | | - rt_hw_interrupt_enable(lock); |
| 226 | +#else |
| 227 | + thread = rt_thread_defunct_dequeue(); |
| 228 | + if (!thread) |
| 229 | + { |
| 230 | + rt_hw_interrupt_enable(lock); |
| 231 | + break; |
| 232 | + } |
| 233 | +#endif |
| 234 | + /* invoke thread cleanup */ |
| 235 | + cleanup = thread->cleanup; |
| 236 | + if (cleanup != RT_NULL) |
| 237 | + { |
| 238 | + rt_hw_interrupt_enable(lock); |
| 239 | + cleanup(thread); |
| 240 | + lock = rt_hw_interrupt_disable(); |
| 241 | + } |
| 242 | + |
| 243 | +#ifdef RT_USING_SIGNALS |
| 244 | + rt_thread_free_sig(thread); |
| 245 | +#endif |
| 246 | + |
| 247 | + /* if it's a system object, do not delete it */ |
| 248 | + if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE) |
| 249 | + { |
| 250 | + /* detach this object */ |
| 251 | + rt_object_detach((rt_object_t)thread); |
| 252 | + /* enable interrupt */ |
| 253 | + rt_hw_interrupt_enable(lock); |
| 254 | + } |
| 255 | + else |
| 256 | + { |
| 257 | + rt_hw_interrupt_enable(lock); |
| 258 | +#ifdef RT_USING_HEAP |
| 259 | + /* release thread's stack */ |
| 260 | + RT_KERNEL_FREE(thread->stack_addr); |
| 261 | + /* delete thread object */ |
| 262 | + rt_object_delete((rt_object_t)thread); |
| 263 | +#endif |
| 264 | + } |
184 | 265 | } |
185 | | -#endif /* RT_USING_HEAP */ |
186 | 266 | } |
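
Interrupts are deliberately re-enabled around the cleanup call, so a handler may block or touch the heap. A hypothetical hook matching the void (*cleanup)(struct rt_thread *tid) field used above:

    /* hypothetical cleanup hook: runs in the idle (or system) thread's
     * context with interrupts enabled, so blocking calls are permitted */
    static void my_thread_cleanup(struct rt_thread *tid)
    {
        rt_kprintf("thread %s retired\n", tid->name);
    }

    /* installed by the application before the thread exits, e.g.:
     *     tid->cleanup = my_thread_cleanup;
     */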
187 | 267 |
|
188 | 268 | extern void rt_system_power_manager(void); |
@@ -214,13 +294,27 @@ static void rt_thread_idle_entry(void *parameter) |
214 | 294 | } |
215 | 295 | #endif /* RT_USING_IDLE_HOOK */ |
216 | 296 |
|
217 | | - rt_thread_idle_excute(); |
| 297 | +#ifndef RT_USING_SMP |
| 298 | + rt_defunct_execute(); |
| 299 | +#endif /* RT_USING_SMP */ |
| 300 | + |
218 | 301 | #ifdef RT_USING_PM |
219 | 302 | rt_system_power_manager(); |
220 | 303 | #endif /* RT_USING_PM */ |
221 | 304 | } |
222 | 305 | } |
223 | 306 |
|
| 307 | +#ifdef RT_USING_SMP |
| 308 | +static void rt_thread_system_entry(void *parameter) |
| 309 | +{ |
| 310 | + while (1) |
| 311 | + { |
| 312 | + rt_sem_take(&system_sem, RT_WAITING_FOREVER); |
| 313 | + rt_defunct_execute(); |
| 314 | + } |
| 315 | +} |
| 316 | +#endif |
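
Under SMP, defunct handling moves out of the idle loop into this semaphore-driven worker, so each idle thread stays wait-free while cleanup runs at thread level. A condensed sketch of the producer/consumer pattern in play (names hypothetical, kernel calls real):

    static struct rt_semaphore work_sem;    /* counts pending work items */

    /* producer side: rt_sem_release never blocks, so it is safe even
     * inside an interrupt-disabled critical section */
    static void producer(void)
    {
        /* ... enqueue a work item ... */
        rt_sem_release(&work_sem);
    }

    /* consumer side: sleeps until the producer signals */
    static void consumer_entry(void *parameter)
    {
        while (1)
        {
            rt_sem_take(&work_sem, RT_WAITING_FOREVER);
            /* ... drain the queue and run the slow cleanup ... */
        }
    }
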
| 317 | + |
224 | 318 | /** |
225 | 319 | * @ingroup SystemInit |
226 | 320 | * |
@@ -250,6 +344,24 @@ void rt_thread_idle_init(void) |
250 | 344 | /* startup */ |
251 | 345 | rt_thread_startup(&idle[i]); |
252 | 346 | } |
| 347 | + |
| 348 | +#ifdef RT_USING_SMP |
| 349 | + RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2); |
| 350 | + |
| 351 | + rt_sem_init(&system_sem, "defunct", 1, RT_IPC_FLAG_FIFO); |
| 352 | + |
| 353 | + /* initialize the defunct-handling system thread */ |
| 354 | + rt_thread_init(&rt_system_thread, |
| 355 | + "tsystem", |
| 356 | + rt_thread_system_entry, |
| 357 | + RT_NULL, |
| 358 | + rt_system_stack, |
| 359 | + sizeof(rt_system_stack), |
| 360 | + RT_THREAD_PRIORITY_MAX - 2, |
| 361 | + 32); |
| 362 | + /* startup */ |
| 363 | + rt_thread_startup(&rt_system_thread); |
| 364 | +#endif |
253 | 365 | } |
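
The RT_ASSERT documents the priority layout this depends on: by RT-Thread convention the idle threads initialized above run at the lowest priority, RT_THREAD_PRIORITY_MAX - 1, and the defunct-handling thread sits one level higher at RT_THREAD_PRIORITY_MAX - 2, so the scheduler needs at least three priority levels for both to fit below ordinary threads.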
254 | 366 |
|
255 | 367 | /** |
|