@@ -423,12 +423,15 @@ RTM_EXPORT(rt_thread_startup);
423423 */
424424rt_err_t rt_thread_close (rt_thread_t thread )
425425{
426+ rt_err_t error ;
426427 rt_sched_lock_level_t slvl ;
427428 rt_uint8_t thread_status ;
428429
429430 /* forbid scheduling on current core if closing current thread */
430431 RT_ASSERT (thread != rt_thread_self () || rt_critical_level ());
431432
433+ error = RT_EOK ;
434+
432435 /* before checking status of scheduler */
433436 rt_sched_lock (& slvl );
434437
@@ -447,12 +450,62 @@ rt_err_t rt_thread_close(rt_thread_t thread)
447450
448451 /* change stat */
449452 rt_sched_thread_close (thread );
450- }
451453
454+ #ifdef RT_USING_SMP
455+ int cpu_id ;
456+ rt_tick_t start_tick = rt_tick_get ();
457+ /**
458+ * using conservative 1s timeout, may adjust based on
459+ * hardware characteristics and system load.
460+ */
461+ rt_tick_t timeout = RT_TICK_PER_SECOND ;
462+ rt_bool_t need_wait = RT_FALSE ;
463+
464+ /**
465+ * in SMP, the current thread and target thread may run on different CPUs.
466+ * although we set the target thread's state to closed, it may still execute
467+ * on another CPU until rescheduled. send IPI to force immediate rescheduling.
468+ */
469+ cpu_id = RT_SCHED_CTX (thread ).oncpu ;
470+ rt_sched_unlock (slvl );
471+ if ((cpu_id != RT_CPU_DETACHED ) && (cpu_id != rt_cpu_get_id ()))
472+ {
473+ rt_hw_ipi_send (RT_SCHEDULE_IPI , RT_CPU_MASK ^ (1 << cpu_id ));
474+ need_wait = RT_TRUE ;
475+ }
476+
477+ /**
478+ * continuously check if target thread has detached from CPU core.
479+ * this loop ensures the thread fully stops before resource cleanup.
480+ * a timeout prevents deadlock if thread fails to detach promptly.
481+ */
482+ while (need_wait )
483+ {
484+ if (rt_tick_get_delta (start_tick ) >= timeout )
485+ {
486+ LOG_D ("Timeout waiting for thread %.*s (tid=%p) to detach from CPU%d" ,
487+ RT_NAME_MAX , thread -> parent .name , thread , cpu_id );
488+ error = - RT_ETIMEOUT ;
489+ break ;
490+ }
491+
492+ rt_sched_lock (& slvl );
493+ cpu_id = RT_SCHED_CTX (thread ).oncpu ;
494+ rt_sched_unlock (slvl );
495+
496+ if (cpu_id == RT_CPU_DETACHED )
497+ {
498+ break ;
499+ }
500+ }
501+
502+ return error ;
503+ #endif
504+ }
452505 /* scheduler works are done */
453506 rt_sched_unlock (slvl );
454507
455- return RT_EOK ;
508+ return error ;
456509}
457510RTM_EXPORT (rt_thread_close );
458511
@@ -491,10 +544,14 @@ static rt_err_t _thread_detach(rt_thread_t thread)
491544
492545 error = rt_thread_close (thread );
493546
494- _thread_detach_from_mutex (thread );
547+ /* clean up resources only when the target thread was successfully closed */
548+ if (error == RT_EOK )
549+ {
550+ _thread_detach_from_mutex (thread );
495551
496- /* insert to defunct thread list */
497- rt_thread_defunct_enqueue (thread );
552+ /* insert to defunct thread list */
553+ rt_thread_defunct_enqueue (thread );
554+ }
498555
499556 rt_exit_critical_safe (critical_level );
500557 return error ;
@@ -1142,4 +1199,4 @@ rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size
11421199}
11431200RTM_EXPORT (rt_thread_get_name );
11441201
1145- /**@}*/
1202+ /**@}*/
0 commit comments