@@ -423,12 +423,15 @@ RTM_EXPORT(rt_thread_startup);
423423 */
424424rt_err_t rt_thread_close (rt_thread_t thread )
425425{
426+ rt_err_t error ;
426427 rt_sched_lock_level_t slvl ;
427428 rt_uint8_t thread_status ;
428429
429430 /* forbid scheduling on current core if closing current thread */
430431 RT_ASSERT (thread != rt_thread_self () || rt_critical_level ());
431432
433+ error = RT_EOK ;
434+
432435 /* before checking status of scheduler */
433436 rt_sched_lock (& slvl );
434437
@@ -447,12 +450,64 @@ rt_err_t rt_thread_close(rt_thread_t thread)
447450
448451 /* change stat */
449452 rt_sched_thread_close (thread );
450- }
451453
454+ #ifdef RT_USING_SMP
455+ int cpu_id ;
456+ rt_tick_t start_tick ;
457+ /**
458+ * Use a conservative 1s timeout; this may need adjusting based on
459+ * hardware characteristics and system load.
460+ */
461+ rt_tick_t timeout = RT_TICK_PER_SECOND ;
462+ rt_bool_t need_wait = RT_FALSE ;
463+
464+ /**
465+ * in SMP, the current thread and target thread may run on different CPUs.
466+ * although we set the target thread's state to closed, it may still execute
467+ * on another CPU until rescheduled. send IPI to force immediate rescheduling.
468+ */
469+ cpu_id = RT_SCHED_CTX (thread ).oncpu ;
470+ rt_sched_unlock (slvl );
471+ if ((cpu_id != RT_CPU_DETACHED ) && (cpu_id != rt_cpu_get_id ()))
472+ {
473+ rt_hw_ipi_send (RT_SCHEDULE_IPI , RT_CPU_MASK ^ (1 << cpu_id ));
474+ need_wait = RT_TRUE ;
475+ }
476+
477+ start_tick = rt_tick_get ();
478+
479+ /**
480+ * continuously check if target thread has detached from CPU core.
481+ * this loop ensures the thread fully stops before resource cleanup.
482+ * a timeout prevents deadlock if thread fails to detach promptly.
483+ */
484+ while (need_wait )
485+ {
486+ if (rt_tick_get_delta (start_tick ) >= timeout )
487+ {
488+ LOG_D ("Timeout waiting for thread %s (tid=%p) to detach from CPU%d" ,
489+ thread -> parent .name , thread , cpu_id );
490+ error = - RT_ETIMEOUT ;
491+ break ;
492+ }
493+
494+ rt_sched_lock (& slvl );
495+ cpu_id = RT_SCHED_CTX (thread ).oncpu ;
496+ rt_sched_unlock (slvl );
497+
498+ if (cpu_id == RT_CPU_DETACHED )
499+ {
500+ break ;
501+ }
502+ }
503+
504+ return error ;
505+ #endif
506+ }
452507 /* scheduler works are done */
453508 rt_sched_unlock (slvl );
454509
455- return RT_EOK ;
510+ return error ;
456511}
457512RTM_EXPORT (rt_thread_close );
458513
@@ -491,10 +546,14 @@ static rt_err_t _thread_detach(rt_thread_t thread)
491546
492547 error = rt_thread_close (thread );
493548
494- _thread_detach_from_mutex (thread );
549+ /* detach resources only when the current thread has successfully closed the target thread. */
550+ if (error == RT_EOK )
551+ {
552+ _thread_detach_from_mutex (thread );
495553
496- /* insert to defunct thread list */
497- rt_thread_defunct_enqueue (thread );
554+ /* insert to defunct thread list */
555+ rt_thread_defunct_enqueue (thread );
556+ }
498557
499558 rt_exit_critical_safe (critical_level );
500559 return error ;
@@ -1142,4 +1201,4 @@ rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size
11421201}
11431202RTM_EXPORT (rt_thread_get_name );
11441203
1145- /**@}*/
1204+ /**@}*/
0 commit comments