@@ -30,11 +30,6 @@ extern struct k_thread *pending_current;
 
 struct k_spinlock _sched_spinlock;
 
-/* Storage to "complete" the context switch from an invalid/incomplete thread
- * context (ex: exiting an ISR that aborted _current)
- */
-__incoherent struct k_thread _thread_dummies[CONFIG_MP_MAX_NUM_CPUS];
-
 static void update_cache(int preempt_ok);
 static void halt_thread(struct k_thread *thread, uint8_t new_state);
 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
@@ -428,21 +423,19 @@ void z_sched_start(struct k_thread *thread)
  * another CPU to catch the IPI we sent and halt. Note that we check
  * for ourselves being asynchronously halted first to prevent simple
  * deadlocks (but not complex ones involving cycles of 3+ threads!).
- * Acts to release the provided lock before returning.
  */
-static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
+static k_spinlock_key_t thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
 {
 	if (is_halting(_current)) {
 		halt_thread(_current,
 			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
 	}
 	k_spin_unlock(&_sched_spinlock, key);
 	while (is_halting(thread)) {
-		unsigned int k = arch_irq_lock();
-
-		arch_spin_relax(); /* Requires interrupts be masked */
-		arch_irq_unlock(k);
 	}
+	key = k_spin_lock(&_sched_spinlock);
+	z_sched_switch_spin(thread);
+	return key;
 }
 
 /* Shared handler for k_thread_{suspend,abort}(). Called with the
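
For orientation, this is how thread_halt_spin() reads once the hunk above is applied, assembled from the context and "+" lines; the comments are annotations added here, not part of the patch:

static k_spinlock_key_t thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
	/* If we are being asynchronously halted ourselves, service that
	 * first to avoid a simple two-thread deadlock.
	 */
	if (is_halting(_current)) {
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
	}

	/* Drop the scheduler lock so the other CPU can halt the target,
	 * busy-wait until it has done so, then re-take the lock and hand
	 * the fresh key back to the caller.
	 */
	k_spin_unlock(&_sched_spinlock, key);
	while (is_halting(thread)) {
	}
	key = k_spin_lock(&_sched_spinlock);
	z_sched_switch_spin(thread);
	return key;
}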
@@ -472,7 +465,8 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
 		arch_sched_ipi();
 #endif
 		if (arch_is_in_isr()) {
-			thread_halt_spin(thread, key);
+			key = thread_halt_spin(thread, key);
+			k_spin_unlock(&_sched_spinlock, key);
 		} else {
 			add_to_waitq_locked(_current, wq);
 			z_swap(&_sched_spinlock, key);
@@ -486,10 +480,6 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
 			k_spin_unlock(&_sched_spinlock, key);
 		}
 	}
-	/* NOTE: the scheduler lock has been released. Don't put
-	 * logic here, it's likely to be racy/deadlocky even if you
-	 * re-take the lock!
-	 */
 }
 
 
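
A sketch of the resulting lock hand-off on the caller side in z_thread_halt(), assembled from the two hunks above; the lock-state comments are annotations added here for illustration:

	if (arch_is_in_isr()) {
		/* Lock held on entry; thread_halt_spin() releases it while
		 * spinning and returns with it re-acquired.
		 */
		key = thread_halt_spin(thread, key);
		/* The caller now owns the lock again and drops it. */
		k_spin_unlock(&_sched_spinlock, key);
	} else {
		/* Thread context: pend on the wait queue and switch away;
		 * z_swap() releases the lock as part of the swap.
		 */
		add_to_waitq_locked(_current, wq);
		z_swap(&_sched_spinlock, key);
	}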
@@ -1289,8 +1279,6 @@ extern void thread_abort_hook(struct k_thread *thread);
  */
 static void halt_thread(struct k_thread *thread, uint8_t new_state)
 {
-	bool dummify = false;
-
 	/* We hold the lock, and the thread is known not to be running
 	 * anywhere.
 	 */
@@ -1307,16 +1295,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
 		}
 		(void)z_abort_thread_timeout(thread);
 		unpend_all(&thread->join_queue);
-
-		/* Edge case: aborting _current from within an
-		 * ISR that preempted it requires clearing the
-		 * _current pointer so the upcoming context
-		 * switch doesn't clobber the now-freed
-		 * memory
-		 */
-		if (thread == _current && arch_is_in_isr()) {
-			dummify = true;
-		}
 	}
 #ifdef CONFIG_SMP
 	unpend_all(&thread->halt_queue);
@@ -1355,16 +1333,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
 		k_thread_abort_cleanup(thread);
 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
-
-		/* Do this "set _current to dummy" step last so that
-		 * subsystems above can rely on _current being
-		 * unchanged. Disabled for posix as that arch
-		 * continues to use the _current pointer in its swap
-		 * code.
-		 */
-		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
-			z_dummy_thread_init(&_thread_dummies[_current_cpu->id]);
-		}
 	}
 }
 