Commit ea26bcf

aescolar authored and fabiobaltieri committed
Revert "kernel/sched: Fix free-memory write when ISRs abort _current"
This reverts commit 61c7062.

This PR introduced 2 regressions in main CI: 71977 & 71978.
Let's revert it for now to get main's CI passing again.

Signed-off-by: Alberto Escolar Piedras <[email protected]>
1 parent ab41d3a commit ea26bcf

File tree (4 files changed: +14 -42 lines changed)

kernel/include/ksched.h
kernel/init.c
kernel/sched.c
kernel/smp.c

kernel/include/ksched.h

Lines changed: 0 additions & 2 deletions

@@ -37,8 +37,6 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
 #define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
 #endif /* CONFIG_MULTITHREADING */
 
-extern struct k_thread _thread_dummies[CONFIG_MP_MAX_NUM_CPUS];
-
 void z_sched_init(void);
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
 void z_unpend_thread_no_timeout(struct k_thread *thread);
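(The extern declaration removed here pairs with the definition removed in kernel/sched.c below: the __incoherent per-CPU array that gave ISR-abort paths valid storage to context-switch from.)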

kernel/init.c

Lines changed: 6 additions & 1 deletion

@@ -650,7 +650,12 @@ FUNC_NORETURN void z_cstart(void)
 	LOG_CORE_INIT();
 
 #if defined(CONFIG_MULTITHREADING)
-	z_dummy_thread_init(&_thread_dummies[0]);
+	/* Note: The z_ready_thread() call in prepare_multithreading() requires
+	 * a dummy thread even if CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y
+	 */
+	struct k_thread dummy_thread;
+
+	z_dummy_thread_init(&dummy_thread);
 #endif /* CONFIG_MULTITHREADING */
 	/* do any necessary initialization of static devices */
 	z_device_state_init();
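The restored comment carries the load-bearing detail: even with CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y, prepare_multithreading() still calls z_ready_thread(), which requires a dummy thread to exist. A stack local is enough here because the dummy is only needed until the first real context switch; once the scheduler swaps to the main thread, the storage is abandoned along with the rest of z_cstart()'s frame.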

kernel/sched.c

Lines changed: 6 additions & 38 deletions

@@ -30,11 +30,6 @@ extern struct k_thread *pending_current;
 
 struct k_spinlock _sched_spinlock;
 
-/* Storage to "complete" the context switch from an invalid/incomplete thread
- * context (ex: exiting an ISR that aborted _current)
- */
-__incoherent struct k_thread _thread_dummies[CONFIG_MP_MAX_NUM_CPUS];
-
 static void update_cache(int preempt_ok);
 static void halt_thread(struct k_thread *thread, uint8_t new_state);
 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
@@ -428,21 +423,19 @@ void z_sched_start(struct k_thread *thread)
  * another CPU to catch the IPI we sent and halt. Note that we check
  * for ourselves being asynchronously halted first to prevent simple
  * deadlocks (but not complex ones involving cycles of 3+ threads!).
- * Acts to release the provided lock before returning.
  */
-static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
+static k_spinlock_key_t thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
 {
 	if (is_halting(_current)) {
 		halt_thread(_current,
 			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
 	}
 	k_spin_unlock(&_sched_spinlock, key);
 	while (is_halting(thread)) {
-		unsigned int k = arch_irq_lock();
-
-		arch_spin_relax(); /* Requires interrupts be masked */
-		arch_irq_unlock(k);
 	}
+	key = k_spin_lock(&_sched_spinlock);
+	z_sched_switch_spin(thread);
+	return key;
 }
 
 /* Shared handler for k_thread_{suspend,abort}(). Called with the
@@ -472,7 +465,8 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
 		arch_sched_ipi();
 #endif
 	if (arch_is_in_isr()) {
-		thread_halt_spin(thread, key);
+		key = thread_halt_spin(thread, key);
+		k_spin_unlock(&_sched_spinlock, key);
 	} else {
 		add_to_waitq_locked(_current, wq);
 		z_swap(&_sched_spinlock, key);
@@ -486,10 +480,6 @@
 			k_spin_unlock(&_sched_spinlock, key);
 		}
 	}
-	/* NOTE: the scheduler lock has been released. Don't put
-	 * logic here, it's likely to be racy/deadlocky even if you
-	 * re-take the lock!
-	 */
 }
 
 
@@ -1289,8 +1279,6 @@ extern void thread_abort_hook(struct k_thread *thread);
  */
 static void halt_thread(struct k_thread *thread, uint8_t new_state)
 {
-	bool dummify = false;
-
 	/* We hold the lock, and the thread is known not to be running
 	 * anywhere.
 	 */
@@ -1307,16 +1295,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
 		}
 		(void)z_abort_thread_timeout(thread);
 		unpend_all(&thread->join_queue);
-
-		/* Edge case: aborting _current from within an
-		 * ISR that preempted it requires clearing the
-		 * _current pointer so the upcoming context
-		 * switch doesn't clobber the now-freed
-		 * memory
-		 */
-		if (thread == _current && arch_is_in_isr()) {
-			dummify = true;
-		}
 	}
 #ifdef CONFIG_SMP
 	unpend_all(&thread->halt_queue);
@@ -1355,16 +1333,6 @@
 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
 		k_thread_abort_cleanup(thread);
 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
-
-		/* Do this "set _current to dummy" step last so that
-		 * subsystems above can rely on _current being
-		 * unchanged. Disabled for posix as that arch
-		 * continues to use the _current pointer in its swap
-		 * code.
-		 */
-		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
-			z_dummy_thread_init(&_thread_dummies[_current_cpu->id]);
-		}
 	}
 }
 
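The most delicate piece of this revert is the thread_halt_spin() contract. The version restored here drops the scheduler lock while it spins on the other CPU, re-takes it, and returns the fresh key so that the caller in z_thread_halt() performs the final k_spin_unlock(). A minimal sketch of that lock-handoff shape, using Zephyr's real spinlock API but a hypothetical helper and condition callback:

#include <stdbool.h>
#include <zephyr/spinlock.h>

/* Hypothetical helper mirroring the restored contract: enter with the
 * lock held (its key in hand), spin with the lock released so other
 * CPUs can make progress, then leave with the lock re-acquired and
 * return the new key so the caller decides when the final unlock
 * happens.
 */
static k_spinlock_key_t wait_unlocked(struct k_spinlock *lock,
				      k_spinlock_key_t key,
				      bool (*still_busy)(void))
{
	k_spin_unlock(lock, key);	/* don't hold the lock while spinning */

	while (still_busy()) {
		/* bare busy-wait, as in the restored kernel loop */
	}

	return k_spin_lock(lock);	/* hand lock ownership back to the caller */
}

This matches the caller-side change in the z_thread_halt() hunk above: the reverted version had thread_halt_spin() release the lock itself before returning, while the restored one returns the key and leaves the unlock to the caller.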
kernel/smp.c

Lines changed: 2 additions & 1 deletion

@@ -108,6 +108,7 @@ static void wait_for_start_signal(atomic_t *start_flag)
 
 static inline void smp_init_top(void *arg)
 {
+	struct k_thread dummy_thread;
 	struct cpu_start_cb csc = arg ? *(struct cpu_start_cb *)arg : (struct cpu_start_cb){0};
 
 	/* Let start_cpu() know that this CPU has powered up. */
@@ -122,7 +123,7 @@ static inline void smp_init_top(void *arg)
 	/* Initialize the dummy thread struct so that
 	 * the scheduler can schedule actual threads to run.
 	 */
-	z_dummy_thread_init(&_thread_dummies[arch_curr_cpu()->id]);
+	z_dummy_thread_init(&dummy_thread);
 }
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
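As in z_cstart(), the secondary-CPU dummy goes back onto the local startup stack: smp_init_top() only needs it until the scheduler can pick the first real thread for that CPU. The removed _thread_dummies[arch_curr_cpu()->id] indexing was the per-CPU half of the reverted fix, pairing with the array declaration and definition removed from ksched.h and sched.c above.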
