
Commit 851d14a

Andy Ross authored and nashif committed
kernel/sched: Remove "cooperative scheduling only" special cases
The scheduler has historically had an API where an application can inform the kernel that it will never create a thread that can be preempted, and the kernel and architecture layer would use that as an optimization hint to eliminate some code paths. Those optimizations have dwindled to almost nothing at this point, and they're now objectively a smaller impact than the special casing that was required to handle the idle thread (which, obviously, must always be preemptible).

Fix this by eliminating the idea of "cooperative only" and ensuring that there will always be at least one preemptible priority with value >=0. CONFIG_NUM_PREEMPT_PRIORITIES now specifies the number of user-accessible priorities other than the idle thread.

The only remaining workaround is that some older architectures (and also SPARC) use the CONFIG_PREEMPT_ENABLED=n state as a hint to skip thread switching on interrupt exit. So detect exactly those platforms and implement a minimal workaround in the idle loop (basically "just call swap()") instead, with a big explanation.

Note that this also fixes a bug in one of the philosophers samples, where it would ask for 6 cooperative priorities but then use values -7 through -2. It was assuming the kernel would magically create a cooperative priority for its idle thread, which wasn't correct even before.

Fixes #34584

Signed-off-by: Andy Ross <[email protected]>
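For orientation, here is a minimal sketch of where thread priorities land under the simplified scheme described above. The numeric values assume CONFIG_NUM_COOP_PRIORITIES=16 and CONFIG_NUM_PREEMPT_PRIORITIES=15; the function itself is illustrative only and is not part of this commit.

/* Illustrative only: priority layout after this change, assuming
 * CONFIG_NUM_COOP_PRIORITIES=16 and CONFIG_NUM_PREEMPT_PRIORITIES=15.
 */
#include <kernel.h>

void priority_layout_sketch(void)
{
	int coop_hi    = K_PRIO_COOP(0);     /* -16: highest cooperative priority */
	int coop_lo    = K_PRIO_COOP(15);    /*  -1: lowest cooperative priority */
	int preempt_hi = K_PRIO_PREEMPT(0);  /*   0: highest preemptible priority */
	int app_lo     = K_LOWEST_APPLICATION_THREAD_PRIO;  /* 14 */
	int idle       = K_IDLE_PRIO;        /* 15: idle thread, always preemptible */

	ARG_UNUSED(coop_hi);
	ARG_UNUSED(coop_lo);
	ARG_UNUSED(preempt_hi);
	ARG_UNUSED(app_lo);
	ARG_UNUSED(idle);
}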
1 parent a7c732d commit 851d14a

File tree

7 files changed (+39 -73 lines):

include/kernel.h
kernel/Kconfig
kernel/idle.c
kernel/include/ksched.h
kernel/init.c
kernel/sched.c
samples/philosophers/sample.yaml

include/kernel.h

Lines changed: 7 additions & 31 deletions

@@ -36,43 +36,19 @@ extern "C" {
  * @}
  */

-#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
-#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
-#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
-#elif defined(CONFIG_COOP_ENABLED)
-#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES + 1)
-#define _NUM_PREEMPT_PRIO (0)
-#elif defined(CONFIG_PREEMPT_ENABLED)
-#define _NUM_COOP_PRIO (0)
-#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
-#else
-#error "invalid configuration"
-#endif
-
-#define K_PRIO_COOP(x) (-(_NUM_COOP_PRIO - (x)))
-#define K_PRIO_PREEMPT(x) (x)
-
 #define K_ANY NULL
 #define K_END NULL

-#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
-#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
-#elif defined(CONFIG_COOP_ENABLED)
-#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES - 1)
-#elif defined(CONFIG_PREEMPT_ENABLED)
-#define K_HIGHEST_THREAD_PRIO 0
-#else
-#error "invalid configuration"
+#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
+#error Zero available thread priorities defined!
 #endif

-#ifdef CONFIG_PREEMPT_ENABLED
-#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
-#else
-#define K_LOWEST_THREAD_PRIO -1
-#endif
+#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
+#define K_PRIO_PREEMPT(x) (x)

+#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
+#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
 #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
-
 #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
 #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

@@ -2553,7 +2529,7 @@ struct k_mutex {
 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.owner = NULL, \
 	.lock_count = 0, \
-	.owner_orig_prio = K_LOWEST_THREAD_PRIO, \
+	.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
 }

 /**

kernel/Kconfig

Lines changed: 5 additions & 1 deletion

@@ -90,7 +90,11 @@ config PREEMPT_ENABLED

 config PRIORITY_CEILING
 	int "Priority inheritance ceiling"
-	default 0
+	default -127
+	help
+	  This defines the minimum priority value (i.e. the logically
+	  highest priority) that a thread will acquire as part of
+	  k_mutex priority inheritance.

 config NUM_METAIRQ_PRIORITIES
 	int "Number of very-high priority 'preemptor' threads"

kernel/idle.c

Lines changed: 17 additions & 7 deletions

@@ -13,6 +13,7 @@
 #include <stdbool.h>
 #include <logging/log.h>
 #include <ksched.h>
+#include <kswap.h>

 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

@@ -66,6 +67,8 @@ void idle(void *unused1, void *unused2, void *unused3)
 	ARG_UNUSED(unused2);
 	ARG_UNUSED(unused3);

+	__ASSERT_NO_MSG(_current->base.prio >= 0);
+
 	while (true) {
 		/* SMP systems without a working IPI can't
 		 * actual enter an idle state, because they
@@ -95,14 +98,21 @@ void idle(void *unused1, void *unused2, void *unused3)
 			k_cpu_idle();
 		}

-		/* It is possible to (pathologically) configure the
-		 * idle thread to have a non-preemptible priority.
-		 * You might think this is an API bug, but we actually
-		 * have a test that exercises this. Handle the edge
-		 * case when that happens.
+#if !defined(CONFIG_PREEMPT_ENABLED)
+# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
+		/* A legacy mess: the idle thread is by definition
+		 * preemptible as far as the modern scheduler is
+		 * concerned, but older platforms use
+		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
+		 * that interrupt exit always returns to the
+		 * interrupted context.  So in that setup we need to
+		 * explicitly yield in the idle thread otherwise
+		 * nothing else will run once it starts.
 		 */
-		if (K_IDLE_PRIO < 0) {
-			k_yield();
+		if (_kernel.ready_q.cache != _current) {
+			z_swap_unlocked();
 		}
+# endif
+#endif
 	}
 }

kernel/include/ksched.h

Lines changed: 0 additions & 5 deletions

@@ -250,27 +250,22 @@ static inline void _ready_one_thread(_wait_q_t *wq)

 static inline void z_sched_lock(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 1U, "");

 	--_current->base.sched_locked;

 	compiler_barrier();
-
-#endif
 }

 static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 0U, "");

 	compiler_barrier();

 	++_current->base.sched_locked;
-#endif
 }

 static ALWAYS_INLINE bool z_is_thread_timeout_expired(struct k_thread *thread)

kernel/init.c

Lines changed: 1 addition & 1 deletion

@@ -259,7 +259,7 @@ static void init_idle_thread(int i)

 	z_setup_new_thread(thread, stack,
 			   CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
-			   NULL, NULL, K_LOWEST_THREAD_PRIO, K_ESSENTIAL,
+			   NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
 			   tname);
 	z_mark_thread_as_started(thread);

kernel/sched.c

Lines changed: 8 additions & 27 deletions

@@ -53,12 +53,8 @@ static void end_thread(struct k_thread *thread);

 static inline int is_preempt(struct k_thread *thread)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	/* explanation in kernel_struct.h */
 	return thread->base.preempt <= _PREEMPT_THRESHOLD;
-#else
-	return 0;
-#endif
 }

 static inline int is_metairq(struct k_thread *thread)

@@ -154,15 +150,6 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
 		return true;
 	}

-	/* The idle threads can look "cooperative" if there are no
-	 * preemptible priorities (this is sort of an API glitch).
-	 * They must always be preemptible.
-	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) &&
-	    z_is_idle_thread_object(_current)) {
-		return true;
-	}
-
 	return false;
 }

@@ -845,7 +832,6 @@ void k_sched_lock(void)

 void k_sched_unlock(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	LOCKED(&sched_spinlock) {
 		__ASSERT(_current->base.sched_locked != 0U, "");
 		__ASSERT(!arch_is_in_isr(), "");

@@ -860,7 +846,6 @@ void k_sched_unlock(void)
 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

 	z_reschedule_unlocked();
-#endif
 }

 struct k_thread *z_swap_next_thread(void)

@@ -1201,20 +1186,16 @@ void z_impl_k_yield(void)

 	SYS_PORT_TRACING_FUNC(k_thread, yield);

-	if (!z_is_idle_thread_object(_current)) {
-		k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

-		if (!IS_ENABLED(CONFIG_SMP) ||
-		    z_is_thread_queued(_current)) {
-			dequeue_thread(&_kernel.ready_q.runq,
-				       _current);
-		}
-		queue_thread(&_kernel.ready_q.runq, _current);
-		update_cache(1);
-		z_swap(&sched_spinlock, key);
-	} else {
-		z_swap_unlocked();
+	if (!IS_ENABLED(CONFIG_SMP) ||
+	    z_is_thread_queued(_current)) {
+		dequeue_thread(&_kernel.ready_q.runq,
+			       _current);
 	}
+	queue_thread(&_kernel.ready_q.runq, _current);
+	update_cache(1);
+	z_swap(&sched_spinlock, key);
 }

 #ifdef CONFIG_USERSPACE

samples/philosophers/sample.yaml

Lines changed: 1 addition & 1 deletion

@@ -38,4 +38,4 @@ tests:
   sample.kernel.philosopher.coop_only:
     extra_configs:
       - CONFIG_NUM_PREEMPT_PRIORITIES=0
-      - CONFIG_NUM_COOP_PRIORITIES=6
+      - CONFIG_NUM_COOP_PRIORITIES=7
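Why 7 rather than 6: under the reworked K_PRIO_COOP() macro the configured count maps directly onto the cooperative range, so the sample's -7 through -2 values need seven cooperative priorities. An illustrative compile-time check (not part of the sample) for the coop_only configuration above:

/* Illustrative only, assuming CONFIG_NUM_COOP_PRIORITIES=7 as set above:
 * the cooperative range is K_PRIO_COOP(0)..K_PRIO_COOP(6) == -7..-1,
 * which covers the -7..-2 priorities the philosopher threads request.
 * With the previous value of 6, the new macro would only reach -6.
 */
#include <kernel.h>

BUILD_ASSERT(K_PRIO_COOP(0) == -7);
BUILD_ASSERT(K_PRIO_COOP(6) == -1);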
