
Commit 5879d2d

Nicolas Pitre authored and carlescufi committed
sched: minor time slicing cleanup
Make sliceable() the actual condition for a sliceable thread.

Avoid creating a slice timeout for non-sliceable threads.

Always reset slice_expired even if the next thread is not sliceable.

Fold slice_expired_locked() into z_time_slice() to avoid the hidden
unlock/lock. Change `curr` to `thread` as this is not necessarily
the current thread (yet) being set.

Make variables static.

Signed-off-by: Nicolas Pitre <[email protected]>
1 parent: 94ae33c

1 file changed: kernel/sched.c (+44, −59 lines)
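For context: the slice_ticks and slice_max_prio globals touched in the first hunk below are normally configured through the public scheduler API rather than written directly. A minimal sketch of enabling system-wide round-robin slicing; the 20 ms quantum and priority 5 threshold are illustrative values, not taken from this commit:

#include <zephyr/kernel.h>

void enable_round_robin(void)
{
	/* Slice CPU time in 20 ms quanta among preemptible threads at
	 * numeric priority 5 or greater (i.e. not more urgent than 5).
	 * This feeds the slice_ticks and slice_max_prio globals that
	 * sliceable() checks below.
	 */
	k_sched_time_slice_set(20, 5);
}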
@@ -414,30 +414,45 @@ static void flag_ipi(void)
 
 static int slice_ticks;
 static int slice_max_prio;
-struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
-bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
+static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
+static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
 
-static inline int slice_time(struct k_thread *curr)
+#ifdef CONFIG_SWAP_NONATOMIC
+/* If z_swap() isn't atomic, then it's possible for a timer interrupt
+ * to try to timeslice away _current after it has already pended
+ * itself but before the corresponding context switch. Treat that as
+ * a noop condition in z_time_slice().
+ */
+static struct k_thread *pending_current;
+#endif
+
+static inline int slice_time(struct k_thread *thread)
 {
 	int ret = slice_ticks;
 
 #ifdef CONFIG_TIMESLICE_PER_THREAD
-	if (curr->base.slice_ticks != 0) {
-		ret = curr->base.slice_ticks;
+	if (thread->base.slice_ticks != 0) {
+		ret = thread->base.slice_ticks;
 	}
 #endif
 	return ret;
 }
 
-#ifdef CONFIG_SWAP_NONATOMIC
-/* If z_swap() isn't atomic, then it's possible for a timer interrupt
- * to try to timeslice away _current after it has already pended
- * itself but before the corresponding context switch. Treat that as
- * a noop condition in z_time_slice().
- */
-static struct k_thread *pending_current;
+static inline bool sliceable(struct k_thread *thread)
+{
+	bool ret = is_preempt(thread)
+		&& slice_time(thread) != 0
+		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
+		&& !z_is_thread_prevented_from_running(thread)
+		&& !z_is_idle_thread_object(thread);
+
+#ifdef CONFIG_TIMESLICE_PER_THREAD
+	ret |= thread->base.slice_ticks != 0;
 #endif
 
+	return ret;
+}
+
 static void slice_timeout(struct _timeout *t)
 {
 	int cpu = ARRAY_INDEX(slice_timeouts, t);
@@ -458,8 +473,8 @@ void z_reset_time_slice(struct k_thread *curr)
 	int cpu = _current_cpu->id;
 
 	z_abort_timeout(&slice_timeouts[cpu]);
-	if (slice_time(curr) != 0) {
-		slice_expired[cpu] = false;
+	slice_expired[cpu] = false;
+	if (sliceable(curr)) {
 		z_add_timeout(&slice_timeouts[cpu], slice_timeout,
 			      K_TICKS(slice_time(curr) - 1));
 	}
@@ -492,63 +507,33 @@ void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
 }
 #endif
 
-static inline bool sliceable(struct k_thread *thread)
-{
-	bool ret = is_preempt(thread)
-		&& !z_is_thread_prevented_from_running(thread)
-		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
-		&& !z_is_idle_thread_object(thread);
-
-#ifdef CONFIG_TIMESLICE_PER_THREAD
-	ret |= thread->base.slice_ticks != 0;
-#endif
-
-	return ret;
-}
-
-static k_spinlock_key_t slice_expired_locked(k_spinlock_key_t sched_lock_key)
-{
-	struct k_thread *curr = _current;
-
-#ifdef CONFIG_TIMESLICE_PER_THREAD
-	if (curr->base.slice_expired) {
-		k_spin_unlock(&sched_spinlock, sched_lock_key);
-		curr->base.slice_expired(curr, curr->base.slice_data);
-		sched_lock_key = k_spin_lock(&sched_spinlock);
-	}
-#endif
-	if (!z_is_thread_prevented_from_running(curr)) {
-		move_thread_to_end_of_prio_q(curr);
-	}
-	z_reset_time_slice(curr);
-
-	return sched_lock_key;
-}
-
 /* Called out of each timer interrupt */
 void z_time_slice(void)
 {
 	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+	struct k_thread *curr = _current;
 
 #ifdef CONFIG_SWAP_NONATOMIC
-	if (pending_current == _current) {
-		z_reset_time_slice(_current);
+	if (pending_current == curr) {
+		z_reset_time_slice(curr);
 		k_spin_unlock(&sched_spinlock, key);
 		return;
 	}
 	pending_current = NULL;
 #endif
 
-	if (slice_time(_current) && sliceable(_current)) {
-		if (slice_expired[_current_cpu->id]) {
-			/* Note: this will (if so enabled) internally
-			 * drop and reacquire the scheduler lock
-			 * around the callback! Don't put anything
-			 * after this line that requires
-			 * synchronization.
-			 */
-			key = slice_expired_locked(key);
+	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
+#ifdef CONFIG_TIMESLICE_PER_THREAD
+		if (curr->base.slice_expired) {
+			k_spin_unlock(&sched_spinlock, key);
+			curr->base.slice_expired(curr, curr->base.slice_data);
+			key = k_spin_lock(&sched_spinlock);
+		}
+#endif
+		if (!z_is_thread_prevented_from_running(curr)) {
+			move_thread_to_end_of_prio_q(curr);
 		}
+		z_reset_time_slice(curr);
 	}
 	k_spin_unlock(&sched_spinlock, key);
 }
@@ -1582,7 +1567,7 @@ void z_sched_ipi(void)
 #endif
 
 #ifdef CONFIG_TIMESLICING
-	if (slice_time(_current) && sliceable(_current)) {
+	if (sliceable(_current)) {
 		z_time_slice();
 	}
 #endif
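With CONFIG_TIMESLICE_PER_THREAD=y, the curr->base.slice_expired callback invoked above is installed via k_thread_time_slice_set(), whose signature appears in the third hunk header. A hedged sketch of how application code might wire this up; the callback name, budget value, and bookkeeping are illustrative, and note that the callback runs from the timer interrupt with sched_spinlock dropped, per the unlock/lock pair in z_time_slice():

#include <zephyr/kernel.h>

#ifdef CONFIG_TIMESLICE_PER_THREAD
/* Invoked from z_time_slice() once this thread's slice budget is
 * consumed; the scheduler lock is not held here.
 */
static void on_slice_expired(struct k_thread *thread, void *data)
{
	/* e.g. update a per-thread CPU-budget accounting structure */
}

void give_thread_budget(struct k_thread *th)
{
	/* A non-zero slice_ticks also makes the thread sliceable via
	 * the "ret |= thread->base.slice_ticks != 0" branch of
	 * sliceable() above.
	 */
	k_thread_time_slice_set(th, 100, on_slice_expired, NULL);
}
#endif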
