Commit 121cb49

kernel: sched: inline update_cache
This improves context switching by 7% when measured using the
thread_metric benchmark.

Before:

**** Thread-Metric Preemptive Scheduling Test ****
Relative Time: 120
Time Period Total: 5451879

After:

**** Thread-Metric Preemptive Scheduling Test ****
Relative Time: 30
Time Period Total: 5853535

Signed-off-by: Anas Nashif <[email protected]>
1 parent 9b5260d commit 121cb49
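
For readers unfamiliar with the macro: ALWAYS_INLINE-style macros conventionally expand to the GCC/Clang always_inline function attribute, which forces the compiler to substitute the function body at every call site instead of emitting a call. The sketch below is a minimal, self-contained illustration of that idiom, not code from this commit; the macro definition shown and the pick_next() helper are assumptions made for the example.

#include <stdio.h>

/* Assumed GCC/Clang-style definition; Zephyr's own ALWAYS_INLINE lives in
 * its toolchain headers and may differ in detail.
 */
#define ALWAYS_INLINE inline __attribute__((__always_inline__))

/* Hypothetical stand-in for a hot-path helper such as update_cache():
 * with always_inline the comparison below is emitted directly inside the
 * caller, so no call/return sequence is executed on the fast path.
 */
static ALWAYS_INLINE int pick_next(int preempt_ok, int current, int candidate)
{
	return preempt_ok ? candidate : current;
}

int main(void)
{
	printf("next: %d\n", pick_next(1, 3, 7));
	return 0;
}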

File tree

1 file changed: +2 -2 lines changed

kernel/sched.c

Lines changed: 2 additions & 2 deletions
@@ -35,7 +35,7 @@ struct k_spinlock _sched_spinlock;
  */
 __incoherent struct k_thread _thread_dummy;
 
-static void update_cache(int preempt_ok);
+static ALWAYS_INLINE void update_cache(int preempt_ok);
 static void halt_thread(struct k_thread *thread, uint8_t new_state);
 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
 
@@ -320,7 +320,7 @@ static void update_metairq_preempt(struct k_thread *thread)
  */
 }
 
-static void update_cache(int preempt_ok)
+static ALWAYS_INLINE void update_cache(int preempt_ok)
 {
 #ifndef CONFIG_SMP
 	struct k_thread *thread = next_up();
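
Note that the ALWAYS_INLINE qualifier is applied to both the forward declaration and the definition of update_cache(), keeping the two in agreement; because the function is static and therefore only referenced from within kernel/sched.c, the compiler can typically fold its body into each caller and drop the out-of-line copy, which is where the measured context-switch savings come from.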
