@@ -1083,6 +1083,13 @@ static void __resched_curr(struct rq *rq, int tif)
10831083
10841084 lockdep_assert_rq_held (rq );
10851085
1086+ /*
1087+ * Always immediately preempt the idle task; no point in delaying doing
1088+ * actual work.
1089+ */
1090+ if (is_idle_task (curr ) && tif == TIF_NEED_RESCHED_LAZY )
1091+ tif = TIF_NEED_RESCHED ;
1092+
10861093 if (cti -> flags & ((1 << tif ) | _TIF_NEED_RESCHED ))
10871094 return ;
10881095
@@ -1108,6 +1115,32 @@ void resched_curr(struct rq *rq)
11081115 __resched_curr (rq , TIF_NEED_RESCHED );
11091116}
11101117
1118+ #ifdef CONFIG_PREEMPT_DYNAMIC
1119+ static DEFINE_STATIC_KEY_FALSE (sk_dynamic_preempt_lazy );
1120+ static __always_inline bool dynamic_preempt_lazy (void )
1121+ {
1122+ return static_branch_unlikely (& sk_dynamic_preempt_lazy );
1123+ }
1124+ #else
1125+ static __always_inline bool dynamic_preempt_lazy (void )
1126+ {
1127+ return IS_ENABLED (CONFIG_PREEMPT_LAZY );
1128+ }
1129+ #endif
1130+
1131+ static __always_inline int get_lazy_tif_bit (void )
1132+ {
1133+ if (dynamic_preempt_lazy ())
1134+ return TIF_NEED_RESCHED_LAZY ;
1135+
1136+ return TIF_NEED_RESCHED ;
1137+ }
1138+
/*
 * Lazily request a reschedule of rq's current task. Degrades to an
 * immediate TIF_NEED_RESCHED request when lazy preemption is not enabled
 * (see get_lazy_tif_bit()).
 */
void resched_curr_lazy(struct rq *rq)
{
	__resched_curr(rq, get_lazy_tif_bit());
}
1143+
11111144void resched_cpu (int cpu )
11121145{
11131146 struct rq * rq = cpu_rq (cpu );
@@ -5612,6 +5645,10 @@ void sched_tick(void)
56125645 update_rq_clock (rq );
56135646 hw_pressure = arch_scale_hw_pressure (cpu_of (rq ));
56145647 update_hw_load_avg (rq_clock_task (rq ), rq , hw_pressure );
5648+
5649+ if (dynamic_preempt_lazy () && tif_test_bit (TIF_NEED_RESCHED_LAZY ))
5650+ resched_curr (rq );
5651+
56155652 donor -> sched_class -> task_tick (rq , donor , 0 );
56165653 if (sched_feat (LATENCY_WARN ))
56175654 resched_latency = cpu_resched_latency (rq );
@@ -7374,27 +7411,39 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
73747411 * preempt_schedule <- NOP
73757412 * preempt_schedule_notrace <- NOP
73767413 * irqentry_exit_cond_resched <- NOP
7414+ * dynamic_preempt_lazy <- false
73777415 *
73787416 * VOLUNTARY:
73797417 * cond_resched <- __cond_resched
73807418 * might_resched <- __cond_resched
73817419 * preempt_schedule <- NOP
73827420 * preempt_schedule_notrace <- NOP
73837421 * irqentry_exit_cond_resched <- NOP
7422+ * dynamic_preempt_lazy <- false
73847423 *
73857424 * FULL:
73867425 * cond_resched <- RET0
73877426 * might_resched <- RET0
73887427 * preempt_schedule <- preempt_schedule
73897428 * preempt_schedule_notrace <- preempt_schedule_notrace
73907429 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7430+ * dynamic_preempt_lazy <- false
7431+ *
7432+ * LAZY:
7433+ * cond_resched <- RET0
7434+ * might_resched <- RET0
7435+ * preempt_schedule <- preempt_schedule
7436+ * preempt_schedule_notrace <- preempt_schedule_notrace
7437+ * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7438+ * dynamic_preempt_lazy <- true
73917439 */
73927440
/*
 * Dynamic preemption modes, matching the strings accepted by
 * sched_dynamic_mode(). Stays preempt_dynamic_undefined until a mode is
 * selected at init.
 */
enum {
	preempt_dynamic_undefined = -1,
	preempt_dynamic_none,
	preempt_dynamic_voluntary,
	preempt_dynamic_full,
	preempt_dynamic_lazy,
};
73997448
74007449int preempt_dynamic_mode = preempt_dynamic_undefined ;
@@ -7410,15 +7459,23 @@ int sched_dynamic_mode(const char *str)
74107459 if (!strcmp (str , "full" ))
74117460 return preempt_dynamic_full ;
74127461
7462+ #ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7463+ if (!strcmp (str , "lazy" ))
7464+ return preempt_dynamic_lazy ;
7465+ #endif
7466+
74137467 return - EINVAL ;
74147468}
74157469
7470+ #define preempt_dynamic_key_enable (f ) static_key_enable(&sk_dynamic_##f.key)
7471+ #define preempt_dynamic_key_disable (f ) static_key_disable(&sk_dynamic_##f.key)
7472+
74167473#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL )
74177474#define preempt_dynamic_enable (f ) static_call_update(f, f##_dynamic_enabled)
74187475#define preempt_dynamic_disable (f ) static_call_update(f, f##_dynamic_disabled)
74197476#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY )
7420- #define preempt_dynamic_enable (f ) static_key_enable(&sk_dynamic_##f.key )
7421- #define preempt_dynamic_disable (f ) static_key_disable(&sk_dynamic_##f.key )
7477+ #define preempt_dynamic_enable (f ) preempt_dynamic_key_enable(f )
7478+ #define preempt_dynamic_disable (f ) preempt_dynamic_key_disable(f )
74227479#else
74237480#error "Unsupported PREEMPT_DYNAMIC mechanism"
74247481#endif
@@ -7438,6 +7495,7 @@ static void __sched_dynamic_update(int mode)
74387495 preempt_dynamic_enable (preempt_schedule );
74397496 preempt_dynamic_enable (preempt_schedule_notrace );
74407497 preempt_dynamic_enable (irqentry_exit_cond_resched );
7498+ preempt_dynamic_key_disable (preempt_lazy );
74417499
74427500 switch (mode ) {
74437501 case preempt_dynamic_none :
@@ -7447,6 +7505,7 @@ static void __sched_dynamic_update(int mode)
74477505 preempt_dynamic_disable (preempt_schedule );
74487506 preempt_dynamic_disable (preempt_schedule_notrace );
74497507 preempt_dynamic_disable (irqentry_exit_cond_resched );
7508+ preempt_dynamic_key_disable (preempt_lazy );
74507509 if (mode != preempt_dynamic_mode )
74517510 pr_info ("Dynamic Preempt: none\n" );
74527511 break ;
@@ -7458,6 +7517,7 @@ static void __sched_dynamic_update(int mode)
74587517 preempt_dynamic_disable (preempt_schedule );
74597518 preempt_dynamic_disable (preempt_schedule_notrace );
74607519 preempt_dynamic_disable (irqentry_exit_cond_resched );
7520+ preempt_dynamic_key_disable (preempt_lazy );
74617521 if (mode != preempt_dynamic_mode )
74627522 pr_info ("Dynamic Preempt: voluntary\n" );
74637523 break ;
@@ -7469,9 +7529,22 @@ static void __sched_dynamic_update(int mode)
74697529 preempt_dynamic_enable (preempt_schedule );
74707530 preempt_dynamic_enable (preempt_schedule_notrace );
74717531 preempt_dynamic_enable (irqentry_exit_cond_resched );
7532+ preempt_dynamic_key_disable (preempt_lazy );
74727533 if (mode != preempt_dynamic_mode )
74737534 pr_info ("Dynamic Preempt: full\n" );
74747535 break ;
7536+
7537+ case preempt_dynamic_lazy :
7538+ if (!klp_override )
7539+ preempt_dynamic_disable (cond_resched );
7540+ preempt_dynamic_disable (might_resched );
7541+ preempt_dynamic_enable (preempt_schedule );
7542+ preempt_dynamic_enable (preempt_schedule_notrace );
7543+ preempt_dynamic_enable (irqentry_exit_cond_resched );
7544+ preempt_dynamic_key_enable (preempt_lazy );
7545+ if (mode != preempt_dynamic_mode )
7546+ pr_info ("Dynamic Preempt: lazy\n" );
7547+ break ;
74757548 }
74767549
74777550 preempt_dynamic_mode = mode ;
@@ -7534,6 +7607,8 @@ static void __init preempt_dynamic_init(void)
75347607 sched_dynamic_update (preempt_dynamic_none );
75357608 } else if (IS_ENABLED (CONFIG_PREEMPT_VOLUNTARY )) {
75367609 sched_dynamic_update (preempt_dynamic_voluntary );
7610+ } else if (IS_ENABLED (CONFIG_PREEMPT_LAZY )) {
7611+ sched_dynamic_update (preempt_dynamic_lazy );
75377612 } else {
75387613 /* Default static call setting, nothing to do */
75397614 WARN_ON_ONCE (!IS_ENABLED (CONFIG_PREEMPT ));
@@ -7554,6 +7629,7 @@ static void __init preempt_dynamic_init(void)
75547629PREEMPT_MODEL_ACCESSOR (none );
75557630PREEMPT_MODEL_ACCESSOR (voluntary );
75567631PREEMPT_MODEL_ACCESSOR (full );
7632+ PREEMPT_MODEL_ACCESSOR (lazy );
75577633
75587634#else /* !CONFIG_PREEMPT_DYNAMIC: */
75597635
0 commit comments