Skip to content

Commit 26baa1f

Browse files
Author: Peter Zijlstra (committed)
sched: Add TIF_NEED_RESCHED_LAZY infrastructure
Add the basic infrastructure to split the TIF_NEED_RESCHED bit in two. Either bit will cause a resched on return-to-user, but only TIF_NEED_RESCHED will drive IRQ preemption. No behavioural change intended.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 0f0d1b8 commit 26baa1f

File tree

7 files changed: +48 −24 lines changed

include/linux/entry-common.h

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -64,7 +64,8 @@
6464

6565
#define EXIT_TO_USER_MODE_WORK \
6666
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
67-
_TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
67+
_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
68+
_TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
6869
ARCH_EXIT_TO_USER_MODE_WORK)
6970

7071
/**

include/linux/entry-kvm.h

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -17,8 +17,9 @@
1717
#endif
1818

1919
#define XFER_TO_GUEST_MODE_WORK \
20-
(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
21-
_TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
20+
(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING | \
21+
_TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \
22+
ARCH_XFER_TO_GUEST_MODE_WORK)
2223

2324
struct kvm_vcpu;
2425

include/linux/sched.h

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2002,7 +2002,8 @@ static inline void set_tsk_need_resched(struct task_struct *tsk)
20022002

20032003
static inline void clear_tsk_need_resched(struct task_struct *tsk)
20042004
{
2005-
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2005+
atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
2006+
(atomic_long_t *)&task_thread_info(tsk)->flags);
20062007
}
20072008

20082009
static inline int test_tsk_need_resched(struct task_struct *tsk)

include/linux/thread_info.h

Lines changed: 17 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -59,6 +59,14 @@ enum syscall_work_bit {
5959

6060
#include <asm/thread_info.h>
6161

62+
#ifndef TIF_NEED_RESCHED_LAZY
63+
#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
64+
#error Inconsistent PREEMPT_LAZY
65+
#endif
66+
#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
67+
#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
68+
#endif
69+
6270
#ifdef __KERNEL__
6371

6472
#ifndef arch_set_restart_data
@@ -179,22 +187,27 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
179187

180188
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
181189

182-
static __always_inline bool tif_need_resched(void)
190+
static __always_inline bool tif_test_bit(int bit)
183191
{
184-
return arch_test_bit(TIF_NEED_RESCHED,
192+
return arch_test_bit(bit,
185193
(unsigned long *)(&current_thread_info()->flags));
186194
}
187195

188196
#else
189197

190-
static __always_inline bool tif_need_resched(void)
198+
static __always_inline bool tif_test_bit(int bit)
191199
{
192-
return test_bit(TIF_NEED_RESCHED,
200+
return test_bit(bit,
193201
(unsigned long *)(&current_thread_info()->flags));
194202
}
195203

196204
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
197205

206+
static __always_inline bool tif_need_resched(void)
207+
{
208+
return tif_test_bit(TIF_NEED_RESCHED);
209+
}
210+
198211
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
199212
static inline int arch_within_stack_frames(const void * const stack,
200213
const void * const stackend,

kernel/entry/common.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -98,7 +98,7 @@ __always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
9898

9999
local_irq_enable_exit_to_user(ti_work);
100100

101-
if (ti_work & _TIF_NEED_RESCHED)
101+
if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
102102
schedule();
103103

104104
if (ti_work & _TIF_UPROBE)

kernel/entry/kvm.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -13,7 +13,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
1313
return -EINTR;
1414
}
1515

16-
if (ti_work & _TIF_NEED_RESCHED)
16+
if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
1717
schedule();
1818

1919
if (ti_work & _TIF_NOTIFY_RESUME)
@@ -24,7 +24,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
2424
return ret;
2525

2626
ti_work = read_thread_flags();
27-
} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
27+
} while (ti_work & XFER_TO_GUEST_MODE_WORK);
2828
return 0;
2929
}
3030

kernel/sched/core.c

Lines changed: 21 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -941,10 +941,9 @@ static inline void hrtick_rq_init(struct rq *rq)
941941
* this avoids any races wrt polling state changes and thereby avoids
942942
* spurious IPIs.
943943
*/
944-
static inline bool set_nr_and_not_polling(struct task_struct *p)
944+
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
945945
{
946-
struct thread_info *ti = task_thread_info(p);
947-
return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
946+
return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
948947
}
949948

950949
/*
@@ -969,9 +968,9 @@ static bool set_nr_if_polling(struct task_struct *p)
969968
}
970969

971970
#else
972-
static inline bool set_nr_and_not_polling(struct task_struct *p)
971+
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
973972
{
974-
set_tsk_need_resched(p);
973+
set_ti_thread_flag(ti, tif);
975974
return true;
976975
}
977976

@@ -1076,28 +1075,37 @@ void wake_up_q(struct wake_q_head *head)
10761075
* might also involve a cross-CPU call to trigger the scheduler on
10771076
* the target CPU.
10781077
*/
1079-
void resched_curr(struct rq *rq)
1078+
static void __resched_curr(struct rq *rq, int tif)
10801079
{
10811080
struct task_struct *curr = rq->curr;
1081+
struct thread_info *cti = task_thread_info(curr);
10821082
int cpu;
10831083

10841084
lockdep_assert_rq_held(rq);
10851085

1086-
if (test_tsk_need_resched(curr))
1086+
if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
10871087
return;
10881088

10891089
cpu = cpu_of(rq);
10901090

10911091
if (cpu == smp_processor_id()) {
1092-
set_tsk_need_resched(curr);
1093-
set_preempt_need_resched();
1092+
set_ti_thread_flag(cti, tif);
1093+
if (tif == TIF_NEED_RESCHED)
1094+
set_preempt_need_resched();
10941095
return;
10951096
}
10961097

1097-
if (set_nr_and_not_polling(curr))
1098-
smp_send_reschedule(cpu);
1099-
else
1098+
if (set_nr_and_not_polling(cti, tif)) {
1099+
if (tif == TIF_NEED_RESCHED)
1100+
smp_send_reschedule(cpu);
1101+
} else {
11001102
trace_sched_wake_idle_without_ipi(cpu);
1103+
}
1104+
}
1105+
1106+
void resched_curr(struct rq *rq)
1107+
{
1108+
__resched_curr(rq, TIF_NEED_RESCHED);
11011109
}
11021110

11031111
void resched_cpu(int cpu)
@@ -1192,7 +1200,7 @@ static void wake_up_idle_cpu(int cpu)
11921200
* and testing of the above solutions didn't appear to report
11931201
* much benefits.
11941202
*/
1195-
if (set_nr_and_not_polling(rq->idle))
1203+
if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
11961204
smp_send_reschedule(cpu);
11971205
else
11981206
trace_sched_wake_idle_without_ipi(cpu);

0 commit comments

Comments (0)