
Commit a9f3a74

entry: Provide generic syscall exit function
Like syscall entry, all architectures have similar and pointlessly different
code to handle pending work before returning from a syscall to user space:

  1) One-time syscall exit work:
      - rseq syscall exit
      - audit
      - syscall tracing
      - tracehook (single stepping)

  2) Preparatory work:
      - Exit to user mode loop (common TIF handling)
      - Architecture specific one-time work arch_exit_to_user_mode_prepare()
      - Address limit and lockdep checks

  3) Final transition (lockdep, tracing, context tracking, RCU). Invokes
     arch_exit_to_user_mode() to handle e.g. speculation mitigations.

Provide a generic version based on the x86 code, which has all the RCU and
instrumentation protections right.

Provide a variant for interrupt return to user mode as well, which shares
the #2 and #3 work items above.

After syscall_exit_to_user_mode() and irqentry_exit_to_user_mode() the
architecture code just has to return to user space. The code after
returning from these functions must not be instrumented.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 142781e commit a9f3a74
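For orientation, here is a minimal sketch of how an architecture's syscall
dispatcher might look once it is converted to the generic exit code. Only
syscall_enter_from_user_mode() and syscall_exit_to_user_mode() come from the
generic entry code; the dispatcher name, the pt_regs field names and the
syscall table access are made up for illustration and differ per architecture.

/* Hypothetical arch syscall dispatcher using the generic entry/exit helpers. */
static void arch_do_syscall(struct pt_regs *regs)
{
	/* #1 entry work (tracing, seccomp, audit, ...) */
	long nr = syscall_enter_from_user_mode(regs, regs->syscall_nr);

	if (nr >= 0 && nr < NR_syscalls)
		regs->retval = sys_call_table[nr](regs);

	/*
	 * Handles the one-time exit work, the TIF work loop and the final
	 * transition; returns with interrupts disabled.
	 */
	syscall_exit_to_user_mode(regs);
	/* Only the non-instrumentable return-to-user code may follow. */
}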

File tree (2 files changed, +358 −0):

  include/linux/entry-common.h
  kernel/entry/common.c
include/linux/entry-common.h

Lines changed: 189 additions & 0 deletions
@@ -29,6 +29,14 @@
 # define _TIF_SYSCALL_AUDIT		(0)
 #endif
 
+#ifndef _TIF_PATCH_PENDING
+# define _TIF_PATCH_PENDING		(0)
+#endif
+
+#ifndef _TIF_UPROBE
+# define _TIF_UPROBE			(0)
+#endif
+
 /*
  * TIF flags handled in syscall_enter_from_usermode()
  */
@@ -41,6 +49,29 @@
 	 _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU |			\
 	 ARCH_SYSCALL_ENTER_WORK)
 
+/*
+ * TIF flags handled in syscall_exit_to_user_mode()
+ */
+#ifndef ARCH_SYSCALL_EXIT_WORK
+# define ARCH_SYSCALL_EXIT_WORK		(0)
+#endif
+
+#define SYSCALL_EXIT_WORK						\
+	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |			\
+	 _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK)
+
+/*
+ * TIF flags handled in exit_to_user_mode_loop()
+ */
+#ifndef ARCH_EXIT_TO_USER_MODE_WORK
+# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
+#endif
+
+#define EXIT_TO_USER_MODE_WORK						\
+	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
+	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING |			\
+	 ARCH_EXIT_TO_USER_MODE_WORK)
+
 /**
  * arch_check_user_regs - Architecture specific sanity check for user mode regs
  * @regs: Pointer to current's pt_regs
@@ -105,6 +136,149 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
  */
 long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
 
+/**
+ * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Defaults to local_irq_enable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
+
+#ifndef local_irq_enable_exit_to_user
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+{
+	local_irq_enable();
+}
+#endif
+
+/**
+ * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
+ *
+ * Defaults to local_irq_disable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_disable_exit_to_user(void);
+
+#ifndef local_irq_disable_exit_to_user
+static inline void local_irq_disable_exit_to_user(void)
+{
+	local_irq_disable();
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
+ *				 to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_loop() with interrupts enabled.
+ *
+ * Defaults to NOOP. Can be supplied by architecture specific code.
+ */
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+					       unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_work
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+					       unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_prepare - Architecture specific preparation for
+ *				    exit to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
+ * last function before return. Defaults to NOOP.
+ */
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+						  unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_prepare
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+						  unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode - Architecture specific final work before
+ *			    exit to user mode.
+ *
+ * Invoked from exit_to_user_mode() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ *
+ * This needs to be __always_inline because it is non-instrumentable code
+ * invoked after context tracking switched to user mode.
+ *
+ * An architecture implementation must not do anything complex, no locking
+ * etc. The main purpose is for speculation mitigations.
+ */
+static __always_inline void arch_exit_to_user_mode(void);
+
+#ifndef arch_exit_to_user_mode
+static __always_inline void arch_exit_to_user_mode(void) { }
+#endif
+
+/**
+ * arch_do_signal - Architecture specific signal delivery function
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from exit_to_user_mode_loop().
+ */
+void arch_do_signal(struct pt_regs *regs);
+
+/**
+ * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
+ * @regs: Pointer to current's pt_regs
+ * @step: Indicator for single step
+ *
+ * Defaults to tracehook_report_syscall_exit(). Can be replaced by
+ * architecture specific code.
+ *
+ * Invoked from syscall_exit_to_user_mode().
+ */
+static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step);
+
+#ifndef arch_syscall_exit_tracehook
+static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
+{
+	tracehook_report_syscall_exit(regs, step);
+}
+#endif
+
+/**
+ * syscall_exit_to_user_mode - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific syscall and ret
+ * from fork code.
+ *
+ * The call order is:
+ *  1) One-time syscall exit work:
+ *	- rseq syscall exit
+ *	- audit
+ *	- syscall tracing
+ *	- tracehook (single stepping)
+ *
+ *  2) Preparatory work:
+ *	- Exit to user mode loop (common TIF handling). Invokes
+ *	  arch_exit_to_user_mode_work() for architecture specific TIF work
+ *	- Architecture specific one-time work arch_exit_to_user_mode_prepare()
+ *	- Address limit and lockdep checks
+ *
+ *  3) Final transition (lockdep, tracing, context tracking, RCU). Invokes
+ *     arch_exit_to_user_mode() to handle e.g. speculation mitigations.
+ */
+void syscall_exit_to_user_mode(struct pt_regs *regs);
+
 /**
  * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
  * @regs: Pointer to current's pt_regs
@@ -118,4 +292,19 @@ long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
  */
 void irqentry_enter_from_user_mode(struct pt_regs *regs);
 
+/**
+ * irqentry_exit_to_user_mode - Interrupt exit work
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific interrupt
+ * handling code.
+ *
+ * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
+ * Interrupt exit is not invoking #1 which is the syscall specific one-time
+ * work.
+ */
+void irqentry_exit_to_user_mode(struct pt_regs *regs);
+
 #endif
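All of the arch_* hooks above default to a NOOP through the #ifndef pattern:
an architecture overrides one by providing its own inline function and
defining a macro of the same name in a header that is pulled in ahead of
these defaults, so the generic fallback drops out. A hedged sketch of such an
override; arch_flush_branch_predictor() and the exact header location are
made up for illustration, not taken from any in-tree architecture:

/* Example override in an architecture-provided entry header. */
static __always_inline void arch_exit_to_user_mode(void)
{
	/* Last-minute, non-instrumentable work; no locking, no tracing. */
	arch_flush_branch_predictor();	/* hypothetical mitigation helper */
}
#define arch_exit_to_user_mode arch_exit_to_user_mode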

kernel/entry/common.c

Lines changed: 169 additions & 0 deletions
@@ -2,6 +2,8 @@
 
 #include <linux/context_tracking.h>
 #include <linux/entry-common.h>
+#include <linux/livepatch.h>
+#include <linux/audit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -82,7 +84,174 @@ noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
 	return syscall;
 }
 
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ *    mitigations, etc.
+ * 4) Tell lockdep that interrupts are enabled
+ */
+static __always_inline void exit_to_user_mode(void)
+{
+	instrumentation_begin();
+	trace_hardirqs_on_prepare();
+	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+	instrumentation_end();
+
+	user_enter_irqoff();
+	arch_exit_to_user_mode();
+	lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/* Workaround to allow gradual conversion of architecture code */
+void __weak arch_do_signal(struct pt_regs *regs) { }
+
+static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+					    unsigned long ti_work)
+{
+	/*
+	 * Before returning to user space ensure that all pending work
+	 * items have been completed.
+	 */
+	while (ti_work & EXIT_TO_USER_MODE_WORK) {
+
+		local_irq_enable_exit_to_user(ti_work);
+
+		if (ti_work & _TIF_NEED_RESCHED)
+			schedule();
+
+		if (ti_work & _TIF_UPROBE)
+			uprobe_notify_resume(regs);
+
+		if (ti_work & _TIF_PATCH_PENDING)
+			klp_update_patch_state(current);
+
+		if (ti_work & _TIF_SIGPENDING)
+			arch_do_signal(regs);
+
+		if (ti_work & _TIF_NOTIFY_RESUME) {
+			clear_thread_flag(TIF_NOTIFY_RESUME);
+			tracehook_notify_resume(regs);
+			rseq_handle_notify_resume(NULL, regs);
+		}
+
+		/* Architecture specific TIF work */
+		arch_exit_to_user_mode_work(regs, ti_work);
+
+		/*
+		 * Disable interrupts and reevaluate the work flags as they
+		 * might have changed while interrupts and preemption were
+		 * enabled above.
+		 */
+		local_irq_disable_exit_to_user();
+		ti_work = READ_ONCE(current_thread_info()->flags);
+	}
+
+	/* Return the latest work state for arch_exit_to_user_mode() */
+	return ti_work;
+}
+
+static void exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+
+	lockdep_assert_irqs_disabled();
+
+	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+		ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+	arch_exit_to_user_mode_prepare(regs, ti_work);
+
+	/* Ensure that the address limit is intact and no locks are held */
+	addr_limit_user_check();
+	lockdep_assert_irqs_disabled();
+	lockdep_sys_exit();
+}
+
+#ifndef _TIF_SINGLESTEP
+static inline bool report_single_step(unsigned long ti_work)
+{
+	return false;
+}
+#else
+/*
+ * If TIF_SYSCALL_EMU is set, then the only reason to report is when
+ * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
+ * instruction has already been reported in syscall_enter_from_usermode().
+ */
+#define SYSEMU_STEP	(_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)
+
+static inline bool report_single_step(unsigned long ti_work)
+{
+	return (ti_work & SYSEMU_STEP) == _TIF_SINGLESTEP;
+}
+#endif
+
+static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
+{
+	bool step;
+
+	audit_syscall_exit(regs);
+
+	if (ti_work & _TIF_SYSCALL_TRACEPOINT)
+		trace_sys_exit(regs, syscall_get_return_value(current, regs));
+
+	step = report_single_step(ti_work);
+	if (step || ti_work & _TIF_SYSCALL_TRACE)
+		arch_syscall_exit_tracehook(regs, step);
+}
+
+/*
+ * Syscall specific exit to user mode preparation. Runs with interrupts
+ * enabled.
+ */
+static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+	u32 cached_flags = READ_ONCE(current_thread_info()->flags);
+	unsigned long nr = syscall_get_nr(current, regs);
+
+	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+
+	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
+			local_irq_enable();
+	}
+
+	rseq_syscall(regs);
+
+	/*
+	 * Do one-time syscall specific work. If these work items are
+	 * enabled, we want to run them exactly once per syscall exit with
+	 * interrupts enabled.
+	 */
+	if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
+		syscall_exit_work(regs, cached_flags);
+}
+
+__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+	instrumentation_begin();
+	syscall_exit_to_user_mode_prepare(regs);
+	local_irq_disable_exit_to_user();
+	exit_to_user_mode_prepare(regs);
+	instrumentation_end();
+	exit_to_user_mode();
+}
+
 noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
 {
 	enter_from_user_mode(regs);
 }
+
+noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
+{
+	instrumentation_begin();
+	exit_to_user_mode_prepare(regs);
+	instrumentation_end();
+	exit_to_user_mode();
+}
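For completeness, a sketch of how an architecture's handler for an interrupt
taken from user mode could pair irqentry_enter_from_user_mode() with
irqentry_exit_to_user_mode(); arch_irq_from_user() and arch_handle_irq() are
illustrative names, not part of the generic entry API:

/* Hypothetical entry point for an interrupt that hit user mode. */
static void arch_irq_from_user(struct pt_regs *regs)
{
	irqentry_enter_from_user_mode(regs);
	arch_handle_irq(regs);		/* arch specific interrupt handling */
	/* Runs work items #2 and #3 only; returns with interrupts disabled. */
	irqentry_exit_to_user_mode(regs);
}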
