Skip to content

Commit 4d1c2ee

Browse files
mrutland-arm authored and ctmarinas committed
arm64: entry: move bulk of ret_to_user to C
In `ret_to_user` we perform some conditional work depending on the thread
flags, then perform some IRQ/context tracking which is intended to balance
with the IRQ/context tracking performed in the entry C code.

For simplicity and consistency, it would be preferable to move this all to
C. As a step towards that, this patch moves the conditional work and
IRQ/context tracking into a C helper function. To aid bisectability, this
is called from the `ret_to_user` assembly, and a subsequent patch will move
the call to C code.

As local_daif_mask() handles all necessary tracing and PMR manipulation, we
no longer need to handle this explicitly. As we call exit_to_user_mode()
directly, the `user_enter_irqoff` macro is no longer used, and can be
removed. As enter_from_user_mode() and exit_to_user_mode() are no longer
called from assembly, these can be made static, and as these are typically
very small, they are marked __always_inline to avoid the overhead of a
function call.

For now, enablement of single-step is left in entry.S, and for this we
still need to read the flags in ret_to_user(). It is safe to read this
separately as TIF_SINGLESTEP is not part of _TIF_WORK_MASK.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <[email protected]>
Cc: James Morse <[email protected]>
Cc: Joey Gouly <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Will Deacon <[email protected]>
Reviewed-by: Joey Gouly <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
[[email protected]: removed unused gic_prio_kentry_setup macro]
Signed-off-by: Catalin Marinas <[email protected]>
1 parent bc29b71 commit 4d1c2ee

File tree

4 files changed

+26
-51
lines changed

4 files changed

+26
-51
lines changed

arch/arm64/include/asm/exception.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,8 @@ asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
5555

5656
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
5757
void (*func)(struct pt_regs *));
58-
asmlinkage void enter_from_user_mode(void);
59-
asmlinkage void exit_to_user_mode(void);
58+
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
59+
6060
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
6161
void do_undefinstr(struct pt_regs *regs);
6262
void do_bti(struct pt_regs *regs);
@@ -73,6 +73,7 @@ void do_el0_svc(struct pt_regs *regs);
7373
void do_el0_svc_compat(struct pt_regs *regs);
7474
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
7575
void do_serror(struct pt_regs *regs, unsigned int esr);
76+
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
7677

7778
void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
7879
#endif /* __ASM_EXCEPTION_H */

arch/arm64/kernel/entry-common.c

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ static __always_inline void __enter_from_user_mode(void)
104104
trace_hardirqs_off_finish();
105105
}
106106

107-
asmlinkage void noinstr enter_from_user_mode(void)
107+
static __always_inline void enter_from_user_mode(void)
108108
{
109109
__enter_from_user_mode();
110110
}
@@ -123,12 +123,29 @@ static __always_inline void __exit_to_user_mode(void)
123123
lockdep_hardirqs_on(CALLER_ADDR0);
124124
}
125125

126-
asmlinkage void noinstr exit_to_user_mode(void)
126+
static __always_inline void exit_to_user_mode(void)
127127
{
128128
mte_check_tfsr_exit();
129129
__exit_to_user_mode();
130130
}
131131

132+
static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
133+
{
134+
unsigned long flags;
135+
136+
local_daif_mask();
137+
138+
flags = READ_ONCE(current_thread_info()->flags);
139+
if (unlikely(flags & _TIF_WORK_MASK))
140+
do_notify_resume(regs, flags);
141+
}
142+
143+
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
144+
{
145+
prepare_exit_to_user_mode(regs);
146+
exit_to_user_mode();
147+
}
148+
132149
/*
133150
* Handle IRQ/context state management when entering an NMI from user/kernel
134151
* mode. Before this function is called it is not safe to call regular kernel

arch/arm64/kernel/entry.S

Lines changed: 3 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -29,16 +29,6 @@
2929
#include <asm/asm-uaccess.h>
3030
#include <asm/unistd.h>
3131

32-
/*
33-
* Context tracking and irqflag tracing need to instrument transitions between
34-
* user and kernel mode.
35-
*/
36-
.macro user_enter_irqoff
37-
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
38-
bl exit_to_user_mode
39-
#endif
40-
.endm
41-
4232
.macro clear_gp_regs
4333
.irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
4434
mov x\n, xzr
@@ -474,18 +464,6 @@ SYM_CODE_END(__swpan_exit_el0)
474464
/* GPRs used by entry code */
475465
tsk .req x28 // current thread_info
476466

477-
/*
478-
* Interrupt handling.
479-
*/
480-
.macro gic_prio_kentry_setup, tmp:req
481-
#ifdef CONFIG_ARM64_PSEUDO_NMI
482-
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
483-
mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
484-
msr_s SYS_ICC_PMR_EL1, \tmp
485-
alternative_else_nop_endif
486-
#endif
487-
.endm
488-
489467
.text
490468

491469
/*
@@ -585,37 +563,17 @@ SYM_CODE_START_LOCAL(ret_to_kernel)
585563
kernel_exit 1
586564
SYM_CODE_END(ret_to_kernel)
587565

588-
/*
589-
* "slow" syscall return path.
590-
*/
591566
SYM_CODE_START_LOCAL(ret_to_user)
592-
disable_daif
593-
gic_prio_kentry_setup tmp=x3
594-
#ifdef CONFIG_TRACE_IRQFLAGS
595-
bl trace_hardirqs_off
596-
#endif
597-
ldr x19, [tsk, #TSK_TI_FLAGS]
598-
and x2, x19, #_TIF_WORK_MASK
599-
cbnz x2, work_pending
600-
finish_ret_to_user:
601-
user_enter_irqoff
567+
mov x0, sp
568+
bl asm_exit_to_user_mode
602569
/* Ignore asynchronous tag check faults in the uaccess routines */
603570
clear_mte_async_tcf
571+
ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
604572
enable_step_tsk x19, x2
605573
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
606574
bl stackleak_erase
607575
#endif
608576
kernel_exit 0
609-
610-
/*
611-
* Ok, we need to do extra processing, enter the slow path.
612-
*/
613-
work_pending:
614-
mov x0, sp // 'regs'
615-
mov x1, x19
616-
bl do_notify_resume
617-
ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
618-
b finish_ret_to_user
619577
SYM_CODE_END(ret_to_user)
620578

621579
.popsection // .entry.text

arch/arm64/kernel/signal.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -924,8 +924,7 @@ static bool cpu_affinity_invalid(struct pt_regs *regs)
924924
system_32bit_el0_cpumask());
925925
}
926926

927-
asmlinkage void do_notify_resume(struct pt_regs *regs,
928-
unsigned long thread_flags)
927+
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
929928
{
930929
do {
931930
if (thread_flags & _TIF_NEED_RESCHED) {

0 commit comments

Comments
 (0)