
Commit 0ac7584

Ada Couprie Diaz authored and willdeacon committed
arm64: debug: split single stepping exception entry
Currently all debug exceptions share common entry code and are routed to `do_debug_exception()`, which calls dynamically-registered handlers for each specific debug exception. This is unfortunate, as different debug exceptions have different entry handling requirements, and it would be better to handle these distinct requirements earlier.

The single-step exception has the most constraints: it can be exploited to train branch predictors and it needs special handling at EL1 for the Cortex-A76 erratum #1463225. We need to preserve all those mitigations. However, it does not write an address to FAR_EL1, as only hardware watchpoints do so.

The single-step handler does its own signaling if it needs to and only returns 0, so we can call it directly from `entry-common.c`. Split the single-step exception entry, adjust the function signature, and keep the security mitigation and erratum handling.

Further, as the EL0 and EL1 code paths are cleanly separated, we can split `do_softstep()` into `do_el0_softstep()` and `do_el1_softstep()` and call them directly from the relevant entry paths. We can also remove `NOKPROBE_SYMBOL` for the EL0 path, as it cannot lead to a kprobe recursion.

Move the call to `arm64_apply_bp_hardening()` to `entry-common.c` so that it happens as early as possible, and only for exceptions coming from EL0, where it is needed. This is safe to do as it is `noinstr`, as are all the functions it may call; `el0_ia()` and `el0_pc()` already call it this way.

When taking a soft-step exception from EL0, most of the single-stepping handling is safely preemptible: the only possible handler is `uprobe_single_step_handler()`. It only operates on task-local data and properly checks its validity, then raises a Thread Information Flag, processed before returning to userspace in `do_notify_resume()`, which is already preemptible. However, the soft-step handler first calls `reinstall_suspended_bps()` to check if there is any hardware breakpoint or watchpoint pending or already stepped through. This cannot be preempted, as it manipulates the hardware breakpoint and watchpoint registers.

Move the call to `try_step_suspended_breakpoints()` to `entry-common.c` and adjust the relevant comments. We can now safely unmask interrupts before handling the step itself, fixing a PREEMPT_RT issue where the handler could call a sleeping function with preemption disabled.

Signed-off-by: Ada Couprie Diaz <[email protected]>
Closes: https://lore.kernel.org/linux-arm-kernel/[email protected]/
Tested-by: Luis Claudio R. Goncalves <[email protected]>
Reviewed-by: Will Deacon <[email protected]>
Acked-by: Mark Rutland <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
1 parent 80691d3 commit 0ac7584
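As a reading aid, the sketch below restates the new EL0 single-step entry path with comments tying each step back to the reasoning in the commit message. The `entry-common.c` hunk further down is the authoritative change; this is only a simplified annotation of it.

```c
/*
 * Simplified, comment-annotated sketch of the EL0 single-step entry added
 * by this patch (see the arch/arm64/kernel/entry-common.c hunk below for
 * the actual code); the comments map each step to the commit message.
 */
static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
{
	/*
	 * Branch-predictor hardening is applied as early as possible, and
	 * only for exceptions coming from EL0, mirroring el0_ia()/el0_pc().
	 */
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);

	/*
	 * Stepping a suspended hardware breakpoint/watchpoint manipulates
	 * the debug registers, so it must happen before interrupts are
	 * unmasked (i.e. while preemption is still impossible).
	 */
	if (!try_step_suspended_breakpoints(regs)) {
		/*
		 * The remaining handling (uprobe_single_step_handler() via
		 * do_el0_softstep()) only touches task-local state, so it is
		 * safe to unmask interrupts first; this is what fixes the
		 * PREEMPT_RT issue of sleeping with preemption disabled.
		 */
		local_daif_restore(DAIF_PROCCTX);
		do_el0_softstep(esr, regs);
	}
	exit_to_user_mode(regs);
}
```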

File tree

4 files changed (+73, -47 lines)


arch/arm64/include/asm/exception.h

Lines changed: 2 additions & 0 deletions
```diff
@@ -66,6 +66,8 @@ void do_breakpoint(unsigned long esr, struct pt_regs *regs);
 #else
 static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
+void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
+void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
 void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
 void do_sve_acc(unsigned long esr, struct pt_regs *regs);
 void do_sme_acc(unsigned long esr, struct pt_regs *regs);
```

arch/arm64/kernel/debug-monitors.c

Lines changed: 27 additions & 46 deletions
```diff
@@ -21,6 +21,7 @@
 #include <asm/cputype.h>
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
+#include <asm/exception.h>
 #include <asm/kgdb.h>
 #include <asm/kprobes.h>
 #include <asm/system_misc.h>
@@ -159,21 +160,6 @@ NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
 #define set_regs_spsr_ss(r)     set_user_regs_spsr_ss(&(r)->user_regs)
 #define clear_regs_spsr_ss(r)   clear_user_regs_spsr_ss(&(r)->user_regs)
 
-/*
- * Call single step handlers
- * There is no Syndrome info to check for determining the handler.
- * However, there is only one possible handler for user and kernel modes, so
- * check and call the appropriate one.
- */
-static int call_step_hook(struct pt_regs *regs, unsigned long esr)
-{
-        if (user_mode(regs))
-                return uprobe_single_step_handler(regs, esr);
-
-        return kgdb_single_step_handler(regs, esr);
-}
-NOKPROBE_SYMBOL(call_step_hook);
-
 static void send_user_sigtrap(int si_code)
 {
         struct pt_regs *regs = current_pt_regs();
@@ -188,41 +174,38 @@ static void send_user_sigtrap(int si_code)
                               "User debug trap");
 }
 
-static int single_step_handler(unsigned long unused, unsigned long esr,
-                               struct pt_regs *regs)
+/*
+ * We have already unmasked interrupts and enabled preemption
+ * when calling do_el0_softstep() from entry-common.c.
+ */
+void do_el0_softstep(unsigned long esr, struct pt_regs *regs)
 {
+        if (uprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+                return;
+
+        send_user_sigtrap(TRAP_TRACE);
         /*
-         * If we are stepping a pending breakpoint, call the hw_breakpoint
-         * handler first.
+         * ptrace will disable single step unless explicitly
+         * asked to re-enable it. For other clients, it makes
+         * sense to leave it enabled (i.e. rewind the controls
+         * to the active-not-pending state).
          */
-        if (try_step_suspended_breakpoints(regs))
-                return 0;
-
-        if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
-                return 0;
+        user_rewind_single_step(current);
+}
 
-        if (user_mode(regs)) {
-                send_user_sigtrap(TRAP_TRACE);
-
-                /*
-                 * ptrace will disable single step unless explicitly
-                 * asked to re-enable it. For other clients, it makes
-                 * sense to leave it enabled (i.e. rewind the controls
-                 * to the active-not-pending state).
-                 */
-                user_rewind_single_step(current);
-        } else {
-                pr_warn("Unexpected kernel single-step exception at EL1\n");
-                /*
-                 * Re-enable stepping since we know that we will be
-                 * returning to regs.
-                 */
-                set_regs_spsr_ss(regs);
-        }
+void do_el1_softstep(unsigned long esr, struct pt_regs *regs)
+{
+        if (kgdb_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+                return;
 
-        return 0;
+        pr_warn("Unexpected kernel single-step exception at EL1\n");
+        /*
+         * Re-enable stepping since we know that we will be
+         * returning to regs.
+         */
+        set_regs_spsr_ss(regs);
 }
-NOKPROBE_SYMBOL(single_step_handler);
+NOKPROBE_SYMBOL(do_el1_softstep);
 
 static int call_break_hook(struct pt_regs *regs, unsigned long esr)
 {
@@ -329,8 +312,6 @@ NOKPROBE_SYMBOL(try_handle_aarch32_break);
 
 void __init debug_traps_init(void)
 {
-        hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
-                              TRAP_TRACE, "single-step handler");
         hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
                               TRAP_BRKPT, "BRK handler");
 }
```

arch/arm64/kernel/entry-common.c

Lines changed: 43 additions & 0 deletions
```diff
@@ -535,6 +535,24 @@ static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
         arm64_exit_el1_dbg(regs);
 }
 
+static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
+{
+        arm64_enter_el1_dbg(regs);
+        if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
+                debug_exception_enter(regs);
+                /*
+                 * After handling a breakpoint, we suspend the breakpoint
+                 * and use single-step to move to the next instruction.
+                 * If we are stepping a suspended breakpoint there's nothing more to do:
+                 * the single-step is complete.
+                 */
+                if (!try_step_suspended_breakpoints(regs))
+                        do_el1_softstep(esr, regs);
+                debug_exception_exit(regs);
+        }
+        arm64_exit_el1_dbg(regs);
+}
+
 static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
 {
         unsigned long far = read_sysreg(far_el1);
@@ -587,6 +605,8 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
                 el1_breakpt(regs, esr);
                 break;
         case ESR_ELx_EC_SOFTSTP_CUR:
+                el1_softstp(regs, esr);
+                break;
         case ESR_ELx_EC_WATCHPT_CUR:
         case ESR_ELx_EC_BRK64:
                 el1_dbg(regs, esr);
@@ -793,6 +813,25 @@ static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
         exit_to_user_mode(regs);
 }
 
+static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
+{
+        if (!is_ttbr0_addr(regs->pc))
+                arm64_apply_bp_hardening();
+
+        enter_from_user_mode(regs);
+        /*
+         * After handling a breakpoint, we suspend the breakpoint
+         * and use single-step to move to the next instruction.
+         * If we are stepping a suspended breakpoint there's nothing more to do:
+         * the single-step is complete.
+         */
+        if (!try_step_suspended_breakpoints(regs)) {
+                local_daif_restore(DAIF_PROCCTX);
+                do_el0_softstep(esr, regs);
+        }
+        exit_to_user_mode(regs);
+}
+
 static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 {
         /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
@@ -875,6 +914,8 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
                 el0_breakpt(regs, esr);
                 break;
         case ESR_ELx_EC_SOFTSTP_LOW:
+                el0_softstp(regs, esr);
+                break;
         case ESR_ELx_EC_WATCHPT_LOW:
         case ESR_ELx_EC_BRK64:
                 el0_dbg(regs, esr);
@@ -997,6 +1038,8 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
                 el0_breakpt(regs, esr);
                 break;
         case ESR_ELx_EC_SOFTSTP_LOW:
+                el0_softstp(regs, esr);
+                break;
         case ESR_ELx_EC_WATCHPT_LOW:
         case ESR_ELx_EC_BKPT32:
                 el0_dbg(regs, esr);
```

arch/arm64/kernel/hw_breakpoint.c

Lines changed: 1 addition & 1 deletion
```diff
@@ -854,7 +854,7 @@ bool try_step_suspended_breakpoints(struct pt_regs *regs)
         bool handled_exception = false;
 
         /*
-         * Called from single-step exception handler.
+         * Called from single-step exception entry.
          * Return true if we stepped a breakpoint and can resume execution,
          * false if we need to handle a single-step.
          */
```
