Skip to content

Commit 753b323

Browse files
mrutland-arm authored and willdeacon committed
arm64: extable: add load_unaligned_zeropad() handler
For inline assembly, we place exception fixups out-of-line in the `.fixup` section such that these are out of the way of the fast path. This has a few drawbacks: * Since the fixup code is anonymous, backtraces will symbolize fixups as offsets from the nearest prior symbol, currently `__entry_tramp_text_end`. This is confusing, and painful to debug without access to the relevant vmlinux. * Since the exception handler adjusts the PC to execute the fixup, and the fixup uses a direct branch back into the function it fixes, backtraces of fixups miss the original function. This is confusing, and violates requirements for RELIABLE_STACKTRACE (and therefore LIVEPATCH). * Inline assembly and associated fixups are generated from templates, and we have many copies of logically identical fixups which only differ in which specific registers are written to and which address is branched to at the end of the fixup. This is potentially wasteful of I-cache resources, and makes it hard to add additional logic to fixups without significant bloat. * In the case of load_unaligned_zeropad(), the logic in the fixup requires a temporary register that we must allocate even in the fast-path where it will not be used. This patch address all four concerns for load_unaligned_zeropad() fixups by adding a dedicated exception handler which performs the fixup logic in exception context and subsequent returns back after the faulting instruction. For the moment, the fixup logic is identical to the old assembly fixup logic, but in future we could enhance this by taking the ESR and FAR into account to constrain the faults we try to fix up, or to specialize fixups for MTE tag check faults. Other than backtracing, there should be no functional change as a result of this patch. 
Signed-off-by: Mark Rutland <[email protected]> Reviewed-by: Ard Biesheuvel <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: James Morse <[email protected]> Cc: Robin Murphy <[email protected]> Cc: Will Deacon <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent 2e77a62 commit 753b323

File tree

3 files changed

+48
-17
lines changed

3 files changed

+48
-17
lines changed

arch/arm64/include/asm/asm-extable.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#define EX_TYPE_FIXUP 1
77
#define EX_TYPE_BPF 2
88
#define EX_TYPE_UACCESS_ERR_ZERO 3
9+
#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
910

1011
#ifdef __ASSEMBLY__
1112

@@ -75,6 +76,20 @@
7576
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
7677
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
7778

/*
 * Layout of the per-entry "data" word for EX_TYPE_LOAD_UNALIGNED_ZEROPAD
 * extable entries: two 5-bit GPR indices packed into the low 10 bits.
 *   bits [4:0] — destination (data) register number
 *   bits [9:5] — address register number
 */
#define EX_DATA_REG_DATA_SHIFT	0
#define EX_DATA_REG_DATA	GENMASK(4, 0)
#define EX_DATA_REG_ADDR_SHIFT	5
#define EX_DATA_REG_ADDR	GENMASK(9, 5)

/*
 * Emit an extable entry for load_unaligned_zeropad(): on a fault at `insn`,
 * the handler performs the zero-pad fixup in exception context using the
 * encoded data/addr register numbers, then resumes at `fixup`.
 */
#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr)	\
	__DEFINE_ASM_GPR_NUMS						\
	__ASM_EXTABLE_RAW(#insn, #fixup,				\
			  __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD),	\
			  "("						\
			    EX_DATA_REG(DATA, data) " | "		\
			    EX_DATA_REG(ADDR, addr)			\
			  ")")
7893
#endif /* __ASSEMBLY__ */
7994

8095
#endif /* __ASM_ASM_EXTABLE_H */

arch/arm64/include/asm/word-at-a-time.h

Lines changed: 4 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -53,29 +53,16 @@ static inline unsigned long find_zero(unsigned long mask)
5353
*/
5454
static inline unsigned long load_unaligned_zeropad(const void *addr)
5555
{
56-
unsigned long ret, tmp;
56+
unsigned long ret;
5757

5858
__uaccess_enable_tco_async();
5959

6060
/* Load word from unaligned pointer addr */
6161
asm(
62-
"1: ldr %0, %3\n"
62+
"1: ldr %0, %2\n"
6363
"2:\n"
64-
" .pushsection .fixup,\"ax\"\n"
65-
" .align 2\n"
66-
"3: bic %1, %2, #0x7\n"
67-
" ldr %0, [%1]\n"
68-
" and %1, %2, #0x7\n"
69-
" lsl %1, %1, #0x3\n"
70-
#ifndef __AARCH64EB__
71-
" lsr %0, %0, %1\n"
72-
#else
73-
" lsl %0, %0, %1\n"
74-
#endif
75-
" b 2b\n"
76-
" .popsection\n"
77-
_ASM_EXTABLE(1b, 3b)
78-
: "=&r" (ret), "=&r" (tmp)
64+
_ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1)
65+
: "=&r" (ret)
7966
: "r" (addr), "Q" (*(unsigned long *)addr));
8067

8168
__uaccess_disable_tco_async();

arch/arm64/mm/extable.c

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,33 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
3939
return true;
4040
}
4141

42+
static bool
43+
ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
44+
struct pt_regs *regs)
45+
{
46+
int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->type);
47+
int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->type);
48+
unsigned long data, addr, offset;
49+
50+
addr = pt_regs_read_reg(regs, reg_addr);
51+
52+
offset = addr & 0x7UL;
53+
addr &= ~0x7UL;
54+
55+
data = *(unsigned long*)addr;
56+
57+
#ifndef __AARCH64EB__
58+
data >>= 8 * offset;
59+
#else
60+
data <<= 8 * offset;
61+
#endif
62+
63+
pt_regs_write_reg(regs, reg_data, data);
64+
65+
regs->pc = get_ex_fixup(ex);
66+
return true;
67+
}
68+
4269
bool fixup_exception(struct pt_regs *regs)
4370
{
4471
const struct exception_table_entry *ex;
@@ -54,6 +81,8 @@ bool fixup_exception(struct pt_regs *regs)
5481
return ex_handler_bpf(ex, regs);
5582
case EX_TYPE_UACCESS_ERR_ZERO:
5683
return ex_handler_uaccess_err_zero(ex, regs);
84+
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
85+
return ex_handler_load_unaligned_zeropad(ex, regs);
5786
}
5887

5988
BUG();

0 commit comments

Comments
 (0)