Skip to content

Commit 7fd298d

Browse files
committed
arm64: start using 'asm goto' for put_user()
This generates noticeably better code since we don't need to test the error register etc, the exception just jumps to the error handling directly. Unlike get_user(), there's no need to worry about old compilers. All supported compilers support the regular non-output 'asm goto', as pointed out by Nathan Chancellor. Signed-off-by: Linus Torvalds <[email protected]>
1 parent 86a6a68 commit 7fd298d

File tree

2 files changed

+39
-34
lines changed

2 files changed

+39
-34
lines changed

arch/arm64/include/asm/asm-extable.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,9 @@
112112
#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err) \
113113
_ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
114114

115+
#define _ASM_EXTABLE_KACCESS(insn, fixup) \
116+
_ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
117+
115118
#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
116119
__DEFINE_ASM_GPR_NUMS \
117120
__ASM_EXTABLE_RAW(#insn, #fixup, \

arch/arm64/include/asm/uaccess.h

Lines changed: 36 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -294,29 +294,28 @@ do { \
294294
} while (0); \
295295
} while (0)
296296

297-
#define __put_mem_asm(store, reg, x, addr, err, type) \
298-
asm volatile( \
299-
"1: " store " " reg "1, [%2]\n" \
297+
#define __put_mem_asm(store, reg, x, addr, label, type) \
298+
asm goto( \
299+
"1: " store " " reg "0, [%1]\n" \
300300
"2:\n" \
301-
_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
302-
: "+r" (err) \
303-
: "rZ" (x), "r" (addr))
301+
_ASM_EXTABLE_##type##ACCESS(1b, %l2) \
302+
: : "rZ" (x), "r" (addr) : : label)
304303

305-
#define __raw_put_mem(str, x, ptr, err, type) \
304+
#define __raw_put_mem(str, x, ptr, label, type) \
306305
do { \
307306
__typeof__(*(ptr)) __pu_val = (x); \
308307
switch (sizeof(*(ptr))) { \
309308
case 1: \
310-
__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \
309+
__put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type); \
311310
break; \
312311
case 2: \
313-
__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \
312+
__put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type); \
314313
break; \
315314
case 4: \
316-
__put_mem_asm(str, "%w", __pu_val, (ptr), (err), type); \
315+
__put_mem_asm(str, "%w", __pu_val, (ptr), label, type); \
317316
break; \
318317
case 8: \
319-
__put_mem_asm(str, "%x", __pu_val, (ptr), (err), type); \
318+
__put_mem_asm(str, "%x", __pu_val, (ptr), label, type); \
320319
break; \
321320
default: \
322321
BUILD_BUG(); \
@@ -328,25 +327,34 @@ do { \
328327
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
329328
* we must evaluate these outside of the critical section.
330329
*/
331-
#define __raw_put_user(x, ptr, err) \
330+
#define __raw_put_user(x, ptr, label) \
332331
do { \
332+
__label__ __rpu_failed; \
333333
__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
334334
__typeof__(*(ptr)) __rpu_val = (x); \
335335
__chk_user_ptr(__rpu_ptr); \
336336
\
337-
uaccess_ttbr0_enable(); \
338-
__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U); \
339-
uaccess_ttbr0_disable(); \
337+
do { \
338+
uaccess_ttbr0_enable(); \
339+
__raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U); \
340+
uaccess_ttbr0_disable(); \
341+
break; \
342+
__rpu_failed: \
343+
uaccess_ttbr0_disable(); \
344+
goto label; \
345+
} while (0); \
340346
} while (0)
341347

342348
#define __put_user_error(x, ptr, err) \
343349
do { \
350+
__label__ __pu_failed; \
344351
__typeof__(*(ptr)) __user *__p = (ptr); \
345352
might_fault(); \
346353
if (access_ok(__p, sizeof(*__p))) { \
347354
__p = uaccess_mask_ptr(__p); \
348-
__raw_put_user((x), __p, (err)); \
355+
__raw_put_user((x), __p, __pu_failed); \
349356
} else { \
357+
__pu_failed: \
350358
(err) = -EFAULT; \
351359
} \
352360
} while (0)
@@ -369,15 +377,18 @@ do { \
369377
do { \
370378
__typeof__(dst) __pkn_dst = (dst); \
371379
__typeof__(src) __pkn_src = (src); \
372-
int __pkn_err = 0; \
373-
\
374-
__mte_enable_tco_async(); \
375-
__raw_put_mem("str", *((type *)(__pkn_src)), \
376-
(__force type *)(__pkn_dst), __pkn_err, K); \
377-
__mte_disable_tco_async(); \
378380
\
379-
if (unlikely(__pkn_err)) \
381+
do { \
382+
__label__ __pkn_err; \
383+
__mte_enable_tco_async(); \
384+
__raw_put_mem("str", *((type *)(__pkn_src)), \
385+
(__force type *)(__pkn_dst), __pkn_err, K); \
386+
__mte_disable_tco_async(); \
387+
break; \
388+
__pkn_err: \
389+
__mte_disable_tco_async(); \
380390
goto err_label; \
391+
} while (0); \
381392
} while(0)
382393

383394
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
@@ -411,17 +422,8 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
411422
}
412423
#define user_access_begin(a,b) user_access_begin(a,b)
413424
#define user_access_end() uaccess_ttbr0_disable()
414-
415-
/*
416-
* The arm64 inline asms should learn about asm goto, and we should
417-
* teach user_access_begin() about address masking.
418-
*/
419-
#define unsafe_put_user(x, ptr, label) do { \
420-
int __upu_err = 0; \
421-
__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), __upu_err, U); \
422-
if (__upu_err) goto label; \
423-
} while (0)
424-
425+
#define unsafe_put_user(x, ptr, label) \
426+
__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
425427
#define unsafe_get_user(x, ptr, label) \
426428
__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
427429

0 commit comments

Comments
 (0)