@@ -184,29 +184,40 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_mem_asm(load, reg, x, addr, err, type)			\
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_mem_asm(load, reg, x, addr, label, type)			\
+	asm_goto_output(						\
+	"1:	" load "	" reg "0, [%1]\n"			\
+	_ASM_EXTABLE_##type##ACCESS_ERR(1b, %l2, %w0)			\
+	: "=r" (x)							\
+	: "r" (addr) : : label)
+#else
+#define __get_mem_asm(load, reg, x, addr, label, type) do {		\
+	int __gma_err = 0;						\
 	asm volatile(							\
 	"1:	" load "	" reg "1, [%2]\n"			\
 	"2:\n"								\
 	_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)		\
-	: "+r" (err), "=r" (x)						\
-	: "r" (addr))
+	: "+r" (__gma_err), "=r" (x)					\
+	: "r" (addr));							\
+	if (__gma_err) goto label; } while (0)
+#endif
 
-#define __raw_get_mem(ldr, x, ptr, err, type)				\
+#define __raw_get_mem(ldr, x, ptr, label, type)			\
 do {									\
 	unsigned long __gu_val;						\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type);	\
 		break;							\
 	case 2:								\
-		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type);	\
 		break;							\
 	case 4:								\
-		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type);	\
 		break;							\
 	case 8:								\
-		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type);	\
+		__get_mem_asm(ldr, "%x", __gu_val, (ptr), label, type);	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
@@ -219,27 +230,34 @@ do { \
  * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
  * we must evaluate these outside of the critical section.
  */
-#define __raw_get_user(x, ptr, err)					\
+#define __raw_get_user(x, ptr, label)					\
 do {									\
 	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
 	__typeof__(x) __rgu_val;					\
 	__chk_user_ptr(ptr);						\
-									\
-	uaccess_ttbr0_enable();						\
-	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U);		\
-	uaccess_ttbr0_disable();					\
-									\
-	(x) = __rgu_val;						\
+	do {								\
+		__label__ __rgu_failed;					\
+		uaccess_ttbr0_enable();					\
+		__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U); \
+		uaccess_ttbr0_disable();				\
+		(x) = __rgu_val;					\
+		break;							\
+	__rgu_failed:							\
+		uaccess_ttbr0_disable();				\
+		goto label;						\
+	} while (0);							\
 } while (0)
 
 #define __get_user_error(x, ptr, err)					\
 do {									\
+	__label__ __gu_failed;						\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
 	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
-		__raw_get_user((x), __p, (err));			\
+		__raw_get_user((x), __p, __gu_failed);			\
 	} else {							\
+	__gu_failed:							\
 		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
 	}								\
 } while (0)
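
The nested do { __label__ ...; ...; break; ...: cleanup; goto label; } while (0) shape above is the core trick of the conversion: uaccess_ttbr0_disable() must run on both the success path and the fault path before control leaves the macro, and the GNU C local label keeps multiple expansions in one function from colliding. A minimal user-space sketch of the same idiom (GCC/Clang; begin_access()/end_access() and every other name here are illustrative, not from the patch):

#include <stdio.h>

static void begin_access(void) { puts("enable access"); }
static void end_access(void)   { puts("disable access"); }

/*
 * Like __raw_get_user after this patch: the __label__ is scoped to the
 * nested do-while, cleanup runs on both paths, and the NULL check
 * stands in for a faulting load.
 */
#define read_with_cleanup(dst, src, label)			\
do {								\
	__label__ __failed;					\
	begin_access();						\
	if (!(src))						\
		goto __failed;					\
	(dst) = *(src);						\
	end_access();						\
	break;							\
__failed:							\
	end_access();						\
	goto label;						\
} while (0)

int main(void)
{
	int v = 5, out = 0;

	read_with_cleanup(out, &v, fault);		/* succeeds */
	printf("ok: %d\n", out);

	read_with_cleanup(out, (int *)NULL, fault);	/* takes the label */
	return 0;
fault:
	puts("fault path: cleanup already ran");
	return 1;
}

The break skips the failure block on success; the only way to reach the local label is the explicit goto, which models the exception-table branch in the real macros.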
@@ -262,40 +280,42 @@ do { \
 do {									\
 	__typeof__(dst) __gkn_dst = (dst);				\
 	__typeof__(src) __gkn_src = (src);				\
-	int __gkn_err = 0;						\
-									\
-	__mte_enable_tco_async();					\
-	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
-		      (__force type *)(__gkn_src), __gkn_err, K);	\
-	__mte_disable_tco_async();					\
+	do {								\
+		__label__ __gkn_label;					\
 									\
-	if (unlikely(__gkn_err))					\
+		__mte_enable_tco_async();				\
+		__raw_get_mem("ldr", *((type *)(__gkn_dst)),		\
+			      (__force type *)(__gkn_src), __gkn_label, K); \
+		__mte_disable_tco_async();				\
+		break;							\
+	__gkn_label:							\
+		__mte_disable_tco_async();				\
 		goto err_label;						\
+	} while (0);							\
 } while (0)
 
-#define __put_mem_asm(store, reg, x, addr, err, type)			\
-	asm volatile(							\
-	"1:	" store "	" reg "1, [%2]\n"			\
+#define __put_mem_asm(store, reg, x, addr, label, type)		\
+	asm goto(							\
+	"1:	" store "	" reg "0, [%1]\n"			\
 	"2:\n"								\
-	_ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0)			\
-	: "+r" (err)							\
-	: "rZ" (x), "r" (addr))
+	_ASM_EXTABLE_##type##ACCESS(1b, %l2)				\
+	: : "rZ" (x), "r" (addr) : : label)
 
-#define __raw_put_mem(str, x, ptr, err, type)				\
+#define __raw_put_mem(str, x, ptr, label, type)			\
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type);	\
 		break;							\
 	case 2:								\
-		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type);	\
 		break;							\
 	case 4:								\
-		__put_mem_asm(str, "%w", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str, "%w", __pu_val, (ptr), label, type);	\
 		break;							\
 	case 8:								\
-		__put_mem_asm(str, "%x", __pu_val, (ptr), (err), type);	\
+		__put_mem_asm(str, "%x", __pu_val, (ptr), label, type);	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
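
__put_mem_asm can switch to plain asm goto because a store has no output operand: on a fault, the exception-table entry (_ASM_EXTABLE_##type##ACCESS(1b, %l2)) redirects execution straight to the caller's label, so no error register exists at all. The get side needs asm_goto_output (hence the CONFIG_CC_HAS_ASM_GOTO_OUTPUT split) because it must also produce a value. A toy user-space sketch of the control flow, assuming an arm64 GCC or Clang toolchain; an explicit cbnz stands in for the exception-table branch and every name is made up:

#include <stdio.h>

/*
 * Rough analogue of __put_mem_asm after the conversion: the asm either
 * performs the store or transfers control to the caller's label. There
 * are no outputs, so plain "asm goto" suffices.
 */
#define store_or_goto(val, addr, fail, label)				\
	asm goto("cbnz	%w[f], %l[" #label "]\n\t"			\
		 "str	%w[v], [%[a]]"					\
		 : /* no output operands with plain asm goto */		\
		 : [v] "r" (val), [a] "r" (addr), [f] "r" (fail)	\
		 : "memory"						\
		 : label)

int main(void)
{
	int x = 0;

	store_or_goto(42, &x, 0, fault);	/* the store happens */
	printf("stored %d\n", x);

	store_or_goto(7, &x, 1, fault);		/* simulated fault */
	puts("not reached");
	return 0;
fault:
	printf("branched to the fault label, x = %d\n", x);
	return 1;
}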
@@ -307,25 +327,34 @@ do { \
  * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
  * we must evaluate these outside of the critical section.
  */
-#define __raw_put_user(x, ptr, err)					\
+#define __raw_put_user(x, ptr, label)					\
 do {									\
+	__label__ __rpu_failed;						\
 	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
 	__typeof__(*(ptr)) __rpu_val = (x);				\
 	__chk_user_ptr(__rpu_ptr);					\
 									\
-	uaccess_ttbr0_enable();						\
-	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U);		\
-	uaccess_ttbr0_disable();					\
+	do {								\
+		uaccess_ttbr0_enable();					\
+		__raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U); \
+		uaccess_ttbr0_disable();				\
+		break;							\
+	__rpu_failed:							\
+		uaccess_ttbr0_disable();				\
+		goto label;						\
+	} while (0);							\
 } while (0)
 
 #define __put_user_error(x, ptr, err)					\
 do {									\
+	__label__ __pu_failed;						\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
 	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
-		__raw_put_user((x), __p, (err));			\
+		__raw_put_user((x), __p, __pu_failed);			\
 	} else {							\
+	__pu_failed:							\
 		(err) = -EFAULT;					\
 	}								\
 } while (0)
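
__put_user_error deliberately keeps its old err-accumulating interface on top of the label-based store: the local __pu_failed label turns a fault branch back into (err) = -EFAULT. A hedged kernel-context sketch (not standalone) of the caller style this preserves, loosely modelled on the arm64 signal-frame code; the struct and function names are invented:

/* Kernel-context sketch: err collects -EFAULT across several stores
 * and is checked once at the end. */
struct example_frame {
	unsigned long pc;
	unsigned long flags;
};

static int example_setup_frame(struct example_frame __user *f,
			       unsigned long pc)
{
	int err = 0;

	__put_user_error(pc, &f->pc, err);
	__put_user_error(0UL, &f->flags, err);

	return err;	/* 0 on success, -EFAULT if any store faulted */
}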
@@ -348,15 +377,18 @@ do { \
 do {									\
 	__typeof__(dst) __pkn_dst = (dst);				\
 	__typeof__(src) __pkn_src = (src);				\
-	int __pkn_err = 0;						\
 									\
-	__mte_enable_tco_async();					\
-	__raw_put_mem("str", *((type *)(__pkn_src)),			\
-		      (__force type *)(__pkn_dst), __pkn_err, K);	\
-	__mte_disable_tco_async();					\
-									\
-	if (unlikely(__pkn_err))					\
+	do {								\
+		__label__ __pkn_err;					\
+		__mte_enable_tco_async();				\
+		__raw_put_mem("str", *((type *)(__pkn_src)),		\
+			      (__force type *)(__pkn_dst), __pkn_err, K); \
+		__mte_disable_tco_async();				\
+		break;							\
+	__pkn_err:							\
+		__mte_disable_tco_async();				\
 		goto err_label;						\
+	} while (0);							\
 } while(0)
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
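
As on the get side, a fault in the nofault store (including an asynchronous MTE tag-check fault, which is why __mte_disable_tco_async() appears on both exit paths) now lands on the caller's err_label directly. A hedged kernel-context sketch (not standalone) of how a caller supplies that label, in the style of the generic helpers in mm/maccess.c; the function name is invented:

/* Kernel-context sketch, not standalone. */
static long example_write_nofault(void *dst, long val)
{
	/* Store a long without taking a page fault; bail out on error. */
	__put_kernel_nofault(dst, &val, long, Efault);
	return 0;
Efault:
	return -EFAULT;
}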
@@ -381,6 +413,51 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
 	__actu_ret;							\
 })
 
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return 0;
+	uaccess_ttbr0_enable();
+	return 1;
+}
+#define user_access_begin(a, b)	user_access_begin(a, b)
+#define user_access_end()	uaccess_ttbr0_disable()
+#define unsafe_put_user(x, ptr, label) \
+	__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
+#define unsafe_get_user(x, ptr, label) \
+	__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
+
+/*
+ * KCSAN uses these to save and restore ttbr state.
+ * We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
+ * they are no-ops.
+ */
+static inline unsigned long user_access_save(void) { return 0; }
+static inline void user_access_restore(unsigned long enabled) { }
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label)			\
+	while (len >= sizeof(type)) {					\
+		unsafe_put_user(*(type *)(src), (type __user *)(dst), label); \
+		dst += sizeof(type);					\
+		src += sizeof(type);					\
+		len -= sizeof(type);					\
+	}
+
+#define unsafe_copy_to_user(_dst, _src, _len, label)			\
+do {									\
+	char __user *__ucu_dst = (_dst);				\
+	const char *__ucu_src = (_src);					\
+	size_t __ucu_len = (_len);					\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
+} while (0)
+
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
 
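Taken together, user_access_begin()/user_access_end() and the unsafe accessors give arm64 the generic unsafe-uaccess contract: open one access window, perform label-based accesses, and close the window on both exit paths. A hedged kernel-context sketch (not standalone) of a typical caller; the struct layout and names are invented for illustration:

/* Kernel-context sketch, not standalone. */
struct example_uapi {
	u32 a;
	u64 b;
};

static int example_fill(struct example_uapi __user *up, u32 a, u64 b)
{
	if (!user_access_begin(up, sizeof(*up)))
		return -EFAULT;

	unsafe_put_user(a, &up->a, Efault);
	unsafe_put_user(b, &up->b, Efault);
	user_access_end();
	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}

Batching the two stores inside one user_access_begin() window is the point of the API: the access-enable/disable cost is paid once instead of per store, and a fault on either unsafe_put_user() still closes the window before returning.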