@@ -61,6 +61,19 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigne
61
61
/*
 * Close the kernel's window onto user memory: clear SR_SUM in the
 * sstatus CSR (csrc = CSR clear bits).  The "memory" clobber keeps the
 * compiler from moving user-memory accesses across this point.
 *
 * NOTE: this must be a function-like macro — a space before the "()"
 * would make it object-like and break every __disable_user_access()
 * call site.
 */
#define __disable_user_access()						\
	__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
63
63
64
/*
 * __inttype(x): the smallest unsigned integer type that can fit a value
 * of x's type (up to 'long long').  Used to declare a scalar temporary
 * wide enough for any user-space load.
 *
 * Implemented as a chain of __typefits() probes: the first type at
 * least as wide as x wins; otherwise we fall through to 0ULL.
 * (Both must be function-like macros — no space before the parameter
 * list, or they degrade to broken object-like macros.)
 */
#define __inttype(x) __typeof__(		\
	__typefits(x, char,			\
	  __typefits(x, short,			\
	    __typefits(x, int,			\
	      __typefits(x, long, 0ULL)))))

/*
 * Evaluates (at compile time) to an expression of type 'unsigned type'
 * when x fits in 'type', and to 'not' otherwise.
 */
#define __typefits(x, type, not) \
	__builtin_choose_expr(sizeof(x) <= sizeof(type), (unsigned type)0, not)
76
+
64
77
/*
65
78
* The exception table consists of pairs of addresses: the first is the
66
79
* address of an instruction that is allowed to fault, and the second is
@@ -368,6 +381,69 @@ do { \
368
381
goto err_label; \
369
382
} while (0)
370
383
384
+ static __must_check __always_inline bool user_access_begin (const void __user * ptr , size_t len )
385
+ {
386
+ if (unlikely (!access_ok (ptr , len )))
387
+ return 0 ;
388
+ __enable_user_access ();
389
+ return 1 ;
390
+ }
391
/* Advertise the accessor pair to the generic uaccess code. */
#define user_access_begin	user_access_begin
#define user_access_end		__disable_user_access

/*
 * Nothing to save or restore around exception entry here: the saved
 * token is always 0 and restore is a no-op.
 */
static inline unsigned long user_access_save(void)
{
	return 0UL;
}

static inline void user_access_restore(unsigned long enabled)
{
}
396
+
397
/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 *
 * unsafe_put_user(): store x to user pointer ptr inside an open
 * user_access_begin()/user_access_end() window; jump to 'label' on
 * fault.  Must be a function-like macro (no space before the parameter
 * list) or the definition silently becomes an object-like macro.
 */
#define unsafe_put_user(x, ptr, label)	do {				\
	long __err = 0;							\
	__put_user_nocheck(x, (ptr), __err);				\
	if (__err)							\
		goto label;						\
} while (0)
407
+
408
/*
 * unsafe_get_user(): load from user pointer ptr into x inside an open
 * user-access window; jump to 'label' on fault.  The value is staged
 * through an __inttype()-sized temporary so the raw load is always a
 * full-width scalar, then narrowed to the destination type.
 */
#define unsafe_get_user(x, ptr, label)	do {				\
	long __err = 0;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_nocheck(__gu_val, (ptr), __err);			\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (__err)							\
		goto label;						\
} while (0)
416
+
417
/*
 * Copy 'len' bytes in sizeof(type)-wide chunks using the unsafe
 * accessor 'op' (unsafe_put_user or unsafe_get_user), jumping to
 * 'label' on fault.  dst, src and len must be lvalues: the loop
 * advances/consumes them so callers can chain progressively narrower
 * chunk sizes over the remainder.
 */
#define unsafe_copy_loop(dst, src, len, type, op, label)		\
	while (len >= sizeof(type)) {					\
		op(*(type *)(src), (type __user *)(dst), label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}
424
+
425
/*
 * Copy _len bytes from kernel _src to user _dst inside an open
 * user-access window, widest chunks first (u64 -> u32 -> u16 -> u8);
 * jump to 'label' on fault.  Arguments are captured in locals so each
 * is evaluated exactly once.
 */
#define unsafe_copy_to_user(_dst, _src, _len, label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, unsafe_put_user, label); \
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, unsafe_put_user, label); \
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, unsafe_put_user, label); \
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, unsafe_put_user, label); \
} while (0)
435
+
436
/*
 * Copy _len bytes from user _src to kernel _dst inside an open
 * user-access window, widest chunks first (u64 -> u32 -> u16 -> u8);
 * jump to 'label' on fault.  Mirror of unsafe_copy_to_user() with the
 * unsafe_get_user accessor (note the swapped src/dst argument order of
 * unsafe_copy_loop: the __user pointer comes first).
 */
#define unsafe_copy_from_user(_dst, _src, _len, label)			\
do {									\
	char *__ucu_dst = (_dst);					\
	const char __user *__ucu_src = (_src);				\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u64, unsafe_get_user, label); \
	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u32, unsafe_get_user, label); \
	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u16, unsafe_get_user, label); \
	unsafe_copy_loop(__ucu_src, __ucu_dst, __ucu_len, u8, unsafe_get_user, label); \
} while (0)
446
+
371
447
#else /* CONFIG_MMU */
372
448
#include <asm-generic/uaccess.h>
373
449
#endif /* CONFIG_MMU */
0 commit comments