@@ -940,6 +940,7 @@ struct wc_swallow_the_semicolon
 #include <wolfssl/wolfcrypt/random.h>
 
 struct wc_linuxkm_drbg_ctx {
+    size_t n_rngs;
     struct wc_rng_inst {
        wolfSSL_Atomic_Int lock;
        WC_RNG rng;
@@ -951,7 +952,7 @@ static inline void wc_linuxkm_drbg_ctx_clear(struct wc_linuxkm_drbg_ctx *ctx)
     unsigned int i;
 
     if (ctx->rngs) {
-        for (i = 0; i < nr_cpu_ids; ++i) {
+        for (i = 0; i < ctx->n_rngs; ++i) {
             if (ctx->rngs[i].lock != 0) {
                 /* better to leak than to crash. */
                 pr_err("BUG: wc_linuxkm_drbg_ctx_clear called with DRBG #%d still locked.", i);
@@ -961,6 +962,7 @@ static inline void wc_linuxkm_drbg_ctx_clear(struct wc_linuxkm_drbg_ctx *ctx)
         }
         free(ctx->rngs);
         ctx->rngs = NULL;
+        ctx->n_rngs = 0;
     }
 
     return;
@@ -976,12 +978,15 @@ static int wc_linuxkm_drbg_init_tfm(struct crypto_tfm *tfm)
     int need_reenable_vec = 0;
     int can_sleep = (preempt_count() == 0);
 
-    ctx->rngs = (struct wc_rng_inst *)malloc(sizeof(*ctx->rngs) * nr_cpu_ids);
-    if (! ctx->rngs)
+    ctx->n_rngs = max(4, nr_cpu_ids);
+    ctx->rngs = (struct wc_rng_inst *)malloc(sizeof(*ctx->rngs) * ctx->n_rngs);
+    if (! ctx->rngs) {
+        ctx->n_rngs = 0;
         return -ENOMEM;
-    XMEMSET(ctx->rngs, 0, sizeof(*ctx->rngs) * nr_cpu_ids);
+    }
+    XMEMSET(ctx->rngs, 0, sizeof(*ctx->rngs) * ctx->n_rngs);
 
-    for (i = 0; i < nr_cpu_ids; ++i) {
+    for (i = 0; i < ctx->n_rngs; ++i) {
         ctx->rngs[i].lock = 0;
         if (wc_linuxkm_drbg_init_tfm_disable_vector_registers)
             need_reenable_vec = (DISABLE_VECTOR_REGISTERS() == 0);
@@ -1015,10 +1020,29 @@ static void wc_linuxkm_drbg_exit_tfm(struct crypto_tfm *tfm)
 
 static int wc_linuxkm_drbg_default_instance_registered = 0;
 
+/* get_drbg() uses atomic operations to get exclusive ownership of a DRBG
+ * without delay.  It expects to be called in uninterruptible context, though
+ * it works fine in any context.  It starts by trying the DRBG matching the
+ * current CPU ID, and if that doesn't immediately succeed, it iterates upward
+ * until one succeeds.  The first attempt will always succeed, even under
+ * intense load, unless there is or has recently been a reseed or mix-in
+ * operation competing with the generators.
+ *
+ * Note that wc_linuxkm_drbg_init_tfm() allocates at least 4 DRBGs, regardless
+ * of nominal core count, to avoid stalling generators on unicore targets.
+ */
+
 static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
     struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
     int n, new_lock_value;
 
+    /* check for a mismatched handler or missing instance array. */
+    if ((tfm->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) ||
+        (ctx->rngs == NULL))
+    {
+        return NULL;
+    }
+
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
     if (tfm == crypto_default_rng) {
@@ -1041,20 +1065,28 @@ static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
         if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, new_lock_value, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
             return &ctx->rngs[n];
         ++n;
-        if (n >= (int)nr_cpu_ids)
+        if (n >= (int)ctx->n_rngs)
             n = 0;
         cpu_relax();
     }
 
     __builtin_unreachable();
}
 
+/* get_drbg_n() is used by bulk seed, mix-in, and reseed operations.  It expects
+ * the caller to be able to wait until the requested DRBG is available.
+ */
static inline struct wc_rng_inst *get_drbg_n(struct wc_linuxkm_drbg_ctx *ctx, int n) {
+    int can_sleep = (preempt_count() == 0);
+
     for (;;) {
         int expected = 0;
         if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, 1, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
             return &ctx->rngs[n];
-        cpu_relax();
+        if (can_sleep)
+            cond_resched();
+        else
+            cpu_relax();
     }
 
}
@@ -1078,17 +1110,18 @@ static int wc_linuxkm_drbg_generate(struct crypto_rng *tfm,
                                     u8 *dst, unsigned int dlen)
{
     int ret, retried = 0;
-    /* Note, core is not necessarily locked on entry, so the actual core ID may
-     * change while executing, hence the lock.
-     *
-     * The lock is also needed to coordinate with wc_linuxkm_drbg_seed(), which
-     * seeds all instances.
-     */
+    int need_fpu_restore;
     struct wc_rng_inst *drbg = get_drbg(tfm);
+
+    if (! drbg) {
+        pr_err_once("BUG: get_drbg() failed.");
+        return -EFAULT;
+    }
+
     /* for the default RNG, make sure we don't cache an underlying SHA256
      * method that uses vector insns (forbidden from irq handlers).
      */
-    int need_fpu_restore = (tfm == crypto_default_rng) ? (DISABLE_VECTOR_REGISTERS() == 0) : 0;
+    need_fpu_restore = (tfm == crypto_default_rng) ? (DISABLE_VECTOR_REGISTERS() == 0) : 0;
 
retry:
 
@@ -1138,6 +1171,13 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
     int ret;
     int n;
 
+    if ((tfm->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) ||
+        (ctx->rngs == NULL))
+    {
+        pr_err_once("BUG: mismatched tfm.");
+        return -EFAULT;
+    }
+
     if (slen == 0)
         return 0;
 
@@ -1146,7 +1186,10 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
         return -ENOMEM;
     XMEMCPY(seed_copy + 2, seed, slen);
 
-    for (n = nr_cpu_ids - 1; n >= 0; --n) {
+    /* this iteration counts down, whereas the iteration in get_drbg() counts
+     * up, to ensure they can't phase-lock to each other.
+     */
+    for (n = ctx->n_rngs - 1; n >= 0; --n) {
         struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
 
         /* perturb the seed with the CPU ID, so that no DRBG has the exact same
@@ -1249,7 +1292,13 @@ static inline struct crypto_rng *get_crypto_default_rng(void) {
 
static inline struct wc_linuxkm_drbg_ctx *get_default_drbg_ctx(void) {
     struct crypto_rng *current_crypto_default_rng = get_crypto_default_rng();
-    return current_crypto_default_rng ? (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(current_crypto_default_rng) : NULL;
+    struct wc_linuxkm_drbg_ctx *ctx = (current_crypto_default_rng ? (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(current_crypto_default_rng) : NULL);
+    if (ctx && (! ctx->rngs)) {
+        pr_err_once("BUG: get_default_drbg_ctx() found null ctx->rngs.");
+        return NULL;
+    }
+    else
+        return ctx;
}
 
static int wc__get_random_bytes(void *buf, size_t len)
@@ -1259,8 +1308,9 @@ static int wc__get_random_bytes(void *buf, size_t len)
         return -EFAULT;
     else {
         int ret = crypto_rng_get_bytes(current_crypto_default_rng, buf, len);
-        if (ret)
+        if (ret) {
             pr_warn("BUG: wc_get_random_bytes falling through to native get_random_bytes with wc_linuxkm_drbg_default_instance_registered, ret=%d.", ret);
+        }
         return ret;
     }
     __builtin_unreachable();
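Aside: the wc__get_random_bytes() fall-through above only makes sense once the wolfCrypt DRBG has been registered as the kernel's default RNG (crypto_default_rng). As a rough illustration of an ordinary consumer of that registration, here is a hypothetical throwaway module using only the stock <crypto/rng.h> API; none of these names come from the patch itself.

/* Toy consumer of the kernel's default crypto RNG (hypothetical module). */
#include <crypto/rng.h>
#include <linux/module.h>

static int __init wc_drbg_demo_init(void)
{
    u8 buf[16];
    int ret = crypto_get_default_rng();   /* take a reference on crypto_default_rng */

    if (ret)
        return ret;
    ret = crypto_rng_get_bytes(crypto_default_rng, buf, sizeof(buf));
    crypto_put_default_rng();             /* drop the reference */
    if (ret == 0)
        pr_info("wc_drbg_demo: got %zu random bytes\n", sizeof(buf));
    return ret;
}

static void __exit wc_drbg_demo_exit(void)
{
}

module_init(wc_drbg_demo_init);
module_exit(wc_drbg_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("toy consumer of the default crypto RNG");

crypto_get_default_rng() pins the highest-priority "stdrng" implementation, so with the wolfCrypt DRBG registered at high priority the bytes come from one of the per-CPU instances managed by the code above.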
@@ -1382,7 +1432,7 @@ static int wc_mix_pool_bytes(const void *buf, size_t len) {
     if (! (ctx = get_default_drbg_ctx()))
         return -EFAULT;
 
-    for (n = nr_cpu_ids - 1; n >= 0; --n) {
+    for (n = ctx->n_rngs - 1; n >= 0; --n) {
         struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
         int V_offset = 0;
 
@@ -1406,7 +1456,7 @@ static int wc_crng_reseed(void) {
     if (! ctx)
         return -EFAULT;
 
-    for (n = nr_cpu_ids - 1; n >= 0; --n) {
+    for (n = ctx->n_rngs - 1; n >= 0; --n) {
         struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
         ((struct DRBG_internal *)drbg->rng.drbg)->reseedCtr = WC_RESEED_INTERVAL;
         if (can_sleep) {
@@ -1786,6 +1836,7 @@ static int wc_linuxkm_drbg_startup(void)
     }
     else {
         pr_err("ERROR: wolfssl_linuxkm_register_random_bytes_handlers() failed: %d\n", ret);
+        return ret;
     }
 
#elif defined(WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES)
@@ -1797,6 +1848,7 @@ static int wc_linuxkm_drbg_startup(void)
     }
     else {
         pr_err("ERROR: wc_get_random_bytes_kprobe installation failed: %d\n", ret);
+        return ret;
     }
 
#ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE
@@ -1807,6 +1859,7 @@ static int wc_linuxkm_drbg_startup(void)
     }
     else {
         pr_err("ERROR: wc_get_random_bytes_user_kprobe installation failed: %d\n", ret);
+        return ret;
     }
#endif /* WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE */
 