@@ -781,27 +781,55 @@ static int __init parse_trust_cpu(char *arg)
 }
 early_param("random.trust_cpu", parse_trust_cpu);
 
-static void crng_initialize(struct crng_state *crng)
+static bool crng_init_try_arch(struct crng_state *crng)
 {
 	int i;
-	int arch_init = 1;
+	bool arch_init = true;
 	unsigned long rv;
 
-	memcpy(&crng->state[0], "expand 32-byte k", 16);
-	if (crng == &primary_crng)
-		_extract_entropy(&input_pool, &crng->state[4],
-				 sizeof(__u32) * 12, 0);
-	else
-		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 	for (i = 4; i < 16; i++) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv)) {
 			rv = random_get_entropy();
-			arch_init = 0;
+			arch_init = false;
+		}
+		crng->state[i] ^= rv;
+	}
+
+	return arch_init;
+}
+
+static bool __init crng_init_try_arch_early(struct crng_state *crng)
+{
+	int i;
+	bool arch_init = true;
+	unsigned long rv;
+
+	for (i = 4; i < 16; i++) {
+		if (!arch_get_random_seed_long_early(&rv) &&
+		    !arch_get_random_long_early(&rv)) {
+			rv = random_get_entropy();
+			arch_init = false;
 		}
 		crng->state[i] ^= rv;
 	}
-	if (trust_cpu && arch_init && crng == &primary_crng) {
+
+	return arch_init;
+}
+
+static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+{
+	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+	crng_init_try_arch(crng);
+	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static void __init crng_initialize_primary(struct crng_state *crng)
+{
+	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+	if (crng_init_try_arch_early(crng) && trust_cpu) {
 		invalidate_batched_entropy();
 		numa_crng_init();
 		crng_init = 2;
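
Note: the split works because the arch-entropy loop is now shared between a regular and an early variant. crng_init_try_arch_early() depends on arch_get_random_seed_long_early()/arch_get_random_long_early(), which were introduced alongside this change; quoted from memory rather than from this diff, the generic fallbacks in include/linux/random.h look roughly like:

	#ifndef arch_get_random_seed_long_early
	static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
	{
		WARN_ON(system_state != SYSTEM_BOOTING);
		return arch_get_random_seed_long(v);
	}
	#endif

	#ifndef arch_get_random_long_early
	static inline bool __init arch_get_random_long_early(unsigned long *v)
	{
		WARN_ON(system_state != SYSTEM_BOOTING);
		return arch_get_random_long(v);
	}
	#endif

Architectures whose arch_get_random_*() helpers are not usable this early in boot (e.g. arm64, where they sit behind runtime-patched alternatives) can override the _early variants; everyone else falls through to the regular ones.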
@@ -822,7 +850,7 @@ static void do_numa_crng_init(struct work_struct *work)
 		crng = kmalloc_node(sizeof(struct crng_state),
 				    GFP_KERNEL | __GFP_NOFAIL, i);
 		spin_lock_init(&crng->lock);
-		crng_initialize(crng);
+		crng_initialize_secondary(crng);
 		pool[i] = crng;
 	}
 	mb();
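
Note: crng_initialize_secondary() deliberately backdates crng->init_time, so each NUMA-node crng is reseeded from the primary the first time it is used. The consumer-side check elsewhere in this file (quoted loosely from memory, not part of this diff) is along the lines of:

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);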
@@ -1142,14 +1170,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	 * We take into account the first, second and third-order deltas
 	 * in order to make our estimate.
 	 */
-	delta = sample.jiffies - state->last_time;
-	state->last_time = sample.jiffies;
+	delta = sample.jiffies - READ_ONCE(state->last_time);
+	WRITE_ONCE(state->last_time, sample.jiffies);
 
-	delta2 = delta - state->last_delta;
-	state->last_delta = delta;
+	delta2 = delta - READ_ONCE(state->last_delta);
+	WRITE_ONCE(state->last_delta, delta);
 
-	delta3 = delta2 - state->last_delta2;
-	state->last_delta2 = delta2;
+	delta3 = delta2 - READ_ONCE(state->last_delta2);
+	WRITE_ONCE(state->last_delta2, delta2);
 
 	if (delta < 0)
 		delta = -delta;
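
Note: add_timer_randomness() can run concurrently on multiple CPUs for the same timer_rand_state without a lock. The races on last_time/last_delta/last_delta2 are benign for the entropy estimate, but plain loads and stores permit load/store tearing and are flagged by KCSAN; READ_ONCE()/WRITE_ONCE() force single whole-word accesses and document the race as intentional.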
@@ -1771,7 +1799,7 @@ static void __init init_std_data(struct entropy_store *r)
 int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
-	crng_initialize(&primary_crng);
+	crng_initialize_primary(&primary_crng);
 	crng_global_init_time = jiffies;
 	if (ratelimit_disable) {
 		urandom_warning.interval = 0;
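
Note: rand_initialize() runs once from start_kernel() during early boot (if memory serves), before secondary CPUs and workqueues are up, which is why the primary path may be __init and use the _early arch helpers, while crng_initialize_secondary(), reachable later from the NUMA init worker, may not.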
@@ -2149,11 +2177,11 @@ struct batched_entropy {
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
  * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
 	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
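
Note: "no backtrack protection" refers to the batching declared above: a full ChaCha block is generated once and handed out a word at a time, so the unconsumed remainder of the block (and hence values already returned) stays in memory until the batch is drained, unlike the extract_crng()/crng_backtrack_protect() path behind /dev/urandom reads. For context, the per-CPU batch (quoted roughly from the same file) looks like:

	struct batched_entropy {
		union {
			u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
			u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
		};
		unsigned int position;
		spinlock_t batch_lock;
	};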
@@ -2166,15 +2194,6 @@ u64 get_random_u64(void)
 	struct batched_entropy *batch;
 	static void *previous;
 
-#if BITS_PER_LONG == 64
-	if (arch_get_random_long((unsigned long *)&ret))
-		return ret;
-#else
-	if (arch_get_random_long((unsigned long *)&ret) &&
-	    arch_get_random_long((unsigned long *)&ret + 1))
-		return ret;
-#endif
-
 	warn_unseeded_randomness(&previous);
 
 	batch = raw_cpu_ptr(&batched_entropy_u64);
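
Note: with the direct RDRAND-style fast path removed, every get_random_u64() call now draws from the per-CPU ChaCha batch; arch randomness still contributes, but only as seed material for the crng. The surviving body continues roughly as follows (paraphrased from the same file, not part of this diff):

	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;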
@@ -2199,9 +2218,6 @@ u32 get_random_u32(void)
 	struct batched_entropy *batch;
 	static void *previous;
 
-	if (arch_get_random_int(&ret))
-		return ret;
-
 	warn_unseeded_randomness(&previous);
 
 	batch = raw_cpu_ptr(&batched_entropy_u32);